diff --git a/.circleci/config.yml b/.circleci/config.yml index b4bd8bfa..dd58d595 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,7 +2,7 @@ version: 2 jobs: build: docker: - - image: circleci/golang:1.12 + - image: circleci/golang:1.16 steps: - checkout @@ -10,4 +10,4 @@ jobs: - run: make tools && make zip - store_artifacts: - path: artifacts/zips + path: build/zips diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 91c7b76e..4c1fb402 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -7,7 +7,7 @@ about: Create a report to help us improve ### Prerequisites * [ ] I am running the [latest version](https://github.com/dan-v/rattlesnakeos-stack/releases) of rattlesnakeos-stack -* [ ] I am able to reproduce this issue without any advanced customization options +* [ ] I am able to reproduce this issue without any customization options ### Description @@ -31,8 +31,6 @@ You can mask stack name and email from here. ``` chromium-version = "" device = "crosshatch" -encrypted-keys = true -ignore-version-checks = false instance-regions = "us-west-2,us-west-1,us-east-1,us-east-2" instance-type = "c5.4xlarge" max-price = "1.00" @@ -47,7 +45,7 @@ ssh-key = "rattlesnakeos" e.g. T-mobile, etc ### Email Notification Output -Should look something like this. You can mask stack name from here and you may want to verify contents of log output before pasting. +Should look something like this. You can mask stack name from here, and you may want to verify contents of log output before pasting. ``` RattlesnakeOS build FAILED Device: crosshatch diff --git a/.gitignore b/.gitignore index 8e3470be..fcabbdf8 100644 --- a/.gitignore +++ b/.gitignore @@ -30,9 +30,11 @@ # End of https://www.gitignore.io/api/go,terraform -artifacts +build rattlesnakeos-stack .vscode/ *.toml .DS_Store .idea/ +output*/ +venv/ diff --git a/AOSP_VERSION b/AOSP_VERSION new file mode 100644 index 00000000..3b5b5d8f --- /dev/null +++ b/AOSP_VERSION @@ -0,0 +1 @@ +11.0 \ No newline at end of file diff --git a/FLASHING.md b/FLASHING.md index 197b28ff..a853b7a4 100644 --- a/FLASHING.md +++ b/FLASHING.md @@ -63,11 +63,6 @@ Therefore, the public key needs to be set before locking the bootloader again. F ``` aws s3 cp s3://-keys/crosshatch/avb_pkmd.bin . ``` -Or if you used the `encrypted-keys` option, you will need to download the encrypted key and decrypt it. -``` -aws s3 cp s3://-keys-encrypted/crosshatch/avb_pkmd.bin.gpg . -gpg -d avb_pkmd.bin.gpg > avb_pkmd.bin -``` Now use fastboot to flash the avb_custom_key ``` diff --git a/Makefile b/Makefile index afe81f24..54cc9888 100644 --- a/Makefile +++ b/Makefile @@ -6,29 +6,22 @@ VERSION := $(shell cat VERSION) OS := darwin linux windows ARCH := amd64 +PKGS := $(shell go list ./internal/... ./cmd...) .PHONY: \ help \ - default \ clean \ - clean-artifacts \ - clean-vendor \ tools \ deps \ test \ - coverage \ vet \ - errors \ lint \ fmt \ - env \ build \ build-all \ - doc \ - check \ version -all: fmt lint vet build-all +all: fmt lint vet shellcheck test build-all help: @echo 'Usage: make ... ' @@ -37,39 +30,22 @@ help: @echo '' @echo ' help Show this help screen.' @echo ' clean Remove binaries, artifacts and releases.' - @echo ' clean-artifacts Remove build artifacts only.' - @echo ' clean-vendor Remove content of the vendor directory.' @echo ' tools Install tools needed by the project.' @echo ' deps Download and install build time dependencies.' @echo ' test Run unit tests.' 
- @echo ' coverage Report code tests coverage.' @echo ' vet Run go vet.' @echo ' lint Run golint.' @echo ' fmt Run go fmt.' @echo ' env Display Go environment.' @echo ' build Build project for current platform.' @echo ' build-all Build project for all supported platforms.' - @echo ' doc Start Go documentation server on port 8080.' - @echo ' check Verify compiled binary.' - @echo ' version Display Go version.' - @echo '' - @echo 'Targets run by default are: imports, fmt, lint, vet, errors and build.' @echo '' print-%: @echo $* = $($*) -clean: clean-artifacts - go clean -i ./... - rm -vf $(CURDIR)/coverage.* - -clean-artifacts: - rm -Rf artifacts - -clean-vendor: - find $(CURDIR)/vendor -type d -print0 2>/dev/null | xargs -0 rm -Rf - -clean-all: clean clean-artifacts clean-vendor +clean: + rm -Rf build tools: go get golang.org/x/lint/golint @@ -79,60 +55,37 @@ tools: go get github.com/mitchellh/gox deps: - dep ensure + go mod tidy test: - go test -v ./... - -coverage: - gocov test ./... > $(CURDIR)/coverage.out 2>/dev/null - gocov report $(CURDIR)/coverage.out - if test -z "$$CI"; then \ - gocov-html $(CURDIR)/coverage.out > $(CURDIR)/coverage.html; \ - if which open &>/dev/null; then \ - open $(CURDIR)/coverage.html; \ - fi; \ - fi + go test ${PKGS} vet: - go vet -v ./... + go vet ${PKGS} lint: - golint $(go list ./... | grep -v /vendor/) + golint ${PKGS} + golangci-lint run cmd/... internal/... || true fmt: - go fmt ./... + go fmt ${PKGS} -env: - @go env +shellcheck: + shellcheck --severity=warning templates/build.sh || true build: go build -race -ldflags "-X github.com/dan-v/rattlesnakeos-stack/cli.version=$(VERSION)" -v -o "$(TARGET)" . build-all: - mkdir -v -p $(CURDIR)/artifacts/$(VERSION) + mkdir -v -p $(CURDIR)/build/$(VERSION) gox -verbose -ldflags "-X github.com/dan-v/rattlesnakeos-stack/cli.version=$(VERSION)" \ -os "$(OS)" -arch "$(ARCH)" \ - -output "$(CURDIR)/artifacts/$(VERSION)/{{.OS}}/$(TARGET)" . + -output "$(CURDIR)/build/$(VERSION)/{{.OS}}/$(TARGET)" . cp -v -f \ - $(CURDIR)/artifacts/$(VERSION)/$$(go env GOOS)/$(TARGET) . - -doc: - godoc -http=:8080 -index - -check: - @test -x $(CURDIR)/$(TARGET) || exit 1 - if $(CURDIR)/$(TARGET) --version | grep -qF '$(VERSION)'; then \ - echo "$(CURDIR)/$(TARGET): OK"; \ - else \ - exit 1; \ - fi - -version: - @go version + $(CURDIR)/build/$(VERSION)/$$(go env GOOS)/$(TARGET) . zip: all - mkdir -p artifacts/zips - pushd artifacts/$(VERSION)/darwin && zip -r ../../../artifacts/zips/rattlesnakeos-stack-osx-${VERSION}.zip $(TARGET) && popd - pushd artifacts/$(VERSION)/windows && zip -r ../../../artifacts/zips/rattlesnakeos-stack-windows-${VERSION}.zip $(TARGET).exe && popd - pushd artifacts/$(VERSION)/linux && zip -r ../../../artifacts/zips/rattlesnakeos-stack-linux-${VERSION}.zip $(TARGET) && popd \ No newline at end of file + mkdir -p build/zips + pushd build/$(VERSION)/darwin && zip -r ../../../build/zips/rattlesnakeos-stack-osx-${VERSION}.zip $(TARGET) && popd + pushd build/$(VERSION)/windows && zip -r ../../../build/zips/rattlesnakeos-stack-windows-${VERSION}.zip $(TARGET).exe && popd + pushd build/$(VERSION)/linux && zip -r ../../../build/zips/rattlesnakeos-stack-linux-${VERSION}.zip $(TARGET) && popd \ No newline at end of file diff --git a/README.md b/README.md index 6ef5f9eb..aa91d613 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,17 @@ -RattlesnakeOS is a privacy and security focused Android OS for Google Pixel phones. 
+Build your own customized Android OS for Google Pixel phones using [AWS](https://aws.amazon.com/) cloud infrastructure. The default OS that this tool builds without any customizations is called `RattlesnakeOS`. If there is something you don't like about the default OS, you can add customizations on top of it or start with a completely blank slate and build your own OS. ## Features +* Support for Google Pixel phones * Based on latest [AOSP](https://source.android.com/) 11.0 -* Support for Google Pixel 2-4(a) (XL) -* Monthly software and firmware security fixes delivered through built in OTA updater -* [Verified boot](https://source.android.com/security/verifiedboot/) with a locked bootloader just like official Android but with your own personal signing keys -* Latest stable Chromium [browser](https://www.chromium.org) and [webview](https://www.chromium.org/developers/how-tos/build-instructions-android-webview) -* Latest stable [F-Droid](https://f-droid.org/) app store and [privileged extension](https://gitlab.com/fdroid/privileged-extension) -* Free of Google’s apps and services -* Advanced build customization options +* Software and firmware security updates delivered through built-in OTA updater +* Maintains [verified boot](https://source.android.com/security/verifiedboot/) with a locked bootloader just like official Android but with your own personal signing keys +* Support for building latest stable Chromium [browser](https://www.chromium.org) and [webview](https://www.chromium.org/developers/how-tos/build-instructions-android-webview) +* Support for custom OS builds +## Overview +The default OS built by this tool, `RattlesnakeOS`, is just stock AOSP and has all the baseline privacy and security features from there. Unlike other alternative Android OSes, it aims to keep security on par with stock Android by keeping critical security features like verified boot enabled and ensuring monthly OTA security updates not only update the OS but also the device specific drivers and firmware. -## Background -RattlesnakeOS is truly just stock AOSP and has all of the baseline privacy and security features from there. Unlike other alternative Android OSes, it aims to keep security on par with stock Android by keeping critical security features like verified boot enabled, ensuring monthly OTA security updates not only update the OS but also the device specific drivers and firmware, and by not adding additional features or software that will needlessly increase attack surface. By not deviating from stock AOSP, updating to new major Android releases doesn't require any major porting effort and this means devices running RattlesnakeOS continue to receive proper security updates without delay. - -## What is rattlesnakeos-stack? -Rather than providing random binaries of RattlesnakeOS to install on your phone, I've gone the route of creating a cross platform tool, `rattlesnakeos-stack`, that provisions a "stack", which is just all of the [AWS](https://aws.amazon.com/) cloud infrastructure needed to continuously build your own personal RattlesnakeOS, with your own signing keys, and your own OTA updates. It uses [AWS Lambda](https://aws.amazon.com/lambda/features/) to provision [EC2 spot instances](https://aws.amazon.com/ec2/spot/) that build RattlesnakeOS and upload artifacts to [S3](https://aws.amazon.com/s3/). Resulting OS builds are configured to receive over the air updates from this environment. It only costs a few dollars a month to run (see FAQ for detailed cost breakdown).
+Rather than providing random binaries of an Android OS to install on your phone, I've gone the route of creating a cross platform tool, `rattlesnakeos-stack`, that provisions a "stack", which is just all the AWS cloud infrastructure needed to continuously build your own personal Android OS, with your own signing keys, and your own OTA updates. It uses [AWS Lambda](https://aws.amazon.com/lambda/features/) to provision [EC2 spot instances](https://aws.amazon.com/ec2/spot/) that build the OS and upload artifacts to [S3](https://aws.amazon.com/s3/). Resulting OS builds are configured to receive over the air updates from this environment. It only costs a few dollars a month to run (see FAQ for detailed cost breakdown). ![](/images/overview.png?raw=true) @@ -26,13 +22,12 @@ Rather than providing random binaries of RattlesnakeOS to install on your phone, * [Deployment](#deployment) * [Default Examples](#default-examples) * [Advanced Examples](#advanced-examples) - * [All Options](#all-options) * [First Time Setup After Deployment](#first-time-setup-after-deployment) + * [Customizations](#customizations) * [FAQ](#faq) * [General](#general) * [Costs](#costs) * [Builds](#builds) - * [Customizations](#customizations) * [Security](#security) * [Uninstalling](#uninstalling) @@ -60,8 +55,8 @@ The rattlesnakeos-stack `config` subcommand should be run first to initialize a ```none ./rattlesnakeos-stack config -Device is the device codename (e.g. sunfish). Supported devices: walleye (Pixel 2), taimen (Pixel 2 XL), blueline (Pixel 3), crosshatch (Pixel 3 XL), sargo (Pixel 3a), bonito (Pixel 3a XL) -device: taimen +Device is the device codename (e.g. sunfish). +device: sunfish Stack name is used as an identifier for all the AWS components that get deployed. THIS NAME MUST BE UNIQUE OR DEPLOYMENT WILL FAIL. Stack name: @@ -79,7 +74,7 @@ INFO[0005] rattlesnakeos-stack config file has been written to /Users/username/. ``` ## Deployment -The rattlesnakeos-stack `deploy` subcommand handles deploying (and updating) your stack. After stack deployment, you will need to manually start a build. By default it is configured to automatically build once a month on the 10th of the month so that monthly security updates can be picked up and built without the need for manual builds. Anytime you make a config change, you will first need to deploy those changes using this command before starting a new build. +The rattlesnakeos-stack `deploy` subcommand handles deploying (and updating) your stack. After stack deployment, you will need to manually start a build. By default, it is configured to automatically build once a month on the 10th of the month so that monthly security updates can be picked up and built without the need for manual builds. Anytime you make a config change, you will first need to deploy those changes using this command before starting a new build. #### Default Examples Deploy stack using default generated config file: @@ -91,8 +86,6 @@ INFO[0000] Current settings: chromium-version: "" device: taimen email: user@domain.com -encrypted-keys: false -ignore-version-checks: false hosts-file: "" instance-regions: us-west-2,us-west-1,us-east-2 instance-type: c5.4xlarge @@ -124,62 +117,27 @@ Or you can specify a different config file to use ... ``` +To see full list of options you can run `rattlesnakeos-stack deploy -h`. These flags can also be set as config values in the config file. 
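For illustration, here is a minimal sketch of the two equivalent ways to set an option; the stack name and instance type below are placeholder values, and `$HOME/.rattlesnakeos.toml` is the default config file location.

```sh
# one-off: pass options as flags to the deploy subcommand
./rattlesnakeos-stack deploy --name examplestack --region us-west-2 --instance-type c5.24xlarge

# persistent: put the equivalent keys in the config file instead
# (default location: $HOME/.rattlesnakeos.toml)
#   name = "examplestack"
#   region = "us-west-2"
#   instance-type = "c5.24xlarge"
./rattlesnakeos-stack deploy
```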
+ #### Advanced Examples -Here is an example of a more advanced config file that: locks to a specific version of Chromium, specifies a hosts file to install, uses a larger EC2 instance type, builds every 2 days, and pulls in custom patches from the [community patches repo](https://github.com/RattlesnakeOS/community_patches). You can read more about [advanced customization options in FAQ](#customizations). +Here is an example of a more advanced config file that: disables chromium build (warning: if you do this - you should provide your own up to date webview), disables scheduled monthly builds, specifies a custom configuration repo (more on that in customization section), and uses a much larger c5.24xlarge instance type. ```toml -chromium-version = "80.0.3971.4" -device = "crosshatch" -email = "user@domain.com" -encrypted-keys = "false" -ignore-version-checks = false -hosts-file = "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" +chromium-build-disabled = true +chromium-version = "" +cloud = "aws" +core-config-repo = "https://github.com/rattlesnakeos/core" +custom-config-repo = "https://github.com/myrepo/custom" +device = "sunfish" +email = "dan@vittegleo.com" instance-regions = "us-west-2,us-west-1,us-east-2" -instance-type = "c5.18xlarge" -max-price = "1.00" -name = "" -region = "us-west-2" -schedule = "rate(2 days)" -skip-price = "1.00" +instance-type = "c5.24xlarge" +latest-url = "https://raw.githubusercontent.com/RattlesnakeOS/latest/11.0/latest.json" +max-price = "5.00" +name = "sunfish-cyoydyw3j2" +region = "us-east-2" +schedule = "" +skip-price = "5.00" ssh-key = "rattlesnakeos" - -[[custom-patches]] - patches = [ - "00001-global-internet-permission-toggle.patch", - ] - repo = "https://github.com/RattlesnakeOS/community_patches" - branch = "11.0" -``` - -#### All Options -To see full list of options you can pass rattlesnakeos-stack you can use the help flag (-h). These flags can also be set as config values in the config file. - -```none -... - -Usage: - rattlesnakeos-stack deploy [flags] - -Flags: - --chromium-version string specify the version of Chromium you want (e.g. 69.0.3497.100) to pin to. if not specified, the latest stable version of Chromium is used. - -d, --device string device you want to build for (e.g. crosshatch): to list supported devices use '-d list' - -e, --email string email address you want to use for build notifications - --encrypted-keys an advanced option that allows signing keys to be stored with symmetric gpg encryption and decrypted into memory during the build process. this option requires manual intervention during builds where you will be sent a notification and need to provide the key required for decryption over SSH to continue the build process. important: if you have an existing stack - please see the FAQ for how to migrate your keys - -h, --help help for deploy - --hosts-file string an advanced option that allows you to specify a replacement /etc/hosts file to enable global dns adblocking (e.g. https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts). note: be careful with this, as you 1) won't get any sort of notification on blocking 2) if you need to unblock something you'll have to rebuild the OS - --ignore-version-checks ignore the versions checks for stack, AOSP, Chromium, and F-Droid and always do a build. - --instance-regions string possible regions to launch spot instance. the region with cheapest spot instance price will be used. 
(default "us-west-2,us-west-1,us-east-2") - --instance-type string EC2 instance type (e.g. c4.4xlarge) to use for the build. (default "c5.4xlarge") - --max-price string max ec2 spot instance bid. if this value is too low, you may not obtain an instance or it may terminate during a build. (default "1.00") - -n, --name string name for stack. note: this must be a valid/unique S3 bucket name. - --prevent-shutdown for debugging purposes only - will prevent ec2 instance from shutting down after build. - -r, --region string aws region for stack deployment (e.g. us-west-2) - --save-config allows you to save all passed CLI flags to config file - --schedule string cron expression that defines when to kick off builds. by default this is set to build on the 10th of every month. note: if you give an invalid expression it will fail to deploy the stack. see this for cron format details: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions (default "cron(0 0 10 * ? *)") - --skip-price string skip requesting ec2 spot instance if price is above this value to begin with. (default "0.68") - --ssh-key string aws ssh key to add to ec2 spot instances. this is optional but is useful for debugging build issues on the instance. - -Global Flags: - --config-file string config file (default location to look for config is $HOME/.rattlesnakeos.toml) ``` ## First Time Setup After Deployment @@ -196,58 +154,70 @@ Global Flags: ./rattlesnakeos-stack build list ``` -* The initial build will likely take 5+ hours to complete. Looking at the EC2 instance metrics like CPU, etc is NOT a good way to determine if the build is progressing. See the FAQ for details on how to monitor live build progress. -* After the build finishes, a factory image should be uploaded to the S3 release bucket that you can download. Be sure to replace the command below with your stack name and your device name (e.g. taimen). +* The initial build will likely take 7+ hours to complete. Looking at the EC2 instance metrics like CPU, etc is NOT a good way to determine if the build is progressing. See the FAQ for details on how to monitor live build progress. +* After the build finishes, a factory image should be uploaded to the S3 release bucket that you can download. Be sure to replace the command below with your stack name and your device name (e.g. sunfish). ```sh - aws s3 cp s3://-release/-factory-latest.tar.xz . + aws s3 cp s3://-release/-factory-latest.zip . ``` * Use this factory image and [follow the instructions on flashing your device carefully](FLASHING.md). -* After successfully flashing your device, you will now be running RattlesnakeOS and all future updates will happen through the built in OTA updater. -* I HIGHLY suggest backing up your generated signing keys and storing them somewhere safe. To backup your signing keys: +* After successfully flashing your device, you will now be running RattlesnakeOS and all future updates will happen through the built-in OTA updater. +* I HIGHLY suggest backing up your generated signing keys and storing them somewhere safe. To back up your signing keys: ```sh aws s3 sync s3://-keys/ . - # or if you are using encrypted keys - aws s3 sync s3://-keys-encrypted/ . ``` +## Customizations +It is possible to customize OS builds to your liking by specifying a custom config repo with the config option `custom-config-repo = "https://github.com/yourrepo/name"`. This git repo needs to adhere to a specific format that will be covered below. 
+ +IMPORTANT: using any Git repo here that is not in your control is a security risk, as you are giving control of your build process to the owner of the repo. They could steal your signing keys, inject malicious code, etc. + +### Custom Config Repo format +The custom config git repo needs to be laid out in a specific format to work with the build process. An example repo can be found here: https://github.com/RattlesnakeOS/example-custom-config-repo. The directory structure looks like this: +``` +hooks/ +local_manifests/ +vendor/ +``` +* `hooks` - this directory can contain shell scripts that can hook into the build process at various steps along the way. There are `pre` and `post` entry points. The shell scripts need to be named after the hook point with a `_pre.sh` or `_post.sh` suffix (e.g. aosp_build_pre.sh). Right now these hook scripts are sourced in a subshell, so all environment variables from the core build script are available to these hooks (e.g. AOSP_BUILD_DIR, etc.), but it's best to limit environment dependencies, as backwards compatibility is not guaranteed as the core build script changes. +* `local_manifests` - this is a directory for local AOSP manifests to be placed. These manifests will be synced to the AOSP build tree. +* `vendor` - this is a place to override vendor configuration. You can make use of the support for AOSP overlays to easily modify configuration settings. Under the `vendor` directory, there needs to be a mk file at `config/main.mk`. + ## FAQ ### General #### Should I use rattlesnakeos-stack? Use this at your own risk. #### Where can I get help, ask questions, keep up to date on development? * For general questions and keeping up to date, use subreddit [/r/RattlesnakeOS](https://www.reddit.com/r/RattlesnakeOS/) -* If you run into any issues with rattlesnakeos-stack, please [file an issue or feature request on Github](https://github.com/dan-v/rattlesnakeos-stack/issues) and provide all of the requested information in the issue template. +* If you run into any issues with rattlesnakeos-stack, please [file an issue or feature request on Github](https://github.com/dan-v/rattlesnakeos-stack/issues) and provide all the requested information in the issue template. #### How do I update rattlesnakeos-stack? Just download the new version of rattlesnakeos-stack and run deploy again (e.g. ./rattlesnakeos-stack deploy) #### How do OTA updates work? -If you go to `Settings -> System -> Advanced (to expand) -> System update settings`, you'll see the updater app settings. The updater app will check S3 to see if there are updates and if it finds one will download and apply it your device. There is no progress indicator unfortunately - you'll just got a notification when it's done and it will ask you to reboot. If you want to force a check for OTA updates, you can toggle the `Require battery above warning level` setting and it will check for a new build in your S3 bucket. +If you go to `Settings -> System -> Advanced (to expand) -> System update settings`, you'll see the updater app settings. The updater app will check S3 to see if there are updates and if it finds one will download and apply it to your device. #### What network carriers are supported? I only have access to a single device and carrier to test this on, so I can't make any promises about it working with your specific carrier. Confirmed working: T-Mobile, Rogers, Cricket, Ting. Likely not to work: Sprint (has requirements about specific carrier app being on phone to work), Project Fi. #### Why is this project so closely tied to AWS?
-Building AOSP and Chromium requires a fairly powerful server, which is not something everyone readily has access to. Using a cloud provider allows you to spin up compute resources capable of building these projects and only paying for the time you use them. It could really be any cloud provider, but just happened to choose AWS. There are pros and cons to building AOSP in the cloud. On the positive side, cloud providers allow you to easily write automation that can spin up and down resources as needed which allows rattlesnakeos-stack to automate the entire process of building an Android OS and distributing OTA updates. On the downsides, for those that are very security conscious, they may be wary of building an OS on shared cloud resources. You can checkout the [security section of the FAQ](#security) for more details on this. +Building AOSP and Chromium requires a fairly powerful server, which is not something everyone readily has access to. Using a cloud provider allows you to spin up compute resources capable of building these projects and only paying for the time you use them. It could really be any cloud provider, but just happened to choose AWS. There are pros and cons to building AOSP in the cloud. On the positive side, cloud providers allow you to easily write automation that can spin up and down resources as needed which allows rattlesnakeos-stack to automate the entire process of building an Android OS and distributing OTA updates. On the downsides, for those that are very security conscious, they may be wary of building an OS on shared cloud resources. You can check out the [security section of the FAQ](#security) for more details on this. #### Will you support other devices? -It's not likely that other devices will be supported beyond the Pixel line. Here are some of the reasons: -* Support for verified boot with a locked bootloader is a requirement for this project. The Pixel line of phones are fairly unique in that they support verified boot with custom signing keys while locking the bootloader with an alternative OS. I am sure there are/will be devices that support this but I don't know of any others. +It's not likely that other devices will be supported beyond the Google Pixel line. Here are some reasons: +* Support for verified boot with a locked bootloader is a requirement for this project. The Pixel line of phones are fairly unique in that they support verified boot with custom signing keys while locking the bootloader with an alternative OS. I am sure there are/will be devices that support this, but I don't know of any others. * Being able to get monthly AOSP security updates for a device is a requirement for this project. Google provides proper AOSP releases every month for Pixel devices which makes it very simple to build and stay up to date with monthly security updates - most vendors don't provide this. * Being able to get monthly firmware and driver updates is a requirement for this project. Google provides updated firmware and drivers for Pixel devices every month (although incomplete - the vendor specific code ends up being extracted from monthly updated factory images) - regardless most vendors don't provide this. * Even if there is another device that meets these requirements, the build process would likely differ enough that supporting it would be too much overhead. The current build differences between each Pixel device is relatively minor. #### Is this a fork of CopperheadOS? No. 
RattlesnakeOS was created initially as an alternative to [CopperheadOS](https://en.wikipedia.org/wiki/CopperheadOS), a security hardened Android OS created by [Daniel Micay](https://twitter.com/DanielMicay), after it stopped being properly maintained back in June 2018. To be clear, this project is not attempting to add or recreate any of the security hardening features that were present in CopperheadOS. If you are interested in the continuation of the CopperheadOS project you can check out [GrapheneOS](https://grapheneos.org/). -#### Why is my webview not working? -* If you are using work profiles, it is a known issue and it has a [workaround](https://support.google.com/work/android/answer/7506908?hl=en). ### Costs #### How much does this cost to run? -The costs are going to be variable by AWS region and by day and time you are running your builds as spot instances have a variable price depending on market demand. Below is an example scenario that should give you a rough estimate of costs: - * The majority of the cost will come from builds on EC2. It currently launches spot instances of type c5.4xlarge which average maybe $.30 an hour in us-west-2 (will vary by region) but can get up over $1 an hour depending on the day and time. You can modify the default `max-price` config value to set the max price you are willing to pay and if market price exceeds that then your instance will be terminated. Builds can take anywhere from 2-6 hours depending on if Chromium needs to be built. So let's say you're doing a build every month at $0.50 an hour and it is taking on average 4 hours - you'd pay ~$2 in EC2 costs per month. - * The other very minimal cost would be S3. Storage costs are almost non existent as a stack will only store about 3GB worth of files (factory image, ota file, target file) and at $0.023 per GB you're looking at $0.07 per month in S3 storage costs. The other S3 cost would be for data transfer out for OTA updates - let's say you are just downloading an update per month (~500MB file) at $0.09 per GB you're looking at $0.05 per month in S3 network costs. +The costs are going to be variable by AWS region and by day and time you are running your builds, as spot instances have a variable price depending on market demand. Below is an example scenario that should give you a rough estimate of costs: + * The majority of the cost will come from builds on EC2. It currently launches spot instances of type c5.4xlarge which average maybe $.30 an hour in us-west-2 (will vary by region) but can get up over $1 an hour depending on the day and time. You can modify the default `max-price` config value to set the max price you are willing to pay and if market price exceeds that then your instance will be terminated. Builds can take anywhere from 3-7 hours depending on if Chromium needs to be built. So let's say you're doing a build every month at $0.50 an hour, and it is taking on average 4 hours - you'd pay ~$2 in EC2 costs per month. + * The other very minimal cost would be S3. Storage costs are almost non-existent as a stack will only store about 2GB worth of files (factory image, ota file) and at $0.023 per GB you're looking at $0.07 per month in S3 storage costs. The other S3 cost would be for data transfer out for OTA updates - let's say you are just downloading an update per month (~500MB file) at $0.09 per GB you're looking at $0.05 per month in S3 network costs. ### Builds #### How do I change build frequency? 
-By default it is configured to automatically build once a month on the 10th of the month so that monthly security updates can be picked up and built without the need for manual builds. There is a config option to specify how frequently builds are kicked off automatically. For example you could set `schedule = "rate(14 days)"` in the config file to build every 14 days. Also note, the default behavior is to only run a build if there have been version updates in stack, AOSP, Chromium, or F-Droid versions. +By default, it is configured to automatically build once a month on the 10th of the month so that monthly updates can be picked up and built without the need for manual builds. There is a config option to specify how frequently builds are kicked off automatically. For example you could set `schedule = "rate(14 days)"` in the config file to build every 14 days. Also note, the default behavior is to only run a build if there have been version updates in stack, AOSP, or Chromium versions. #### How do I manually start a build? You can manually kick off a build with the CLI. Note that this shouldn't normally be necessary as builds are set to happen automatically on a cron schedule. ```sh @@ -260,105 +230,16 @@ There are a few steps required to be able to do this: * In the [default security group](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html#DefaultSecurityGroup), you'll need to [open up SSH access](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/authorizing-access-to-an-instance.html). * You should be able to SSH into the instance (can get IP address from EC2 console or email notification): `ssh -i yourprivatekey ubuntu@yourinstancepublicip` * Tail the logfile to view progress `tail -f /var/log/cloud-init-output.log` -#### How can I debug build issues? -There is a flag you can pass `rattlesnakeos-stack` called `--prevent-shutdown` that will prevent the EC2 instance from terminating so that you can SSH into the instance and debug. Note that this will keep the instance online for 12 hours or until you manually terminate it. #### Why did my EC2 instance randomly terminate? -If there wasn't an error notification, this is likely because the [Spot Instance](https://aws.amazon.com/ec2/spot/) max price was not high enough or EC2 is low on capacity and needs to reclaim instances. You can see historical spot instance pricing in the [EC2 console](https://console.aws.amazon.com/ec2sp/v1/spot/home). Click `Pricing History`, select c5.4xlarge for `Instance Type` and pick a date range. I would recommend not setting your `max-price` beyond the on demand price. - -### Customizations -#### How do I customize RattlesnakeOS builds? -There are some advanced options that allow you to customize RattlesnakeOS builds to your liking by adding additional patches and prebuilt apps. These can only be setup in the config file and not through CLI flags. - -Important: using any Git repo here that is not in your control is a security risk, as you are giving control of your build process to the owner of the repo. They could steal your signing keys, inject malicious code, etc just by updating a patch file. - -##### Patches and Scripts -There is an option to execute patches and shell scripts against the AOSP build tree using `[[custom-patches]]` in the config file. This requires you provide a Git repo and a list of patches you want to apply during the build process. 
[There is a repo of useful patches that have been contributed by the community](https://github.com/RattlesnakeOS/community_patches) that are trusted and can be used here - or you could use your own if you wanted. - -```toml -[[custom-patches]] - repo = "https://github.com/RattlesnakeOS/community_patches" - patches = [ - "00001-global-internet-permission-toggle.patch", - ] - branch = "11.0" - -[[custom-scripts]] - repo = "https://github.com/RattlesnakeOS/example_patch_shellscript" - scripts = [ "00002-custom-boot-animation.sh" ] -``` - -##### Prebuilts -There is also an option to add prebuilt applications to the AOSP build tree using `[[custom-prebuilts]]` in the config file. This requires you provide a git repo and a list of module names defined in Android.mk files within this repository that you want to have included in the build. -```toml -[[custom-prebuilts]] - modules = ["app1", "app2"] - repo = "https://github.com/RattlesnakeOS/example_prebuilts" -``` - -##### Example for Manifest Customizations: Seedvault -It's also possible to add remotes and projects to the AOSP build manifest file. These will get added to the manifest and get pulled into the AOSP build tree as part of normal build process. - -```toml -# to add a remote line to manifest like this: -[[custom-manifest-remotes]] - name = "seedvault-app" - fetch = "https://github.com/seedvault-app/" - revision = "master" - -# to add a project line to manifest like this: -# you can also add modules here that you want to include into the build process -# in this example it is adding a really great backup app (https://github.com/seedvault-app/seedvault) -[[custom-manifest-projects]] - path = "packages/apps/Seedvault" - name = "seedvault" - remote = "seedvault-app" - modules = [ "Seedvault" ] -``` -In this special example further changes to the source code are needed to ensure that the Seevault app can be used. This can be either achieved by a [patch](https://github.com/davze/ros-patches/blob/11/0007-set-seedvault-as-dftl-bkp-provider.patch) or you can do it manually by selecting Seedvault as backup provider via adb: - -``` -# adb shell bmgr enable true -# adb shell bmgr transport com.stevesoltys.seedvault.transport.ConfigurableBackupTransport -``` - - -#### Can I change the boot animation? -It is possible to change the boot animation using patches, there is an example repo [here](https://github.com/RattlesnakeOS/example_patch_shellscript). -#### Can I add microG to the build? -I don't recommend installing microG as it requires you to enable signature spoofing. By enabling signature spoofing, this is a global change to the OS even though it has to be requested by each application as a permission. Just having the possibility for an application to request this ability reduces security of your OS. Having said all that, if you are fine with the security implications of doing so - it is possible to install microG using the custom patches and prebuilts features. See the [microG community supported repo](https://github.com/RattlesnakeOS/microg) for details on how to do this. +If there wasn't an error notification, this is likely because the [Spot Instance](https://aws.amazon.com/ec2/spot/) max price was not high enough or EC2 is low on capacity and needs to reclaim instances. You can see historical spot instance pricing in the [EC2 console](https://console.aws.amazon.com/ec2sp/v1/spot/home). Click `Pricing History`, select c5.4xlarge for `Instance Type` and pick a date range. ### Security #### How secure is this? 
-Your abilty to secure your signing keys determines how secure RattlesnakeOS is. RattlesnakeOS generates and stores signing keys, optionally encrypted, in AWS, which means the security of your AWS account becomes critical to ensuring the security of your device. If you aren't able to properly secure your local workstation and your AWS account, then these additional security protections like verified boot become less useful. Using the encrypted signing keys option can reduce impact of an AWS account compromise by keeping signing keys encrypted at rest and only decrypted into memory at build time. Is this infallible? Absolutely not, as your unencrypted keys are going to be in memory on a VM running in the cloud. So if your threat model includes more targeted attacks that try to extract signing keys while they are in memory, then RattlesnakeOS is probably not for you. +Your ability to secure your signing keys determines how secure RattlesnakeOS is. RattlesnakeOS generates and stores signing keys in AWS, which means the security of your AWS account becomes critical to ensuring the security of your device. If you aren't able to properly secure your local workstation, and your AWS account, then these additional security protections like verified boot become less useful. Cloud based builds are never going to be as secure as a locally built AOSP signed with highly secured keys generated from an HSM or air gapped computer, so if this is the level of security you require then there really is no other way. Would I recommend cloud builds like this for a large OEM or a company like CopperheadOS where the signing key being generated is protecting thousands of users? No, this becomes a high profile target as getting a hold of these keys essentially gives an attacker access to thousands of devices. On the other hand, for a single user generating their own key protecting a single device, there is less concern in my mind unless your threat profile includes very targeted attacks. #### What are some security best practices for AWS accounts? -Some minimimum steps worth considering are having an account solely for building RattlesnakeOS with a strong password, enabling two factor authentication, enabling auditing with CloudTrail, and locking down access to your AWS API credentials. -#### What's the difference between the default option and encrypted signing keys option and what one should I use? -There are different configurations for RattlesnakeOS builds based on your threat model. Here's a breakdown of the two primary build configurations and how they compare: -* Using the standard RattlesnakeOS build process, your keys are autogenerated and stored in S3. This means your AWS account security become the most important part of maintaining secure signing keys. This default build option is a good fit for someone that is OK with putting some trust in AWS, wants hands off builds with no manual intervention, and doesn't want to deal with maintaining a passphrase for encrypting/decrypting signing keys. Even with this setup, this still means AWS has potential access to your signing keys. If your threat model included an attacker compromising your AWS account, then this would not suffice as they would be able to get access to your unencrypted signing keys. -* The encrypted signing keys option allows you to prevent storing signing keys in an unencrypted form within AWS. It does this by using GPG symmetric encryption to store your keys at rest. 
This means that even AWS or someone that got control of your account wouldn't be able to extract your signing keys assuming the passphrase used to encrypt them was strong enough to prevent a brute force attack. Using this option puts less trust in AWS and more trust in your ability to secure the passphrase used for encrypting/decrypting your signing keys. -#### How does the encrypted signing keys option work in practice? -When using the encrypted signing keys option - the workflow is not fully automated like the standard build process. It requires a user to provide a passphrase to encrypt/decrypt signing keys to be used during the build process. The general workflow looks like this: -* Stack is deployed with config option `encrypted-keys = true`. -* When a build starts, an email notification will be sent that your EC2 instance is waiting for a passphrase - or will timeout in 10 mins and terminate the build. This email notification will give you an SSH command to run to provide your passphrase to the build process running on an EC2 instance. If this is your first build, encrypted signing keys don't exist yet in S3, and this passphrase will be used to store newly generated signing keys in encrypted form in S3. On future builds, these encrypted signing keys will be detected and the email notification you get to provide your passphrase to the build process will be used to decrypt your signing keys for use in the build signing process. -* Build continues as usual -#### How do I migrate to using the encrypted signing key option? -If you have an existing stack and want to move to encrypted signing keys you'll need to migrate your keys. Note: if you don't do this migration process new signing keys will be generated during the build process and you'll need to flash a new factory image (losing all data) to be able to use these builds. -* First you'll need to update your stack config file to use `encrypted-keys = true` and then run `rattlesnakeos-stack deploy` to update your stack. -* Next you'll need to copy your existing signing keys from S3 bucket `-keys`, encrypt them with GPG using a strong passphrase, and then copy over encrypted keys to S3 encrypted keys bucket `-keys-encrypted`. - ```sh - mkdir -p key-migration && cd key-migration - aws s3 sync s3://-keys/ . - echo -n "Encryption passphrase: " - read -s key - echo - for f in $(find . -type f); do - gpg --symmetric --batch --passphrase "${key}" --cipher-algo AES256 $f - done - aws s3 sync . s3://-keys-encrypted/ --exclude "*" --include "*.gpg" - ``` -* After running a full build and updating your device, you can remove the keys from the original `s3://-keys` bucket. +Some minimum steps worth considering are having an account solely for building RattlesnakeOS with a strong password, enabling two-factor authentication, enabling auditing with CloudTrail, and locking down access to your AWS API credentials. ## Uninstalling ### Remove AWS resources @@ -368,7 +249,7 @@ If you decide this isn't for you and you want to remove all the provisioned AWS ./rattlesnakeos-stack remove --name --region us-west-2 ``` -Important note: this will not terminate any running EC2 instances that may have launched and these will need to be terminated manually. +IMPORTANT NOTE: this will not terminate any running EC2 instances that may have launched, and these will need to be terminated manually. ### Revert back to stock Android You'll need to clear the configured AVB public key after unlocking the bootloader and before locking it again with the stock factory images. 
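As a rough sketch of that sequence (assuming a Pixel in fastboot mode and a stock factory image downloaded from Google, whose zip ships a flash-all script):

```sh
# unlock the bootloader (this wipes the device)
fastboot flashing unlock

# clear the custom AVB signing key configured by rattlesnakeos-stack
fastboot erase avb_custom_key

# flash the stock factory image using the script included in Google's factory zip
./flash-all.sh

# relock the bootloader once stock Android is installed
fastboot flashing lock
```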
@@ -384,12 +265,13 @@ fastboot erase avb_custom_key ## Powered by * [android-prepare-vendor](https://github.com/anestisb/android-prepare-vendor) +* [CalyxOS](https://github.com/CalyxOS) * [GrapheneOS](https://github.com/GrapheneOS) * [Terraform](https://www.terraform.io/) * Huimin Zhang - author of the original underlying build script that was written for CopperheadOS. ## Build from Source - * To compile from source you'll need to install Go (https://golang.org/) for your platform + * To compile from source code you'll need to install Go 1.16+ (https://golang.org/) for your platform ```sh git clone github.com/dan-v/rattlesnakeos-stack make tools diff --git a/VERSION b/VERSION index 5326b8b7..d65b4d44 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -11.0.6 +11.0.7 \ No newline at end of file diff --git a/cli/build.go b/cli/build.go deleted file mode 100644 index cbda7a04..00000000 --- a/cli/build.go +++ /dev/null @@ -1,204 +0,0 @@ -package cli - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/service/lambda" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// TODO: this command is very happy path at the moment - -var listBuilds, startBuild, forceBuild bool -var terminateInstanceID, terminateRegion, listRegions, listName, buildName string -var aospBuild, aospBranch string - -func init() { - rootCmd.AddCommand(buildCmd) - - buildCmd.AddCommand(buildListCmd) - buildListCmd.Flags().StringVar(&name, "name", "", "name for stack") - buildListCmd.Flags().StringVar(&listRegions, "instance-regions", "", "regions to look for running builds") - - buildCmd.AddCommand(buildStartCmd) - buildStartCmd.Flags().StringVar(&name, "name", "", "name for stack") - buildStartCmd.Flags().BoolVar(&forceBuild, "force-build", false, "force build even if there are no changes in "+ - "available version of AOSP, Chromium, or F-Droid. this will override stack setting ignore-version-checks.") - buildStartCmd.Flags().StringVar(&aospBuild, "aosp-build", "", "advanced option - specify the specific factory image build number (e.g. PQ3A.190505.002)") - buildStartCmd.Flags().StringVar(&aospBranch, "aosp-branch", "", "advanced option - specify the corresponding AOSP branch to use for build (e.g. android-9.0.0_r37)") - - buildCmd.AddCommand(buildTerminateCmd) - buildTerminateCmd.Flags().StringVarP(&terminateInstanceID, "instance-id", "i", "", "EC2 instance id "+ - "you want to terminate (e.g. 
i-07ff0f2ed84ff2e8d)") - buildTerminateCmd.Flags().StringVarP(&terminateRegion, "region", "r", "", "Region of instance you "+ - "want to terminate") -} - -var buildCmd = &cobra.Command{ - Use: "build", - Short: "Commands to list, start, and terminate builds.", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("Need to specify a subcommand") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) {}, -} - -var buildStartCmd = &cobra.Command{ - Use: "start", - Short: "Manually start a build", - Args: func(cmd *cobra.Command, args []string) error { - if viper.GetString("name") == "" && name == "" { - return fmt.Errorf("must provide a stack name") - } - if viper.GetString("region") == "" && region == "" { - return fmt.Errorf("must provide stack region") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - if name == "" { - name = viper.GetString("name") - } - if region == "" { - region = viper.GetString("region") - } - - sess, err := session.NewSession(aws.NewConfig().WithCredentialsChainVerboseErrors(true)) - if err != nil { - log.Fatalf("Failed to setup AWS session: %v", err) - } - - lambdaPayload := struct { - ForceBuild bool - AOSPBuild string - AOSPBranch string - }{ - ForceBuild: forceBuild, - AOSPBuild: aospBuild, - AOSPBranch: aospBranch, - } - payload, err := json.Marshal(lambdaPayload) - if err != nil { - log.Fatalf("Failed to create payload for Lambda function: %v", err) - } - - lambdaClient := lambda.New(sess, &aws.Config{Region: ®ion}) - out, err := lambdaClient.Invoke(&lambda.InvokeInput{ - FunctionName: aws.String(name + "-build"), - InvocationType: aws.String("RequestResponse"), - Payload: payload, - }) - if err != nil { - log.Fatalf("Failed to start manual build: %v", err) - } - if out.FunctionError != nil { - log.Fatalf("Failed to start manual build. Function error: %v. Output: %v", *out.FunctionError, string(out.Payload)) - } - if *out.StatusCode != 200 { - log.Fatalf("Failed to start manual build. 
Status code calling Lambda function %v != 200", *out.StatusCode) - } - log.Infof("Successfully started manual build for stack %v", name) - }, -} - -var buildTerminateCmd = &cobra.Command{ - Use: "terminate", - Short: "Terminate a running a build", - Args: func(cmd *cobra.Command, args []string) error { - if terminateInstanceID == "" { - return fmt.Errorf("must provide an instance id to terminate") - } - if terminateRegion == "" { - return fmt.Errorf("must provide region for instance to terminate") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - sess, err := session.NewSession(aws.NewConfig().WithCredentialsChainVerboseErrors(true)) - if err != nil { - log.Fatalf("Failed to setup AWS session: %v", err) - } - ec2Client := ec2.New(sess, &aws.Config{Region: &terminateRegion}) - _, err = ec2Client.TerminateInstances(&ec2.TerminateInstancesInput{ - InstanceIds: aws.StringSlice([]string{terminateInstanceID}), - }) - if err != nil { - log.Fatalf("Failed to terminate EC2 instance %v in region %v: %v", terminateInstanceID, terminateRegion, err) - } - log.Infof("Terminated instance %v in region %v", terminateInstanceID, terminateRegion) - }, -} - -var buildListCmd = &cobra.Command{ - Use: "list", - Short: "List in progress RattlesnakeOS builds", - Args: func(cmd *cobra.Command, args []string) error { - if viper.GetString("name") == "" && name == "" { - return fmt.Errorf("must provide a stack name") - } - if viper.GetString("instance-regions") == "" && listRegions == "" { - return fmt.Errorf("must provide instance regions") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - if name == "" { - name = viper.GetString("name") - } - if listRegions == "" { - listRegions = viper.GetString("instance-regions") - } - - sess, err := session.NewSession(aws.NewConfig().WithCredentialsChainVerboseErrors(true)) - if err != nil { - log.Fatalf("Failed to setup AWS session: %v", err) - } - - log.Infof("Looking for builds for stack %v in the following regions: %v", name, listRegions) - runningInstances := 0 - for _, region := range strings.Split(listRegions, ",") { - ec2Client := ec2.New(sess, &aws.Config{Region: ®ion}) - resp, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{ - Filters: []*ec2.Filter{ - &ec2.Filter{ - Name: aws.String("instance-state-name"), - Values: []*string{aws.String("running")}}, - }}) - if err != nil { - log.Fatalf("Failed to describe EC2 instances in region %v", region) - } - if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 { - continue - } - - for _, reservation := range resp.Reservations { - for _, instance := range reservation.Instances { - if instance.IamInstanceProfile == nil || instance.IamInstanceProfile.Arn == nil { - continue - } - - instanceIamProfileName := strings.Split(*instance.IamInstanceProfile.Arn, "/")[1] - if instanceIamProfileName == name+"-ec2" { - log.Printf("Instance '%v': ip='%v' region='%v' launched='%v'", *instance.InstanceId, *instance.PublicIpAddress, region, *instance.LaunchTime) - runningInstances++ - } - } - } - } - if runningInstances == 0 { - log.Info("No active builds found") - } - }, -} diff --git a/cli/deploy.go b/cli/deploy.go deleted file mode 100644 index 83861bb5..00000000 --- a/cli/deploy.go +++ /dev/null @@ -1,254 +0,0 @@ -package cli - -import ( - "errors" - "fmt" - "os" - "strconv" - "strings" - - "github.com/dan-v/rattlesnakeos-stack/stack" - "github.com/manifoldco/promptui" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/spf13/viper" - 
yaml "gopkg.in/yaml.v2" -) - -const defaultInstanceRegions = "us-west-2,us-west-1,us-east-2" -const minimumChromiumVersion = 86 - -var name, region, email, device, sshKey, maxPrice, skipPrice, schedule string -var instanceType, instanceRegions, hostsFile, chromiumVersion string -var preventShutdown, ignoreVersionChecks, encryptedKeys, saveConfig bool -var patches = &stack.CustomPatches{} -var scripts = &stack.CustomScripts{} -var prebuilts = &stack.CustomPrebuilts{} -var manifestRemotes = &stack.CustomManifestRemotes{} -var manifestProjects = &stack.CustomManifestProjects{} -var trustedRepoBase = "https://github.com/rattlesnakeos/" -var supportedRegions = []string{"ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-southeast-1", - "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-north-1", "eu-west-1", "eu-west-2", "eu-west-3", "sa-east-1", - "us-east-1", "us-east-2", "us-west-1", "us-west-2", "cn-northwest-1", "cn-north-1"} - -var supportedDevicesFriendly = []string{ - "Pixel 2", "Pixel 2 XL", - "Pixel 3", "Pixel 3 XL", "Pixel 3a", "Pixel 3a XL", - "Pixel 4", "Pixel 4 XL", "Pixel 4a", "Pixel 5"} -var supportedDevicesCodename = []string{ - "walleye", "taimen", - "blueline", "crosshatch", "sargo", "bonito", - "flame", "coral", "sunfish", "redfin"} -var supportDevicesOutput string - -func init() { - rootCmd.AddCommand(deployCmd) - - for i, d := range supportedDevicesCodename { - supportDevicesOutput += fmt.Sprintf("%v (%v)", d, supportedDevicesFriendly[i]) - if i < len(supportedDevicesCodename)-1 { - supportDevicesOutput += ", " - } - } - - flags := deployCmd.Flags() - flags.StringVarP(&name, "name", "n", "", - "name for stack. note: this must be a valid/unique S3 bucket name.") - viper.BindPFlag("name", flags.Lookup("name")) - - flags.StringVarP(®ion, "region", "r", "", - "aws region for stack deployment (e.g. us-west-2)") - viper.BindPFlag("region", flags.Lookup("region")) - - flags.StringVarP(&device, "device", "d", "", - "device you want to build for (e.g. crosshatch): to list supported devices use '-d list'") - viper.BindPFlag("device", flags.Lookup("device")) - - flags.StringVarP(&email, "email", "e", "", - "email address you want to use for build notifications") - viper.BindPFlag("email", flags.Lookup("email")) - - flags.StringVar(&sshKey, "ssh-key", "", - "aws ssh key to add to ec2 spot instances. this is optional but is useful for debugging build issues on the instance.") - viper.BindPFlag("ssh-key", flags.Lookup("ssh-key")) - - flags.StringVar(&skipPrice, "skip-price", "0.68", - "skip requesting ec2 spot instance if price is above this value to begin with.") - viper.BindPFlag("skip-price", flags.Lookup("skip-price")) - - flags.StringVar(&maxPrice, "max-price", "1.00", - "max ec2 spot instance price. if this value is too low, you may not obtain an instance or it may terminate during a build.") - viper.BindPFlag("max-price", flags.Lookup("max-price")) - - flags.StringVar(&instanceType, "instance-type", "c5.4xlarge", "EC2 instance type (e.g. c4.4xlarge) to use for the build.") - viper.BindPFlag("instance-type", flags.Lookup("instance-type")) - - flags.StringVar(&instanceRegions, "instance-regions", defaultInstanceRegions, - "possible regions to launch spot instance. the region with cheapest spot instance price will be used.") - viper.BindPFlag("instance-regions", flags.Lookup("instance-regions")) - - flags.StringVar(&schedule, "schedule", "cron(0 0 10 * ? *)", - "cron expression that defines when to kick off builds. 
by default this is set to build on the 10th of every month. "+ - "note: if you give an invalid expression it will fail to deploy the stack. "+ - "see this for cron format details: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions") - viper.BindPFlag("schedule", flags.Lookup("schedule")) - - flags.StringVar(&hostsFile, "hosts-file", "", - "an advanced option that allows you to specify a replacement /etc/hosts file to enable global dns adblocking "+ - "(e.g. https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts). note: be careful with this, as you "+ - "1) won't get any sort of notification on blocking 2) if you need to unblock something you'll have to rebuild the OS") - viper.BindPFlag("hosts-file", flags.Lookup("hosts-file")) - - flags.StringVar(&chromiumVersion, "chromium-version", "", - "specify the version of Chromium you want (e.g. 80.0.3971.4) to pin to. if not specified, the latest stable "+ - "version of Chromium is used.") - viper.BindPFlag("chromium-version", flags.Lookup("chromium-version")) - - flags.BoolVar(&encryptedKeys, "encrypted-keys", false, "an advanced option that allows signing keys to "+ - "be stored with symmetric gpg encryption and decrypted into memory during the build process. this option requires "+ - "manual intervention during builds where you will be sent a notification and need to provide the key required for "+ - "decryption over SSH to continue the build process. important: if you have an existing stack - please see the FAQ for how to "+ - "migrate your keys") - viper.BindPFlag("encrypted-keys", flags.Lookup("encrypted-keys")) - - flags.BoolVar(&ignoreVersionChecks, "ignore-version-checks", false, - "ignore the versions checks for stack, AOSP, Chromium, and F-Droid and always do a build.") - viper.BindPFlag("ignore-version-checks", flags.Lookup("ignore-version-checks")) - - flags.BoolVar(&saveConfig, "save-config", false, "allows you to save all passed CLI flags to config file") - - flags.BoolVar(&preventShutdown, "prevent-shutdown", false, - "for debugging purposes only - will prevent ec2 instance from shutting down after build.") -} - -var deployCmd = &cobra.Command{ - Use: "deploy", - Short: "Deploy or update the AWS infrastructure used for building RattlesnakeOS", - Args: func(cmd *cobra.Command, args []string) error { - if viper.GetString("name") == "" { - return fmt.Errorf("must provide a stack name") - } - if viper.GetString("region") == "" { - return fmt.Errorf("must provide a region") - } - if viper.GetString("email") == "" { - return errors.New("must specify email") - } - if viper.GetString("ssh-key") == "" { - return fmt.Errorf("must provide ssh key name") - } - if viper.GetString("device") == "" { - return errors.New("must specify device type") - } - if viper.GetString("chromium-version") != "" { - chromiumVersionSplit := strings.Split(viper.GetString("chromium-version"), ".") - if len(chromiumVersionSplit) != 4 { - return errors.New("invalid chromium-version specified") - } - chromiumMajorNumber, err := strconv.Atoi(chromiumVersionSplit[0]) - if err != nil { - return fmt.Errorf("unable to parse specified chromium-version: %v", err) - } - if chromiumMajorNumber < minimumChromiumVersion { - return fmt.Errorf("pinned chromium-version must have major version of at least %v", minimumChromiumVersion) - } - } - if viper.GetString("force-build") != "" { - log.Warnf("The force-build setting has been deprecated and can be removed from your config file. 
it has been replaced with ignore-version-checks.") - } - if device == "list" { - fmt.Printf("Valid devices are: %v\n", supportDevicesOutput) - os.Exit(0) - } - for _, device := range supportedDevicesCodename { - if device == viper.GetString("device") { - return nil - } - } - return fmt.Errorf("must specify a supported device: %v", strings.Join(supportedDevicesCodename, ", ")) - }, - Run: func(cmd *cobra.Command, args []string) { - viper.UnmarshalKey("custom-patches", patches) - viper.UnmarshalKey("custom-scripts", scripts) - viper.UnmarshalKey("custom-prebuilts", prebuilts) - viper.UnmarshalKey("custom-manifest-remotes", manifestRemotes) - viper.UnmarshalKey("custom-manifest-projects", manifestProjects) - - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - log.Println("Current settings:") - fmt.Println(string(bs)) - - if saveConfig { - log.Printf("These settings will be saved to config file %v.", configFileFullPath) - } - - for _, r := range *patches { - if !strings.Contains(strings.ToLower(r.Repo), trustedRepoBase) { - log.Warnf("You are using an untrusted repository (%v) for patches - this is risky unless you own the repository", r.Repo) - } - } - - for _, r := range *scripts { - if !strings.Contains(strings.ToLower(r.Repo), trustedRepoBase) { - log.Warnf("You are using an untrusted repository (%v) for scripts - this is risky unless you own the repository", r.Repo) - } - } - - for _, r := range *prebuilts { - if !strings.Contains(strings.ToLower(r.Repo), trustedRepoBase) { - log.Warnf("You are using an untrusted repository (%v) for prebuilts - this is risky unless you own the repository", r.Repo) - } - } - - prompt := promptui.Prompt{ - Label: "Do you want to continue ", - IsConfirm: true, - } - _, err = prompt.Run() - if err != nil { - log.Fatalf("Exiting %v", err) - } - - s, err := stack.NewAWSStack(&stack.AWSStackConfig{ - Name: viper.GetString("name"), - Region: viper.GetString("region"), - Device: viper.GetString("device"), - Email: viper.GetString("email"), - InstanceType: viper.GetString("instance-type"), - InstanceRegions: viper.GetString("instance-regions"), - SSHKey: viper.GetString("ssh-key"), - SkipPrice: viper.GetString("skip-price"), - MaxPrice: viper.GetString("max-price"), - Schedule: viper.GetString("schedule"), - ChromiumVersion: viper.GetString("chromium-version"), - HostsFile: viper.GetString("hosts-file"), - EncryptedKeys: viper.GetBool("encrypted-keys"), - IgnoreVersionChecks: viper.GetBool("ignore-version-checks"), - CustomPatches: patches, - CustomScripts: scripts, - CustomPrebuilts: prebuilts, - CustomManifestRemotes: manifestRemotes, - CustomManifestProjects: manifestProjects, - PreventShutdown: preventShutdown, - Version: version, - }) - if err != nil { - log.Fatal(err) - } - if err := s.Apply(); err != nil { - log.Fatal(err) - } - - if saveConfig { - log.Printf("Saved settings to config file %v.", configFileFullPath) - err := viper.WriteConfigAs(configFileFullPath) - if err != nil { - log.Fatalf("Failed to write config file %v", configFileFullPath) - } - } - }, -} diff --git a/cmd/build.go b/cmd/build.go new file mode 100644 index 00000000..0752049a --- /dev/null +++ b/cmd/build.go @@ -0,0 +1,168 @@ +package cmd + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "github.com/dan-v/rattlesnakeos-stack/internal/cloudaws" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "time" +) + +var ( + terminateInstanceID, 
terminateRegion, listRegions string + aospBuildID, aospTag string + forceBuild bool + defaultExecuteLambdaTimeout = time.Second * 200 + defaultTerminateInstanceTimeout = time.Second * 10 + defaultListInstancesTimeout = time.Second * 10 +) + +func buildInit() { + rootCmd.AddCommand(buildCmd) + + buildCmd.AddCommand(buildListCmd) + buildListCmd.Flags().StringVar(&name, "name", "", "name for stack") + buildListCmd.Flags().StringVar(&listRegions, "instance-regions", "", "regions to look for running builds") + + buildCmd.AddCommand(buildStartCmd) + buildStartCmd.Flags().StringVar(&name, "name", "", "name of stack") + buildStartCmd.Flags().BoolVar(&forceBuild, "force-build", false, "force build even if there are no changes in component versions") + buildStartCmd.Flags().StringVar(&aospBuildID, "aosp-build-id", "", "advanced option - specify the specific AOSP build id (e.g. RQ1A.210205.004)") + buildStartCmd.Flags().StringVar(&aospTag, "aosp-tag", "", "advanced option - specify the corresponding AOSP tag to use for build (e.g. android-11.0.0_r29)") + + buildCmd.AddCommand(buildTerminateCmd) + buildTerminateCmd.Flags().StringVarP(&terminateInstanceID, "instance-id", "i", "", "EC2 instance id "+ "you want to terminate (e.g. i-07ff0f2ed84ff2e8d)") + buildTerminateCmd.Flags().StringVarP(&terminateRegion, "region", "r", "", "Region of instance you "+ "want to terminate") +} + +var buildCmd = &cobra.Command{ + Use: "build", + Short: "commands to list, start, and terminate builds.", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("Need to specify a subcommand") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) {}, +} + +var buildStartCmd = &cobra.Command{ + Use: "start", + Short: "manually start a build", + Args: func(cmd *cobra.Command, args []string) error { + if viper.GetString("name") == "" && name == "" { + return fmt.Errorf("must provide a stack name") + } + if viper.GetString("region") == "" && region == "" { + return fmt.Errorf("must provide stack region") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if name == "" { + name = viper.GetString("name") + } + if region == "" { + region = viper.GetString("region") + } + + payload, err := json.Marshal(struct { + ForceBuild bool `json:"force-build"` + AOSPBuildID string `json:"aosp-build-id"` + AOSPTag string `json:"aosp-tag"` + }{ + ForceBuild: forceBuild, + AOSPBuildID: aospBuildID, + AOSPTag: aospTag, + }) + if err != nil { + log.Fatalf("failed to create payload for lambda function: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultExecuteLambdaTimeout) + defer cancel() + + log.Infof("calling lambda function to start manual build for stack %v. waiting for spot instance launch...", name) + output, err := cloudaws.ExecuteLambdaFunction(ctx, name, region, payload) + if err != nil { + log.Fatalf("failed to start manual build for stack %v: err=%v", name, err) + } + if output != nil && (output.FunctionError != nil || output.StatusCode != 200) { + log.Fatalf("failed to start manual build for stack %v: statuscode=%v payload:%v", + name, output.StatusCode, string(output.Payload)) + } + + log.Infof("successfully started manual build for stack %v", name) + }, +} + +var buildTerminateCmd = &cobra.Command{ + Use: "terminate", + Short: "terminate a running build", + Args: func(cmd *cobra.Command, args []string) error { + if terminateInstanceID == "" { + return fmt.Errorf("must provide an instance id to terminate") + } + if terminateRegion == "" { + return fmt.Errorf("must provide region for instance to terminate") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTerminateInstanceTimeout) + defer cancel() + + output, err := cloudaws.TerminateEC2Instance(ctx, terminateInstanceID, terminateRegion) + if err != nil { + log.Fatal(err) + } + + log.Infof("terminated instance %v in region %v: %v", terminateInstanceID, terminateRegion, output.TerminatingInstances) + }, +} + +var buildListCmd = &cobra.Command{ + Use: "list", + Short: "list in progress builds", + Args: func(cmd *cobra.Command, args []string) error { + if viper.GetString("name") == "" && name == "" { + return fmt.Errorf("must provide a stack name") + } + if viper.GetString("instance-regions") == "" && listRegions == "" { + return fmt.Errorf("must provide instance regions") + } + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + if name == "" { + name = viper.GetString("name") + } + if listRegions == "" { + listRegions = viper.GetString("instance-regions") + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultListInstancesTimeout) + defer cancel() + + instances, err := cloudaws.GetRunningEC2InstancesWithProfileName(ctx, fmt.Sprintf("%v-ec2", name), listRegions) + if err != nil { + log.Fatal(err) + } + + if len(instances) == 0 { + log.Info("no active builds found") + return + } + + for _, instance := range instances { + fmt.Println(instance) + } + }, +}
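The build subcommands above hand all AWS work to the new internal/cloudaws package, which is not included in this diff. A minimal sketch of what a helper such as cloudaws.ExecuteLambdaFunction might look like on top of the aws-sdk-go-v2 modules added in go.mod, with the signature inferred from the call site above (the actual implementation in internal/cloudaws may differ):
```
// Sketch only: internal/cloudaws is not part of this diff. This assumes the
// helper is a thin wrapper around aws-sdk-go-v2's Lambda Invoke call; the real
// package may name things differently.
package cloudaws

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
)

// ExecuteLambdaFunction invokes the stack's build lambda in the given region
// with a JSON payload and returns the raw output (StatusCode, FunctionError,
// Payload) for the caller to inspect.
func ExecuteLambdaFunction(ctx context.Context, name, region string, payload []byte) (*lambda.InvokeOutput, error) {
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
	if err != nil {
		return nil, err
	}
	client := lambda.NewFromConfig(cfg)
	// Assumption: the lambda function is named after the stack; the real helper
	// may derive the function name differently.
	return client.Invoke(ctx, &lambda.InvokeInput{
		FunctionName: aws.String(name),
		Payload:      payload,
	})
}
```
TerminateEC2Instance and GetRunningEC2InstancesWithProfileName presumably wrap the EC2 TerminateInstances and DescribeInstances calls in the same style.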
diff --git a/cli/config.go b/cmd/config.go similarity index 83% rename from cli/config.go rename to cmd/config.go index 1d04a614..c251c041 100644 --- a/cli/config.go +++ b/cmd/config.go @@ -1,8 +1,9 @@ -package cli +package cmd import ( "errors" "fmt" + "github.com/dan-v/rattlesnakeos-stack/internal/cloudaws" "math/rand" "strings" "time" @@ -14,28 +15,21 @@ import ( "github.com/spf13/viper" ) -func init() { +func configInit() { rootCmd.AddCommand(configCmd) } var configCmd = &cobra.Command{ Use: "config", - Short: "Setup config file for rattlesnakeos-stack", + Short: "setup config file for rattlesnakeos-stack", Run: func(cmd *cobra.Command, args []string) { - color.Cyan(fmt.Sprintln("Device is the device codename (e.g. sailfish). Supported devices:", supportDevicesOutput)) + color.Cyan(fmt.Sprintln("Device is the device codename (e.g. sunfish). 
Supported devices:", supportedDevices.GetSupportedDevicesOutput())) validate := func(input string) error { if len(input) < 1 { return errors.New("Device name is too short") } - found := false - for _, d := range supportedDevicesCodename { - if input == d { - found = true - break - } - } - if !found { - return errors.New("Invalid device") + if !supportedDevices.IsSupportedDevice(input) { + return errors.New("invalid device") } return nil } @@ -46,7 +40,7 @@ var configCmd = &cobra.Command{ } result, err := devicePrompt.Run() if err != nil { - log.Fatalf("Prompt failed %v\n", err) + log.Fatalf("prompt failed %v\n", err) } viper.Set("device", result) @@ -68,24 +62,17 @@ var configCmd = &cobra.Command{ } result, err = namePrompt.Run() if err != nil { - log.Fatalf("Prompt failed %v\n", err) + log.Fatalf("prompt failed %v\n", err) } viper.Set("name", result) color.Cyan(fmt.Sprintf("Stack region is the AWS region where you would like to deploy your stack. Valid options: %v\n", - strings.Join(supportedRegions, ", "))) + strings.Join(cloudaws.GetSupportedRegions(), ", "))) validate = func(input string) error { if len(input) < 1 { return errors.New("Stack region is too short") } - found := false - for _, region := range supportedRegions { - if input == region { - found = true - break - } - } - if !found { + if !cloudaws.IsSupportedRegion(input) { return errors.New("Invalid region") } return nil @@ -97,7 +84,7 @@ var configCmd = &cobra.Command{ } result, err = regionPrompt.Run() if err != nil { - log.Fatalf("Prompt failed %v\n", err) + log.Fatalf("prompt failed %v\n", err) } viper.Set("region", result) @@ -115,7 +102,7 @@ var configCmd = &cobra.Command{ } result, err = emailPrompt.Run() if err != nil { - log.Fatalf("Prompt failed %v\n", err) + log.Fatalf("prompt failed %v\n", err) } viper.Set("email", result) @@ -137,7 +124,7 @@ var configCmd = &cobra.Command{ } result, err = keypairPrompt.Run() if err != nil { - log.Fatalf("Prompt failed %v\n", err) + log.Fatalf("prompt failed %v\n", err) } viper.Set("ssh-key", result) diff --git a/cmd/deploy.go b/cmd/deploy.go new file mode 100644 index 00000000..d03adbe3 --- /dev/null +++ b/cmd/deploy.go @@ -0,0 +1,306 @@ +package cmd + +import ( + "context" + "errors" + "fmt" + "github.com/dan-v/rattlesnakeos-stack/internal/cloudaws" + "github.com/dan-v/rattlesnakeos-stack/internal/stack" + "github.com/dan-v/rattlesnakeos-stack/internal/templates" + "github.com/dan-v/rattlesnakeos-stack/internal/terraform" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/manifoldco/promptui" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + yaml "gopkg.in/yaml.v2" +) + +const ( + minimumChromiumVersion = 86 +) + +var ( + name, region, email, device, sshKey, maxPrice, skipPrice, schedule, cloud string + instanceType, instanceRegions, chromiumVersion, releasesURL string + saveConfig, dryRun, chromiumBuildDisabled bool + coreConfigRepo, customConfigRepo string + coreConfigRepoBranch, customConfigRepoBranch string + outputDir string +) + +func deployInit() { + rootCmd.AddCommand(deployCmd) + + flags := deployCmd.Flags() + + flags.StringVarP(&name, "name", "n", "", + "name for stack. note: this must be a valid/unique S3 bucket name.") + _ = viper.BindPFlag("name", flags.Lookup("name")) + + flags.StringVarP(®ion, "region", "r", "", + "aws region for stack deployment (e.g. us-west-2)") + _ = viper.BindPFlag("region", flags.Lookup("region")) + + flags.StringVarP(&device, "device", "d", "", + "device you want to build for (e.g. 
crosshatch)") + _ = viper.BindPFlag("device", flags.Lookup("device")) + + flags.StringVarP(&email, "email", "e", "", + "email address you want to use for build notifications") + _ = viper.BindPFlag("email", flags.Lookup("email")) + + flags.StringVar(&sshKey, "ssh-key", "", + "aws ssh key to add to ec2 spot instances. this is optional but is useful for debugging build issues on the instance.") + _ = viper.BindPFlag("ssh-key", flags.Lookup("ssh-key")) + + flags.StringVar(&skipPrice, "skip-price", "0.68", + "skip requesting ec2 spot instance if price is above this value to begin with.") + _ = viper.BindPFlag("skip-price", flags.Lookup("skip-price")) + + flags.StringVar(&maxPrice, "max-price", "1.00", + "max ec2 spot instance price. if this value is too low, you may not obtain an instance or it may terminate during a build.") + _ = viper.BindPFlag("max-price", flags.Lookup("max-price")) + + flags.StringVar(&instanceType, "instance-type", "c5.4xlarge", "EC2 instance type (e.g. c5.4xlarge) to use for the build.") + _ = viper.BindPFlag("instance-type", flags.Lookup("instance-type")) + + flags.StringVar(&instanceRegions, "instance-regions", cloudaws.DefaultInstanceRegions, + "possible regions to launch spot instance. the region with cheapest spot instance price will be used.") + _ = viper.BindPFlag("instance-regions", flags.Lookup("instance-regions")) + + flags.StringVar(&schedule, "schedule", "cron(0 0 10 * ? *)", + "cron expression that defines when to kick off builds. by default this is set to build on the 10th of every month. you can also set to empty string to disable cron."+ + "note: if you give an invalid expression it will fail to deploy the stack. "+ + "see this for cron format details: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions") + _ = viper.BindPFlag("schedule", flags.Lookup("schedule")) + + flags.BoolVar(&chromiumBuildDisabled, "chromium-build-disabled", false, "control whether chromium builds are enabled or disabled.") + _ = viper.BindPFlag("chromium-build-disabled", flags.Lookup("chromium-build-disabled")) + + flags.StringVar(&chromiumVersion, "chromium-version", "", + "specify the version of Chromium you want (e.g. 80.0.3971.4) to pin to. if not specified, the latest stable version of Chromium is used.") + _ = viper.BindPFlag("chromium-version", flags.Lookup("chromium-version")) + + flags.StringVar(&coreConfigRepo, "core-config-repo", templates.DefaultCoreConfigRepo, "a specially formatted repo that contains core customizations on top of AOSP.") + _ = viper.BindPFlag("core-config-repo", flags.Lookup("core-config-repo")) + + flags.StringVar(&coreConfigRepoBranch, "core-config-repo-branch", aospVersion, "the branch to use for the core config repo.") + _ = viper.BindPFlag("core-config-repo-branch", flags.Lookup("core-config-repo-branch")) + + flags.StringVar(&customConfigRepo, "custom-config-repo", "", "a specially formatted repo that contains customizations on top of core.") + _ = viper.BindPFlag("custom-config-repo", flags.Lookup("custom-config-repo")) + + flags.StringVar(&customConfigRepoBranch, "custom-config-repo-branch", "", "the branch to use for the custom config repo. 
+ +var deployCmd = &cobra.Command{ + Use: "deploy", + Short: "deploy or update the cloud infrastructure used for OS building", + Args: func(cmd *cobra.Command, args []string) error { + if viper.GetString("name") == "" { + return fmt.Errorf("must provide a stack name") + } + if viper.GetString("region") == "" { + return fmt.Errorf("must provide a region") + } + if viper.GetString("email") == "" { + return errors.New("must specify email") + } + if viper.GetString("ssh-key") == "" { + return fmt.Errorf("must provide ssh key name") + } + if viper.GetString("device") == "" { + return errors.New("must specify device type") + } + if viper.GetString("chromium-version") != "" { + chromiumVersionSplit := strings.Split(viper.GetString("chromium-version"), ".") + if len(chromiumVersionSplit) != 4 { + return errors.New("invalid chromium-version specified") + } + chromiumMajorNumber, err := strconv.Atoi(chromiumVersionSplit[0]) + if err != nil { + return fmt.Errorf("unable to parse specified chromium-version: %v", err) + } + if chromiumMajorNumber < minimumChromiumVersion { + return fmt.Errorf("pinned chromium-version must have major version of at least %v", minimumChromiumVersion) + } + } + if !supportedDevices.IsSupportedDevice(viper.GetString("device")) { + return fmt.Errorf("must specify a supported device: %v", strings.Join(supportedDevices.GetDeviceCodeNames(), ", ")) + } + + // deprecated checks + if viper.GetBool("encrypted-keys") { + return fmt.Errorf("encrypted-keys functionality has been removed (it may return in the future). 
migration required to use non encrypted keys for now") + } + if viper.GetString("core-config-repo-branch") != aospVersion { + log.Warnf("core-config-repo-branch '%v' does not match aosp version '%v' - if this is not intended, update your config file", + viper.GetString("core-config-repo-branch"), aospVersion) + } + if viper.GetString("hosts-file") != "" { + log.Warn("hosts-file functionality has been removed - it can be removed from config file") + } + if viper.Get("custom-manifest-remotes") != nil { + return fmt.Errorf("custom-manifest-remotes has been deprecated in favor of custom-config-repo option") + } + if viper.Get("custom-manifest-projects") != nil { + return fmt.Errorf("custom-manifest-projects has been deprecated in favor of custom-config-repo option") + } + if viper.Get("custom-patches") != nil { + return fmt.Errorf("custom-patches has been deprecated in favor of custom-config-repo option") + } + if viper.Get("custom-prebuilts") != nil { + return fmt.Errorf("custom-prebuilts has been deprecated in favor of custom-config-repo option") + } + + return nil + }, + Run: func(cmd *cobra.Command, args []string) { + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + log.Println("Current settings:") + fmt.Println(string(bs)) + + if !dryRun { + prompt := promptui.Prompt{ + Label: "Do you want to continue ", + IsConfirm: true, + } + _, err = prompt.Run() + if err != nil { + log.Fatalf("exiting: %v", err) + } + } + + configuredOutputDir, err := getOutputDir() + if err != nil { + log.Fatal(err) + } + log.Infof("all generated files will be placed in %v", configuredOutputDir) + + templateConfig := &templates.Config{ + Version: stackVersion, + Name: viper.GetString("name"), + Region: viper.GetString("region"), + Device: viper.GetString("device"), + DeviceDetails: supportedDevices.GetDeviceDetails(viper.GetString("device")), + Email: viper.GetString("email"), + InstanceType: viper.GetString("instance-type"), + InstanceRegions: viper.GetString("instance-regions"), + SkipPrice: viper.GetString("skip-price"), + MaxPrice: viper.GetString("max-price"), + SSHKey: viper.GetString("ssh-key"), + Schedule: viper.GetString("schedule"), + ChromiumBuildDisabled: viper.GetBool("chromium-build-disabled"), + ChromiumVersion: viper.GetString("chromium-version"), + CoreConfigRepo: viper.GetString("core-config-repo"), + CoreConfigRepoBranch: viper.GetString("core-config-repo-branch"), + CustomConfigRepo: viper.GetString("custom-config-repo"), + CustomConfigRepoBranch: viper.GetString("custom-config-repo-branch"), + ReleasesURL: viper.GetString("releases-url"), + Cloud: viper.GetString("cloud"), + } + + templateRenderer, err := templates.New(templateConfig, templatesFiles, configuredOutputDir) + if err != nil { + log.Fatalf("failed to create template client: %v", err) + } + + if saveConfig { + log.Printf("Saved settings to config file %v.", configFileFullPath) + err := viper.WriteConfigAs(configFileFullPath) + if err != nil { + log.Fatalf("Failed to write config file %v", configFileFullPath) + } + } + + if dryRun { + log.Infof("rendering all templates to '%v'", configuredOutputDir) + err = templateRenderer.RenderAll() + if err != nil { + log.Fatal(err) + } + log.Info("skipping deployment as skip deploy option was specified") + return + } + if viper.GetString("cloud") != "aws" { + log.Fatal("'aws' is only supported option for cloud at the moment") + } + + configFileFullPath, err := filepath.Abs(cfgFile) + if err != nil { + 
log.Fatal(err) + } + awsSetupClient, err := cloudaws.NewSetupClient( + viper.GetString("name"), + viper.GetString("region"), + configFileFullPath, + ) + if err != nil { + log.Fatalf("failed to create aws setup client: %v", err) + } + + awsSubscribeClient, err := cloudaws.NewSubscribeClient( + viper.GetString("name"), + viper.GetString("region"), + viper.GetString("email"), + ) + if err != nil { + log.Fatalf("failed to create aws subscribe client: %v", err) + } + + terraformClient, err := terraform.New(configuredOutputDir) + if err != nil { + log.Fatalf("failed to create terraform client: %v", err) + } + + s := stack.New(viper.GetString("name"), templateRenderer, awsSetupClient, awsSubscribeClient, terraformClient) + if err != nil { + log.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), stack.DefaultDeployTimeout) + defer cancel() + + if err := s.Deploy(ctx); err != nil { + log.Fatal(err) + } + }, +} + +func getOutputDir() (string, error) { + configuredOutputDir := viper.GetString("output-dir") + if configuredOutputDir == "" { + configuredOutputDir = fmt.Sprintf("output_%v", viper.GetString("name")) + } + configuredOutputDir, err := filepath.Abs(configuredOutputDir) + if err != nil { + return "", err + } + if err := os.MkdirAll(configuredOutputDir, os.ModePerm); err != nil { + log.Fatal(err) + } + return configuredOutputDir, nil +} \ No newline at end of file diff --git a/cli/remove.go b/cmd/remove.go similarity index 58% rename from cli/remove.go rename to cmd/remove.go index eea28bfc..3a3bbf44 100644 --- a/cli/remove.go +++ b/cmd/remove.go @@ -1,9 +1,9 @@ -package cli +package cmd import ( + "context" "fmt" - - "github.com/dan-v/rattlesnakeos-stack/stack" + "github.com/dan-v/rattlesnakeos-stack/internal/terraform" "github.com/fatih/color" "github.com/manifoldco/promptui" log "github.com/sirupsen/logrus" @@ -11,7 +11,7 @@ import ( "github.com/spf13/viper" ) -func init() { +func removeInit() { rootCmd.AddCommand(removeCmd) removeCmd.Flags().StringVarP(&name, "name", "n", "", @@ -23,7 +23,7 @@ func init() { var removeCmd = &cobra.Command{ Use: "remove", - Short: "Remove all AWS infrastructure used for building RattlesnakeOS", + Short: "remove all cloud infrastructure used for OS building", Args: func(cmd *cobra.Command, args []string) error { if viper.GetString("name") == "" && name == "" { return fmt.Errorf("must provide a stack name") @@ -41,15 +41,15 @@ var removeCmd = &cobra.Command{ region = viper.GetString("region") } - log.Println("Details of stack to be deleted:") + log.Println("details of stack to be deleted:") fmt.Println("Stack name:", name) fmt.Println("Stack region:", region) fmt.Println("") - color.Red("This is a destructive action! All S3 buckets will be removed and all data will be destroyed. " + - "Make sure to backup anything you might want to keep!") + color.Red("this is a destructive action! all S3 buckets will be removed and all data will be destroyed. " + + "make sure to backup anything you might want to keep!") prompt := promptui.Prompt{ - Label: fmt.Sprintf("This will remove all AWS infrastructure for stack %v. Do you want to continue ", viper.GetString("name")), + Label: fmt.Sprintf("this will remove all AWS infrastructure for stack %v. 
do you want to continue ", viper.GetString("name")), IsConfirm: true, } _, err := prompt.Run() @@ -57,15 +57,22 @@ var removeCmd = &cobra.Command{ log.Fatalf("Exiting %v", err) } - s, err := stack.NewAWSStack(&stack.AWSStackConfig{ - Name: name, - Region: region, - }) + configuredOutputDir, err := getOutputDir() if err != nil { log.Fatal(err) } - if err := s.Destroy(); err != nil { - log.Fatal(err) + + terraformClient, err := terraform.New(configuredOutputDir) + if err != nil { + log.Fatalf("failed to create terraform client: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), terraform.DefaultTerraformDestroyTimeout) + defer cancel() + + _, err = terraformClient.Destroy(ctx) + if err != nil { + log.Fatalf("failed to run terraform destroy: %v", err) } }, } diff --git a/cli/root.go b/cmd/root.go similarity index 59% rename from cli/root.go rename to cmd/root.go index b924cbfa..b7abd79d 100644 --- a/cli/root.go +++ b/cmd/root.go @@ -1,8 +1,9 @@ -package cli +package cmd import ( - "errors" "fmt" + "github.com/dan-v/rattlesnakeos-stack/internal/devices" + "github.com/dan-v/rattlesnakeos-stack/internal/templates" "os" homedir "github.com/mitchellh/go-homedir" @@ -12,20 +13,39 @@ import ( ) var ( - version string cfgFile string defaultConfigFileBase = ".rattlesnakeos" defaultConfigFileFormat = "toml" defaultConfigFile = fmt.Sprintf("%v.%v", defaultConfigFileBase, defaultConfigFileFormat) defaultConfigFileFullPath string configFileFullPath string + supportedDevices *devices.SupportedDevices + stackVersion string + aospVersion string + templatesFiles *templates.TemplateFiles ) // Execute the CLI -func Execute() { +func Execute(_supportedDevices *devices.SupportedDevices, _aospVersion, _stackVersion string, _templatesFiles *templates.TemplateFiles) { + supportedDevices = _supportedDevices + aospVersion = _aospVersion + stackVersion = _stackVersion + templatesFiles = _templatesFiles + + // initialize cobra + cobra.OnInitialize(initConfig) + rootCmd.PersistentFlags().StringVar(&cfgFile, "config-file", "", fmt.Sprintf("config file (default location to look for config is $HOME/%s)", defaultConfigFile)) + + // init sub commands + buildInit() + configInit() + deployInit() + removeInit() + versionInit() + + // execute root if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) + log.Fatal(err) } } @@ -40,10 +60,10 @@ func initConfig() { viper.SetConfigFile(cfgFile) configFileFullPath = cfgFile if _, err := os.Stat(configFileFullPath); os.IsNotExist(err) { - log.Infof("Config file %v doesn't exist yet - creating it", configFileFullPath) + log.Infof("config file %v doesn't exist yet - creating it", configFileFullPath) _, err := os.Create(configFileFullPath) if err != nil { - log.Fatalf("Failed to create config file %v", configFileFullPath) + log.Fatalf("failed to create config file %v", configFileFullPath) } } } else { @@ -55,31 +75,16 @@ func initConfig() { if err := viper.ReadInConfig(); err != nil { if viper.ConfigFileUsed() != "" { - log.Fatalf("Failed to parse config file %v. Error: %v", viper.ConfigFileUsed(), err) - } else { - log.Printf("No config file found. Using CLI options only.") + log.Fatalf("failed to parse config file %v. 
error: %v", viper.ConfigFileUsed(), err) } } if viper.ConfigFileUsed() != "" { - log.Printf("Using config file: %v\n", viper.ConfigFileUsed()) + log.Printf("using config file: %v\n", viper.ConfigFileUsed()) } } -func init() { - cobra.OnInitialize(initConfig) - rootCmd.PersistentFlags().StringVar(&cfgFile, "config-file", "", fmt.Sprintf("config file (default location to look for config is $HOME/%s)", defaultConfigFile)) -} - var rootCmd = &cobra.Command{ Use: "rattlesnakeos-stack", - Short: "A cross platform tool that provisions all of the AWS infrastructure required to build your own privacy " + + Short: "a cross platform tool that provisions all of the cloud infrastructure required to build your own privacy " + "focused Android OS on a continuous basis with OTA updates.", - Version: version, - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("Need to specify a subcommand") - } - return nil - }, - Run: func(cmd *cobra.Command, args []string) {}, } diff --git a/cmd/version.go b/cmd/version.go new file mode 100644 index 00000000..d96dea6b --- /dev/null +++ b/cmd/version.go @@ -0,0 +1,18 @@ +package cmd + +import ( + "fmt" + "github.com/spf13/cobra" +) + +func versionInit() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "print the cli version", + Run: func(cmd *cobra.Command, args []string) { + fmt.Println(stackVersion) + }, +} diff --git a/go.mod b/go.mod index ab104eb5..52bd00e8 100644 --- a/go.mod +++ b/go.mod @@ -1,42 +1,32 @@ module github.com/dan-v/rattlesnakeos-stack -go 1.12 +go 1.16 require ( - github.com/BurntSushi/toml v0.3.1 - github.com/aws/aws-sdk-go v1.21.8 - github.com/axw/gocov v1.0.0 // indirect - github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 - github.com/cpuguy83/go-md2man v1.0.10 - github.com/davecgh/go-spew v1.1.1 - github.com/fatih/color v1.7.0 - github.com/fsnotify/fsnotify v1.4.7 - github.com/inconshreveable/mousetrap v1.0.0 - github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af - github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a - github.com/kr/fs v0.1.0 // indirect - github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a - github.com/magiconair/properties v1.8.0 - github.com/manifoldco/promptui v0.3.2 - github.com/matm/gocov-html v0.0.0-20160206185555-f6dd0fd0ebc7 // indirect - github.com/mattn/go-colorable v0.0.9 - github.com/mattn/go-isatty v0.0.4 + github.com/aws/aws-sdk-go-v2 v1.2.0 + github.com/aws/aws-sdk-go-v2/config v1.1.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.1 + github.com/aws/aws-sdk-go-v2/service/iam v1.1.1 + github.com/aws/aws-sdk-go-v2/service/lambda v1.1.1 + github.com/aws/aws-sdk-go-v2/service/s3 v1.2.0 + github.com/aws/aws-sdk-go-v2/service/sns v1.1.1 + github.com/fatih/color v1.10.0 + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/lunixbochs/vtclean v1.0.0 // indirect + github.com/magiconair/properties v1.8.4 // indirect + github.com/manifoldco/promptui v0.8.0 github.com/mitchellh/go-homedir v1.1.0 - github.com/mitchellh/gox v1.0.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d - github.com/pelletier/go-toml v1.2.0 - github.com/sirupsen/logrus v1.4.2 - github.com/spf13/cobra v0.0.5 - github.com/spf13/pflag v1.0.3 - github.com/spf13/viper v1.4.0 - github.com/stretchr/testify v1.2.2 - golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 - golang.org/x/lint 
v0.0.0-20190409202823-959b441ac422 // indirect - golang.org/x/sys v0.0.0-20190422165155-953cdadca894 - golang.org/x/text v0.3.0 - golang.org/x/tools v0.0.0-20190617190820-da514acc4774 - gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 - gopkg.in/yaml.v2 v2.2.2 + github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/pelletier/go-toml v1.8.1 // indirect + github.com/sirupsen/logrus v1.7.0 + github.com/spf13/afero v1.5.1 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cobra v1.1.1 + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/viper v1.7.1 + github.com/stretchr/testify v1.7.0 + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect + golang.org/x/text v0.3.5 // indirect + gopkg.in/ini.v1 v1.62.0 // indirect + gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 6df82273..94f0df01 100644 --- a/go.sum +++ b/go.sum @@ -1,19 +1,58 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/gometalinter v2.0.11+incompatible h1:ENdXMllZNSVDTJUUVIzBW9CSEpntTrQa76iRsEFLX/M= -github.com/alecthomas/gometalinter v2.0.11+incompatible/go.mod h1:qfIpQGGz3d+NmgyPBqv+LSh50emm1pt72EtcX2vKYQk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aws/aws-sdk-go v1.21.8 h1:Lv6hW2twBhC6mGZAuWtqplEpIIqtVctJg02sE7Qn0Zw= -github.com/aws/aws-sdk-go v1.21.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/axw/gocov v1.0.0 h1:YsqYR66hUmilVr23tu8USgnJIJvnwh3n7j5zRn7x4LU= -github.com/axw/gocov v1.0.0/go.mod 
h1:LvQpEYiwwIb2nYkXY2fDWhg9/AsYqkhmrCshjlUJECE= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go-v2 v1.2.0 h1:BS+UYpbsElC82gB+2E2jiCBg36i8HlubTB/dO/moQ9c= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config v1.1.1 h1:ZAoq32boMzcaTW9bcUacBswAmHTbvlvDJICgHFZuECo= +github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1 h1:NbvWIM1Mx6sNPTxowHgS2ewXCRp+NGTzUYb/96FZJbY= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 h1:EtEU7WRaWliitZh2nmuxEXrN0Cb8EgPUFGIoTMeqbzI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.1 h1:xZYDtbub5yhn+ASvD26m76Cgb0k+0+ShE+nZwK9djUQ= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.1/go.mod h1:L7nNXGNEV0lkTauKM/KcEIZkT262pckC0YNykwAtX20= +github.com/aws/aws-sdk-go-v2/service/iam v1.1.1 h1:dfpLUOEtz+DlRpeAA9vg/tM/VjAcDGBE47m2ZbdXrZU= +github.com/aws/aws-sdk-go-v2/service/iam v1.1.1/go.mod h1:vBPRxb7qWfonEjDJR+ckM+1b1z2gXOogQjmTIPw15L4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.1 h1:q+3dVb1s3piv/Q/Ft0+OjU5iKItBRfCvU5wNLQUyIbA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.1/go.mod h1:zurGx7QI3Bk2OFwswSXl3PtJDdgD3QzjkfskiukJ2Mg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 h1:4AH9fFjUlVktQMznF+YN33aWNXaR4VgDXyP28qokJC0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.0 h1:6yUvdqgAAWoKAotui7AI4QvJASrjI6rkJtweSyjH6M4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.1.0/go.mod h1:q+4U7Z1uD6Iimym8uPQp0Ong/XICxInhzIKVSwn7bUU= +github.com/aws/aws-sdk-go-v2/service/lambda v1.1.1 h1:ptubVb1eLQgZh7U4i+k2vpf3PlL4ZoTmGdTj+VowqqM= +github.com/aws/aws-sdk-go-v2/service/lambda v1.1.1/go.mod h1:iSHLnnmJNKoAUdzKnUFh4rIGM3V58fxa+XCYtRpeFX8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.2.0 h1:p20kkvl+DwV3wYsnLGcmsspBzWGD6EsWKi/W+09Z1NI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.2.0/go.mod h1:nHAD0aOk81kN3xdNYzKg4g9JISKSwRdUUDEXOgIojf4= +github.com/aws/aws-sdk-go-v2/service/sns v1.1.1 h1:5Js3R6coB5uI/h/Gua2Vm+uyuZrgmXs80zqtkOBumxk= +github.com/aws/aws-sdk-go-v2/service/sns v1.1.1/go.mod h1:V2HdUZQcKhcF58AwYU78fkQ5Drfw3qAGMUd9o1uvrf8= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 h1:37QubsarExl5ZuCBlnRP+7l1tNwZPBSTqpTBrPH98RU= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 h1:TJoIfnIFubCX0ACVeJ0w46HEH5MwjwYN4iFhuYIhfIY= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= +github.com/aws/smithy-go v1.1.0 h1:D6CSsM3gdxaGaqXnPgOBCeL6Mophqzu7KJOu7zW78sU= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -21,25 +60,26 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -48,38 +88,69 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20181026193005-c67002cb31c3 h1:I4BOK3PBMjhWfQM2zPJKK7lOBGsrsvOB7kBELP33hiE= -github.com/golang/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf h1:7+FW5aGwISbqUtkfmIpZJGRgNFg2ioYPvFaUxdqpDsg= -github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= -github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc h1:cJlkeAx1QYgO5N80aF5xRGstVsRQwgLR7uA2FnP1ZjY= -github.com/gordonklaus/ineffassign v0.0.0-20180909121442-1003c8bd00dc/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= -github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go 
v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU= github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -87,36 +158,48 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw= github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/manifoldco/promptui v0.3.2 h1:rir7oByTERac6jhpHUPErHuopoRDvO3jxS+FdadEns8= -github.com/manifoldco/promptui v0.3.2/go.mod h1:8JU+igZ+eeiiRku4T5BjtKh2ms8sziGpSYl1gN8Bazw= -github.com/matm/gocov-html v0.0.0-20160206185555-f6dd0fd0ebc7 h1:IpusRbIZ1Z5j96YpxRD7vTwpfR7Cv3vgETmilcHF5BE= -github.com/matm/gocov-html v0.0.0-20160206185555-f6dd0fd0ebc7/go.mod h1:2amKdhwK7Jz2kRhLYmUH2NIOeBs6Tmhpy5UgDXhRbHc= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY= +github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/manifoldco/promptui v0.8.0 h1:R95mMF+McvXZQ7j1g8ucVZE1gLP3Sv6j9vlF9kyRqQo= +github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-colorable v0.1.8 
h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= -github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= -github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -127,94 +210,189 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/afero v1.5.1 h1:VHu76Lk0LSP1x254maIu2bplkWpfBWI+B+6fdoZprcg= +github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/cast v1.3.1 
h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tools/godep v0.0.0-20180126220526-ce0bfadeb516 h1:h4a8ZFxjlRVGsFGP4l/AdnoUYcF3pfxzyepS3oKZ8mE= -github.com/tools/godep v0.0.0-20180126220526-ce0bfadeb516/go.mod h1:OGh2HQGYVW+2+ZdB+DgJhI75kivkKWtVcIxI/pesDsY= -github.com/tsenart/deadcode v0.0.0-20160724212837-210d2dc333e9 h1:vY5WqiEon0ZSTGM3ayVVi+twaHKHDFUVloaQ/wug9/c= -github.com/tsenart/deadcode v0.0.0-20160724212837-210d2dc333e9/go.mod h1:q+QjxYvZ+fpjMXqs+XEriussHjSYqeXVnAdSV1tkMYk= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text 
v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181122213734-04b5d21e00f1/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774 h1:CQVOmarCBFzTx0kbOU0ru54Cvot8SdSrNYjZPhQl+gk= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c 
h1:vTxShRUnK60yd8DZU+f95p1zSLj814+5CuEh7NjF2/Y= -gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/cloudaws/ec2.go b/internal/cloudaws/ec2.go new file mode 100644 index 00000000..e66850d4 --- /dev/null +++ b/internal/cloudaws/ec2.go @@ -0,0 +1,73 @@ +package cloudaws + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "strings" +) + +// TerminateEC2Instance terminates the specified ec2 instance +func TerminateEC2Instance(ctx context.Context, instanceID, region string) (*ec2.TerminateInstancesOutput, error) { + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + if err != nil { + return nil, err + } + + ec2Client := ec2.NewFromConfig(cfg) + output, err := ec2Client.TerminateInstances(ctx, &ec2.TerminateInstancesInput{ + InstanceIds: []string{instanceID}, + }) + if err != nil { + return nil, fmt.Errorf("failed to terminate ec2 instance '%v' in region '%v': output:%v error:%v", instanceID, region, output, err) + } + + return output, nil +} + +// GetRunningEC2InstancesWithProfileName returns a list of instances running with a profile name +func GetRunningEC2InstancesWithProfileName(ctx context.Context, profileName, listRegions string) ([]string, error) { + cfg, err := 
config.LoadDefaultConfig(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    var instances []string
+    for _, region := range strings.Split(listRegions, ",") {
+        ec2Client := ec2.NewFromConfig(cfg, func(o *ec2.Options) {
+            o.Region = region
+        })
+        resp, err := ec2Client.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
+            Filters: []ec2types.Filter{
+                {
+                    Name:   aws.String("instance-state-name"),
+                    Values: []string{"running"}},
+            },
+        },
+        )
+        if err != nil {
+            return nil, fmt.Errorf("failed to describe ec2 instances in region %v: %w", region, err)
+        }
+        if len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 {
+            continue
+        }
+
+        for _, reservation := range resp.Reservations {
+            for _, instance := range reservation.Instances {
+                if instance.IamInstanceProfile == nil || instance.IamInstanceProfile.Arn == nil {
+                    continue
+                }
+
+                instanceIamProfileName := strings.Split(*instance.IamInstanceProfile.Arn, "/")[1]
+                if instanceIamProfileName == profileName {
+                    instances = append(instances, fmt.Sprintf("instance='%v' ip='%v' region='%v' launched='%v'",
+                        *instance.InstanceId, *instance.PublicIpAddress, region, *instance.LaunchTime))
+                }
+            }
+        }
+    }
+    return instances, nil
+}
diff --git a/internal/cloudaws/lambda.go b/internal/cloudaws/lambda.go
new file mode 100644
index 00000000..47b535af
--- /dev/null
+++ b/internal/cloudaws/lambda.go
@@ -0,0 +1,30 @@
+package cloudaws
+
+import (
+    "context"
+    "fmt"
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/config"
+    "github.com/aws/aws-sdk-go-v2/service/lambda"
+    lambdatypes "github.com/aws/aws-sdk-go-v2/service/lambda/types"
+)
+
+// ExecuteLambdaFunction invokes a Lambda function synchronously and returns its output
+func ExecuteLambdaFunction(ctx context.Context, functionName, region string, payload []byte) (*lambda.InvokeOutput, error) {
+    cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
+    if err != nil {
+        return nil, err
+    }
+
+    lambdaClient := lambda.NewFromConfig(cfg)
+    output, err := lambdaClient.Invoke(ctx, &lambda.InvokeInput{
+        FunctionName:   aws.String(functionName),
+        InvocationType: lambdatypes.InvocationTypeRequestResponse,
+        Payload:        payload,
+    })
+    if err != nil {
+        return output, fmt.Errorf("failed to execute lambda function: %w", err)
+    }
+
+    return output, nil
+}
diff --git a/internal/cloudaws/regions.go b/internal/cloudaws/regions.go
new file mode 100644
index 00000000..b996252b
--- /dev/null
+++ b/internal/cloudaws/regions.go
@@ -0,0 +1,74 @@
+package cloudaws
+
+var (
+    supportedRegions = map[string]Region{}
+    regionSortOrder  []string
+)
+
+// Region contains details about an AWS region
+type Region struct {
+    // Name is the name of the region (e.g.
us-west-2) + Name string + // AMI is the AMI to use in this region + AMI string +} + +func init() { + // curl -s https://cloud-images.ubuntu.com/locator/ec2/releasesTable | grep '20.04' | grep 'amd64' | grep 'hvm:ebs-ssd' | awk -F'"' '{print $2, $15}' | awk -F"launchAmi=" '{print $1,$2}' | awk '{print $1,$3}' | awk -F'\' '{print $1}' | awk '{printf "Region{\"%s\", \"%s\"},\n",$1,$2 }' + addRegions( + Region{"af-south-1", "ami-0f072aafc9dfcb24f"}, + Region{"ap-east-1", "ami-04864d873127e4b0a"}, + Region{"ap-northeast-1", "ami-0e039c7d64008bd84"}, + Region{"ap-northeast-2", "ami-067abcae434ee508b"}, + Region{"ap-northeast-3", "ami-08dfee60cf1895207"}, + Region{"ap-south-1", "ami-073c8c0760395aab8"}, + Region{"ap-southeast-1", "ami-09a6a7e49bd29554b"}, + Region{"ap-southeast-2", "ami-0d767dd04ac152743"}, + Region{"ca-central-1", "ami-0df58bd52157c6e83"}, + Region{"eu-central-1", "ami-0932440befd74cdba"}, + Region{"eu-north-1", "ami-09b44b5f46219ee86"}, + Region{"eu-south-1", "ami-0e0812e2467b24796"}, + Region{"eu-west-1", "ami-022e8cc8f0d3c52fd"}, + Region{"eu-west-2", "ami-005383956f2e5fb96"}, + Region{"eu-west-3", "ami-00f6fe7d6cbb56a78"}, + Region{"me-south-1", "ami-07bf297712e054a41"}, + Region{"sa-east-1", "ami-0e765cee959bcbfce"}, + Region{"us-east-1", "ami-03d315ad33b9d49c4"}, + Region{"us-east-2", "ami-0996d3051b72b5b2c"}, + Region{"us-west-1", "ami-0ebef2838fb2605b7"}, + Region{"us-west-2", "ami-0928f4202481dfdf6"}, + Region{"cn-north-1", "ami-0592ccadb56e65f8d"}, + Region{"cn-northwest-1", "ami-007d0f254ea0f8588"}, + Region{"us-gov-west-1", "ami-a7edd7c6"}, + Region{"us-gov-east-1", "ami-c39973b2"}, + ) +} + +func addRegions(regions ...Region) { + for _, region := range regions { + supportedRegions[region.Name] = region + regionSortOrder = append(regionSortOrder, region.Name) + } +} + +// GetSupportedRegions returns a list of all supported regions +func GetSupportedRegions() []string { + return regionSortOrder +} + +// IsSupportedRegion returns whether a specified region is supported +func IsSupportedRegion(region string) bool { + if _, ok := supportedRegions[region]; !ok { + return false + } + return true +} + +// GetAMIs returns a region to AMI mapping for all supported regions +func GetAMIs() map[string]string { + amis := map[string]string{} + for _, region := range regionSortOrder { + amis[region] = supportedRegions[region].AMI + } + return amis +} diff --git a/internal/cloudaws/setup_client.go b/internal/cloudaws/setup_client.go new file mode 100644 index 00000000..d4cec2f6 --- /dev/null +++ b/internal/cloudaws/setup_client.go @@ -0,0 +1,157 @@ +package cloudaws + +import ( + "bytes" + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/iam" + iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "net/http" + "os" + "time" +) + +const ( + // DefaultInstanceRegions is the default regions to use for spot instances + DefaultInstanceRegions = "us-west-2,us-west-1,us-east-2" +) + +// SetupClient provides non Terraform cloud specific setup +type SetupClient struct { + awsConfig aws.Config + name string + region string + configFile string +} + +// NewSetupClient returns an initialized SetupClient +func NewSetupClient(name, region, configFile string) (*SetupClient, error) { + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(region)) + if err != nil { + return 
nil, fmt.Errorf("failed to load default aws config: %w", err)
+    }
+
+    if err := checkS3Access(cfg); err != nil {
+        return nil, err
+    }
+
+    return &SetupClient{
+        awsConfig:  cfg,
+        name:       name,
+        region:     region,
+        configFile: configFile,
+    }, nil
+}
+
+// Setup executes all the required non Terraform cloud specific setup
+func (c *SetupClient) Setup(ctx context.Context) error {
+    if err := c.s3BucketSetup(ctx); err != nil {
+        return err
+    }
+    if err := c.backupConfigFile(ctx); err != nil {
+        return err
+    }
+    if err := c.serviceLinkedRolesSetup(ctx); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (c *SetupClient) s3BucketSetup(ctx context.Context) error {
+    s3Client := s3.NewFromConfig(c.awsConfig)
+    _, err := s3Client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: &c.name})
+    if err != nil {
+        var notFound *s3types.NotFound
+        if !errors.As(err, &notFound) {
+            return fmt.Errorf("unknown S3 error: %w", err)
+        }
+
+        bucketInput := &s3.CreateBucketInput{
+            Bucket: &c.name,
+        }
+        if c.region != "us-east-1" {
+            bucketInput.CreateBucketConfiguration = &s3types.CreateBucketConfiguration{
+                LocationConstraint: s3types.BucketLocationConstraint(c.region),
+            }
+        }
+
+        output, err := s3Client.CreateBucket(ctx, bucketInput)
+        if err != nil {
+            return fmt.Errorf("failed to create bucket %v - note that this bucket name must be globally unique: output:%v err:%w", c.name, output, err)
+        }
+    }
+    return nil
+}
+
+func (c *SetupClient) backupConfigFile(ctx context.Context) error {
+    s3Client := s3.NewFromConfig(c.awsConfig)
+
+    file, err := os.Open(c.configFile)
+    if err != nil {
+        return err
+    }
+    defer file.Close()
+
+    fileInfo, _ := file.Stat()
+    var size int64 = fileInfo.Size()
+    buffer := make([]byte, size)
+    _, err = file.Read(buffer)
+    if err != nil {
+        return err
+    }
+
+    _, err = s3Client.PutObject(ctx, &s3.PutObjectInput{
+        Bucket:               aws.String(c.name),
+        Key:                  aws.String("stack-config.toml"),
+        ACL:                  s3types.ObjectCannedACLPrivate,
+        Body:                 bytes.NewReader(buffer),
+        ContentLength:        size,
+        ContentType:          aws.String(http.DetectContentType(buffer)),
+        ContentDisposition:   aws.String("attachment"),
+        ServerSideEncryption: s3types.ServerSideEncryptionAes256,
+    })
+    return err
+}
+
+func (c *SetupClient) serviceLinkedRolesSetup(ctx context.Context) error {
+    iamClient := iam.NewFromConfig(c.awsConfig)
+
+    _, err := iamClient.CreateServiceLinkedRole(ctx, &iam.CreateServiceLinkedRoleInput{
+        AWSServiceName: aws.String("spot.amazonaws.com"),
+    })
+    if err != nil {
+        // an InvalidInputException indicates the role already exists and can be ignored
+        var invalidInputException *iamtypes.InvalidInputException
+        if !errors.As(err, &invalidInputException) {
+            return fmt.Errorf("failed to create spot.amazonaws.com service linked role: %w", err)
+        }
+    }
+
+    _, err = iamClient.CreateServiceLinkedRole(ctx, &iam.CreateServiceLinkedRoleInput{
+        AWSServiceName: aws.String("spotfleet.amazonaws.com"),
+    })
+    if err != nil {
+        // an InvalidInputException indicates the role already exists and can be ignored
+        var invalidInputException *iamtypes.InvalidInputException
+        if !errors.As(err, &invalidInputException) {
+            return fmt.Errorf("failed to create spotfleet.amazonaws.com service linked role: %w", err)
+        }
+    }
+
+    return nil
+}
+
+func checkS3Access(cfg aws.Config) error {
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+    defer cancel()
+
+    s3Client := s3.NewFromConfig(cfg)
+    _, err := s3Client.ListBuckets(ctx, &s3.ListBucketsInput{})
+    if err != nil {
+        return fmt.Errorf("unable to list S3 buckets - make sure you have valid admin AWS credentials: %w", err)
+    }
+    return nil
+}
diff --git a/internal/cloudaws/subscribe_client.go b/internal/cloudaws/subscribe_client.go
new
file mode 100644
index 00000000..1120fde7
--- /dev/null
+++ b/internal/cloudaws/subscribe_client.go
@@ -0,0 +1,90 @@
+package cloudaws
+
+import (
+    "context"
+    "fmt"
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/config"
+    "github.com/aws/aws-sdk-go-v2/service/sns"
+    "strings"
+    "time"
+)
+
+// SubscribeClient is a client that allows subscribing an email address to an SNS topic
+type SubscribeClient struct {
+    cfg    aws.Config
+    name   string
+    region string
+    email  string
+}
+
+// NewSubscribeClient provides an initialized SubscribeClient
+func NewSubscribeClient(name, region, email string) (*SubscribeClient, error) {
+    cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(region))
+    if err != nil {
+        return nil, fmt.Errorf("failed to load default aws config: %w", err)
+    }
+
+    if err := checkSNSAccess(cfg); err != nil {
+        return nil, err
+    }
+
+    return &SubscribeClient{
+        cfg:    cfg,
+        name:   name,
+        region: region,
+        email:  email,
+    }, nil
+}
+
+// Subscribe looks for a topic matching the stack name and subscribes the email address to it. It returns true if a new subscription was created, otherwise false.
+func (c *SubscribeClient) Subscribe(ctx context.Context) (bool, error) {
+    snsClient := sns.NewFromConfig(c.cfg)
+    resp, err := snsClient.ListTopics(ctx, &sns.ListTopicsInput{NextToken: aws.String("")})
+    if err != nil {
+        return false, fmt.Errorf("failed to list sns topics: %w", err)
+    }
+
+    for _, topic := range resp.Topics {
+        if c.name == strings.Split(*topic.TopicArn, ":")[5] {
+            resp, err := snsClient.ListSubscriptionsByTopic(ctx, &sns.ListSubscriptionsByTopicInput{
+                NextToken: aws.String(""),
+                TopicArn:  aws.String(*topic.TopicArn),
+            })
+            if err != nil {
+                return false, fmt.Errorf("failed to list SNS subscriptions for topic %v: %w", *topic.TopicArn, err)
+            }
+
+            // if the subscription already exists there is nothing to do
+            for _, subscription := range resp.Subscriptions {
+                if *subscription.Endpoint == c.email {
+                    return false, nil
+                }
+            }
+
+            // subscribe if not already set up
+            _, err = snsClient.Subscribe(ctx, &sns.SubscribeInput{
+                Protocol: aws.String("email"),
+                TopicArn: aws.String(*topic.TopicArn),
+                Endpoint: aws.String(c.email),
+            })
+            if err != nil {
+                return false, fmt.Errorf("failed to setup email notifications: %w", err)
+            }
+            return true, nil
+        }
+    }
+    return false, fmt.Errorf("failed to subscribe to notifications - unable to find topic %v", c.name)
+}
+
+func checkSNSAccess(cfg aws.Config) error {
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+    defer cancel()
+
+    snsClient := sns.NewFromConfig(cfg)
+    _, err := snsClient.ListTopics(ctx, &sns.ListTopicsInput{NextToken: aws.String("")})
+    if err != nil {
+        return fmt.Errorf("unable to list SNS topics - make sure you have valid admin AWS credentials: %w", err)
+    }
+    return nil
+}
diff --git a/internal/devices/devices.go b/internal/devices/devices.go
new file mode 100644
index 00000000..109c284f
--- /dev/null
+++ b/internal/devices/devices.go
@@ -0,0 +1,110 @@
+package devices
+
+import (
+    "errors"
+    "fmt"
+    "strings"
+)
+
+const (
+    // AVBModeChained is chained AVB mode in build script
+    AVBModeChained = "vbmeta_chained"
+    // AVBModeChainedV2 is chained AVB v2 mode in build script
+    AVBModeChainedV2 = "vbmeta_chained_v2"
+    // ExtraOTARetrofitDynamicPartitions is an additional OTA option to retrofit dynamic partitions
+    ExtraOTARetrofitDynamicPartitions = "(--retrofit_dynamic_partitions)"
+)
+
+var (
+    // ErrMissingName is returned if device is missing name
+    ErrMissingName = errors.New("supported device is missing required name")
+    //
ErrMissingFriendly is returned if friendly name for device is missing + ErrMissingFriendly = errors.New("supported device is missing required friendly name") + // ErrMissingFamily is returned if family name for device is missing + ErrMissingFamily = errors.New("supported device is missing required family name") + // ErrMissingAVBMode is returned if avb mode is missing for device + ErrMissingAVBMode = errors.New("supported device is missing required avb mode") +) + +// Device contains details and metadata about a device +type Device struct { + Name string + Friendly string + Family string + AVBMode string + ExtraOTA string +} + +// SupportedDevices contains all the supported devices, their details, and sort order +type SupportedDevices struct { + supportedDevices map[string]*Device + deviceSortOrder []string +} + +// NewSupportedDevices takes in all devices, validates them, and returns initialized SupportedDevices +// which contains helper functions to get details about the supported devices +func NewSupportedDevices(devices ...*Device) (*SupportedDevices, error) { + var deviceSortOrder []string + supportedDevices := map[string]*Device{} + for _, device := range devices { + if device.Name == "" { + return nil, ErrMissingName + } + if device.Friendly == "" { + return nil, fmt.Errorf("'%v': %w", device.Name, ErrMissingFriendly) + } + if device.Family == "" { + return nil, fmt.Errorf("'%v': %w", device.Name, ErrMissingFamily) + } + if device.AVBMode == "" { + return nil, fmt.Errorf("'%v': %w", device.Name, ErrMissingAVBMode) + } + deviceSortOrder = append(deviceSortOrder, device.Name) + supportedDevices[device.Name] = device + } + + return &SupportedDevices{ + supportedDevices: supportedDevices, + deviceSortOrder: deviceSortOrder, + }, nil +} + +// IsSupportedDevice takes device name (e.g. redfin) and returns boolean support value +func (s *SupportedDevices) IsSupportedDevice(device string) bool { + if _, ok := s.supportedDevices[device]; !ok { + return false + } + return true +} + +// GetDeviceDetails takes device name (e.g. redfin) and returns full Device details +func (s *SupportedDevices) GetDeviceDetails(device string) *Device { + if _, ok := s.supportedDevices[device]; !ok { + return nil + } + return s.supportedDevices[device] +} + +// GetDeviceFriendlyNames returns list of all supported device friendly names (e.g. Pixel 4a) +func (s *SupportedDevices) GetDeviceFriendlyNames() []string { + var output []string + for _, device := range s.deviceSortOrder { + output = append(output, s.supportedDevices[device].Friendly) + } + return output +} + +// GetDeviceCodeNames returns list of all supported devices code names (e.g. 
redfin) +func (s *SupportedDevices) GetDeviceCodeNames() []string { + return s.deviceSortOrder +} + +// GetSupportedDevicesOutput returns a nicely formatted comma separated list of codename (friendly name) +func (s *SupportedDevices) GetSupportedDevicesOutput() string { + var supportDevicesOutput []string + supportedDevicesFriendly := s.GetDeviceFriendlyNames() + for i, d := range s.deviceSortOrder { + supportDevicesOutput = append(supportDevicesOutput, fmt.Sprintf("%v (%v)", d, supportedDevicesFriendly[i])) + } + return strings.Join(supportDevicesOutput, ", ") +} diff --git a/internal/devices/devices_test.go b/internal/devices/devices_test.go new file mode 100644 index 00000000..52a727be --- /dev/null +++ b/internal/devices/devices_test.go @@ -0,0 +1,259 @@ +package devices + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSupportedDevices_IsSupportedDevice(t *testing.T) { + tests := map[string]struct { + devices []*Device + input string + expected bool + expectedErr error + }{ + "devices that is supported returns true": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + input: "name", + expected: true, + expectedErr: nil, + }, + "devices that is not supported returns false": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + input: "unsupported", + expected: false, + expectedErr: nil, + }, + "if no supported devices returns false": { + devices: []*Device{}, + input: "empty", + expected: false, + expectedErr: nil, + }, + "missing name returns error": { + devices: []*Device{ + &Device{ + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + input: "name", + expected: false, + expectedErr: ErrMissingName, + }, + "missing friendly name returns error": { + devices: []*Device{ + &Device{ + Name: "name", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + input: "name", + expected: false, + expectedErr: ErrMissingFriendly, + }, + "missing family returns error": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + AVBMode: AVBModeChainedV2, + }, + }, + input: "name", + expected: false, + expectedErr: ErrMissingFamily, + }, + "missing avb mode returns error": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + }, + }, + input: "name", + expected: false, + expectedErr: ErrMissingAVBMode, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + supportedDevices, err := NewSupportedDevices(tc.devices...) 
+ assert.ErrorIs(t, err, tc.expectedErr) + if err == nil { + assert.Equal(t, tc.expected, supportedDevices.IsSupportedDevice(tc.input)) + } + }) + } +} + +func TestSupportedDevices_GetDeviceDetails(t *testing.T) { + tests := map[string]struct { + devices []*Device + input string + expected *Device + }{ + "valid device returns with details": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + input: "name", + expected: &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + "non existing device returns nil": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + }, + input: "non existing", + expected: nil, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + supportedDevices, err := NewSupportedDevices(tc.devices...) + assert.Nil(t, err) + + device := supportedDevices.GetDeviceDetails(tc.input) + assert.Equal(t, tc.expected, device) + }) + } +} + +func TestSupportedDevices_GetDeviceFriendlyNames(t *testing.T) { + tests := map[string]struct { + devices []*Device + expected []string + }{ + "returns friendly names": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + &Device{ + Name: "name 2", + Friendly: "friendly 2", + Family: "family 2", + AVBMode: AVBModeChainedV2, + }, + }, + expected: []string{"friendly", "friendly 2"}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + supportedDevices, err := NewSupportedDevices(tc.devices...) + assert.Nil(t, err) + + device := supportedDevices.GetDeviceFriendlyNames() + assert.Equal(t, tc.expected, device) + }) + } +} + +func TestSupportedDevices_GetDeviceCodeNames(t *testing.T) { + tests := map[string]struct { + devices []*Device + expected []string + }{ + "returns friendly names": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + &Device{ + Name: "name 2", + Friendly: "friendly 2", + Family: "family 2", + AVBMode: AVBModeChainedV2, + }, + }, + expected: []string{"name", "name 2"}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + supportedDevices, err := NewSupportedDevices(tc.devices...) + assert.Nil(t, err) + + device := supportedDevices.GetDeviceCodeNames() + assert.Equal(t, tc.expected, device) + }) + } +} + +func TestSupportedDevices_GetSupportedDevicesOutput(t *testing.T) { + tests := map[string]struct { + devices []*Device + expected string + }{ + "returns expected output": { + devices: []*Device{ + &Device{ + Name: "name", + Friendly: "friendly", + Family: "family", + AVBMode: AVBModeChainedV2, + }, + &Device{ + Name: "name 2", + Friendly: "friendly 2", + Family: "family 2", + AVBMode: AVBModeChainedV2, + }, + }, + expected: "name (friendly), name 2 (friendly 2)", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + supportedDevices, err := NewSupportedDevices(tc.devices...) 
+ assert.Nil(t, err) + + output := supportedDevices.GetSupportedDevicesOutput() + assert.Equal(t, tc.expected, output) + }) + } +} diff --git a/internal/stack/stack.go b/internal/stack/stack.go new file mode 100644 index 00000000..d4dedeb2 --- /dev/null +++ b/internal/stack/stack.go @@ -0,0 +1,83 @@ +package stack + +import ( + "context" + log "github.com/sirupsen/logrus" + "time" +) + +const ( + // DefaultDeployTimeout is the default timeout for deployments + DefaultDeployTimeout = time.Minute * 5 +) + +// TemplateRenderer is an interface for template rendering +type TemplateRenderer interface { + RenderAll() error +} + +// CloudSetup is an interface for cloud setup +type CloudSetup interface { + Setup(ctx context.Context) error +} + +// CloudSubscriber is an interface for cloud subscription +type CloudSubscriber interface { + Subscribe(ctx context.Context) (bool, error) +} + +// TerraformApplier is an interface for applying terraform +type TerraformApplier interface { + Apply(ctx context.Context) ([]byte, error) +} + +// Stack contains all the necessary pieces to generate and deploy a stack +type Stack struct { + name string + templateRenderer TemplateRenderer + cloudSetup CloudSetup + cloudSubscriber CloudSubscriber + terraformApplier TerraformApplier +} + +// New returns an initialized Stack that is ready for deployment +func New(name string, templateRenderer TemplateRenderer, cloudSetup CloudSetup, cloudSubscriber CloudSubscriber, terraformApplier TerraformApplier) *Stack { + return &Stack{ + name: name, + templateRenderer: templateRenderer, + cloudSetup: cloudSetup, + cloudSubscriber: cloudSubscriber, + terraformApplier: terraformApplier, + } +} + +// Deploy renders files, runs cloud setup, runs terraform apply, and ensures notifications are setup +func (s *Stack) Deploy(ctx context.Context) error { + log.Infof("Rendering all templates files for stack %v", s.name) + if err := s.templateRenderer.RenderAll(); err != nil { + return err + } + + log.Infof("Creating/updating non terraform resources for stack %v", s.name) + if err := s.cloudSetup.Setup(ctx); err != nil { + return err + } + + log.Infof("Executing terraform apply for stack %v", s.name) + if _, err := s.terraformApplier.Apply(ctx); err != nil { + return err + } + + log.Infof("Ensuring notifications enabled for stack %v", s.name) + subscribed, err := s.cloudSubscriber.Subscribe(ctx) + if err != nil { + return err + } + if subscribed { + log.Infof("Successfully setup email notifications for stack %v - you'll need to click link in "+ + "confirmation email to get notifications.", s.name) + } + + log.Infof("Successfully deployed/updated resources for stack %v", s.name) + return nil +} diff --git a/internal/stack/stack_test.go b/internal/stack/stack_test.go new file mode 100644 index 00000000..c55ed79f --- /dev/null +++ b/internal/stack/stack_test.go @@ -0,0 +1,125 @@ +package stack_test + +import ( + "context" + "errors" + "github.com/dan-v/rattlesnakeos-stack/internal/stack" + "github.com/stretchr/testify/assert" + "testing" +) + +var ( + errTemplateRender = errors.New("template renderer error") + errCloudSetup = errors.New("cloud setup error") + errCloudSubscribe = errors.New("cloud subscriber error") + errTerraformApply = errors.New("terraform apply error") +) + +func TestDeploy(t *testing.T) { + tests := map[string]struct { + stack *stack.Stack + expected error + }{ + "deploy with no errors and subscribe true": { + stack: stack.New( + "test", + &fakeTemplateRenderer{err: nil}, + &fakeCloudSetup{err: nil}, + 
&fakeCloudSubscriber{subscribed: true, err: nil}, + &fakeTerraformApplier{output: []byte("test"), err: nil}, + ), + expected: nil, + }, + "deploy with no errors and subscribe false": { + stack: stack.New( + "test", + &fakeTemplateRenderer{err: nil}, + &fakeCloudSetup{err: nil}, + &fakeCloudSubscriber{subscribed: false, err: nil}, + &fakeTerraformApplier{output: []byte("test"), err: nil}, + ), + expected: nil, + }, + "template render error": { + stack: stack.New( + "test", + &fakeTemplateRenderer{err: errTemplateRender}, + &fakeCloudSetup{err: nil}, + &fakeCloudSubscriber{subscribed: false, err: nil}, + &fakeTerraformApplier{output: []byte("test"), err: nil}, + ), + expected: errTemplateRender, + }, + "cloud setup error": { + stack: stack.New( + "test", + &fakeTemplateRenderer{err: nil}, + &fakeCloudSetup{err: errCloudSetup}, + &fakeCloudSubscriber{subscribed: false, err: nil}, + &fakeTerraformApplier{output: []byte("test"), err: nil}, + ), + expected: errCloudSetup, + }, + "cloud subscribe error": { + stack: stack.New( + "test", + &fakeTemplateRenderer{err: nil}, + &fakeCloudSetup{err: nil}, + &fakeCloudSubscriber{subscribed: false, err: errCloudSubscribe}, + &fakeTerraformApplier{output: []byte("test"), err: nil}, + ), + expected: errCloudSubscribe, + }, + "terraform apply error": { + stack: stack.New( + "test", + &fakeTemplateRenderer{err: nil}, + &fakeCloudSetup{err: nil}, + &fakeCloudSubscriber{subscribed: false, err: nil}, + &fakeTerraformApplier{output: []byte("test"), err: errTerraformApply}, + ), + expected: errTerraformApply, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + err := tc.stack.Deploy(context.Background()) + assert.ErrorIs(t, err, tc.expected) + }) + } +} + +type fakeTemplateRenderer struct { + err error +} + +func (f *fakeTemplateRenderer) RenderAll() error { + return f.err +} + +type fakeCloudSetup struct { + err error +} + +func (f *fakeCloudSetup) Setup(ctx context.Context) error { + return f.err +} + +type fakeCloudSubscriber struct { + subscribed bool + err error +} + +func (f *fakeCloudSubscriber) Subscribe(ctx context.Context) (bool, error) { + return f.subscribed, f.err +} + +type fakeTerraformApplier struct { + output []byte + err error +} + +func (f *fakeTerraformApplier) Apply(ctx context.Context) ([]byte, error) { + return f.output, f.err +} diff --git a/internal/templates/templates.go b/internal/templates/templates.go new file mode 100644 index 00000000..c71ba337 --- /dev/null +++ b/internal/templates/templates.go @@ -0,0 +1,284 @@ +package templates + +import ( + "archive/zip" + "bytes" + "encoding/json" + "errors" + "fmt" + "github.com/dan-v/rattlesnakeos-stack/internal/cloudaws" + "github.com/dan-v/rattlesnakeos-stack/internal/devices" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "text/template" +) + +const ( + // DefaultReleasesURLTemplate is the URL to use for gathering latest versions of components for builds. This is + // template string and should be provided a branch name (e.g. 
11.0)
+    DefaultReleasesURLTemplate = "https://raw.githubusercontent.com/RattlesnakeOS/releases/%v/latest.json"
+    // DefaultCoreConfigRepo is the default core config repo to use
+    DefaultCoreConfigRepo = "https://github.com/rattlesnakeos/core-config-repo"
+    // DefaultRattlesnakeOSStackReleaseURL is the default rattlesnakeos-stack api releases github page
+    DefaultRattlesnakeOSStackReleaseURL = "https://api.github.com/repos/dan-v/rattlesnakeos-stack/releases/latest"
+)
+
+const (
+    defaultBuildScriptFilename = "build.sh"
+    defaultLambdaFunctionFilename = "lambda_spot_function.py"
+    defaultLambdaZipFilename = "lambda_spot.zip"
+    defaultTFMainFilename = "main.tf"
+    defaultGeneratedVarReplaceString = "#### ####"
+)
+
+var (
+    // ErrTemplateExecute is returned if there is an error executing template
+    ErrTemplateExecute = errors.New("error executing template")
+)
+
+// TemplateFiles are all of the files from the root templates directory
+type TemplateFiles struct {
+    // BuildScript is just the raw build shell script (no templating in this file)
+    BuildScript string
+    // BuildScriptVars is a template file with variables and functions that gets inserted into build script after render
+    BuildScriptVars string
+    // LambdaTemplate is a template file of the python Lambda function
+    LambdaTemplate string
+    // TerraformTemplate is a template file of the Terraform code
+    TerraformTemplate string
+}
+
+// Config contains all of the template config values
+type Config struct {
+    // Version is the version of stack
+    Version string
+    // Name is the name of the stack
+    Name string
+    // Region is the region to deploy stack
+    Region string
+    // Device is the device to build for
+    Device string
+    // DeviceDetails is the full device details
+    DeviceDetails *devices.Device
+    // Email is the email address to subscribe to notifications for stack
+    Email string
+    // InstanceType is the instance type to use for builds
+    InstanceType string
+    // InstanceRegions is the comma-separated list of regions to use for builds
+    InstanceRegions string
+    // SkipPrice is the spot price at which the build should not start
+    SkipPrice string
+    // MaxPrice is the maximum spot price to set
+    MaxPrice string
+    // SSHKey is the name of the SSH key to use for launched spot instances
+    SSHKey string
+    // Schedule is the cron schedule for builds, can be left empty to disable
+    Schedule string
+    // ChromiumBuildDisabled can be used to turn off building Chromium
+    ChromiumBuildDisabled bool
+    // ChromiumVersion can be used to lock Chromium to a specific version
+    ChromiumVersion string
+    // CoreConfigRepo is the git repo to use for the core configuration of the OS
+    CoreConfigRepo string
+    // CoreConfigRepoBranch specifies which branch to use for the core configuration repo
+    CoreConfigRepoBranch string
+    // CustomConfigRepo is the git repo to use for customization on top of core
+    CustomConfigRepo string
+    // CustomConfigRepoBranch is the branch to use for the custom configuration repo
+    CustomConfigRepoBranch string
+    // ReleasesURL is the URL to use for gathering latest versions of components for builds
+    ReleasesURL string
+    // Cloud specifies which cloud to build on (only aws supported right now)
+    Cloud string
+}
+
+// Templates provides the ability to render templates and write them to disk
+type Templates struct {
+    config *Config
+    templateFiles *TemplateFiles
+    buildScriptFilePath string
+    lambdaFunctionFilePath string
+    lambdaZipFilePath string
+    tfMainFilePath string
+}
+
+// New returns an initialized Templates
+func New(config
*Config, templateFiles *TemplateFiles, outputDir string) (*Templates, error) { + return &Templates{ + config: config, + templateFiles: templateFiles, + buildScriptFilePath: filepath.Join(outputDir, defaultBuildScriptFilename), + lambdaFunctionFilePath: filepath.Join(outputDir, defaultLambdaFunctionFilename), + lambdaZipFilePath: filepath.Join(outputDir, defaultLambdaZipFilename), + tfMainFilePath: filepath.Join(outputDir, defaultTFMainFilename), + }, nil +} + +// RenderAll renders all templates and writes them to output directory +func (t *Templates) RenderAll() error { + renderedBuildScript, err := t.renderBuildScript() + if err != nil { + return err + } + err = t.writeBuildScript(renderedBuildScript) + if err != nil { + return err + } + + renderedLambdaFunction, err := t.renderLambdaFunction() + if err != nil { + return err + } + err = t.writeLambdaFunction(renderedLambdaFunction) + if err != nil { + return err + } + + renderedTerraform, err := t.renderTerraform() + if err != nil { + return err + } + err = t.writeTerraform(renderedTerraform) + if err != nil { + return err + } + + return nil +} + +func (t *Templates) renderBuildScript() ([]byte, error) { + renderedBuildScriptTemplate, err := renderTemplate(t.templateFiles.BuildScriptVars, t.config) + if err != nil { + return nil, err + } + + // insert the generated vars and funcs into raw build script + updatedBuildScript := strings.Replace(t.templateFiles.BuildScript, defaultGeneratedVarReplaceString, string(renderedBuildScriptTemplate), 1) + + return []byte(updatedBuildScript), nil +} + +func (t *Templates) renderLambdaFunction() ([]byte, error) { + regionAMIs, err := json.Marshal(cloudaws.GetAMIs()) + if err != nil { + return nil, err + } + + return renderTemplate(t.templateFiles.LambdaTemplate, struct { + Config *Config + RegionAMIs string + RattlesnakeOSStackReleasesURL string + }{ + t.config, + string(regionAMIs), + DefaultRattlesnakeOSStackReleaseURL, + }) +} + +func (t *Templates) renderTerraform() ([]byte, error) { + return renderTemplate(t.templateFiles.TerraformTemplate, struct { + Config Config + LambdaZipFileLocation string + BuildScriptFileLocation string + }{ + *t.config, + t.lambdaZipFilePath, + t.buildScriptFilePath, + }) +} + +func (t *Templates) writeBuildScript(renderedBuildScript []byte) error { + return ioutil.WriteFile(t.buildScriptFilePath, renderedBuildScript, 0644) +} + +func (t *Templates) writeLambdaFunction(renderedLambdaFunction []byte) error { + if err := ioutil.WriteFile(t.lambdaFunctionFilePath, renderedLambdaFunction, 0644); err != nil { + return err + } + + if err := os.Chmod(t.lambdaFunctionFilePath, 0644); err != nil { + return err + } + + if err := zipFiles(t.lambdaZipFilePath, []string{t.lambdaFunctionFilePath}); err != nil { + return err + } + return nil +} + +func (t *Templates) writeTerraform(renderedTerraform []byte) error { + if err := ioutil.WriteFile(t.tfMainFilePath, renderedTerraform, 0777); err != nil { + return err + } + return nil +} + +func renderTemplate(templateStr string, params interface{}) ([]byte, error) { + temp, err := template.New("templates").Delims("<%", "%>").Parse(templateStr) + if err != nil { + return nil, fmt.Errorf("failed to parse templates: %w", err) + } + + buffer := new(bytes.Buffer) + if err = temp.Execute(buffer, params); err != nil { + return nil, fmt.Errorf("%v: %w", err, ErrTemplateExecute) + } + + outputBytes, err := ioutil.ReadAll(buffer) + if err != nil { + return nil, fmt.Errorf("failed to read generated templates: %w", err) + } + + return outputBytes, nil 
+} + +func zipFiles(filename string, files []string) error { + newFile, err := os.Create(filename) + if err != nil { + return err + } + defer func() { + _ = newFile.Close() + }() + + zipWriter := zip.NewWriter(newFile) + defer func() { + _ = zipWriter.Close() + }() + + for _, file := range files { + zipfile, err := os.Open(file) + if err != nil { + return err + } + defer func() { + _ = zipfile.Close() + }() + + info, err := zipfile.Stat() + if err != nil { + return err + } + + header, err := zip.FileInfoHeader(info) + if err != nil { + return err + } + + header.Method = zip.Deflate + + writer, err := zipWriter.CreateHeader(header) + if err != nil { + return err + } + + _, err = io.Copy(writer, zipfile) + if err != nil { + return err + } + } + return nil +} diff --git a/internal/templates/templates_test.go b/internal/templates/templates_test.go new file mode 100644 index 00000000..c7ec577f --- /dev/null +++ b/internal/templates/templates_test.go @@ -0,0 +1,257 @@ +package templates + +import ( + "fmt" + "github.com/dan-v/rattlesnakeos-stack/internal/devices" + "github.com/stretchr/testify/assert" + "regexp" + "strings" + "testing" +) + +func TestTemplates_RenderBuildScript(t *testing.T) { + tests := map[string]struct { + config *Config + buildScript string + buildScriptVars string + expected []byte + expectedErr error + }{ + "happy path build script render": { + config: testConfig, + buildScript: dedent(fmt.Sprintf(`above + %v + below`, defaultGeneratedVarReplaceString)), + buildScriptVars: dedent(`DEVICE="<% .Device %>" + DEVICE_FRIENDLY="<% .DeviceDetails.Friendly %>" + DEVICE_FAMILY="<% .DeviceDetails.Family %>" + DEVICE_AVB_MODE="<% .DeviceDetails.AVBMode %>" + DEVICE_EXTRA_OTA=<% .DeviceDetails.ExtraOTA %> + STACK_NAME="<% .Name %>" + STACK_VERSION="<% .Version %>" + CHROMIUM_BUILD_DISABLED="<% .ChromiumBuildDisabled %>" + CORE_CONFIG_REPO="<% .CoreConfigRepo %>" + CORE_CONFIG_REPO_BRANCH="<% .CoreConfigRepoBranch %>" + CUSTOM_CONFIG_REPO="<% .CustomConfigRepo %>" + CUSTOM_CONFIG_REPO_BRANCH="<% .CustomConfigRepoBranch %>"`), + expected: []byte(dedent(`above + DEVICE="test device" + DEVICE_FRIENDLY="friendly" + DEVICE_FAMILY="family" + DEVICE_AVB_MODE="avb mode" + DEVICE_EXTRA_OTA=extra ota + STACK_NAME="test stack" + STACK_VERSION="test version" + CHROMIUM_BUILD_DISABLED="false" + CORE_CONFIG_REPO="core-config-repo" + CORE_CONFIG_REPO_BRANCH="core-config-repo-branch" + CUSTOM_CONFIG_REPO="custom-config-repo" + CUSTOM_CONFIG_REPO_BRANCH="custom-config-repo-branch" + below`)), + expectedErr: nil, + }, + "bad template variable returns error": { + config: testConfig, + buildScript: defaultGeneratedVarReplaceString, + buildScriptVars: dedent(`DEVICE="<% .Bad %>"`), + expected: nil, + expectedErr: ErrTemplateExecute, + }, + "buildscript with no defaultGeneratedVarReplaceString does not have buildScriptVars inserted": { + config: testConfig, + buildScript: "", + buildScriptVars: `DEVICE="<% .Device %>"`, + expected: []byte(""), + expectedErr: nil, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + templateFiles, err := New(tc.config, &TemplateFiles{BuildScript: tc.buildScript, BuildScriptVars: tc.buildScriptVars}, "") + assert.Nil(t, err) + + output, err := templateFiles.renderBuildScript() + assert.ErrorIs(t, err, tc.expectedErr) + + assert.Equal(t, string(tc.expected), string(output)) + }) + } +} + +func TestTemplates_RenderLambdaFunction(t *testing.T) { + tests := map[string]struct { + config *Config + lambdaTemplate string + expected []byte + expectedErr 
error + }{ + "happy path build script render": { + config: testConfig, + lambdaTemplate: dedent(`DEVICE="<% .Config.Device %>" + DEVICE_FRIENDLY="<% .Config.DeviceDetails.Friendly %>" + DEVICE_FAMILY="<% .Config.DeviceDetails.Family %>" + DEVICE_AVB_MODE="<% .Config.DeviceDetails.AVBMode %>" + DEVICE_EXTRA_OTA=<% .Config.DeviceDetails.ExtraOTA %> + STACK_NAME="<% .Config.Name %>" + STACK_VERSION="<% .Config.Version %>" + CHROMIUM_BUILD_DISABLED="<% .Config.ChromiumBuildDisabled %>" + CORE_CONFIG_REPO="<% .Config.CoreConfigRepo %>" + CORE_CONFIG_REPO_BRANCH="<% .Config.CoreConfigRepoBranch %>" + CUSTOM_CONFIG_REPO="<% .Config.CustomConfigRepo %>" + CUSTOM_CONFIG_REPO_BRANCH="<% .Config.CustomConfigRepoBranch %>"`), + expected: []byte(dedent(`DEVICE="test device" + DEVICE_FRIENDLY="friendly" + DEVICE_FAMILY="family" + DEVICE_AVB_MODE="avb mode" + DEVICE_EXTRA_OTA=extra ota + STACK_NAME="test stack" + STACK_VERSION="test version" + CHROMIUM_BUILD_DISABLED="false" + CORE_CONFIG_REPO="core-config-repo" + CORE_CONFIG_REPO_BRANCH="core-config-repo-branch" + CUSTOM_CONFIG_REPO="custom-config-repo" + CUSTOM_CONFIG_REPO_BRANCH="custom-config-repo-branch"`)), + expectedErr: nil, + }, + "bad template variable returns error": { + config: testConfig, + lambdaTemplate: dedent(`DEVICE="<% .Bad %>""`), + expected: nil, + expectedErr: ErrTemplateExecute, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + templateFiles, err := New(tc.config, &TemplateFiles{LambdaTemplate: tc.lambdaTemplate}, "") + assert.Nil(t, err) + + output, err := templateFiles.renderLambdaFunction() + assert.ErrorIs(t, err, tc.expectedErr) + + assert.Equal(t, string(tc.expected), string(output)) + }) + } +} + +func TestTemplates_RenderTerraform(t *testing.T) { + tests := map[string]struct { + config *Config + terraformTemplate string + expected []byte + expectedErr error + }{ + "happy path build script render": { + config: testConfig, + terraformTemplate: dedent(`DEVICE="<% .Config.Device %>" + DEVICE_FRIENDLY="<% .Config.DeviceDetails.Friendly %>" + DEVICE_FAMILY="<% .Config.DeviceDetails.Family %>" + DEVICE_AVB_MODE="<% .Config.DeviceDetails.AVBMode %>" + DEVICE_EXTRA_OTA=<% .Config.DeviceDetails.ExtraOTA %> + STACK_NAME="<% .Config.Name %>" + STACK_VERSION="<% .Config.Version %>" + CHROMIUM_BUILD_DISABLED="<% .Config.ChromiumBuildDisabled %>" + CORE_CONFIG_REPO="<% .Config.CoreConfigRepo %>" + CORE_CONFIG_REPO_BRANCH="<% .Config.CoreConfigRepoBranch %>" + CUSTOM_CONFIG_REPO="<% .Config.CustomConfigRepo %>" + CUSTOM_CONFIG_REPO_BRANCH="<% .Config.CustomConfigRepoBranch %>"`), + expected: []byte(dedent(`DEVICE="test device" + DEVICE_FRIENDLY="friendly" + DEVICE_FAMILY="family" + DEVICE_AVB_MODE="avb mode" + DEVICE_EXTRA_OTA=extra ota + STACK_NAME="test stack" + STACK_VERSION="test version" + CHROMIUM_BUILD_DISABLED="false" + CORE_CONFIG_REPO="core-config-repo" + CORE_CONFIG_REPO_BRANCH="core-config-repo-branch" + CUSTOM_CONFIG_REPO="custom-config-repo" + CUSTOM_CONFIG_REPO_BRANCH="custom-config-repo-branch"`)), + expectedErr: nil, + }, + "bad template variable returns error": { + config: testConfig, + terraformTemplate: dedent(`DEVICE="<% .Bad %>""`), + expected: nil, + expectedErr: ErrTemplateExecute, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + templateFiles, err := New(tc.config, &TemplateFiles{TerraformTemplate: tc.terraformTemplate}, "") + assert.Nil(t, err) + + output, err := templateFiles.renderTerraform() + assert.ErrorIs(t, err, tc.expectedErr) + + assert.Equal(t, 
string(tc.expected), string(output)) + }) + } +} + +var testConfig = &Config{ + Version: "test version", + Name: "test stack", + Region: "test region", + Device: "test device", + DeviceDetails: &devices.Device{ + Name: "test device", + Friendly: "friendly", + Family: "family", + AVBMode: "avb mode", + ExtraOTA: "extra ota", + }, + Email: "email", + InstanceType: "instance type", + InstanceRegions: "region1,region2", + SkipPrice: "skip price", + MaxPrice: "max price", + SSHKey: "ssh key", + Schedule: "schedule", + ChromiumBuildDisabled: false, + ChromiumVersion: "chromium version", + CoreConfigRepo: "core-config-repo", + CoreConfigRepoBranch: "core-config-repo-branch", + CustomConfigRepo: "custom-config-repo", + CustomConfigRepoBranch: "custom-config-repo-branch", + ReleasesURL: "latest url", + Cloud: "cloud", +} + +// source https://github.com/lithammer/dedent +func dedent(text string) string { + var margin string + var whitespaceOnly = regexp.MustCompile("(?m)^[ \t]+$") + var leadingWhitespace = regexp.MustCompile("(?m)(^[ \t]*)(?:[^ \t\n])") + + text = whitespaceOnly.ReplaceAllString(text, "") + indents := leadingWhitespace.FindAllStringSubmatch(text, -1) + + // Look for the longest leading string of spaces and tabs common to all + // lines. + for i, indent := range indents { + if i == 0 { + margin = indent[1] + } else if strings.HasPrefix(indent[1], margin) { + // Current line more deeply indented than previous winner: + // no change (previous winner is still on top). + continue + } else if strings.HasPrefix(margin, indent[1]) { + // Current line consistent with and no deeper than previous winner: + // it's the new winner. + margin = indent[1] + } else { + // Current line and previous winner have no common whitespace: + // there is no margin. + margin = "" + break + } + } + + if margin != "" { + text = regexp.MustCompile("(?m)^"+margin).ReplaceAllString(text, "") + } + return text +} diff --git a/internal/terraform/terraform.go b/internal/terraform/terraform.go new file mode 100644 index 00000000..ea8e3eab --- /dev/null +++ b/internal/terraform/terraform.go @@ -0,0 +1,226 @@ +package terraform + +import ( + "archive/zip" + "bufio" + "bytes" + "context" + _ "embed" + "fmt" + log "github.com/sirupsen/logrus" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" +) + +const ( + // Version is the Terraform version that is downloaded and used + // TODO: this version of Terraform is getting quite old, but don't have a great plan for seamless major version upgrade. 
+ Version = "0.11.14" + // DefaultTerraformDestroyTimeout is the default timeout for running Terraform destroy + DefaultTerraformDestroyTimeout = time.Minute * 2 +) + +var ( + darwinBinaryURL = fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_darwin_amd64.zip", Version, Version) + linuxBinaryURL = fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_amd64.zip", Version, Version) + windowsBinaryURL = fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_windows_amd64.zip", Version, Version) +) + +// Client provides a basic Terraform client +type Client struct { + rootDir string + terraformBinaryFile string +} + +// New downloads the Terraform binary for current platform and returns an initialized Client +func New(rootDir string) (*Client, error) { + terraformBinary, err := setupBinary(rootDir) + if err != nil { + return nil, err + } + + client := &Client{ + rootDir: rootDir, + terraformBinaryFile: terraformBinary, + } + return client, nil +} + +// Apply runs terraform init and apply +func (c *Client) Apply(ctx context.Context) ([]byte, error) { + output, err := c.init(ctx) + if err != nil { + return output, err + } + + cmd := c.setup(ctx, []string{"apply", "-auto-approve", c.rootDir}) + return c.run(cmd) +} + +// Destroy runs terraform destroy +func (c *Client) Destroy(ctx context.Context) ([]byte, error) { + cmd := c.setup(ctx, []string{"destroy", "-auto-approve", c.rootDir}) + return c.run(cmd) +} + +func (c *Client) init(ctx context.Context) ([]byte, error) { + cmd := c.setup(ctx, []string{"init", c.rootDir}) + return c.run(cmd) +} + +func (c *Client) setup(ctx context.Context, args []string) *exec.Cmd { + cmd := exec.CommandContext(ctx, c.terraformBinaryFile, args...) + cmd.Dir = c.rootDir + return cmd +} + +func (c *Client) run(cmd *exec.Cmd) ([]byte, error) { + cmdOutput, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmd.Stderr = cmd.Stdout + + err = cmd.Start() + if err != nil { + return nil, err + } + + b := &bytes.Buffer{} + scanner := bufio.NewScanner(cmdOutput) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + fmt.Println(scanner.Text()) + b.WriteString(scanner.Text() + "\n") + } + + err = cmd.Wait() + if err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func getTerraformURL() (string, error) { + osName := runtime.GOOS + if osName == "darwin" { + return darwinBinaryURL, nil + } else if osName == "linux" { + return linuxBinaryURL, nil + } else if osName == "windows" { + return windowsBinaryURL, nil + } + return "", fmt.Errorf("unknown os: `%s`", osName) +} + +func setupBinary(outputDir string) (string, error) { + terraformZipFilename := fmt.Sprintf("terraform-%v.zip", Version) + terraformZipFullPathFilename := filepath.Join(outputDir, terraformZipFilename) + + downloadRequired := true + if _, err := os.Stat(terraformZipFullPathFilename); err == nil { + downloadRequired = false + log.Infof("Skipping download of terraform zip as it already exists %v", terraformZipFullPathFilename) + } + + if downloadRequired { + fileHandler, err := os.Create(terraformZipFullPathFilename) + if err != nil { + return "", err + } + defer func() { + _ = fileHandler.Close() + }() + + url, err := getTerraformURL() + if err != nil { + return "", err + } + + log.Infoln("Downloading Terraform binary from URL:", url) + resp, err := http.Get(url) + if err != nil { + return "", err + } + defer func() { + _ = resp.Body.Close() + }() + + if _, err := io.Copy(fileHandler, resp.Body); err != nil { + return "", err 
+ } + if err := fileHandler.Sync(); err != nil { + return "", err + } + } + + err := unzip(terraformZipFullPathFilename, outputDir) + if err != nil { + return "", err + } + + terraformBinary := "terraform" + if runtime.GOOS == "windows" { + terraformBinary = "terraform.exe" + } + terraformBinaryFullPath := filepath.Join(outputDir, terraformBinary) + if err := os.Chmod(terraformBinaryFullPath, 0700); err != nil { + return "", err + } + + return terraformBinaryFullPath, nil +} + +func unzip(src, dest string) error { + r, err := zip.OpenReader(src) + if err != nil { + return err + } + defer func() { + _ = r.Close() + }() + + for _, f := range r.File { + rc, err := f.Open() + if err != nil { + return err + } + + fpath := filepath.Join(dest, f.Name) + if f.FileInfo().IsDir() { + err := os.MkdirAll(fpath, f.Mode()) + if err != nil { + return err + } + } else { + var fdir string + if lastIndex := strings.LastIndex(fpath, string(os.PathSeparator)); lastIndex > -1 { + fdir = fpath[:lastIndex] + } + + err = os.MkdirAll(fdir, f.Mode()) + if err != nil { + return err + } + f, err := os.OpenFile( + fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(f, rc) + if err != nil { + return err + } + _ = f.Close() + } + _ = rc.Close() + } + return nil +} diff --git a/main.go b/main.go index b5468470..2ba73121 100644 --- a/main.go +++ b/main.go @@ -1,9 +1,99 @@ package main import ( - "github.com/dan-v/rattlesnakeos-stack/cli" + _ "embed" + "github.com/dan-v/rattlesnakeos-stack/cmd" + "github.com/dan-v/rattlesnakeos-stack/internal/devices" + "github.com/dan-v/rattlesnakeos-stack/internal/templates" + "log" ) +var ( + //go:embed AOSP_VERSION + aospVersion string + //go:embed VERSION + stackVersion string + //go:embed templates/build.sh + buildScript string + //go:embed templates/generated_vars_and_funcs.sh + buildScriptVars string + //go:embed templates/lambda.py + lambdaTemplate string + //go:embed templates/terraform.tf + terraformTemplate string +) + +var allDevices = []*devices.Device{ + &devices.Device{ + Name: "blueline", + Friendly: "Pixel 3", + Family: "crosshatch", + AVBMode: devices.AVBModeChained, + ExtraOTA: devices.ExtraOTARetrofitDynamicPartitions, + }, + &devices.Device{ + Name: "crosshatch", + Friendly: "Pixel 3 XL", + Family: "crosshatch", + AVBMode: devices.AVBModeChained, + ExtraOTA: devices.ExtraOTARetrofitDynamicPartitions, + }, + &devices.Device{ + Name: "sargo", + Friendly: "Pixel 3a", + Family: "bonito", + AVBMode: devices.AVBModeChained, + ExtraOTA: devices.ExtraOTARetrofitDynamicPartitions, + }, + &devices.Device{ + Name: "bonito", + Friendly: "Pixel 3a XL", + Family: "bonito", + AVBMode: devices.AVBModeChained, + ExtraOTA: devices.ExtraOTARetrofitDynamicPartitions, + }, + &devices.Device{ + Name: "flame", + Friendly: "Pixel 4", + Family: "coral", + AVBMode: devices.AVBModeChainedV2, + }, + &devices.Device{ + Name: "coral", + Friendly: "Pixel 4 XL", + Family: "coral", + AVBMode: devices.AVBModeChainedV2, + }, + &devices.Device{ + Name: "sunfish", + Friendly: "Pixel 4a", + Family: "sunfish", + AVBMode: devices.AVBModeChainedV2, + }, + &devices.Device{ + Name: "bramble", + Friendly: "Pixel 4a 5G", + Family: "bramble", + AVBMode: devices.AVBModeChainedV2, + }, + &devices.Device{ + Name: "redfin", + Friendly: "Pixel 5", + Family: "redfin", + AVBMode: devices.AVBModeChainedV2, + }, +} + func main() { - cli.Execute() + supportedDevices, err := devices.NewSupportedDevices(allDevices...) 
+ if err != nil { + log.Fatal(err) + } + + cmd.Execute(supportedDevices, aospVersion, stackVersion, &templates.TemplateFiles{ + BuildScript: buildScript, + BuildScriptVars: buildScriptVars, + LambdaTemplate: lambdaTemplate, + TerraformTemplate: terraformTemplate, + }) } diff --git a/stack/aws.go b/stack/aws.go deleted file mode 100644 index aeda3368..00000000 --- a/stack/aws.go +++ /dev/null @@ -1,248 +0,0 @@ -package stack - -import ( - "fmt" - "os" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/iam" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/dan-v/rattlesnakeos-stack/templates" - log "github.com/sirupsen/logrus" -) - -const ( - awsErrCodeNoSuchBucket = "NoSuchBucket" - awsErrCodeNotFound = "NotFound" -) - -type CustomPatches []struct { - Repo string - Patches []string - Branch string -} - -type CustomScripts []struct { - Repo string - Scripts []string - Branch string -} - -type CustomPrebuilts []struct { - Repo string - Modules []string -} - -type CustomManifestRemotes []struct { - Name string - Fetch string - Revision string -} - -type CustomManifestProjects []struct { - Path string - Name string - Remote string - Modules []string -} - -type AWSStackConfig struct { - Name string - Region string - Device string - Email string - InstanceType string - InstanceRegions string - SkipPrice string - MaxPrice string - SSHKey string - PreventShutdown bool - Version string - Schedule string - IgnoreVersionChecks bool - ChromiumVersion string - CustomPatches *CustomPatches - CustomScripts *CustomScripts - CustomPrebuilts *CustomPrebuilts - CustomManifestRemotes *CustomManifestRemotes - CustomManifestProjects *CustomManifestProjects - HostsFile string - EncryptedKeys bool - AMI string -} - -type AWSStack struct { - Config *AWSStackConfig - terraformClient *terraformClient - renderedBuildScript []byte - renderedLambdaFunction []byte - LambdaZipFileLocation string - BuildScriptFileLocation string -} - -func NewAWSStack(config *AWSStackConfig) (*AWSStack, error) { - err := checkAWSCreds(config.Region) - if err != nil { - return nil, err - } - - err = s3BucketSetup(config.Name, config.Region) - if err != nil { - return nil, err - } - - renderedLambdaFunction, err := renderTemplate(templates.LambdaTemplate, config) - if err != nil { - return nil, fmt.Errorf("Failed to render Lambda function: %v", err) - } - - renderedBuildScript, err := renderTemplate(templates.BuildTemplate, config) - if err != nil { - return nil, fmt.Errorf("Failed to render build script: %v", err) - } - - stack := &AWSStack{ - Config: config, - renderedBuildScript: renderedBuildScript, - renderedLambdaFunction: renderedLambdaFunction, - } - - terraformClient, err := newTerraformClient(stack, os.Stdout, os.Stdin) - if err != nil { - return nil, fmt.Errorf("Failed to create terraform client: %v", err) - } - stack.terraformClient = terraformClient - - return stack, nil -} - -func (s *AWSStack) Apply() error { - defer s.terraformClient.Cleanup() - - sess, err := session.NewSession(aws.NewConfig().WithCredentialsChainVerboseErrors(true)) - if err != nil { - return err - } - - log.Info("Creating required service linked roles if needed") - _, err = iam.New(sess).CreateServiceLinkedRole(&iam.CreateServiceLinkedRoleInput{ - AWSServiceName: aws.String("spot.amazonaws.com"), - }) - if errWithCode, ok := err.(awserr.Error); ok && iam.ErrCodeInvalidInputException == 
errWithCode.Code() { - log.Debug("spot service linked role already exists") - } - - _, err = iam.New(sess).CreateServiceLinkedRole(&iam.CreateServiceLinkedRoleInput{ - AWSServiceName: aws.String("spotfleet.amazonaws.com"), - }) - if errWithCode, ok := err.(awserr.Error); ok && iam.ErrCodeInvalidInputException == errWithCode.Code() { - log.Debug("spotfleet service role already exists") - } - - log.Info("Creating/updating AWS resources") - err = s.terraformClient.Apply() - if err != nil { - return err - } - log.Infof("Successfully deployed/updated AWS resources for stack %v", s.Config.Name) - - snsClient := sns.New(sess, &aws.Config{Region: &s.Config.Region}) - resp, err := snsClient.ListTopics(&sns.ListTopicsInput{NextToken: aws.String("")}) - for _, topic := range resp.Topics { - topicName := strings.Split(*topic.TopicArn, ":")[5] - if topicName == s.Config.Name { - // check if subscription exists - resp, err := snsClient.ListSubscriptionsByTopic(&sns.ListSubscriptionsByTopicInput{ - NextToken: aws.String(""), - TopicArn: aws.String(*topic.TopicArn), - }) - if err != nil { - return fmt.Errorf("Failed to list SNS subscriptions for topic %v: %v", *topic.TopicArn, err) - } - for _, subscription := range resp.Subscriptions { - if *subscription.Endpoint == s.Config.Email { - return nil - } - } - - // subscribe if not setup - _, err = snsClient.Subscribe(&sns.SubscribeInput{ - Protocol: aws.String("email"), - TopicArn: aws.String(*topic.TopicArn), - Endpoint: aws.String(s.Config.Email), - }) - if err != nil { - return fmt.Errorf("Failed to setup email notifications: %v", err) - } - log.Infof("Successfully setup email notifications for %v - you'll "+ - "need to click link in confirmation email to get notifications.", s.Config.Email) - break - } - } - - return nil -} - -func (s *AWSStack) Destroy() error { - defer s.terraformClient.Cleanup() - - log.Info("Destroying AWS resources") - err := s.terraformClient.Destroy() - if err != nil { - return err - } - log.Info("Successfully removed AWS resources") - return nil -} - -func s3BucketSetup(name, region string) error { - sess, err := session.NewSession(aws.NewConfig().WithCredentialsChainVerboseErrors(true)) - if err != nil { - return fmt.Errorf("Failed to create new AWS session: %v", err) - } - s3Client := s3.New(sess, &aws.Config{Region: ®ion}) - - _, err = s3Client.HeadBucket(&s3.HeadBucketInput{Bucket: &name}) - if err != nil { - awsErrCode := err.(awserr.Error).Code() - if awsErrCode != awsErrCodeNotFound && awsErrCode != awsErrCodeNoSuchBucket { - return fmt.Errorf("Unknown S3 error code: %v", err) - } - - bucketInput := &s3.CreateBucketInput{ - Bucket: &name, - } - // NOTE the location constraint should only be set if using a bucket OTHER than us-east-1 - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html - if region != "us-east-1" { - bucketInput.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ - LocationConstraint: ®ion, - } - } - - log.Infof("Creating S3 bucket %s", name) - _, err = s3Client.CreateBucket(bucketInput) - if err != nil { - return fmt.Errorf("Failed to create bucket %s - note that this bucket name must be globally unique. 
%v", name, err) - } - } - return nil -} - -func checkAWSCreds(region string) error { - log.Info("Checking AWS credentials") - sess, err := session.NewSession(aws.NewConfig().WithCredentialsChainVerboseErrors(true)) - if err != nil { - return fmt.Errorf("Failed to create new AWS session: %v", err) - } - s3Client := s3.New(sess, &aws.Config{Region: ®ion}) - _, err = s3Client.ListBuckets(&s3.ListBucketsInput{}) - if err != nil { - return fmt.Errorf("Unable to list S3 buckets - make sure you have valid admin AWS credentials: %v", err) - } - return nil -} diff --git a/stack/terraform.go b/stack/terraform.go deleted file mode 100644 index 1522971b..00000000 --- a/stack/terraform.go +++ /dev/null @@ -1,193 +0,0 @@ -package stack - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "runtime" - "strings" - - "github.com/dan-v/rattlesnakeos-stack/templates" - log "github.com/sirupsen/logrus" -) - -const terraformVersion = "0.11.8" - -var darwinBinaryURL = fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_darwin_amd64.zip", terraformVersion, terraformVersion) -var linuxBinaryURL = fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_amd64.zip", terraformVersion, terraformVersion) -var windowsBinaryURL = fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_windows_amd64.zip", terraformVersion, terraformVersion) - -const ( - lambdaFunctionFilename = "lambda_spot_function.py" - lambdaZipFilename = "lambda_spot.zip" - buildScriptFilename = "build.sh" -) - -type terraformClient struct { - configDir string - tempDir *TempDir - stdout io.Writer - stderr io.Writer -} - -func newTerraformClient(config *AWSStack, stdout, stderr io.Writer) (*terraformClient, error) { - tempDir, err := NewTempDir("rattlesnakeos-stack") - if err != nil { - return nil, err - } - - if err := setupBinary(tempDir); err != nil { - return nil, err - } - - // write out shell script - config.BuildScriptFileLocation = tempDir.Path(buildScriptFilename) - if runtime.GOOS == "windows" { - config.BuildScriptFileLocation = strings.Replace(config.BuildScriptFileLocation, "\\", "/", -1) - } - err = ioutil.WriteFile(config.BuildScriptFileLocation, config.renderedBuildScript, 0644) - if err != nil { - return nil, err - } - - // write out lambda function and zip it up - err = ioutil.WriteFile(tempDir.Path(lambdaFunctionFilename), config.renderedLambdaFunction, 0644) - if err != nil { - return nil, err - } - // handle potential issue with non default umask as lambda function must have at least 444 permissions to run - if err = os.Chmod(tempDir.Path(lambdaFunctionFilename), 0644); err != nil { - return nil, err - } - files := []string{tempDir.Path(lambdaFunctionFilename)} - output := tempDir.Path(lambdaZipFilename) - err = zipFiles(output, files) - if err != nil { - return nil, err - } - config.LambdaZipFileLocation = tempDir.Path(lambdaZipFilename) - if runtime.GOOS == "windows" { - config.LambdaZipFileLocation = strings.Replace(config.LambdaZipFileLocation, "\\", "/", -1) - } - - // render terraform template - renderedTerraform, err := renderTemplate(templates.TerraformTemplate, config) - if err != nil { - return nil, fmt.Errorf("Failed to render terraform template: %v", err) - } - configDir := tempDir.Path("config") - if err := os.Mkdir(configDir, 0777); err != nil { - return nil, err - } - configPath := tempDir.Path("config/main.tf") - if err := ioutil.WriteFile(configPath, renderedTerraform, 0777); err != nil { - return nil, err - } - - // create client 
and run init - client := &terraformClient{ - tempDir: tempDir, - configDir: configDir, - stdout: stdout, - stderr: stderr, - } - devNull := bytes.NewBuffer(nil) - if err := client.terraform([]string{"init"}, devNull); err != nil { - io.Copy(stdout, devNull) - return nil, err - } - return client, nil -} - -func (client *terraformClient) Apply() error { - client.terraform([]string{ - "plan", - "-input=false", - "-out=tfplan", - }, client.stdout) - return client.terraform([]string{ - "apply", - "tfplan", - }, client.stdout) -} - -func (client *terraformClient) Destroy() error { - return client.terraform([]string{ - "destroy", - "-force", - }, client.stdout) -} - -func (client *terraformClient) terraform(args []string, stdout io.Writer) error { - terraformBinary := "terraform" - if runtime.GOOS == "windows" { - terraformBinary = "terraform.exe" - } - cmd := exec.Command(client.tempDir.Path(terraformBinary), args...) - cmd.Dir = client.configDir - cmd.Stdout = stdout - cmd.Stderr = client.stderr - return cmd.Run() -} - -func (client *terraformClient) Cleanup() error { - return os.RemoveAll(client.tempDir.path) -} - -func getTerraformURL() (string, error) { - os := runtime.GOOS - if os == "darwin" { - return darwinBinaryURL, nil - } else if os == "linux" { - return linuxBinaryURL, nil - } else if os == "windows" { - return windowsBinaryURL, nil - } - return "", fmt.Errorf("unknown os: `%s`", os) -} - -func setupBinary(tempDir *TempDir) error { - fileHandler, err := os.Create(tempDir.Path("terraform.zip")) - if err != nil { - return err - } - defer fileHandler.Close() - - url, err := getTerraformURL() - if err != nil { - return err - } - - log.Infoln("Downloading Terraform binary from URL:", url) - resp, err := http.Get(url) - if err != nil { - return err - } - defer resp.Body.Close() - - if _, err := io.Copy(fileHandler, resp.Body); err != nil { - return err - } - if err := fileHandler.Sync(); err != nil { - return err - } - - err = unzip(tempDir.Path("terraform.zip"), tempDir.path) - if err != nil { - return err - } - - terraformBinary := "terraform" - if runtime.GOOS == "windows" { - terraformBinary = "terraform.exe" - } - if err := os.Chmod(tempDir.Path(terraformBinary), 0700); err != nil { - return err - } - - return nil -} diff --git a/stack/utils.go b/stack/utils.go deleted file mode 100644 index a9ef41e2..00000000 --- a/stack/utils.go +++ /dev/null @@ -1,155 +0,0 @@ -package stack - -import ( - "archive/zip" - "bytes" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - "text/template" -) - -func renderTemplate(templateStr string, params interface{}) ([]byte, error) { - templ, err := template.New("template").Delims("<%", "%>").Parse(templateStr) - if err != nil { - return nil, err - } - - buffer := new(bytes.Buffer) - - if err = templ.Execute(buffer, params); err != nil { - return nil, err - } - - outputBytes, err := ioutil.ReadAll(buffer) - if err != nil { - return nil, err - } - return outputBytes, nil -} - -func unzip(src, dest string) error { - r, err := zip.OpenReader(src) - if err != nil { - return err - } - defer r.Close() - - for _, f := range r.File { - rc, err := f.Open() - if err != nil { - return err - } - defer rc.Close() - - fpath := filepath.Join(dest, f.Name) - if f.FileInfo().IsDir() { - os.MkdirAll(fpath, f.Mode()) - } else { - var fdir string - if lastIndex := strings.LastIndex(fpath, string(os.PathSeparator)); lastIndex > -1 { - fdir = fpath[:lastIndex] - } - - err = os.MkdirAll(fdir, f.Mode()) - if err != nil { - log.Fatal(err) - return err - } - f, err 
:= os.OpenFile( - fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) - if err != nil { - return err - } - defer f.Close() - - _, err = io.Copy(f, rc) - if err != nil { - return err - } - } - } - return nil -} - -func zipFiles(filename string, files []string) error { - - newfile, err := os.Create(filename) - if err != nil { - return err - } - defer newfile.Close() - - zipWriter := zip.NewWriter(newfile) - defer zipWriter.Close() - - // Add files to zip - for _, file := range files { - - zipfile, err := os.Open(file) - if err != nil { - return err - } - defer zipfile.Close() - - // Get the file information - info, err := zipfile.Stat() - if err != nil { - return err - } - - header, err := zip.FileInfoHeader(info) - if err != nil { - return err - } - - // Change to deflate to gain better compression - // see http://golang.org/pkg/archive/zip/#pkg-constants - header.Method = zip.Deflate - - writer, err := zipWriter.CreateHeader(header) - if err != nil { - return err - } - _, err = io.Copy(writer, zipfile) - if err != nil { - return err - } - } - return nil -} - -type TempDir struct { - path string -} - -func NewTempDir(name string) (*TempDir, error) { - path, err := ioutil.TempDir("", name) - if err != nil { - return nil, err - } - - return &TempDir{ - path: path, - }, nil -} - -func (tempDir *TempDir) Save(filename string, contents []byte) (string, error) { - path := filepath.Join(tempDir.path, filename) - if err := ioutil.WriteFile(path, contents, 0700); err != nil { - return "", err - } - - return path, nil -} - -func (tempDir *TempDir) Path(filename string) string { - return filepath.Join(tempDir.path, filename) -} - -func (tempDir *TempDir) Cleanup() error { - return os.RemoveAll(tempDir.path) -} diff --git a/templates/build.sh b/templates/build.sh new file mode 100644 index 00000000..c4c54511 --- /dev/null +++ b/templates/build.sh @@ -0,0 +1,615 @@ +#!/usr/bin/env bash + +######################################## +######## BUILD ARGS #################### +######################################## +RELEASE=$1 +echo "RELEASE=${RELEASE}" +AOSP_BUILD_ID=$2 +echo "AOSP_BUILD_ID=${AOSP_BUILD_ID}" +AOSP_TAG=$3 +echo "AOSP_TAG=${AOSP_TAG}" +CHROMIUM_VERSION=$4 +echo "CHROMIUM_VERSION=${CHROMIUM_VERSION}" +LOCAL_MANIFEST_REVISIONS=$5 +echo "LOCAL_MANIFEST_REVISIONS=${LOCAL_MANIFEST_REVISIONS}" + +#### #### + +######################################## +######## OTHER VARS #################### +######################################## +SECONDS=0 +ROOT_DIR="${HOME}" +AOSP_BUILD_DIR="${ROOT_DIR}/aosp" +CORE_DIR="${ROOT_DIR}/core" +CUSTOM_DIR="${ROOT_DIR}/custom" +KEYS_DIR="${ROOT_DIR}/keys" +MISC_DIR="${ROOT_DIR}/misc" +RELEASE_TOOLS_DIR="${MISC_DIR}/releasetools" +PRODUCT_MAKEFILE="${AOSP_BUILD_DIR}/device/google/${DEVICE_FAMILY}/aosp_${DEVICE}.mk" +CORE_VENDOR_BASEDIR="${AOSP_BUILD_DIR}/vendor/core" +CORE_VENDOR_MAKEFILE="${CORE_VENDOR_BASEDIR}/vendor/config/main.mk" +CUSTOM_VENDOR_BASEDIR="${AOSP_BUILD_DIR}/vendor/custom" +CUSTOM_VENDOR_MAKEFILE="${CUSTOM_VENDOR_BASEDIR}/vendor/config/main.mk" + +full_run() { + log_header "${FUNCNAME[0]}" + + notify "RattlesnakeOS Build STARTED" + setup_env + import_keys + chromium_build_if_required + aosp_repo_init + aosp_local_repo_additions + aosp_repo_sync + setup_vendor + chromium_copy_to_build_tree_if_required + aosp_build + release + upload + checkpoint_versions + notify "RattlesnakeOS Build SUCCESS" +} + +setup_env() { + log_header "${FUNCNAME[0]}" + + # install required packages + sudo apt-get update + sudo DEBIAN_FRONTEND=noninteractive apt-get -y install 
python python2 python3 gperf jq default-jdk git-core gnupg \ + flex bison build-essential zip curl zlib1g-dev gcc-multilib g++-multilib libc6-dev-i386 lib32ncurses5-dev \ + x11proto-core-dev libx11-dev lib32z-dev ccache libgl1-mesa-dev libxml2-utils xsltproc unzip liblz4-tool \ + libncurses5 wget parallel rsync python-protobuf python3-protobuf python3-pip + + retry curl --fail -s https://storage.googleapis.com/git-repo-downloads/repo > /tmp/repo + chmod +x /tmp/repo + sudo mv /tmp/repo /usr/local/bin/ + + # setup git + git config --get --global user.name || git config --global user.name 'aosp' + git config --get --global user.email || git config --global user.email 'aosp@localhost' + git config --global color.ui true + + # mount /tmp filesystem as tmpfs + sudo mount -t tmpfs tmpfs /tmp || true + + # setup base directories + mkdir -p "${AOSP_BUILD_DIR}" + mkdir -p "${KEYS_DIR}" + mkdir -p "${MISC_DIR}" + mkdir -p "${RELEASE_TOOLS_DIR}" + + # get core repo + rm -rf "${CORE_DIR}" + retry git clone "${CORE_CONFIG_REPO}" "${CORE_DIR}" + if [ -n "${CORE_CONFIG_REPO_BRANCH}" ]; then + pushd "${CORE_DIR}" + git checkout "${CORE_CONFIG_REPO_BRANCH}" + popd + fi + + # get custom repo if specified + if [ -n "${CUSTOM_CONFIG_REPO}" ]; then + rm -rf "${CUSTOM_DIR}" + retry git clone "${CUSTOM_CONFIG_REPO}" "${CUSTOM_DIR}" + if [ -n "${CUSTOM_CONFIG_REPO_BRANCH}" ]; then + pushd "${CUSTOM_DIR}" + git checkout "${CUSTOM_CONFIG_REPO_BRANCH}" + popd + fi + fi + + # mount keys directory as tmpfs + sudo mount -t tmpfs -o size=20m tmpfs "${KEYS_DIR}" || true +} + +aosp_repo_init() { + log_header "${FUNCNAME[0]}" + cd "${AOSP_BUILD_DIR}" + + run_hook_if_exists "aosp_repo_init_pre" + + MANIFEST_URL="https://android.googlesource.com/platform/manifest" + retry repo init --manifest-url "${MANIFEST_URL}" --manifest-branch "${AOSP_TAG}" --depth 1 + + run_hook_if_exists "aosp_repo_init_post" +} + +aosp_local_repo_additions() { + log_header "${FUNCNAME[0]}" + cd "${AOSP_BUILD_DIR}" + + run_hook_if_exists "aosp_local_repo_additions_pre" + + rm -rf "${AOSP_BUILD_DIR}/.repo/local_manifests" + mkdir -p "${AOSP_BUILD_DIR}/.repo/local_manifests" + cp -f "${CORE_DIR}"/local_manifests/*.xml "${AOSP_BUILD_DIR}/.repo/local_manifests" + + if [ "${CHROMIUM_BUILD_DISABLED}" == "true" ]; then + local_chromium_manifest="${AOSP_BUILD_DIR}/.repo/local_manifests/001-chromium.xml" + if [ -f "${local_chromium_manifest}" ]; then + log "Removing ${local_chromium_manifest} as chromium build is disabled" + rm -f "${local_chromium_manifest}" || true + fi + fi + + if [ -n "${CUSTOM_CONFIG_REPO}" ]; then + cp -f "${CUSTOM_DIR}"/local_manifests/*.xml "${AOSP_BUILD_DIR}/.repo/local_manifests" || true + fi + + run_hook_if_exists "aosp_local_repo_additions_post" +} + +aosp_repo_sync() { + log_header "${FUNCNAME[0]}" + cd "${AOSP_BUILD_DIR}" + + run_hook_if_exists "aosp_repo_sync_pre" + + if [ "$(ls -l "${AOSP_BUILD_DIR}" | wc -l)" -gt 0 ]; then + log "Running git reset and clean as environment appears to already have been synced previously" + repo forall -c 'git reset --hard ; git clean --force -dx' + fi + + for i in {1..10}; do + log "Running aosp repo sync attempt ${i}/10" + repo sync -c --no-tags --no-clone-bundle --jobs 32 && break + done + + run_hook_if_exists "aosp_repo_sync_post" +} + +setup_vendor() { + log_header "${FUNCNAME[0]}" + run_hook_if_exists "setup_vendor_pre" + + # skip if already downloaded + current_vendor_build_id="" + vendor_build_id_file="${AOSP_BUILD_DIR}/vendor/google_devices/${DEVICE}/build_id.txt" + if [ -f 
"${vendor_build_id_file}" ]; then + current_vendor_build_id=$(cat "${vendor_build_id_file}") + fi + if [ "${current_vendor_build_id}" == "${AOSP_BUILD_ID}" ]; then + log "Skipping vendor download as ${AOSP_BUILD_ID} already exists at ${vendor_build_id_file}" + return + fi + + # get vendor files (with timeout) + timeout 30m "${AOSP_BUILD_DIR}/vendor/android-prepare-vendor/execute-all.sh" --debugfs --yes --device "${DEVICE}" \ + --buildID "${AOSP_BUILD_ID}" --output "${AOSP_BUILD_DIR}/vendor/android-prepare-vendor" + + # copy vendor files to build tree + mkdir --parents "${AOSP_BUILD_DIR}/vendor/google_devices" || true + rm -rf "${AOSP_BUILD_DIR}/vendor/google_devices/${DEVICE}" || true + mv "${AOSP_BUILD_DIR}/vendor/android-prepare-vendor/${DEVICE}/$(tr '[:upper:]' '[:lower:]' <<< "${AOSP_BUILD_ID}")/vendor/google_devices/${DEVICE}" "${AOSP_BUILD_DIR}/vendor/google_devices" + + # smaller devices need big brother vendor files + if [ "${DEVICE}" != "${DEVICE_FAMILY}" ]; then + rm -rf "${AOSP_BUILD_DIR}/vendor/google_devices/${DEVICE_FAMILY}" || true + mv "${AOSP_BUILD_DIR}/vendor/android-prepare-vendor/${DEVICE}/$(tr '[:upper:]' '[:lower:]' <<< "${AOSP_BUILD_ID}")/vendor/google_devices/${DEVICE_FAMILY}" "${AOSP_BUILD_DIR}/vendor/google_devices" + fi + + run_hook_if_exists "setup_vendor_post" +} + +insert_vendor_includes() { + log_header "${FUNCNAME[0]}" + + if ! grep -q "${CORE_VENDOR_MAKEFILE}" "${PRODUCT_MAKEFILE}"; then + sed -i "\@vendor/google_devices/${DEVICE_FAMILY}/proprietary/device-vendor.mk)@a \$(call inherit-product, ${CORE_VENDOR_MAKEFILE})" "${PRODUCT_MAKEFILE}" + fi + + if [ -n "${CUSTOM_CONFIG_REPO}" ]; then + if ! grep -q "${CUSTOM_VENDOR_MAKEFILE}" "${PRODUCT_MAKEFILE}"; then + sed -i "\@vendor/google_devices/${DEVICE_FAMILY}/proprietary/device-vendor.mk)@a \$(call inherit-product, ${CUSTOM_VENDOR_MAKEFILE})" "${PRODUCT_MAKEFILE}" + fi + fi +} + +env_setup_script() { + log_header "${FUNCNAME[0]}" + cd "${AOSP_BUILD_DIR}" + + source build/envsetup.sh + export LANG=C + export _JAVA_OPTIONS=-XX:-UsePerfData + # shellcheck disable=SC2155 + export BUILD_NUMBER=$(cat out/soong/build_number.txt 2>/dev/null || date --utc +%Y.%m.%d.%H) + log "BUILD_NUMBER=${BUILD_NUMBER}" + export DISPLAY_BUILD_NUMBER=true + chrt -b -p 0 $$ +} + +aosp_build() { + log_header "${FUNCNAME[0]}" + run_hook_if_exists "aosp_build_pre" + cd "${AOSP_BUILD_DIR}" + + insert_vendor_includes + + if [ "${CHROMIUM_BUILD_DISABLED}" == "true" ]; then + log "Removing TrichromeChrome and TrichromeWebView as chromium build is disabled" + sed -i '/PRODUCT_PACKAGES += TrichromeChrome/d' "${CORE_VENDOR_MAKEFILE}" || true + sed -i '/PRODUCT_PACKAGES += TrichromeWebView/d' "${CORE_VENDOR_MAKEFILE}" || true + fi + + ( + env_setup_script + + build_target="release aosp_${DEVICE} user" + log "Running choosecombo ${build_target}" + choosecombo ${build_target} + + log "Running target-files-package" + retry m target-files-package + + if [ ! 
-f "${RELEASE_TOOLS_DIR}/releasetools/sign_target_files_apks" ]; then + log "Running m otatools-package" + m otatools-package + rm -rf "${RELEASE_TOOLS_DIR}" + unzip "${AOSP_BUILD_DIR}/out/target/product/${DEVICE}/otatools.zip" -d "${RELEASE_TOOLS_DIR}" + fi + ) + + run_hook_if_exists "aosp_build_post" +} + +release() { + log_header "${FUNCNAME[0]}" + run_hook_if_exists "release_pre" + cd "${AOSP_BUILD_DIR}" + + ( + env_setup_script + + KEY_DIR="${KEYS_DIR}/${DEVICE}" + OUT="out/release-${DEVICE}-${BUILD_NUMBER}" + device="${DEVICE}" + + log "Running clear-factory-images-variables.sh" + source "device/common/clear-factory-images-variables.sh" + DEVICE="${device}" + PREFIX="aosp_" + BUILD="${BUILD_NUMBER}" + # shellcheck disable=SC2034 + PRODUCT="${DEVICE}" + TARGET_FILES="${DEVICE}-target_files-${BUILD}.zip" + BOOTLOADER=$(grep -Po "require version-bootloader=\K.+" "vendor/google_devices/${DEVICE}/vendor-board-info.txt" | tr '[:upper:]' '[:lower:]') + RADIO=$(grep -Po "require version-baseband=\K.+" "vendor/google_devices/${DEVICE}/vendor-board-info.txt" | tr '[:upper:]' '[:lower:]') + VERSION=$(grep -Po "BUILD_ID=\K.+" "build/core/build_id.mk" | tr '[:upper:]' '[:lower:]') + log "BOOTLOADER=${BOOTLOADER} RADIO=${RADIO} VERSION=${VERSION} TARGET_FILES=${TARGET_FILES}" + + # make sure output directory exists + mkdir -p "${OUT}" + + # pick avb mode depending on device and determine key size + avb_key_size=$(openssl rsa -in "${KEY_DIR}/avb.pem" -text -noout | grep 'Private-Key' | awk -F'[()]' '{print $2}' | awk '{print $1}') + log "avb_key_size=${avb_key_size}" + avb_algorithm="SHA256_RSA${avb_key_size}" + log "avb_algorithm=${avb_algorithm}" + case "${DEVICE_AVB_MODE}" in + vbmeta_chained) + AVB_SWITCHES=(--avb_vbmeta_key "${KEY_DIR}/avb.pem" + --avb_vbmeta_algorithm "${avb_algorithm}" + --avb_system_key "${KEY_DIR}/avb.pem" + --avb_system_algorithm "${avb_algorithm}") + ;; + vbmeta_chained_v2) + AVB_SWITCHES=(--avb_vbmeta_key "${KEY_DIR}/avb.pem" + --avb_vbmeta_algorithm "${avb_algorithm}" + --avb_system_key "${KEY_DIR}/avb.pem" + --avb_system_algorithm "${avb_algorithm}" + --avb_vbmeta_system_key "${KEY_DIR}/avb.pem" + --avb_vbmeta_system_algorithm "${avb_algorithm}") + ;; + esac + + export PATH="${RELEASE_TOOLS_DIR}/bin:${PATH}" + export PATH="${AOSP_BUILD_DIR}/prebuilts/jdk/jdk9/linux-x86/bin:${PATH}" + + log "Running sign_target_files_apks" + "${RELEASE_TOOLS_DIR}/releasetools/sign_target_files_apks" \ + -o -d "${KEY_DIR}" \ + -k "${AOSP_BUILD_DIR}/build/target/product/security/networkstack=${KEY_DIR}/networkstack" "${AVB_SWITCHES[@]}" \ + "${AOSP_BUILD_DIR}/out/target/product/${DEVICE}/obj/PACKAGING/target_files_intermediates/${PREFIX}${DEVICE}-target_files-${BUILD_NUMBER}.zip" \ + "${OUT}/${TARGET_FILES}" + + log "Running ota_from_target_files" + # shellcheck disable=SC2068 + "${RELEASE_TOOLS_DIR}/releasetools/ota_from_target_files" --block -k "${KEY_DIR}/releasekey" ${DEVICE_EXTRA_OTA[@]} "${OUT}/${TARGET_FILES}" \ + "${OUT}/${DEVICE}-ota_update-${BUILD}.zip" + + log "Running img_from_target_files" + "${RELEASE_TOOLS_DIR}/releasetools/img_from_target_files" "${OUT}/${TARGET_FILES}" "${OUT}/${DEVICE}-img-${BUILD}.zip" + + log "Running generate-factory-images" + cd "${OUT}" + source "../../device/common/generate-factory-images-common.sh" + mv "${DEVICE}"-*-factory-*.zip "${DEVICE}-factory-${BUILD_NUMBER}.zip" + ) + + run_hook_if_exists "release_post" +} + +upload() { + log_header "${FUNCNAME[0]}" + run_hook_if_exists "upload_pre" + cd "${AOSP_BUILD_DIR}/out" + + build_channel="stable" 
+ release_channel="${DEVICE}-${build_channel}" + build_date="$(< soong/build_number.txt)" + build_timestamp="$(unzip -p "release-${DEVICE}-${build_date}/${DEVICE}-ota_update-${build_date}.zip" "META-INF/com/android/metadata" | grep 'post-timestamp' | cut --delimiter "=" --fields 2)" + old_metadata=$(get_current_metadata "${release_channel}") + old_date="$(cut -d ' ' -f 1 <<< "${old_metadata}")" + + # upload ota and set metadata + upload_build_artifact "${AOSP_BUILD_DIR}/out/release-${DEVICE}-${build_date}/${DEVICE}-ota_update-${build_date}.zip" "${DEVICE}-ota_update-${build_date}.zip" "public" + set_current_metadata "${release_channel}" "${build_date} ${build_timestamp} ${AOSP_BUILD_ID}" "public" + set_current_metadata "${release_channel}-true-timestamp" "${build_timestamp}" "public" + + # cleanup old ota + delete_build_artifact "${DEVICE}-ota_update-${old_date}.zip" + + # upload factory image + upload_build_artifact "${AOSP_BUILD_DIR}/out/release-${DEVICE}-${build_date}/${DEVICE}-factory-${build_date}.zip" "${DEVICE}-factory-latest.zip" + + run_hook_if_exists "upload_post" +} + +checkpoint_versions() { + log_header "${FUNCNAME[0]}" + run_hook_if_exists "checkpoint_versions_pre" + + set_current_metadata "${DEVICE}-vendor" "${AOSP_BUILD_ID}" "public" + set_current_metadata "release" "${RELEASE}" + set_current_metadata "rattlesnakeos-stack/revision" "${STACK_VERSION}" + if [ "${CHROMIUM_BUILD_DISABLED}" == "false" ]; then + set_current_metadata "chromium/included" "yes" + fi + + run_hook_if_exists "checkpoint_versions_post" +} + +gen_keys() { + log_header "${FUNCNAME[0]}" + + # download make_key and avbtool as aosp tree isn't downloaded yet + make_key="${MISC_DIR}/make_key" + retry curl --fail -s "https://android.googlesource.com/platform/development/+/refs/tags/${AOSP_TAG}/tools/make_key?format=TEXT" | base64 --decode > "${make_key}" + chmod +x "${make_key}" + avb_tool="${MISC_DIR}/avbtool" + retry curl --fail -s "https://android.googlesource.com/platform/external/avb/+/refs/tags/${AOSP_TAG}/avbtool?format=TEXT" | base64 --decode > "${avb_tool}" + chmod +x "${avb_tool}" + + # generate releasekey,platform,shared,media,networkstack keys + mkdir -p "${KEYS_DIR}/${DEVICE}" + cd "${KEYS_DIR}/${DEVICE}" + for key in {releasekey,platform,shared,media,networkstack} ; do + # make_key exits with unsuccessful code 1 instead of 0, need ! to negate + ! 
"${make_key}" "${key}" "/CN=RattlesnakeOS" + done + + # generate avb key + openssl genrsa -out "${KEYS_DIR}/${DEVICE}/avb.pem" 4096 + "${avb_tool}" extract_public_key --key "${KEYS_DIR}/${DEVICE}/avb.pem" --output "${KEYS_DIR}/${DEVICE}/avb_pkmd.bin" + + # generate chromium.keystore + cd "${KEYS_DIR}/${DEVICE}" + keytool -genkey -v -keystore chromium.keystore -storetype pkcs12 -alias chromium -keyalg RSA -keysize 4096 \ + -sigalg SHA512withRSA -validity 10000 -dname "cn=RattlesnakeOS" -storepass chromium +} + +run_hook_if_exists() { + local hook_name="${1}" + local core_hook_file="${CORE_DIR}/hooks/${hook_name}.sh" + local custom_hook_file="${CUSTOM_DIR}/hooks/${hook_name}.sh" + + if [ -n "${core_hook_file}" ]; then + if [ -f "${core_hook_file}" ]; then + log "Running ${core_hook_file}" + # shellcheck disable=SC1090 + (source "${core_hook_file}") + fi + fi + + if [ -n "${custom_hook_file}" ]; then + if [ -f "${custom_hook_file}" ]; then + log "Running ${custom_hook_file}" + # shellcheck disable=SC1090 + (source "${custom_hook_file}") + fi + fi +} + +log_header() { + echo "==================================" + echo "$(date "+%Y-%m-%d %H:%M:%S"): Running $1" + echo "==================================" +} + +log() { + echo "$(date "+%Y-%m-%d %H:%M:%S"): $1" +} + +retry() { + set +e + local max_attempts=${ATTEMPTS-3} + local timeout=${TIMEOUT-1} + local attempt=0 + local exitCode=0 + + while [[ $attempt < $max_attempts ]] + do + "$@" + exitCode=$? + + if [[ $exitCode == 0 ]] + then + break + fi + + log "Failure! Retrying ($*) in $timeout.." + sleep "${timeout}" + attempt=$(( attempt + 1 )) + timeout=$(( timeout * 2 )) + done + + if [[ $exitCode != 0 ]] + then + log "Failed too many times! ($*)" + fi + + set -e + + return $exitCode +} + +######################################## +######## CHROMIUM ###################### +######################################## + +chromium_build_if_required() { + log_header "${FUNCNAME[0]}" + + if [ "${CHROMIUM_BUILD_DISABLED}" == "true" ]; then + log "Chromium build is disabled" + return + fi + + current=$(get_current_metadata "chromium/revision") + log "Chromium current: ${current}" + + log "Chromium requested: ${CHROMIUM_VERSION}" + if [ "${CHROMIUM_VERSION}" == "${current}" ]; then + log "Chromium requested (${CHROMIUM_VERSION}) matches current (${current})" + else + log "Building chromium ${CHROMIUM_VERSION}" + build_chromium "${CHROMIUM_VERSION}" + fi + + log "Deleting chromium directory ${CHROMIUM_BUILD_DIR}" + rm -rf "${CHROMIUM_BUILD_DIR}" +} + +build_chromium() { + log_header "${FUNCNAME[0]}" + CHROMIUM_REVISION="$1" + CHROMIUM_DEFAULT_VERSION=$(echo "${CHROMIUM_REVISION}" | awk -F"." '{ printf "%s%03d52\n",$3,$4}') + + ( + # depot tools setup + if [ ! 
-d "${MISC_DIR}/depot_tools" ]; then + retry git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git "${MISC_DIR}/depot_tools" + fi + export PATH="${PATH}:${MISC_DIR}/depot_tools" + + # fetch chromium + CHROMIUM_BUILD_DIR="${ROOT_DIR}/chromium" + mkdir -p "${CHROMIUM_BUILD_DIR}" + cd "${CHROMIUM_BUILD_DIR}" + fetch --nohooks android + cd src + + # checkout specific revision + git checkout "${CHROMIUM_REVISION}" -f + + # install dependencies + echo ttf-mscorefonts-installer msttcorefonts/accepted-mscorefonts-eula select true | sudo debconf-set-selections + log "Installing chromium build dependencies" + + sudo ./build/install-build-deps-android.sh + + # run gclient sync (runhooks will run as part of this) + log "Running gclient sync (this takes a while)" + for i in {1..5}; do + yes | gclient sync --with_branch_heads --jobs 32 -RDf && break + done + + # cleanup any files in tree not part of this revision + git clean -dff + + # reset any modifications + git checkout -- . + + # generate configuration + KEYSTORE="${KEYS_DIR}/${DEVICE}/chromium.keystore" + trichrome_certdigest=$(keytool -export-cert -alias chromium -keystore "${KEYSTORE}" -storepass chromium | sha256sum | awk '{print $1}') + log "trichrome_certdigest=${trichrome_certdigest}" + mkdir -p out/Default + cat < out/Default/args.gn +target_os = "android" +target_cpu = "arm64" +android_channel = "stable" +android_default_version_name = "${CHROMIUM_REVISION}" +android_default_version_code = "${CHROMIUM_DEFAULT_VERSION}" +is_component_build = false +is_debug = false +is_official_build = true +symbol_level = 1 +fieldtrial_testing_like_official_build = true +ffmpeg_branding = "Chrome" +proprietary_codecs = true +is_cfi = true +enable_gvr_services = false +enable_remoting = false +enable_reporting = true + +trichrome_certdigest = "${trichrome_certdigest}" +chrome_public_manifest_package = "org.chromium.chrome" +system_webview_package_name = "org.chromium.webview" +trichrome_library_package = "org.chromium.trichromelibrary" +EOF + gn gen out/Default + + run_hook_if_exists "build_chromium_pre" + + log "Building trichrome" + autoninja -C out/Default/ trichrome_webview_apk trichrome_chrome_bundle trichrome_library_apk + + log "Signing trichrome" + BUNDLETOOL="${CHROMIUM_BUILD_DIR}/src/build/android/gyp/bundletool.py" + AAPT2="${CHROMIUM_BUILD_DIR}/src/third_party/android_build_tools/aapt2/aapt2" + APKSIGNER="${CHROMIUM_BUILD_DIR}/src/third_party/android_sdk/public/build-tools/30.0.1/apksigner" + cd out/Default/apks + rm -rf release + mkdir release + cd release + "${BUNDLETOOL}" build-apks --aapt2 "${AAPT2}" --bundle "../TrichromeChrome.aab" --output "TrichromeChrome.apks" \ + --mode=universal --ks "${KEYSTORE}" --ks-pass pass:chromium --ks-key-alias chromium + unzip "TrichromeChrome.apks" "universal.apk" + mv "universal.apk" "TrichromeChrome.apk" + for app in TrichromeLibrary TrichromeWebView; do + "${APKSIGNER}" sign --ks "${KEYSTORE}" --ks-pass pass:chromium --ks-key-alias chromium --in "../${app}.apk" --out "${app}.apk" + done + + log "Uploading trichrome apks" + upload_build_artifact "TrichromeLibrary.apk" "chromium/TrichromeLibrary.apk" + upload_build_artifact "TrichromeWebView.apk" "chromium/TrichromeWebView.apk" + upload_build_artifact "TrichromeChrome.apk" "chromium/TrichromeChrome.apk" + set_current_metadata "chromium/revision" "${CHROMIUM_REVISION}" + + run_hook_if_exists "build_chromium_post" + ) +} + +chromium_copy_to_build_tree_if_required() { + log_header "${FUNCNAME[0]}" + + if [ "${CHROMIUM_BUILD_DISABLED}" == 
"true" ]; then + log "Chromium build is disabled" + return + fi + + # add latest built chromium to external/chromium + download_build_artifact "chromium/TrichromeLibrary.apk" "${AOSP_BUILD_DIR}/external/chromium/prebuilt/arm64/" + download_build_artifact "chromium/TrichromeWebView.apk" "${AOSP_BUILD_DIR}/external/chromium/prebuilt/arm64/" + download_build_artifact "chromium/TrichromeChrome.apk" "${AOSP_BUILD_DIR}/external/chromium/prebuilt/arm64/" +} + +trap cleanup 0 + +set -e + +full_run \ No newline at end of file diff --git a/templates/build_template.go b/templates/build_template.go deleted file mode 100644 index 289cbc0f..00000000 --- a/templates/build_template.go +++ /dev/null @@ -1,1428 +0,0 @@ -package templates - -const BuildTemplate = `#!/bin/bash - -if [ $# -lt 1 ]; then - echo "Need to specify device name as argument" - exit 1 -fi - -# check if supported device -DEVICE=$1 -case "${DEVICE}" in - taimen) - DEVICE_FAMILY=taimen - DEVICE_COMMON=wahoo - AVB_MODE=vbmeta_simple - ;; - walleye) - DEVICE_FAMILY=muskie - DEVICE_COMMON=wahoo - AVB_MODE=vbmeta_simple - ;; - crosshatch|blueline) - DEVICE_FAMILY=crosshatch - DEVICE_COMMON=crosshatch - AVB_MODE=vbmeta_chained - EXTRA_OTA=(--retrofit_dynamic_partitions) - ;; - sargo|bonito) - DEVICE_FAMILY=bonito - DEVICE_COMMON=bonito - AVB_MODE=vbmeta_chained - EXTRA_OTA=(--retrofit_dynamic_partitions) - ;; - flame|coral) - DEVICE_FAMILY=coral - DEVICE_COMMON=coral - AVB_MODE=vbmeta_chained_v2 - ;; - sunfish) - DEVICE_FAMILY=sunfish - DEVICE_COMMON=sunfish - AVB_MODE=vbmeta_chained_v2 - ;; - redfin) - DEVICE_FAMILY=redfin - DEVICE_COMMON=redfin - AVB_MODE=vbmeta_chained_v2 - ;; - *) - echo "error: unknown device ${DEVICE}" - exit 1 - ;; -esac - -# this is a build time option to override stack setting IGNORE_VERSION_CHECKS -FORCE_BUILD=false -if [ "$2" = true ]; then - echo "Setting FORCE_BUILD=true" - FORCE_BUILD=true -fi - -# allow build and branch to be specified -AOSP_BUILD=$3 -AOSP_BRANCH=$4 -AOSP_VENDOR_BUILD= - -# set region -REGION=<% .Region %> -export AWS_DEFAULT_REGION=${REGION} - -# stack name -STACK_NAME=<% .Name %> - -# version of stack running -STACK_VERSION=<% .Version %> - -# prevent default action of shutting down on exit -PREVENT_SHUTDOWN=<% .PreventShutdown %> - -# whether version checks should be ignored -IGNORE_VERSION_CHECKS=<% .IgnoreVersionChecks %> - -# version of chromium to pin to if requested -CHROMIUM_PINNED_VERSION=<% .ChromiumVersion %> - -# whether keys are client side encrypted or not -ENCRYPTED_KEYS="<% .EncryptedKeys %>" -ENCRYPTION_KEY= -ENCRYPTION_PIPE="/tmp/key" - -# pin to specific version of android -ANDROID_VERSION="11.0" - -# build type (user or userdebug) -BUILD_TYPE="user" - -# build channel (stable or beta) -BUILD_CHANNEL="stable" - -# user customizable things -HOSTS_FILE=<% .HostsFile %> - -# aws settings -AWS_KEYS_BUCKET="${STACK_NAME}-keys" -AWS_ENCRYPTED_KEYS_BUCKET="${STACK_NAME}-keys-encrypted" -AWS_RELEASE_BUCKET="${STACK_NAME}-release" -AWS_LOGS_BUCKET="${STACK_NAME}-logs" -AWS_SNS_ARN=$(aws --region ${REGION} sns list-topics --query 'Topics[0].TopicArn' --output text | cut -d":" -f1,2,3,4,5)":${STACK_NAME}" -INSTANCE_TYPE=$(curl -s http://169.254.169.254/latest/meta-data/instance-type) -INSTANCE_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | awk -F\" '/region/ {print $4}') -INSTANCE_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) - -# build settings -SECONDS=0 -BUILD_TARGET="release aosp_${DEVICE} ${BUILD_TYPE}" 
-RELEASE_URL="https://${AWS_RELEASE_BUCKET}.s3.amazonaws.com" -RELEASE_CHANNEL="${DEVICE}-${BUILD_CHANNEL}" -BUILD_DATE=$(date +%Y.%m.%d.%H) -BUILD_TIMESTAMP=$(date +%s) -BUILD_DIR="${HOME}/rattlesnake-os" -KEYS_DIR="${BUILD_DIR}/keys" -CERTIFICATE_SUBJECT='/CN=RattlesnakeOS' -OFFICIAL_FDROID_KEY="43238d512c1e5eb2d6569f4a3afbf5523418b82e0a3ed1552770abb9a9c9ccab" -BUILD_REASON="" - -# urls -MANIFEST_URL="https://android.googlesource.com/platform/manifest" -STACK_URL_LATEST="https://api.github.com/repos/dan-v/rattlesnakeos-stack/releases/latest" -RATTLESNAKEOS_LATEST_JSON="https://raw.githubusercontent.com/RattlesnakeOS/latest/${ANDROID_VERSION}/latest.json" - -STACK_UPDATE_MESSAGE= -LATEST_STACK_VERSION= -LATEST_CHROMIUM= -FDROID_CLIENT_VERSION= -FDROID_PRIV_EXT_VERSION= - -full_run() { - log_header "${FUNCNAME[0]}" - - get_latest_versions - check_for_new_versions - initial_key_setup - aws_notify "RattlesnakeOS Build STARTED" - setup_env - aws_import_keys - check_chromium - aosp_repo_init - aosp_repo_modifications - aosp_repo_sync - setup_vendor - build_fdroid - add_chromium - apply_patches - build_aosp - release - aws_upload - checkpoint_versions - aws_notify "RattlesnakeOS Build SUCCESS" -} - -get_latest_versions() { - log_header "${FUNCNAME[0]}" - - sudo DEBIAN_FRONTEND=noninteractive apt-get -y install jq - - # check if running latest stack - LATEST_STACK_VERSION=$(curl --fail -s "${STACK_URL_LATEST}" | jq -r '.name') - if [ -z "${LATEST_STACK_VERSION}" ]; then - aws_notify_simple "ERROR: Unable to get latest rattlesnakeos-stack version details. Stopping build." - exit 1 - elif [ "${LATEST_STACK_VERSION}" == "${STACK_VERSION}" ]; then - log "Running the latest rattlesnakeos-stack version ${LATEST_STACK_VERSION}" - else - STACK_UPDATE_MESSAGE="WARNING: you should upgrade to the latest version: ${LATEST_STACK_VERSION}" - fi - - # download latest.json and use for remaining checks - curl --fail -s "${RATTLESNAKEOS_LATEST_JSON}" > "${HOME}/latest.json" - - # check for latest chromium version - LATEST_CHROMIUM=$(jq -r '.chromium' "${HOME}/latest.json") - if [ -z "${LATEST_CHROMIUM}" ]; then - aws_notify_simple "ERROR: Unable to get latest Chromium version details. Stopping build." - exit 1 - fi - log "LATEST_CHROMIUM=${LATEST_CHROMIUM}" - - FDROID_CLIENT_VERSION=$(jq -r '.fdroid.client' "${HOME}/latest.json") - if [ -z "${FDROID_CLIENT_VERSION}" ]; then - aws_notify_simple "ERROR: Unable to get latest F-Droid version details. Stopping build." - exit 1 - fi - log "FDROID_CLIENT_VERSION=${FDROID_CLIENT_VERSION}" - - FDROID_PRIV_EXT_VERSION=$(jq -r '.fdroid.privilegedextention' "${HOME}/latest.json") - if [ -z "${FDROID_PRIV_EXT_VERSION}" ]; then - aws_notify_simple "ERROR: Unable to get latest F-Droid privilege extension version details. Stopping build." - exit 1 - fi - log "FDROID_PRIV_EXT_VERSION=${FDROID_PRIV_EXT_VERSION}" - - AOSP_VENDOR_BUILD=$(jq -r ".devices.${DEVICE}.build_id" "${HOME}/latest.json") - if [ -z "${AOSP_VENDOR_BUILD}" ]; then - aws_notify_simple "ERROR: Unable to get latest AOSP build version details. Stopping build." - exit 1 - fi - if [ -z "${AOSP_BUILD}" ]; then - AOSP_BUILD="${AOSP_VENDOR_BUILD}" - fi - log "AOSP_VENDOR_BUILD=${AOSP_VENDOR_BUILD}" - log "AOSP_BUILD=${AOSP_BUILD}" - - if [ -z "${AOSP_BRANCH}" ]; then - AOSP_BRANCH=$(jq -r ".devices.${DEVICE}.aosp_tag" "${HOME}/latest.json") - if [ -z "${AOSP_BRANCH}" ]; then - aws_notify_simple "ERROR: Unable to get latest AOSP branch details. Stopping build." 
- exit 1 - fi - fi - log "AOSP_BRANCH=${AOSP_BRANCH}" - -} - -check_for_new_versions() { - log_header "${FUNCNAME[0]}" - - log "Checking if any new versions of software exist" - needs_update=false - - # check stack version - existing_stack_version=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/rattlesnakeos-stack/revision" - || true) - if [ "${existing_stack_version}" == "${STACK_VERSION}" ]; then - log "Stack version (${existing_stack_version}) is up to date" - else - log "Last successful build (if there was one) is not with current stack version ${STACK_VERSION}" - needs_update=true - BUILD_REASON="'Stack version ${existing_stack_version} != ${STACK_VERSION}'" - fi - - # check aosp - existing_aosp_build=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-vendor" - || true) - if [ "${existing_aosp_build}" == "${AOSP_VENDOR_BUILD}" ]; then - log "AOSP build (${existing_aosp_build}) is up to date" - else - log "AOSP needs to be updated to ${AOSP_VENDOR_BUILD}" - needs_update=true - BUILD_REASON="${BUILD_REASON} 'AOSP build ${existing_aosp_build} != ${AOSP_VENDOR_BUILD}'" - fi - - # check chromium - if [ -n "${CHROMIUM_PINNED_VERSION}" ]; then - log "Setting LATEST_CHROMIUM to pinned version ${CHROMIUM_PINNED_VERSION}" - LATEST_CHROMIUM="${CHROMIUM_PINNED_VERSION}" - fi - existing_chromium=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/chromium/revision" - || true) - chromium_included=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/chromium/included" - || true) - if [ "${existing_chromium}" == "${LATEST_CHROMIUM}" ] && [ "${chromium_included}" == "yes" ]; then - log "Chromium build (${existing_chromium}) is up to date" - else - log "Chromium needs to be updated to ${LATEST_CHROMIUM}" - echo "no" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/chromium/included" - needs_update=true - if [ "${existing_chromium}" == "${LATEST_CHROMIUM}" ]; then - BUILD_REASON="${BUILD_REASON} 'Chromium version ${existing_chromium} built but not installed'" - else - BUILD_REASON="${BUILD_REASON} 'Chromium version ${existing_chromium} != ${LATEST_CHROMIUM}'" - fi - fi - - # check fdroid - existing_fdroid_client=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/fdroid/revision" - || true) - if [ "${existing_fdroid_client}" == "${FDROID_CLIENT_VERSION}" ]; then - log "F-Droid build (${existing_fdroid_client}) is up to date" - else - log "F-Droid needs to be updated to ${FDROID_CLIENT_VERSION}" - needs_update=true - BUILD_REASON="${BUILD_REASON} 'F-Droid version ${existing_fdroid_client} != ${FDROID_CLIENT_VERSION}'" - fi - - # check fdroid priv extension - existing_fdroid_priv_version=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/fdroid-priv/revision" - || true) - if [ "${existing_fdroid_priv_version}" == "${FDROID_PRIV_EXT_VERSION}" ]; then - log "F-Droid privileged extension build (${existing_fdroid_priv_version}) is up to date" - else - log "F-Droid privileged extension needs to be updated to ${FDROID_PRIV_EXT_VERSION}" - needs_update=true - BUILD_REASON="${BUILD_REASON} 'F-Droid privileged extension ${existing_fdroid_priv_version} != ${FDROID_PRIV_EXT_VERSION}'" - fi - - if [ "${needs_update}" = true ]; then - log "New build is required" - else - if [ "${FORCE_BUILD}" = true ]; then - message="No build is required, but FORCE_BUILD=true" - log "${message}" - BUILD_REASON="${message}" - elif [ "${IGNORE_VERSION_CHECKS}" = true ]; then - message="No build is required, but IGNORE_VERSION_CHECKS=true" - log "${message}" - BUILD_REASON="${message}" - else - aws_notify "RattlesnakeOS build not required as all components are already up to date." 
- exit 0 - fi - fi - - if [ -z "${existing_stack_version}" ]; then - BUILD_REASON="Initial build" - fi -} - -add_chromium() { - log_header "${FUNCNAME[0]}" - - # add latest built chromium to external/chromium - aws s3 cp "s3://${AWS_RELEASE_BUCKET}/chromium/TrichromeLibrary.apk" "${BUILD_DIR}/external/chromium/prebuilt/arm64/" - aws s3 cp "s3://${AWS_RELEASE_BUCKET}/chromium/TrichromeWebView.apk" "${BUILD_DIR}/external/chromium/prebuilt/arm64/" - aws s3 cp "s3://${AWS_RELEASE_BUCKET}/chromium/TrichromeChrome.apk" "${BUILD_DIR}/external/chromium/prebuilt/arm64/" - - cat < "${BUILD_DIR}/frameworks/base/core/res/res/xml/config_webview_packages.xml" - - - - - -EOF -} - -build_fdroid() { - log_header "${FUNCNAME[0]}" - - pushd "${HOME}" - # install gradle - gradle_version="6.6.1" - if [ ! -f "${HOME}/gradle/gradle-${gradle_version}/bin/gradle" ]; then - retry wget -q "https://downloads.gradle-dn.com/distributions/gradle-${gradle_version}-bin.zip" -O "gradle-${gradle_version}-bin.zip" - mkdir -p "${HOME}/gradle" - unzip -d "${HOME}/gradle" "gradle-${gradle_version}-bin.zip" - fi - export PATH="${PATH}:${HOME}/gradle/gradle-${gradle_version}/bin" - popd - - # setup android sdk root/paths, commandline tools and install build-tools - export ANDROID_SDK_ROOT="${HOME}/sdk" - export ANDROID_HOME="${ANDROID_SDK_ROOT}" - export PATH="${PATH}:${ANDROID_SDK_ROOT}/cmdline-tools/tools" - export PATH="${PATH}:${ANDROID_SDK_ROOT}/cmdline-tools/tools/bin" - export PATH="${PATH}:${ANDROID_SDK_ROOT}/platform-tools" - if [ ! -f "${ANDROID_SDK_ROOT}/cmdline-tools/tools/bin/sdkmanager" ]; then - mkdir -p "${ANDROID_SDK_ROOT}/cmdline-tools" - pushd "${ANDROID_SDK_ROOT}/cmdline-tools" - retry wget -q "https://dl.google.com/android/repository/commandlinetools-linux-6609375_latest.zip" -O commandline-tools.zip - unzip commandline-tools.zip - yes | sdkmanager --licenses - sdkmanager --update - popd - fi - - # build it outside AOSP build tree or hit errors - rm -rf "${HOME}/fdroidclient" - git clone https://gitlab.com/fdroid/fdroidclient "${HOME}/fdroidclient" - pushd "${HOME}/fdroidclient" - git checkout "${FDROID_CLIENT_VERSION}" - retry gradle assembleRelease - - # copy to AOSP build tree - cp -f "app/build/outputs/apk/full/release/app-full-release-unsigned.apk" "${BUILD_DIR}/packages/apps/F-Droid/F-Droid.apk" - popd -} - -get_encryption_key() { - additional_message="" - if [ "$(aws s3 ls "s3://${AWS_ENCRYPTED_KEYS_BUCKET}/${DEVICE}" | wc -l)" == '0' ]; then - additional_message="Since you have no encrypted signing keys in s3://${AWS_ENCRYPTED_KEYS_BUCKET}/${DEVICE} yet - new signing keys will be generated and encrypted with provided passphrase." - fi - - wait_time="10m" - error_message="" - while true; do - aws sns publish --region ${REGION} --topic-arn "${AWS_SNS_ARN}" \ - --message="$(printf "%s Need to login to the EC2 instance and provide the encryption passphrase (${wait_time} timeout before shutdown). You may need to open up SSH in the default security group, see the FAQ for details. %s\n\nssh ubuntu@%s 'printf \"Enter encryption passphrase: \" && read -s k && echo \"\$k\" > %s'" "${error_message}" "${additional_message}" "${INSTANCE_IP}" "${ENCRYPTION_PIPE}")" - error_message="" - - log "Waiting for encryption passphrase (with ${wait_time} timeout) to be provided over named pipe ${ENCRYPTION_PIPE}" - set +e - ENCRYPTION_KEY=$(timeout ${wait_time} cat ${ENCRYPTION_PIPE}) - if [ $? 
-ne 0 ]; then - set -e - log "Timeout (${wait_time}) waiting for encryption passphrase" - aws_notify_simple "Timeout (${wait_time}) waiting for encryption passphrase. Terminating build process." - exit 1 - fi - set -e - if [ -z "${ENCRYPTION_KEY}" ]; then - error_message="ERROR: Empty encryption passphrase received - try again." - log "${error_message}" - continue - fi - log "Received encryption passphrase over named pipe ${ENCRYPTION_PIPE}" - - if [ "$(aws s3 ls "s3://${AWS_ENCRYPTED_KEYS_BUCKET}/${DEVICE}" | wc -l)" == '0' ]; then - log "No existing encrypting keys - new keys will be generated later in build process." - else - log "Verifying encryption passphrase is valid by syncing encrypted signing keys from S3 and decrypting" - aws s3 sync "s3://${AWS_ENCRYPTED_KEYS_BUCKET}" "${KEYS_DIR}" - - decryption_error=false - set +e - for f in $(find "${KEYS_DIR}" -type f -name '*.gpg'); do - output_file=$(echo "${f}" | awk -F".gpg" '{print $1}') - log "Decrypting ${f} to ${output_file}..." - gpg -d --batch --passphrase "${ENCRYPTION_KEY}" "${f}" > "${output_file}" - if [ $? -ne 0 ]; then - log "Failed to decrypt ${f}" - decryption_error=true - fi - done - set -e - if [ "${decryption_error}" = true ]; then - log - error_message="ERROR: Failed to decrypt signing keys with provided passphrase - try again." - log "${error_message}" - continue - fi - fi - break - done -} - -initial_key_setup() { - # setup in memory file system to hold keys - log "Mounting in memory filesystem at ${KEYS_DIR} to hold keys" - mkdir -p "${KEYS_DIR}" - sudo mount -t tmpfs -o size=20m tmpfs "${KEYS_DIR}" || true - - # additional steps for getting encryption key up front - if [ "${ENCRYPTED_KEYS}" = true ]; then - log "Encrypted keys option was specified" - - # send warning if user has selected encrypted keys option but still has normal keys - if [ "$(aws s3 ls "s3://${AWS_KEYS_BUCKET}/${DEVICE}" | wc -l)" != '0' ]; then - if [ "$(aws s3 ls "s3://${AWS_ENCRYPTED_KEYS_BUCKET}/${DEVICE}" | wc -l)" == '0' ]; then - aws_notify_simple "It looks like you have selected --encrypted-keys option and have existing signing keys in s3://${AWS_KEYS_BUCKET}/${DEVICE} but you haven't migrated your keys to s3://${AWS_ENCRYPTED_KEYS_BUCKET}/${DEVICE}. This means new encrypted signing keys will be generated and you'll need to flash a new factory image on your device. If you want to keep your existing keys - cancel this build and follow the steps on migrating your keys in the FAQ." - fi - fi - - sudo DEBIAN_FRONTEND=noninteractive apt-get -y install gpg - if [ ! 
-e "${ENCRYPTION_PIPE}" ]; then - mkfifo "${ENCRYPTION_PIPE}" - fi - - get_encryption_key - fi -} - -setup_env() { - log_header "${FUNCNAME[0]}" - - # setup build dir - mkdir -p "${BUILD_DIR}" - - # install required packages - sudo apt-get update - sudo DEBIAN_FRONTEND=noninteractive apt-get -y install python3 repo gperf jq default-jdk git-core gnupg \ - flex bison build-essential zip curl zlib1g-dev gcc-multilib g++-multilib libc6-dev-i386 lib32ncurses5-dev \ - x11proto-core-dev libx11-dev lib32z-dev ccache libgl1-mesa-dev libxml2-utils xsltproc unzip python-networkx liblz4-tool pxz - sudo DEBIAN_FRONTEND=noninteractive apt-get -y build-dep "linux-image-$(uname --kernel-release)" - - retry curl --fail -s https://storage.googleapis.com/git-repo-downloads/repo > /tmp/repo - chmod +x /tmp/repo - sudo mv /tmp/repo /usr/local/bin/ - - # setup git - git config --get --global user.name || git config --global user.name 'aosp' - git config --get --global user.email || git config --global user.email 'aosp@localhost' - git config --global color.ui true -} - -check_chromium() { - log_header "${FUNCNAME[0]}" - - current=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/chromium/revision" - || true) - log "Chromium current: ${current}" - - log "Chromium latest: ${LATEST_CHROMIUM}" - if [ "${LATEST_CHROMIUM}" == "${current}" ]; then - log "Chromium latest (${LATEST_CHROMIUM}) matches current (${current})" - else - log "Building chromium ${LATEST_CHROMIUM}" - build_chromium "${LATEST_CHROMIUM}" - fi - - log "Deleting chromium directory ${HOME}/chromium" - rm -rf "${HOME}/chromium" -} - -build_chromium() { - log_header "${FUNCNAME[0]}" - - CHROMIUM_REVISION="$1" - DEFAULT_VERSION=$(echo "${CHROMIUM_REVISION}" | awk -F"." '{ printf "%s%03d52\n",$3,$4}') - - # depot tools setup - if [ ! -d "${HOME}/depot_tools" ]; then - retry git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git "${HOME}/depot_tools" - fi - export PATH="${PATH}:${HOME}/depot_tools" - - # fetch chromium - mkdir -p "${HOME}/chromium" - cd "${HOME}/chromium" - fetch --nohooks android - cd src - - # checkout specific revision - git checkout "${CHROMIUM_REVISION}" -f - - # install dependencies - echo ttf-mscorefonts-installer msttcorefonts/accepted-mscorefonts-eula select true | sudo debconf-set-selections - log "Installing chromium build dependencies" - sudo ./build/install-build-deps-android.sh - - # run gclient sync (runhooks will run as part of this) - log "Running gclient sync (this takes a while)" - for i in {1..5}; do - yes | gclient sync --with_branch_heads --jobs 32 -RDf && break - done - - # cleanup any files in tree not part of this revision - git clean -dff - - # reset any modifications - git checkout -- . 
- - # generate configuration - KEYSTORE="${KEYS_DIR}/${DEVICE}/chromium.keystore" - trichrome_certdigest=$(keytool -export-cert -alias chromium -keystore "${KEYSTORE}" -storepass chromium | sha256sum | awk '{print $1}') - log "trichrome_certdigest=${trichrome_certdigest}" - mkdir -p out/Default - cat < out/Default/args.gn -target_os = "android" -target_cpu = "arm64" -android_channel = "stable" -android_default_version_name = "${CHROMIUM_REVISION}" -android_default_version_code = "${DEFAULT_VERSION}" -is_component_build = false -is_debug = false -is_official_build = true -symbol_level = 1 -fieldtrial_testing_like_official_build = true -ffmpeg_branding = "Chrome" -proprietary_codecs = true -is_cfi = true -enable_gvr_services = false -enable_remoting = false -enable_reporting = true - -trichrome_certdigest = "${trichrome_certdigest}" -chrome_public_manifest_package = "org.chromium.chrome" -system_webview_package_name = "org.chromium.webview" -trichrome_library_package = "org.chromium.trichromelibrary" -EOF - gn gen out/Default - - log "Building trichrome" - autoninja -C out/Default/ trichrome_webview_apk trichrome_chrome_bundle trichrome_library_apk - - log "Signing trichrome" - BUNDLETOOL="${HOME}/chromium/src/build/android/gyp/bundletool.py" - AAPT2="${HOME}/chromium/src/third_party/android_build_tools/aapt2/aapt2" - find "${HOME}/chromium/src" | grep 'apksigner' || true - APKSIGNER="${HOME}/chromium/src/third_party/android_sdk/public/build-tools/30.0.1/apksigner" - cd out/Default/apks - rm -rf release - mkdir release - cd release - "${BUNDLETOOL}" build-apks --aapt2 "${AAPT2}" --bundle "../TrichromeChrome.aab" --output "TrichromeChrome.apks" \ - --mode=universal --ks "${KEYSTORE}" --ks-pass pass:chromium --ks-key-alias chromium - unzip "TrichromeChrome.apks" "universal.apk" - mv "universal.apk" "TrichromeChrome.apk" - for app in TrichromeLibrary TrichromeWebView; do - "${APKSIGNER}" sign --ks "${KEYSTORE}" --ks-pass pass:chromium --ks-key-alias chromium --in "../${app}.apk" --out "${app}.apk" - done - - log "Uploading trichrome apks to s3" - retry aws s3 cp "TrichromeLibrary.apk" "s3://${AWS_RELEASE_BUCKET}/chromium/TrichromeLibrary.apk" - retry aws s3 cp "TrichromeWebView.apk" "s3://${AWS_RELEASE_BUCKET}/chromium/TrichromeWebView.apk" - retry aws s3 cp "TrichromeChrome.apk" "s3://${AWS_RELEASE_BUCKET}/chromium/TrichromeChrome.apk" - echo "${CHROMIUM_REVISION}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/chromium/revision" -} - -aosp_repo_init() { - log_header "${FUNCNAME[0]}" - cd "${BUILD_DIR}" - - retry repo init --manifest-url "${MANIFEST_URL}" --manifest-branch "${AOSP_BRANCH}" --depth 1 || true -} - -aosp_repo_modifications() { - log_header "${FUNCNAME[0]}" - cd "${BUILD_DIR}" - - mkdir -p "${BUILD_DIR}/.repo/local_manifests" - - cat < "${BUILD_DIR}/.repo/local_manifests/rattlesnakeos.xml" - - - - - - - - - - - - - - - <% if .CustomManifestRemotes %> - <% range $i, $r := .CustomManifestRemotes %> - - <% end %> - <% end %> - <% if .CustomManifestProjects %><% range $i, $r := .CustomManifestProjects %> - - <% end %> - <% end %> - - -EOF - -} - -aosp_repo_sync() { - log_header "${FUNCNAME[0]}" - cd "${BUILD_DIR}" - - # sync with retries - for i in {1..10}; do - log "aosp repo sync attempt ${i}/10" - repo sync -c --no-tags --no-clone-bundle --jobs 32 && break - done -} - -setup_vendor() { - log_header "${FUNCNAME[0]}" - - # new dependency to extract ota partitions - # one of the scripts depends on python2, the other on python3 - sudo DEBIAN_FRONTEND=noninteractive apt-get -y install 
python-protobuf python3-protobuf python3-pip - pip3 install --user protobuf -U - - # get vendor files (with timeout) - timeout 30m "${BUILD_DIR}/vendor/android-prepare-vendor/execute-all.sh" --debugfs --yes --device "${DEVICE}" \ - --buildID "${AOSP_VENDOR_BUILD}" --output "${BUILD_DIR}/vendor/android-prepare-vendor" - - # copy vendor files to build tree - mkdir --parents "${BUILD_DIR}/vendor/google_devices" || true - rm -rf "${BUILD_DIR}/vendor/google_devices/${DEVICE}" || true - mv "${BUILD_DIR}/vendor/android-prepare-vendor/${DEVICE}/$(tr '[:upper:]' '[:lower:]' <<< "${AOSP_VENDOR_BUILD}")/vendor/google_devices/${DEVICE}" "${BUILD_DIR}/vendor/google_devices" - - # smaller devices need big brother vendor files - if [ "${DEVICE}" != "${DEVICE_FAMILY}" ]; then - rm -rf "${BUILD_DIR}/vendor/google_devices/${DEVICE_FAMILY}" || true - mv "${BUILD_DIR}/vendor/android-prepare-vendor/${DEVICE}/$(tr '[:upper:]' '[:lower:]' <<< "${AOSP_VENDOR_BUILD}")/vendor/google_devices/${DEVICE_FAMILY}" "${BUILD_DIR}/vendor/google_devices" - fi - - # workaround for libsdsprpc and libadsprpc not specifying LOCAL_SHARED_LIBRARIES - sed -i '/LOCAL_MODULE := libsdsprpc/a LOCAL_SHARED_LIBRARIES := libc++ libc libcutils libdl libion liblog libm' "${BUILD_DIR}/vendor/google_devices/${DEVICE}/Android.mk" || true - sed -i '/LOCAL_MODULE := libadsprpc/a LOCAL_SHARED_LIBRARIES := libc++ libc libcutils libdl libion liblog libm' "${BUILD_DIR}/vendor/google_devices/${DEVICE}/Android.mk" || true -} - -apply_patches() { - log_header "${FUNCNAME[0]}" - - patch_11_issues - patch_launcher - patch_disable_apex - patch_custom - patch_base_config - patch_device_config - patch_add_apps - patch_updater - patch_priv_ext -} - -patch_11_issues() { - log_header "${FUNCNAME[0]}" - - # workaround for vintf build issue - sed -i '1 i\BUILD_BROKEN_VINTF_PRODUCT_COPY_FILES := true' "${BUILD_DIR}/build/make/target/board/BoardConfigMainlineCommon.mk" - - # workaround for coral/flame build issue - sed -i 's@PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := strict@#PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := strict@' "${BUILD_DIR}/device/google/coral/aosp_coral.mk" || true - sed -i 's@PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := strict@#PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := strict@' "${BUILD_DIR}/device/google/coral/aosp_flame.mk" || true - - # biometrics was disabled (https://cs.android.com/android/_/android/platform/frameworks/base/+/ede919cace2a32ec235eefe86e04a75848bd1d5f) - # but never added upstream to device specific overlays - - # ID0:Fingerprint:Strong - biometric_sensors="0:2:15" - if [ "${DEVICE_COMMON}" == "coral" ]; then - # ID0:Face:Strong - biometric_sensors="0:8:15" - fi - if [ "${DEVICE_COMMON}" == "redfin" ] - then - sed -i '$ s/^<\/resources>//' "${BUILD_DIR}/device/google/${DEVICE_COMMON}/${DEVICE_COMMON}/overlay/frameworks/base/core/res/res/values/config.xml" - cat <> "${BUILD_DIR}/device/google/${DEVICE_COMMON}/${DEVICE_COMMON}/overlay/frameworks/base/core/res/res/values/config.xml" - - ${biometric_sensors} - - -EOF - else - sed -i '$ s/^<\/resources>//' "${BUILD_DIR}/device/google/${DEVICE_COMMON}/overlay/frameworks/base/core/res/res/values/config.xml" - cat <> "${BUILD_DIR}/device/google/${DEVICE_COMMON}/overlay/frameworks/base/core/res/res/values/config.xml" - - ${biometric_sensors} - - -EOF - fi -} - -patch_launcher() { - log_header "${FUNCNAME[0]}" - - # disable QuickSearchBox widget on home screen - sed -i "s/QSB_ON_FIRST_SCREEN = true;/QSB_ON_FIRST_SCREEN = false;/" 
"${BUILD_DIR}/packages/apps/Launcher3/src/com/android/launcher3/config/FeatureFlags.java" -} - -# currently don't have a need for apex updates (https://source.android.com/devices/tech/ota/apex) -patch_disable_apex() { - log_header "${FUNCNAME[0]}" - - # pixel 2 devices opt in here - sed -i 's@$(call inherit-product, $(SRC_TARGET_DIR)/product/updatable_apex.mk)@@' "${BUILD_DIR}/device/google/wahoo/device.mk" || true - # all other devices use mainline and opt in here - sed -i 's@$(call inherit-product, $(SRC_TARGET_DIR)/product/updatable_apex.mk)@@' "${BUILD_DIR}/build/make/target/product/mainline_system.mk" -} - -# TODO: most of this is fragile and unforgiving -patch_custom() { - log_header "${FUNCNAME[0]}" - - cd "${BUILD_DIR}" - - # allow custom patches to be applied - patches_dir="${HOME}/patches" - <% if .CustomPatches %> - <% range $i, $r := .CustomPatches %> - retry git clone <% if $r.Branch %>--branch <% $r.Branch %><% end %> <% $r.Repo %> ${patches_dir}/<% $i %> - <% range $r.Patches %> - log "Applying patch <% . %>" - patch -p1 --no-backup-if-mismatch < ${patches_dir}/<% $i %>/<% . %> - <% end %> - <% end %> - <% end %> - - # allow custom scripts to be applied - scripts_dir="${HOME}/scripts" - <% if .CustomScripts %> - <% range $i, $r := .CustomScripts %> - retry git clone <% if $r.Branch %>--branch <% $r.Branch %><% end %> <% $r.Repo %> ${scripts_dir}/<% $i %> - <% range $r.Scripts %> - log "Applying shell script <% . %>" - . ${scripts_dir}/<% $i %>/<% . %> - <% end %> - <% end %> - <% end %> - - # allow prebuilt applications to be added to build tree - prebuilt_dir="${BUILD_DIR}/packages/apps/Custom" - # TODO: should be able to specify where to add PRODUCT_PACKAGES - mk_file="${BUILD_DIR}/build/make/target/product/handheld_system.mk" - <% if .CustomPrebuilts %> - <% range $i, $r := .CustomPrebuilts %> - log "Putting custom prebuilts from <% $r.Repo %> in build tree location ${prebuilt_dir}/<% $i %>" - retry git clone <% $r.Repo %> ${prebuilt_dir}/<% $i %> - <% range .Modules %> - log "Adding custom PRODUCT_PACKAGES += <% . %> to ${mk_file}" - sed -i "\$aPRODUCT_PACKAGES += <% . 
%>" "${mk_file}" - <% end %> - <% end %> - <% end %> - - # allow custom hosts file - hosts_file_location="${BUILD_DIR}/system/core/rootdir/etc/hosts" - if [ -z "${HOSTS_FILE}" ]; then - log "No custom hosts file requested" - else - log "Replacing hosts file with ${HOSTS_FILE}" - retry wget -q -O "${hosts_file_location}" "${HOSTS_FILE}" - fi -} - -patch_base_config() { - log_header "${FUNCNAME[0]}" - - # enable doze and app standby - sed -i 's@false@true@' "${BUILD_DIR}/frameworks/base/core/res/res/values/config.xml" -} - -patch_device_config() { - log_header "${FUNCNAME[0]}" - - # set proper model names - sed -i 's@PRODUCT_MODEL := AOSP on taimen@PRODUCT_MODEL := Pixel 2 XL@' "${BUILD_DIR}/device/google/taimen/aosp_taimen.mk" || true - sed -i 's@PRODUCT_MODEL := AOSP on walleye@PRODUCT_MODEL := Pixel 2@' "${BUILD_DIR}/device/google/muskie/aosp_walleye.mk" || true - - sed -i 's@PRODUCT_MODEL := AOSP on crosshatch@PRODUCT_MODEL := Pixel 3 XL@' "${BUILD_DIR}/device/google/crosshatch/aosp_crosshatch.mk" || true - sed -i 's@PRODUCT_MODEL := AOSP on blueline@PRODUCT_MODEL := Pixel 3@' "${BUILD_DIR}/device/google/crosshatch/aosp_blueline.mk" || true - - sed -i 's@PRODUCT_MODEL := AOSP on bonito@PRODUCT_MODEL := Pixel 3a XL@' "${BUILD_DIR}/device/google/bonito/aosp_bonito.mk" || true - sed -i 's@PRODUCT_MODEL := AOSP on sargo@PRODUCT_MODEL := Pixel 3a@' "${BUILD_DIR}/device/google/bonito/aosp_sargo.mk" || true - - sed -i 's@PRODUCT_MODEL := AOSP on coral@PRODUCT_MODEL := Pixel 4 XL@' "${BUILD_DIR}/device/google/coral/aosp_coral.mk" || true - sed -i 's@PRODUCT_MODEL := AOSP on flame@PRODUCT_MODEL := Pixel 4@' "${BUILD_DIR}/device/google/coral/aosp_flame.mk" || true - - sed -i 's@PRODUCT_MODEL := AOSP on sunfish@PRODUCT_MODEL := Pixel 4A@' "${BUILD_DIR}/device/google/sunfish/aosp_sunfish.mk" || true - - sed -i 's@PRODUCT_MODEL := AOSP on redfin@PRODUCT_MODEL := Pixel 5@' "${BUILD_DIR}/device/google/redfin/aosp_redfin.mk" || true -} - -patch_add_apps() { - log_header "${FUNCNAME[0]}" - - handheld_system_mk="${BUILD_DIR}/build/make/target/product/handheld_system.mk" - sed -i "\$aPRODUCT_PACKAGES += Updater" "${handheld_system_mk}" - sed -i "\$aPRODUCT_PACKAGES += F-DroidPrivilegedExtension" "${handheld_system_mk}" - sed -i "\$aPRODUCT_PACKAGES += F-Droid" "${handheld_system_mk}" - - handheld_product_mk="${BUILD_DIR}/build/make/target/product/handheld_product.mk" - sed -i 's/Browser2 \\/TrichromeChrome \\/' "${handheld_product_mk}" - - media_product_mk="${BUILD_DIR}/build/make/target/product/media_product.mk" - sed -i 's/webview \\/TrichromeWebView \\/' "${media_product_mk}" - - # add any modules defined in custom manifest projects - <% if .CustomManifestProjects %><% range $i, $r := .CustomManifestProjects %><% range $j, $q := .Modules %> - log "Adding custom PRODUCT_PACKAGES += <% $q %> to ${handheld_system_mk}" - sed -i "\$aPRODUCT_PACKAGES += <% $q %>" "${handheld_system_mk}" - <% end %> - <% end %> - <% end %> -} - -patch_updater() { - log_header "${FUNCNAME[0]}" - - cd "${BUILD_DIR}/packages/apps/Updater/res/values" - sed --in-place --expression "s@s3bucket@${RELEASE_URL}/@g" config.xml - - # TODO: just a hack to get 11 up and running - # related commit: https://android.googlesource.com/platform/system/sepolicy/+/d61b0ce1bc8de2560f1fa173c8d01a09d039a12a%5E%21/#F0 - cat << 'EOF' > "${HOME}/updater-selinux.patch" -From 33fa92c37dd0101164a55ea1584cef6450fa641b Mon Sep 17 00:00:00 2001 -From: Daniel Micay -Date: Tue, 15 Sep 2020 00:08:40 -0400 -Subject: [PATCH] add SELinux domain for Updater app - 
---- - prebuilts/api/30.0/private/app_neverallows.te | 2 +- - .../30.0/private/compat/29.0/29.0.ignore.cil | 1 + - prebuilts/api/30.0/private/seapp_contexts | 9 +++++---- - prebuilts/api/30.0/private/updater_app.te | 17 +++++++++++++++++ - prebuilts/api/30.0/public/update_engine.te | 1 + - prebuilts/api/30.0/public/updater_app.te | 5 +++++ - private/app_neverallows.te | 2 +- - private/compat/29.0/29.0.ignore.cil | 1 + - private/seapp_contexts | 1 + - private/updater_app.te | 17 +++++++++++++++++ - public/update_engine.te | 1 + - public/updater_app.te | 5 +++++ - 12 files changed, 56 insertions(+), 6 deletions(-) - create mode 100644 prebuilts/api/30.0/private/updater_app.te - create mode 100644 prebuilts/api/30.0/public/updater_app.te - create mode 100644 private/updater_app.te - create mode 100644 public/updater_app.te - -diff --git a/prebuilts/api/30.0/private/app_neverallows.te b/prebuilts/api/30.0/private/app_neverallows.te -index 115718700..32980b354 100644 ---- a/prebuilts/api/30.0/private/app_neverallows.te -+++ b/prebuilts/api/30.0/private/app_neverallows.te -@@ -130,7 +130,7 @@ neverallow { all_untrusted_apps -mediaprovider } { cache_file cache_recovery_fil - # World accessible data locations allow application to fill the device - # with unaccounted for data. This data will not get removed during - # application un-installation. --neverallow { all_untrusted_apps -mediaprovider } { -+neverallow { all_untrusted_apps -mediaprovider -updater_app } { - fs_type - -sdcard_type - file_type -diff --git a/prebuilts/api/30.0/private/compat/29.0/29.0.ignore.cil b/prebuilts/api/30.0/private/compat/29.0/29.0.ignore.cil -index fdea691ea..730695e8e 100644 ---- a/prebuilts/api/30.0/private/compat/29.0/29.0.ignore.cil -+++ b/prebuilts/api/30.0/private/compat/29.0/29.0.ignore.cil -@@ -113,6 +113,7 @@ - traced_perf_socket - timezonedetector_service - untrusted_app_29 -+ updater_app - usb_serial_device - userspace_reboot_config_prop - userspace_reboot_exported_prop -diff --git a/prebuilts/api/30.0/private/seapp_contexts b/prebuilts/api/30.0/private/seapp_contexts -index a8c61be8f..e8951230d 100644 ---- a/prebuilts/api/30.0/private/seapp_contexts -+++ b/prebuilts/api/30.0/private/seapp_contexts -@@ -162,10 +162,11 @@ user=_app isPrivApp=true name=com.android.providers.media.module domain=mediapro - user=_app isPrivApp=true name=com.google.android.providers.media.module domain=mediaprovider_app type=privapp_data_file levelFrom=all - user=_app seinfo=platform isPrivApp=true name=com.android.permissioncontroller domain=permissioncontroller_app type=privapp_data_file levelFrom=all - user=_app isPrivApp=true name=com.android.vzwomatrigger domain=vzwomatrigger_app type=privapp_data_file levelFrom=all - user=_app isPrivApp=true name=com.google.android.gms domain=gmscore_app type=privapp_data_file levelFrom=user - user=_app isPrivApp=true name=com.google.android.gms.* domain=gmscore_app type=privapp_data_file levelFrom=user - user=_app isPrivApp=true name=com.google.android.gms:* domain=gmscore_app type=privapp_data_file levelFrom=user - user=_app isPrivApp=true name=com.google.android.gsf domain=gmscore_app type=privapp_data_file levelFrom=user -+user=_app isPrivApp=true name=app.seamlessupdate.client domain=updater_app type=app_data_file levelFrom=user - user=_app minTargetSdkVersion=30 domain=untrusted_app type=app_data_file levelFrom=all - user=_app minTargetSdkVersion=29 domain=untrusted_app_29 type=app_data_file levelFrom=all - user=_app minTargetSdkVersion=28 domain=untrusted_app_27 type=app_data_file 
levelFrom=all -diff --git a/prebuilts/api/30.0/private/updater_app.te b/prebuilts/api/30.0/private/updater_app.te -new file mode 100644 -index 000000000..0ce047b97 ---- /dev/null -+++ b/prebuilts/api/30.0/private/updater_app.te -@@ -0,0 +1,17 @@ -+### -+### Updater app -+### -+ -+typeattribute updater_app coredomain; -+ -+app_domain(updater_app) -+untrusted_app_domain(updater_app) -+net_domain(updater_app) -+ -+# Write to /data/ota_package for OTA packages. -+allow updater_app ota_package_file:dir rw_dir_perms; -+allow updater_app ota_package_file:file create_file_perms; -+ -+# Allow Updater to communicate with update_engine for A/B update. -+binder_call(updater_app, update_engine) -+allow updater_app update_engine_service:service_manager find; -diff --git a/prebuilts/api/30.0/public/update_engine.te b/prebuilts/api/30.0/public/update_engine.te -index 8b767bea0..4dd951c9d 100644 ---- a/prebuilts/api/30.0/public/update_engine.te -+++ b/prebuilts/api/30.0/public/update_engine.te -@@ -46,6 +46,7 @@ userdebug_or_eng(` + "`" + ` - ') - - binder_call(update_engine, gmscore_app) -+binder_call(update_engine, updater_app) - - # Allow update_engine to call the callback function provided by system_server. - binder_call(update_engine, system_server) -diff --git a/prebuilts/api/30.0/public/updater_app.te b/prebuilts/api/30.0/public/updater_app.te -new file mode 100644 -index 000000000..97a850ba1 ---- /dev/null -+++ b/prebuilts/api/30.0/public/updater_app.te -@@ -0,0 +1,5 @@ -+### -+### Updater app -+### -+ -+type updater_app, domain; -diff --git a/private/app_neverallows.te b/private/app_neverallows.te -index 115718700..32980b354 100644 ---- a/private/app_neverallows.te -+++ b/private/app_neverallows.te -@@ -130,7 +130,7 @@ neverallow { all_untrusted_apps -mediaprovider } { cache_file cache_recovery_fil - # World accessible data locations allow application to fill the device - # with unaccounted for data. This data will not get removed during - # application un-installation. 
--neverallow { all_untrusted_apps -mediaprovider } { -+neverallow { all_untrusted_apps -mediaprovider -updater_app } { - fs_type - -sdcard_type - file_type -diff --git a/private/compat/29.0/29.0.ignore.cil b/private/compat/29.0/29.0.ignore.cil -index fdea691ea..730695e8e 100644 ---- a/private/compat/29.0/29.0.ignore.cil -+++ b/private/compat/29.0/29.0.ignore.cil -@@ -113,6 +113,7 @@ - traced_perf_socket - timezonedetector_service - untrusted_app_29 -+ updater_app - usb_serial_device - userspace_reboot_config_prop - userspace_reboot_exported_prop -diff --git a/private/seapp_contexts b/private/seapp_contexts -index d0898bd16..e8951230d 100644 ---- a/private/seapp_contexts -+++ b/private/seapp_contexts -@@ -166,6 +166,7 @@ user=_app isPrivApp=true name=com.android.vzwomatrigger domain=vzwomatrigger_app - user=_app isPrivApp=true name=com.google.android.gms.* domain=gmscore_app type=privapp_data_file levelFrom=user - user=_app isPrivApp=true name=com.google.android.gms:* domain=gmscore_app type=privapp_data_file levelFrom=user - user=_app isPrivApp=true name=com.google.android.gsf domain=gmscore_app type=privapp_data_file levelFrom=user -+user=_app isPrivApp=true name=app.seamlessupdate.client domain=updater_app type=app_data_file levelFrom=user - user=_app minTargetSdkVersion=30 domain=untrusted_app type=app_data_file levelFrom=all - user=_app minTargetSdkVersion=29 domain=untrusted_app_29 type=app_data_file levelFrom=all - user=_app minTargetSdkVersion=28 domain=untrusted_app_27 type=app_data_file levelFrom=all -diff --git a/private/updater_app.te b/private/updater_app.te -new file mode 100644 -index 000000000..0ce047b97 ---- /dev/null -+++ b/private/updater_app.te -@@ -0,0 +1,17 @@ -+### -+### Updater app -+### -+ -+typeattribute updater_app coredomain; -+ -+app_domain(updater_app) -+untrusted_app_domain(updater_app) -+net_domain(updater_app) -+ -+# Write to /data/ota_package for OTA packages. -+allow updater_app ota_package_file:dir rw_dir_perms; -+allow updater_app ota_package_file:file create_file_perms; -+ -+# Allow Updater to communicate with update_engine for A/B update. -+binder_call(updater_app, update_engine) -+allow updater_app update_engine_service:service_manager find; -diff --git a/public/update_engine.te b/public/update_engine.te -index 8b767bea0..4dd951c9d 100644 ---- a/public/update_engine.te -+++ b/public/update_engine.te -@@ -46,6 +46,7 @@ userdebug_or_eng(` + "`" + ` - ') - - binder_call(update_engine, gmscore_app) -+binder_call(update_engine, updater_app) - - # Allow update_engine to call the callback function provided by system_server. 
- binder_call(update_engine, system_server) -diff --git a/public/updater_app.te b/public/updater_app.te -new file mode 100644 -index 000000000..97a850ba1 ---- /dev/null -+++ b/public/updater_app.te -@@ -0,0 +1,5 @@ -+### -+### Updater app -+### -+ -+type updater_app, domain; --- -EOF - pushd "${BUILD_DIR}/system/sepolicy" - git apply "${HOME}/updater-selinux.patch" - popd -} - -fdpe_hash() { - keytool -printcert -file "$1" | grep 'SHA256:' | tr --delete ':' | cut --delimiter ' ' --fields 3 -} - -patch_priv_ext() { - log_header "${FUNCNAME[0]}" - - # 0.2.9 added whitelabel support, so BuildConfig.APPLICATION_ID needs to be set now - sed -i 's@BuildConfig.APPLICATION_ID@"org.fdroid.fdroid.privileged"@' "${BUILD_DIR}/packages/apps/F-DroidPrivilegedExtension/app/src/main/java/org/fdroid/fdroid/privileged/PrivilegedService.java" - - unofficial_releasekey_hash=$(fdpe_hash "${KEYS_DIR}/${DEVICE}/releasekey.x509.pem") - unofficial_platform_hash=$(fdpe_hash "${KEYS_DIR}/${DEVICE}/platform.x509.pem") - sed -i 's/'${OFFICIAL_FDROID_KEY}'")/'${unofficial_releasekey_hash}'"),\n new Pair<>("org.fdroid.fdroid", "'${unofficial_platform_hash}'")/' \ - "${BUILD_DIR}/packages/apps/F-DroidPrivilegedExtension/app/src/main/java/org/fdroid/fdroid/privileged/ClientWhitelist.java" -} - -build_aosp() { - log_header "${FUNCNAME[0]}" - - cd "${BUILD_DIR}" - - if [ "${AOSP_BUILD}" != "${AOSP_VENDOR_BUILD}" ]; then - log "WARNING: Requested AOSP build does not match upstream vendor files. These images may not be functional." - log "Patching build_id to match ${AOSP_BUILD}" - echo "${AOSP_BUILD}" > "vendor/google_devices/${DEVICE}/build_id.txt" - fi - - ############################ - # from original setup.sh script - ############################ - source build/envsetup.sh - export LANG=C - export _JAVA_OPTIONS=-XX:-UsePerfData - export BUILD_NUMBER=$(cat out/soong/build_number.txt 2>/dev/null || date --utc +%Y.%m.%d.%H) - log "BUILD_NUMBER=${BUILD_NUMBER}" - export DISPLAY_BUILD_NUMBER=true - chrt -b -p 0 $$ - - log "Running choosecombo ${BUILD_TARGET}" - choosecombo ${BUILD_TARGET} - - log "Running target-files-package" - retry make -j "$(nproc)" target-files-package - - log "Running brillo_update_payload" - retry make -j "$(nproc)" brillo_update_payload - - log "Running m otatools-package" - m otatools-package - rm -rf "${HOME}/release" - mkdir -p "${HOME}/release" - unzip "${BUILD_DIR}/out/target/product/${DEVICE}/otatools.zip" -d "${HOME}/release" -} - -get_radio_image() { - grep -Po "require version-$1=\K.+" "vendor/$2/vendor-board-info.txt" | tr '[:upper:]' '[:lower:]' -} - -release() { - log_header "${FUNCNAME[0]}" - - cd "${BUILD_DIR}" - - ############################ - # from original setup.sh script - ############################ - source build/envsetup.sh - export LANG=C - export _JAVA_OPTIONS=-XX:-UsePerfData - export BUILD_NUMBER=$(cat out/soong/build_number.txt 2>/dev/null || date --utc +%Y.%m.%d.%H) - log "BUILD_NUMBER=${BUILD_NUMBER}" - export DISPLAY_BUILD_NUMBER=true - chrt -b -p 0 $$ - - ############################ - # from original release.sh script - ############################ - KEY_DIR="keys/${DEVICE}" - OUT="out/release-${DEVICE}-${BUILD_NUMBER}" - device="${DEVICE}" - source "device/common/clear-factory-images-variables.sh" - - DEVICE="${device}" - BOOTLOADER=$(get_radio_image bootloader "google_devices/${DEVICE}") - RADIO=$(get_radio_image baseband "google_devices/${DEVICE}") - PREFIX="aosp_" - BUILD="${BUILD_NUMBER}" - VERSION=$(grep -Po "BUILD_ID=\K.+" "build/core/build_id.mk" | tr 
'[:upper:]' '[:lower:]') - PRODUCT="${DEVICE}" - TARGET_FILES="${DEVICE}-target_files-${BUILD}.zip" - - # make sure output directory exists - mkdir -p "${OUT}" - - # pick avb mode depending on device and determine key size - avb_key_size=$(openssl rsa -in "${KEY_DIR}/avb.pem" -text -noout | grep 'Private-Key' | awk -F'[()]' '{print $2}' | awk '{print $1}') - log "avb_key_size=${avb_key_size}" - avb_algorithm="SHA256_RSA${avb_key_size}" - log "avb_algorithm=${avb_algorithm}" - case "${AVB_MODE}" in - vbmeta_simple) - # Pixel 2: one vbmeta struct, no chaining - AVB_SWITCHES=(--avb_vbmeta_key "${KEY_DIR}/avb.pem" - --avb_vbmeta_algorithm "${avb_algorithm}") - ;; - vbmeta_chained) - # Pixel 3: main vbmeta struct points to a chained vbmeta struct in system.img - AVB_SWITCHES=(--avb_vbmeta_key "${KEY_DIR}/avb.pem" - --avb_vbmeta_algorithm "${avb_algorithm}" - --avb_system_key "${KEY_DIR}/avb.pem" - --avb_system_algorithm "${avb_algorithm}") - ;; - vbmeta_chained_v2) - AVB_SWITCHES=(--avb_vbmeta_key "${KEY_DIR}/avb.pem" - --avb_vbmeta_algorithm "${avb_algorithm}" - --avb_system_key "${KEY_DIR}/avb.pem" - --avb_system_algorithm "${avb_algorithm}" - --avb_vbmeta_system_key "${KEY_DIR}/avb.pem" - --avb_vbmeta_system_algorithm "${avb_algorithm}") - ;; - esac - - export PATH="${HOME}/release/bin:${PATH}" - export PATH="${BUILD_DIR}/prebuilts/jdk/jdk9/linux-x86/bin:${PATH}" - - log "Running sign_target_files_apks" - "${HOME}/release/releasetools/sign_target_files_apks" \ - -o -d "${KEY_DIR}" \ - -k "${BUILD_DIR}/build/target/product/security/networkstack=${KEY_DIR}/networkstack" "${AVB_SWITCHES[@]}" \ - "${BUILD_DIR}/out/target/product/${DEVICE}/obj/PACKAGING/target_files_intermediates/${PREFIX}${DEVICE}-target_files-${BUILD_NUMBER}.zip" \ - "${OUT}/${TARGET_FILES}" - - log "Running ota_from_target_files" - "${HOME}/release/releasetools/ota_from_target_files" --block -k "${KEY_DIR}/releasekey" "${EXTRA_OTA[@]}" "${OUT}/${TARGET_FILES}" \ - "${OUT}/${DEVICE}-ota_update-${BUILD}.zip" - - log "Running img_from_target_files" - sed -i 's/zipfile\.ZIP_DEFLATED/zipfile\.ZIP_STORED/' "${HOME}/release/releasetools/img_from_target_files.py" - "${HOME}/release/releasetools/img_from_target_files" "${OUT}/${TARGET_FILES}" "${OUT}/${DEVICE}-img-${BUILD}.zip" - - log "Running generate-factory-images" - cd "${OUT}" - sed -i 's/zip -r/tar cvf/' "../../device/common/generate-factory-images-common.sh" - sed -i 's/factory\.zip/factory\.tar/' "../../device/common/generate-factory-images-common.sh" - sed -i '/^mv / d' "../../device/common/generate-factory-images-common.sh" - source "../../device/common/generate-factory-images-common.sh" - mv "${DEVICE}-${VERSION}-factory.tar" "${DEVICE}-factory-${BUILD_NUMBER}.tar" - rm -f "${DEVICE}-factory-${BUILD_NUMBER}.tar.xz" - - log "Running compress of factory image with pxz" - time pxz -v -T0 -9 -z "${DEVICE}-factory-${BUILD_NUMBER}.tar" -} - -# TODO: cleanup this function -aws_upload() { - log_header "${FUNCNAME[0]}" - - cd "${BUILD_DIR}/out" - build_date="$(< soong/build_number.txt)" - build_timestamp="$(unzip -p "release-${DEVICE}-${build_date}/${DEVICE}-ota_update-${build_date}.zip" "META-INF/com/android/metadata" | grep 'post-timestamp' | cut --delimiter "=" --fields 2)" - - # copy ota file to s3, update file metadata used by updater app, and remove old ota files - read -r old_metadata <<< "$(wget -O - "${RELEASE_URL}/${RELEASE_CHANNEL}")" - old_date="$(cut -d ' ' -f 1 <<< "${old_metadata}")" - ( - aws s3 cp 
"${BUILD_DIR}/out/release-${DEVICE}-${build_date}/${DEVICE}-ota_update-${build_date}.zip" "s3://${AWS_RELEASE_BUCKET}" --acl public-read && - echo "${build_date} ${build_timestamp} ${AOSP_BUILD}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/${RELEASE_CHANNEL}" --acl public-read && - echo "${BUILD_TIMESTAMP}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/${RELEASE_CHANNEL}-true-timestamp" --acl public-read - ) && ( aws s3 rm "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-ota_update-${old_date}.zip" || true ) - - # upload factory image - retry aws s3 cp "${BUILD_DIR}/out/release-${DEVICE}-${build_date}/${DEVICE}-factory-${build_date}.tar.xz" "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-factory-latest.tar.xz" - - # cleanup old target files if some exist - if [ "$(aws s3 ls "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-target" | wc -l)" != '0' ]; then - cleanup_target_files - fi - - # copy new target file to s3 - retry aws s3 cp "${BUILD_DIR}/out/release-${DEVICE}-${build_date}/${DEVICE}-target_files-${build_date}.zip" "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-target/${DEVICE}-target-files-${build_date}.zip" -} - -cleanup_target_files() { - log_header "${FUNCNAME[0]}" - - aws s3 sync "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-target" "${BUILD_DIR}/${DEVICE}-target" - cd "${BUILD_DIR}/${DEVICE}-target" - for target_file in ${DEVICE}-target-files-*.zip ; do - old_date=$(echo "${target_file}" | cut --delimiter "-" --fields 4 | cut --delimiter "." --fields 5 --complement) - aws s3 rm "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-target/${DEVICE}-target-files-${old_date}.zip" || true - done -} - -checkpoint_versions() { - log_header "${FUNCNAME[0]}" - - # checkpoint stack version - echo "${STACK_VERSION}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/rattlesnakeos-stack/revision" - - # checkpoint f-droid - echo "${FDROID_PRIV_EXT_VERSION}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/fdroid-priv/revision" - echo "${FDROID_CLIENT_VERSION}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/fdroid/revision" - - # checkpoint aosp - aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/${DEVICE}-vendor" --acl public-read <<< "${AOSP_VENDOR_BUILD}" || true - - # checkpoint chromium - echo "yes" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/chromium/included" -} - -aws_notify_simple() { - log_header "${FUNCNAME[0]}" - - aws sns publish --region ${REGION} --topic-arn "${AWS_SNS_ARN}" --message "$1" -} - -aws_notify() { - log_header "${FUNCNAME[0]}" - - LOGOUTPUT= - if [ -n "$2" ]; then - LOGOUTPUT=$(tail -c 20000 /var/log/cloud-init-output.log) - fi - ELAPSED="$((SECONDS / 3600))hrs $(((SECONDS / 60) % 60))min $((SECONDS % 60))sec" - aws sns publish --region ${REGION} --topic-arn "${AWS_SNS_ARN}" \ - --message="$(printf "$1\n Device: %s\n Stack Name: %s\n Stack Version: %s %s\n Stack Region: %s\n Release Channel: %s\n Instance Type: %s\n Instance Region: %s\n Instance IP: %s\n Build Date: %s\n Elapsed Time: %s\n AOSP Build: %s\n AOSP Vendor Build: %s\n AOSP Branch: %s\n Chromium Version: %s\n F-Droid Version: %s\n F-Droid Priv Extension Version: %s\n Build Reason: %s\n%s" \ - "${DEVICE}" "${STACK_NAME}" "${STACK_VERSION}" "${STACK_UPDATE_MESSAGE}" "${REGION}" "${RELEASE_CHANNEL}" "${INSTANCE_TYPE}" "${INSTANCE_REGION}" "${INSTANCE_IP}" "${BUILD_DATE}" "${ELAPSED}" "${AOSP_BUILD}" "${AOSP_VENDOR_BUILD}" "${AOSP_BRANCH}" "${LATEST_CHROMIUM}" "${FDROID_CLIENT_VERSION}" "${FDROID_PRIV_EXT_VERSION}" "${BUILD_REASON}" "${LOGOUTPUT}")" || true -} - -aws_logging() { - log_header "${FUNCNAME[0]}" - - df -h - du -chs "${BUILD_DIR}" || true - uptime - aws s3 cp /var/log/cloud-init-output.log 
"s3://${AWS_LOGS_BUCKET}/${DEVICE}/$(date +%s)" -} - -aws_import_keys() { - log_header "${FUNCNAME[0]}" - - if [ "${ENCRYPTED_KEYS}" = true ]; then - if [ "$(aws s3 ls "s3://${AWS_ENCRYPTED_KEYS_BUCKET}/${DEVICE}" | wc -l)" == '0' ]; then - log "No encrypted keys were found - generating encrypted keys" - gen_keys - for f in $(find "${KEYS_DIR}" -type f); do - log "Encrypting file ${f} to ${f}.gpg" - gpg --symmetric --batch --passphrase "${ENCRYPTION_KEY}" --cipher-algo AES256 "${f}" - done - log "Syncing encrypted keys to S3 s3://${AWS_ENCRYPTED_KEYS_BUCKET}" - aws s3 sync "${KEYS_DIR}" "s3://${AWS_ENCRYPTED_KEYS_BUCKET}" --exclude "*" --include "*.gpg" - fi - else - if [ "$(aws s3 ls "s3://${AWS_KEYS_BUCKET}/${DEVICE}" | wc -l)" == '0' ]; then - log "No keys were found - generating keys" - gen_keys - log "Syncing keys to S3 s3://${AWS_KEYS_BUCKET}" - aws s3 sync "${KEYS_DIR}" "s3://${AWS_KEYS_BUCKET}" - else - log "Keys already exist for ${DEVICE} - syncing them from S3" - aws s3 sync "s3://${AWS_KEYS_BUCKET}" "${KEYS_DIR}" - fi - fi - - # handle migration with chromium.keystore - pushd "${KEYS_DIR}/${DEVICE}" - if [ ! -f "${KEYS_DIR}/${DEVICE}/chromium.keystore" ]; then - log "Did not find chromium.keystore - generating" - keytool -genkey -v -keystore chromium.keystore -storetype pkcs12 -alias chromium -keyalg RSA -keysize 4096 \ - -sigalg SHA512withRSA -validity 10000 -dname "cn=RattlesnakeOS" -storepass chromium - if [ "${ENCRYPTED_KEYS}" = true ]; then - log "Encrypting and uploading new chromium.keystore to s3://${AWS_ENCRYPTED_KEYS_BUCKET}" - gpg --symmetric --batch --passphrase "${ENCRYPTION_KEY}" --cipher-algo AES256 chromium.keystore - aws s3 sync "${KEYS_DIR}" "s3://${AWS_ENCRYPTED_KEYS_BUCKET}" --exclude "*" --include "*.gpg" - else - log "Uploading new chromium.keystore to s3://${AWS_KEYS_BUCKET}" - aws s3 sync "${KEYS_DIR}" "s3://${AWS_KEYS_BUCKET}" - fi - fi - popd -} - -gen_keys() { - log_header "${FUNCNAME[0]}" - - # download make_key and avbtool as aosp tree isn't downloaded yet - make_key="${HOME}/make_key" - retry curl --fail -s "https://android.googlesource.com/platform/development/+/refs/tags/${AOSP_BRANCH}/tools/make_key?format=TEXT" | base64 --decode > "${make_key}" - chmod +x "${make_key}" - avb_tool="${HOME}/avbtool" - retry curl --fail -s "https://android.googlesource.com/platform/external/avb/+/refs/tags/${AOSP_BRANCH}/avbtool?format=TEXT" | base64 --decode > "${avb_tool}" - chmod +x "${avb_tool}" - - # generate releasekey,platform,shared,media,networkstack keys - mkdir -p "${KEYS_DIR}/${DEVICE}" - cd "${KEYS_DIR}/${DEVICE}" - for key in {releasekey,platform,shared,media,networkstack} ; do - # make_key exits with unsuccessful code 1 instead of 0, need ! to negate - ! "${make_key}" "${key}" "${CERTIFICATE_SUBJECT}" - done - - # generate avb key - openssl genrsa -out "${KEYS_DIR}/${DEVICE}/avb.pem" 4096 - "${avb_tool}" extract_public_key --key "${KEYS_DIR}/${DEVICE}/avb.pem" --output "${KEYS_DIR}/${DEVICE}/avb_pkmd.bin" -} - -cleanup() { - rv=$? 
- aws_logging - if [ $rv -ne 0 ]; then - aws_notify "RattlesnakeOS Build FAILED" 1 - fi - if [ "${PREVENT_SHUTDOWN}" = true ]; then - log "Skipping shutdown" - else - sudo shutdown -h now - fi -} - -log_header() { - echo "==================================" - echo "$(date "+%Y-%m-%d %H:%M:%S"): Running $1" - echo "==================================" -} - -log() { - echo "$(date "+%Y-%m-%d %H:%M:%S"): $1" -} - -retry() { - set +e - local max_attempts=${ATTEMPTS-3} - local timeout=${TIMEOUT-1} - local attempt=0 - local exitCode=0 - - while [[ $attempt < $max_attempts ]] - do - "$@" - exitCode=$? - - if [[ $exitCode == 0 ]] - then - break - fi - - log "Failure! Retrying ($*) in $timeout.." - sleep "${timeout}" - attempt=$(( attempt + 1 )) - timeout=$(( timeout * 2 )) - done - - if [[ $exitCode != 0 ]] - then - log "Failed too many times! ($*)" - fi - - set -e - - return $exitCode -} - -trap cleanup 0 - -set -e - -full_run -` diff --git a/templates/generated_vars_and_funcs.sh b/templates/generated_vars_and_funcs.sh new file mode 100644 index 00000000..e1744910 --- /dev/null +++ b/templates/generated_vars_and_funcs.sh @@ -0,0 +1,141 @@ +######################################## +######## STACK CONFIG VARS ############# +######################################## +DEVICE="<% .Device %>" +DEVICE_FRIENDLY="<% .DeviceDetails.Friendly %>" +DEVICE_FAMILY="<% .DeviceDetails.Family %>" +DEVICE_AVB_MODE="<% .DeviceDetails.AVBMode %>" +DEVICE_EXTRA_OTA=<% .DeviceDetails.ExtraOTA %> +STACK_NAME="<% .Name %>" +STACK_VERSION="<% .Version %>" +CHROMIUM_BUILD_DISABLED="<% .ChromiumBuildDisabled %>" +CORE_CONFIG_REPO="<% .CoreConfigRepo %>" +CORE_CONFIG_REPO_BRANCH="<% .CoreConfigRepoBranch %>" +CUSTOM_CONFIG_REPO="<% .CustomConfigRepo %>" +CUSTOM_CONFIG_REPO_BRANCH="<% .CustomConfigRepoBranch %>" + +########################################## +###### CLOUD SPECIFIC VARS AND FUNCS ##### +########################################## +<% if eq .Cloud "aws" -%> +REGION="<% .Region %>" +AWS_KEYS_BUCKET="${STACK_NAME}-keys" +AWS_RELEASE_BUCKET="${STACK_NAME}-release" +RELEASE_URL="https://${AWS_RELEASE_BUCKET}.s3.amazonaws.com" +<%- end %> + +import_keys() { + log_header "${FUNCNAME[0]}" + + <% if eq .Cloud "aws" -%> + if [ "$(aws s3 ls "s3://${AWS_KEYS_BUCKET}/${DEVICE}" | wc -l)" == '0' ]; then + log "No keys were found - generating keys" + gen_keys + log "Syncing keys to S3 s3://${AWS_KEYS_BUCKET}" + aws s3 sync "${KEYS_DIR}" "s3://${AWS_KEYS_BUCKET}" + else + log "Keys already exist for ${DEVICE} - syncing them from S3" + aws s3 sync "s3://${AWS_KEYS_BUCKET}" "${KEYS_DIR}" + fi + <%- else %> + echo "todo" + <%- end %> +} + +notify() { + log_header "${FUNCNAME[0]}" + + <% if eq .Cloud "aws" -%> + LOGOUTPUT= + if [ -n "$2" ]; then + LOGOUTPUT=$(tail -c 20000 /var/log/cloud-init-output.log) + fi + + AWS_SNS_ARN=$(aws --region ${REGION} sns list-topics --query 'Topics[0].TopicArn' --output text | cut -d":" -f1,2,3,4,5)":${STACK_NAME}" + INSTANCE_TYPE=$(curl -s http://169.254.169.254/latest/meta-data/instance-type) + INSTANCE_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | awk -F\" '/region/ {print $4}') + INSTANCE_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) + ELAPSED="$((SECONDS / 3600))hrs $(((SECONDS / 60) % 60))min $((SECONDS % 60))sec" + aws sns publish --region ${REGION} --topic-arn "${AWS_SNS_ARN}" \ + --message="$(printf "$1\n Stack Name: %s\n Device: %s\n Stack Version: %s\n Stack Region: %s\n Instance Type: %s\n Instance Region: %s\n Instance IP: %s\n 
Elapsed Time: %s\n Release: %s\n Tag: %s\n Build ID: %s\n %s" \ + "${STACK_NAME}" "${DEVICE}" "${STACK_VERSION}" "${REGION}" "${INSTANCE_TYPE}" "${INSTANCE_REGION}" "${INSTANCE_IP}" "${ELAPSED}" "${RELEASE}" "${AOSP_TAG}" "${AOSP_BUILD_ID}" "${LOGOUTPUT}")" || true + <%- else %> + echo "todo" + <%- end %> +} + +cleanup() { + <% if eq .Cloud "aws" -%> + rv=$? + df -h + du -chs "${AOSP_BUILD_DIR}" || true + uptime + AWS_LOGS_BUCKET="${STACK_NAME}-logs" + aws s3 cp /var/log/cloud-init-output.log "s3://${AWS_LOGS_BUCKET}/${DEVICE}/$(date +%s)" || true + if [ $rv -ne 0 ]; then + notify "RattlesnakeOS Build FAILED" 1 + fi + sudo shutdown -h now + <%- else %> + echo "todo" + <%- end %> +} + +get_current_metadata() { + <% if eq .Cloud "aws" -%> + local metadata_location="${1}" + local current=$(aws s3 cp "s3://${AWS_RELEASE_BUCKET}/${1}" - 2>/dev/null || true) + echo "${current}" + <%- else %> + echo "todo" + <%- end %> +} + +set_current_metadata() { + local metadata_location="${1}" + local metadata_value="${2}" + local public="${3}" + <% if eq .Cloud "aws" -%> + if [ -z "${public}" ]; then + echo "${metadata_value}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/${metadata_location}" + else + echo "${metadata_value}" | aws s3 cp - "s3://${AWS_RELEASE_BUCKET}/${metadata_location}" --acl public-read + fi + <%- else %> + echo "todo" + <%- end %> +} + +upload_build_artifact() { + local src_file="${1}" + local dest_file="${2}" + local public="${3}" + <% if eq .Cloud "aws" -%> + if [ -z "${public}" ]; then + retry aws s3 cp "${src_file}" "s3://${AWS_RELEASE_BUCKET}/${dest_file}" + else + retry aws s3 cp "${src_file}" "s3://${AWS_RELEASE_BUCKET}/${dest_file}" --acl public-read + fi + <%- else %> + echo "todo" + <%- end %> +} + +download_build_artifact() { + local src_file="${1}" + local dest_file="${2}" + <% if eq .Cloud "aws" -%> + retry aws s3 cp "s3://${AWS_RELEASE_BUCKET}/${src_file}" "${dest_file}" + <%- else %> + echo "todo" + <%- end %> +} + +delete_build_artifact() { + local dest_file="${1}" + <% if eq .Cloud "aws" -%> + aws s3 rm "s3://${AWS_RELEASE_BUCKET}/${dest_file}" || true + <%- else %> + echo "todo" + <%- end %> +} \ No newline at end of file diff --git a/templates/lambda.py b/templates/lambda.py new file mode 100644 index 00000000..dd7d39d2 --- /dev/null +++ b/templates/lambda.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python3 +import boto3 +import base64 +import json +import time +from urllib.request import urlopen +from datetime import datetime, timedelta +from pkg_resources import packaging + +STACK_VERSION = '<% .Config.Version %>' +NAME = '<% .Config.Name %>' +LATEST_JSON_URL = "<% .Config.ReleasesURL %>" +STACK_VERSION_LATEST_URL = "<% .RattlesnakeOSStackReleasesURL %>" +BUILD_SCRIPT_S3_LOCATION = 's3://<% .Config.Name %>-script/build.sh' +RELEASE_BUCKET = '<% .Config.Name %>-release' +FLEET_ROLE = 'arn:aws:iam::{0}:role/aws-service-role/spotfleet.amazonaws.com/AWSServiceRoleForEC2SpotFleet' +IAM_PROFILE = 'arn:aws:iam::{0}:instance-profile/<% .Config.Name %>-ec2' +SNS_ARN = 'arn:aws:sns:<% .Config.Region %>:{}:<% .Config.Name %>' +INSTANCE_TYPE = '<% .Config.InstanceType %>' +DEVICE = '<% .Config.Device %>' +SSH_KEY_NAME = '<% .Config.SSHKey %>' +MAX_PRICE = '<% .Config.MaxPrice %>' +SKIP_PRICE = '<% .Config.SkipPrice %>' +STACK_REGION = '<% .Config.Region %>' +INSTANCE_REGIONS = '<% .Config.InstanceRegions %>' +REGION_AMIS = json.loads('<% .RegionAMIs %>') +CHROMIUM_BUILD_DISABLED = '<% .Config.ChromiumBuildDisabled %>' +CHROMIUM_PINNED_VERSION = '<% .Config.ChromiumVersion %>' + + +def 
lambda_handler(event, context): + # get latest + latest_stack_json = json.loads(urlopen(STACK_VERSION_LATEST_URL).read().decode()) + latest_stack_version = latest_stack_json.get('name') + print("latest_stack_version", latest_stack_version) + latest_json = json.loads(urlopen(LATEST_JSON_URL).read().decode()) + latest_release = latest_json.get('release') + print("latest_release", latest_release) + latest_chromium_version = latest_json.get('chromium') + print("latest_chromium_version", latest_chromium_version) + latest_aosp_build_id = latest_json.get('devices').get(DEVICE).get('build_id') + print("latest_aosp_build_id", latest_aosp_build_id) + latest_aosp_tag = latest_json.get('devices').get(DEVICE).get('aosp_tag') + print("latest_aosp_tag", latest_aosp_tag) + minimum_stack_version = latest_json.get('minimum_stack_version') + print("minimum_stack_version", minimum_stack_version) + + # only build if minimum stack version requirement is met + if packaging.version.parse(STACK_VERSION) < packaging.version.parse(minimum_stack_version): + message = "RattlesnakeOS build was cancelled. Existing stack version {} needs to be updated to latest".format(STACK_VERSION) + send_sns_message("RattlesnakeOS Stack Needs Update", message) + return message + + # gather revisions for passing to build script + revisions = [] + for k, v in latest_json.get('revisions').items(): + revisions.append("{}={}".format(k, v)) + revisions_string = (",".join(revisions)) + print("revisions_string", revisions_string) + + # build time overrides + force_build = event.get('force-build') or False + print("force_build", force_build) + aosp_build_id = event.get('aosp-build-id') or latest_aosp_build_id + print("aosp_build_id", aosp_build_id) + aosp_tag = event.get('aosp-tag') or latest_aosp_tag + print("aosp_tag", aosp_tag) + chromium_version = event.get('ChromiumVersion') or CHROMIUM_PINNED_VERSION if CHROMIUM_PINNED_VERSION != "" else latest_chromium_version + print("chromium_version", chromium_version) + + # check if build is required + needs_build, build_reason = is_build_required(latest_release) + if not needs_build and not force_build: + message = "RattlesnakeOS build is already up to date." + send_sns_message("RattlesnakeOS Build Not Required", message) + return message + if not needs_build and force_build: + build_reason = "Build not required - but force build flag was specified." + print("needs_build", needs_build) + print("build_reason", build_reason) + + # find region and az with cheapest price + try: + cheapest_price, cheapest_region, cheapest_az = find_cheapest_region() + if float(cheapest_price) > float(SKIP_PRICE): + message = f"Cheapest spot instance {INSTANCE_TYPE} price ${cheapest_price} in AZ {cheapest_az} is not lower than --skip-price ${SKIP_PRICE}." 
+ send_sns_message("RattlesnakeOS Spot Instance SKIPPED", message) + return message + except Exception as e: + message = f"There was a problem finding cheapest region for spot instance {INSTANCE_TYPE}: {e}" + send_sns_message("RattlesnakeOS Spot Instance FAILED", message) + raise + + # AMI to launch with + ami = REGION_AMIS[cheapest_region] + + # create ec2 client for cheapest region + client = boto3.client('ec2', region_name=cheapest_region) + + # get a subnet in cheapest az to request spot instance in + subnets = client.describe_subnets(Filters=[{'Name': 'availabilityZone', 'Values': [cheapest_az]}])['Subnets'][0][ + 'SubnetId'] + + # userdata to deploy with spot instance + copy_build_command = f"sudo -u ubuntu aws s3 --region {STACK_REGION} cp {BUILD_SCRIPT_S3_LOCATION} /home/ubuntu/build.sh" + build_args_command = f"echo \\\"/home/ubuntu/build.sh {latest_release} {aosp_build_id} {aosp_tag} {chromium_version} {revisions_string}\\\" > /home/ubuntu/build_cmd" + build_start_command = f"sudo -u ubuntu bash /home/ubuntu/build.sh \\\"{latest_release}\\\" \\\"{aosp_build_id}\\\" \\\"{aosp_tag}\\\" \\\"{chromium_version}\\\" \\\"{revisions_string}\\\"" + userdata = base64.b64encode(f""" +#cloud-config +output : {{ all : '| tee -a /var/log/cloud-init-output.log' }} + +repo_update: true +repo_upgrade: all +packages: +- awscli + +runcmd: +- [ bash, -c, "{copy_build_command}" ] +- [ bash, -c, "{build_args_command}" ] +- [ bash, -c, "{build_start_command}" ] + """.encode('ascii')).decode('ascii') + + # make spot fleet request config + account_id = boto3.client('sts').get_caller_identity().get('Account') + now_utc = datetime.utcnow().replace(microsecond=0) + valid_until = now_utc + timedelta(hours=12) + spot_fleet_request_config = { + 'IamFleetRole': FLEET_ROLE.format(account_id), + 'AllocationStrategy': 'lowestPrice', + 'TargetCapacity': 1, + 'SpotPrice': MAX_PRICE, + 'ValidFrom': now_utc, + 'ValidUntil': valid_until, + 'TerminateInstancesWithExpiration': True, + 'LaunchSpecifications': [ + { + 'ImageId': ami, + 'SubnetId': subnets, + 'InstanceType': INSTANCE_TYPE, + 'IamInstanceProfile': { + 'Arn': IAM_PROFILE.format(account_id) + }, + 'BlockDeviceMappings': [ + { + 'DeviceName': '/dev/sda1', + 'Ebs': { + 'DeleteOnTermination': True, + 'VolumeSize': 300, + 'VolumeType': 'gp3' + }, + }, + ], + 'UserData': userdata + }, + ], + 'Type': 'request' + } + + # check if ec2 keypair exists in this region - otherwise don't include keypair in spot request + try: + client.describe_key_pairs(KeyNames=[SSH_KEY_NAME]) + spot_fleet_request_config['LaunchSpecifications'][0]['KeyName'] = SSH_KEY_NAME + except Exception as e: + print(f"not including SSH key in spot request as no key in region {cheapest_region} with name {SSH_KEY_NAME} found: {e}") + + print("spot_fleet_request_config: {}".format(spot_fleet_request_config)) + + try: + print(f"requesting spot instance in AZ {cheapest_az} with current price of {cheapest_price}") + response = client.request_spot_fleet(SpotFleetRequestConfig=spot_fleet_request_config) + print(f"spot request response: {response}") + spot_fleet_request_id = response.get('SpotFleetRequestId') + except Exception as e: + message = f"There was a problem requesting a spot instance {INSTANCE_TYPE}: {e}" + send_sns_message("RattlesnakeOS Spot Instance FAILED", message) + raise + + try: + found_instance = False + retry_interval = 5 + retry_attempts = 30 + for i in range(1, retry_attempts): + print(f"waiting for spot instance launch for spot fleet request id {spot_fleet_request_id}: 
{i}/{retry_attempts}") + response = client.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_request_id) + print(response) + if len(response.get('ActiveInstances')) > 0: + found_instance = True + break + time.sleep(retry_interval) + if not found_instance: + raise Exception("max wait timeout for spot instance launch") + except Exception as e: + try: + print(f"attempting to cancel spot fleet request id {spot_fleet_request_id}") + client.cancel_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_request_id], TerminateInstances=True) + except Exception as ex: + print(f"failed to cancel spot fleet request: {ex}") + message = f"There was a problem waiting for active spot instance launch {INSTANCE_TYPE}: {e}" + send_sns_message("RattlesnakeOS Spot Instance FAILED", message) + raise + + chromium_message = "" + if CHROMIUM_BUILD_DISABLED == "false": + chromium_message = f"Chromium Version: {latest_chromium_version}\n " + + subject = "RattlesnakeOS Spot Instance LAUNCHED" + message = f"Successfully launched a spot instance.\n\n Stack Name: {NAME}\n Stack Version: {STACK_VERSION}\n Device: {DEVICE}\n Release: {latest_release}\n Tag: {latest_aosp_tag}\n Build ID: {latest_aosp_build_id}\n {chromium_message}Instance Type: {INSTANCE_TYPE}\n Cheapest Region: {cheapest_region}\n Cheapest Hourly Price: ${cheapest_price}\n Build Reason: {build_reason} " + send_sns_message(subject, message) + return message.replace('\n', ' ') + + +def is_build_required(latest_release): + s3 = boto3.resource('s3') + needs_update = False + reason = "" + + existing_release_version = "" + try: + existing_release_version = s3.Object(RELEASE_BUCKET, "release").get()['Body'].read().decode().strip("\n") + except Exception as e: + print("failed to get existing_release_version: {}".format(e)) + pass + if latest_release > existing_release_version: + needs_update = True + reason = "New release '{}'".format(latest_release) + print(reason) + return needs_update, reason + + return needs_update, reason + + +def find_cheapest_region(): + cheapest_price = 0 + cheapest_region = "" + cheapest_az = "" + for region in INSTANCE_REGIONS.split(","): + ec2_client = boto3.client('ec2', region_name=region) + spot_price_dict = ec2_client.describe_spot_price_history( + StartTime=datetime.now() - timedelta(minutes=1), + EndTime=datetime.now(), + InstanceTypes=[ + INSTANCE_TYPE + ], + ProductDescriptions=[ + 'Linux/UNIX (Amazon VPC)' + ], + ) + for key, value in spot_price_dict.items(): + if key == u'SpotPriceHistory': + for i in value: + az = i[u'AvailabilityZone'] + price = i[u'SpotPrice'] + if cheapest_price == 0: + cheapest_price = price + cheapest_region = region + cheapest_az = az + else: + if price < cheapest_price: + cheapest_price = price + cheapest_region = region + cheapest_az = az + print("{} {}".format(az, price)) + return cheapest_price, cheapest_region, cheapest_az + + +def send_sns_message(subject, message): + account_id = boto3.client('sts').get_caller_identity().get('Account') + sns = boto3.client('sns') + resp = sns.publish(TopicArn=SNS_ARN.format(account_id), Subject=subject, Message=message) + print("Sent SNS message {} and got response: {}".format(message, resp)) + + +if __name__ == '__main__': + lambda_handler("", "") diff --git a/templates/lambda_template.go b/templates/lambda_template.go deleted file mode 100644 index 9482cdc6..00000000 --- a/templates/lambda_template.go +++ /dev/null @@ -1,203 +0,0 @@ -package templates - -const LambdaTemplate = ` -#!/usr/bin/env python3 -import boto3 -import base64 -import json -from 
urllib.request import urlopen -from urllib.request import HTTPError -from datetime import datetime, timedelta - -# curl -s https://cloud-images.ubuntu.com/locator/ec2/releasesTable | grep '18.04' | grep 'amd64' | grep 'hvm:ebs-ssd' | awk -F'"' '{print $2, $15}' | awk -F"launchAmi=" '{print $1,$2}' | awk '{print $1,$3}' | awk -F'\' '{print $1}' | awk '{printf "\"%s\": \"%s\",\n",$1,$2 }' -# ubuntu 18.04 AMI hvm:ebs-ssd: https://cloud-images.ubuntu.com/locator/ec2/ -REGION_AMIS = { - "ap-northeast-1": "ami-0eb48a19a8d81e20b", - "ap-northeast-2": "ami-078e96948945fc2c9", - "ap-northeast-3": "ami-0babd61cf592f1c03", - "ap-south-1": "ami-007d5db58754fa284", - "ap-southeast-1": "ami-0dad20bd1b9c8c004", - "ap-southeast-2": "ami-0b76c3b150c6b1423", - "ca-central-1": "ami-01b60a3259250381b", - "eu-central-1": "ami-090f10efc254eaf55", - "eu-north-1": "ami-5e9c1520", - "eu-west-1": "ami-08d658f84a6d84a80", - "eu-west-2": "ami-07dc734dc14746eab", - "eu-west-3": "ami-03bca18cb3dc173c9", - "sa-east-1": "ami-09f4cd7c0b533b081", - "us-east-1": "ami-0a313d6098716f372", - "us-east-2": "ami-0c55b159cbfafe1f0", - "us-west-1": "ami-06397100adf427136", - "us-west-2": "ami-005bdb005fb00e791", - "cn-northwest-1": "ami-09b1225e9a1d84e4c", - "cn-north-1": "ami-09dd6088c3e46151c", - "us-gov-west-1": "ami-66bdd307", - "us-gov-east-1": "ami-7bd2340a" -} - -NAME = '<% .Name %>' -SRC_PATH = 's3://<% .Name %>-script/build.sh' -FLEET_ROLE = 'arn:aws:iam::{0}:role/aws-service-role/spotfleet.amazonaws.com/AWSServiceRoleForEC2SpotFleet' -IAM_PROFILE = 'arn:aws:iam::{0}:instance-profile/<% .Name %>-ec2' -SNS_ARN = 'arn:aws:sns:<% .Region %>:{}:<% .Name %>' -INSTANCE_TYPE = '<% .InstanceType %>' -DEVICE = '<% .Device %>' -SSH_KEY_NAME = '<% .SSHKey %>' -MAX_PRICE = '<% .MaxPrice %>' -SKIP_PRICE = '<% .SkipPrice %>' -REGIONS = '<% .InstanceRegions %>' -AMI_OVERRIDE = '<% .AMI %>' -ENCRYPTED_KEYS = '<% .EncryptedKeys %>' - -def send_sns_message(subject, message): - account_id = boto3.client('sts').get_caller_identity().get('Account') - sns = boto3.client('sns') - resp = sns.publish(TopicArn=SNS_ARN.format(account_id), Subject=subject, Message=message) - print("Sent SNS message {} and got response: {}".format(message, resp)) - -def lambda_handler(event, context): - # get account id to fill in fleet role and ec2 profile - account_id = boto3.client('sts').get_caller_identity().get('Account') - - force_build = False - if "ForceBuild" in event: - force_build = event['ForceBuild'] - aosp_build = "" - if "AOSPBuild" in event: - aosp_build = event['AOSPBuild'] - aosp_branch = "" - if "AOSPBranch" in event: - aosp_branch = event['AOSPBranch'] - - try: - cheapest_price = 0 - cheapest_region = "" - cheapest_az = "" - for region in REGIONS.split(","): - ec2_client = boto3.client('ec2', region_name=region) - spot_price_dict = ec2_client.describe_spot_price_history( - StartTime=datetime.now() - timedelta(minutes=1), - EndTime=datetime.now(), - InstanceTypes=[ - INSTANCE_TYPE - ], - ProductDescriptions=[ - 'Linux/UNIX (Amazon VPC)' - ], - ) - for key, value in spot_price_dict.items(): - if key == u'SpotPriceHistory': - for i in value: - az = i[u'AvailabilityZone'] - price = i[u'SpotPrice'] - if cheapest_price == 0: - cheapest_price = price - cheapest_region = region - cheapest_az = az - else: - if price < cheapest_price: - cheapest_price = price - cheapest_region = region - cheapest_az = az - print("{} {}".format(az, price)) - except Exception as e: - send_sns_message("RattlesnakeOS Spot Instance FAILED", "There was a problem finding cheapest 
region for spot instance {}: {}".format(INSTANCE_TYPE, e)) - raise - - if float(cheapest_price) > float(SKIP_PRICE): - message = "Cheapest spot instance {} price ${} in AZ {} is not lower than --skip-price ${}.".format(INSTANCE_TYPE, cheapest_price, - cheapest_az, SKIP_PRICE) - send_sns_message("RattlesnakeOS Spot Instance SKIPPED", message) - return message - - # AMI to launch with - ami = REGION_AMIS[cheapest_region] - if AMI_OVERRIDE: - ami = AMI_OVERRIDE - - # create ec2 client for cheapest region - client = boto3.client('ec2', region_name=cheapest_region) - - # get a subnet in cheapest az to request spot instance in - subnets = client.describe_subnets(Filters=[{'Name': 'availabilityZone','Values': [cheapest_az]}])['Subnets'][0]['SubnetId'] - - # userdata to deploy with spot instance - userdata = base64.b64encode(""" -#cloud-config -output : {{ all : '| tee -a /var/log/cloud-init-output.log' }} - -repo_update: true -repo_upgrade: all -packages: -- awscli - -runcmd: -- [ bash, -c, "sudo -u ubuntu aws s3 --region <% .Region %> cp {0} /home/ubuntu/build.sh" ] -- [ bash, -c, "sudo -u ubuntu bash /home/ubuntu/build.sh {1} {2} {3} {4}" ] - """.format(SRC_PATH, DEVICE, str(force_build).lower(), aosp_build, aosp_branch).encode('ascii')).decode('ascii') - - # make spot fleet request config - now_utc = datetime.utcnow().replace(microsecond=0) - valid_until = now_utc + timedelta(hours=12) - spot_fleet_request_config = { - 'IamFleetRole': FLEET_ROLE.format(account_id), - 'AllocationStrategy': 'lowestPrice', - 'TargetCapacity': 1, - 'SpotPrice': MAX_PRICE, - 'ValidFrom': now_utc, - 'ValidUntil': valid_until, - 'TerminateInstancesWithExpiration': True, - 'LaunchSpecifications': [ - { - 'ImageId': ami, - 'SubnetId': subnets, - 'InstanceType': INSTANCE_TYPE, - 'IamInstanceProfile': { - 'Arn': IAM_PROFILE.format(account_id) - }, - 'BlockDeviceMappings': [ - { - 'DeviceName' : '/dev/sda1', - 'Ebs': { - 'DeleteOnTermination': True, - 'VolumeSize': 250, - 'VolumeType': 'gp2' - }, - }, - ], - 'UserData': userdata - }, - ], - 'Type': 'request' - } - - # check if ec2 keypair exists in this region - otherwise don't include keypair in spot request - try: - client.describe_key_pairs(KeyNames=[SSH_KEY_NAME]) - spot_fleet_request_config['LaunchSpecifications'][0]['KeyName'] = SSH_KEY_NAME - except Exception as e: - if ENCRYPTED_KEYS == "true": - message = "Encrypted keys is enabled, so properly configured SSH keys are mandatory. 
Unable to find an EC2 Key Pair named '{}' in region {}.".format(SSH_KEY_NAME, cheapest_region) - send_sns_message("RattlesnakeOS Spot Instance CONFIGURATION ERROR", message) - return message - else: - print("Not including SSH key in spot request as couldn't find a key in region {} with name {}: {}".format(cheapest_region, SSH_KEY_NAME, e)) - - print("spot_fleet_request_config: ", spot_fleet_request_config) - - try: - print("Requesting spot instance in AZ {} with current price of {}".format(cheapest_az, cheapest_price)) - response = client.request_spot_fleet(SpotFleetRequestConfig=spot_fleet_request_config) - print("Spot request response: {}".format(response)) - except Exception as e: - send_sns_message("RattlesnakeOS Spot Instance FAILED", "There was a problem requesting a spot instance {}: {}".format(INSTANCE_TYPE, e)) - raise - - subject = "RattlesnakeOS Spot Instance SUCCESS" - message = "Successfully requested a spot instance.\n\n Stack Name: {}\n Device: {}\n Force Build: {}\n Instance Type: {}\n Cheapest Region: {}\n Cheapest Hourly Price: ${} ".format(NAME, DEVICE, force_build, INSTANCE_TYPE, cheapest_region, cheapest_price) - send_sns_message(subject, message) - return message.replace('\n', ' ') - -if __name__ == '__main__': - lambda_handler("", "") -` diff --git a/templates/terraform_template.go b/templates/terraform.tf similarity index 69% rename from templates/terraform_template.go rename to templates/terraform.tf index 2252fa09..dca0495b 100644 --- a/templates/terraform_template.go +++ b/templates/terraform.tf @@ -1,15 +1,12 @@ -package templates - -const TerraformTemplate = ` ###################### # S3 Terraform Backend ###################### terraform { - backend "s3" { - bucket = "<% .Config.Name %>" - key = "terraform.state" - region = "<% .Config.Region %>" - } + backend "s3" { + bucket = "<% .Config.Name %>" + key = "terraform.state" + region = "<% .Config.Region %>" + } } ################### @@ -26,33 +23,33 @@ variable "region" { } variable "device" { - description = "Device type" - default = "<% .Config.Device %>" + description = "Device type" + default = "<% .Config.Device %>" } variable "lambda_build_zip_file" { - description = "Lambda build zip file" - default = "<% .LambdaZipFileLocation %>" + description = "Lambda build zip file" + default = "<% .LambdaZipFileLocation %>" } variable "shell_script_file" { - description = "Shell script file" - default = "<% .BuildScriptFileLocation %>" + description = "Shell script file" + default = "<% .BuildScriptFileLocation %>" } ################### # Provider ################### provider "aws" { - region = "${var.region}" + region = "${var.region}" } ################### # IAM ################### resource "aws_iam_role" "rattlesnake_ec2_role" { - name = "${var.name}-ec2" - assume_role_policy = < ################### # Cloudwatch Event ################### resource "aws_cloudwatch_event_rule" "build_schedule" { - name = "${var.name}-build-schedule" - description = "RattlesnakeOS build" - schedule_expression = "<% .Config.Schedule %>" + name = "${var.name}-build-schedule" + description = "RattlesnakeOS build" + schedule_expression = "<% .Config.Schedule %>" } resource "aws_cloudwatch_event_target" "check_build_schedule" { - rule = "${aws_cloudwatch_event_rule.build_schedule.name}" - target_id = "${var.name}-build" - arn = "${aws_lambda_function.rattlesnake_lambda_build.arn}" + rule = "${aws_cloudwatch_event_rule.build_schedule.name}" + target_id = "${var.name}" + arn = "${aws_lambda_function.rattlesnake_lambda_build.arn}" } 
resource "aws_lambda_permission" "allow_cloudwatch_to_call_build_schedule" { - statement_id = "AllowExecutionFromCloudWatch" - action = "lambda:InvokeFunction" - function_name = "${aws_lambda_function.rattlesnake_lambda_build.function_name}" - principal = "events.amazonaws.com" - source_arn = "${aws_cloudwatch_event_rule.build_schedule.arn}" -} - -################### -# Outputs -################### -output "sns_topic_arn" { - description = "The SNS ARN" - value = "${aws_sns_topic.rattlesnake.arn}" + statement_id = "AllowExecutionFromCloudWatch" + action = "lambda:InvokeFunction" + function_name = "${aws_lambda_function.rattlesnake_lambda_build.function_name}" + principal = "events.amazonaws.com" + source_arn = "${aws_cloudwatch_event_rule.build_schedule.arn}" } -` +<%- end %> \ No newline at end of file