diff --git a/bazel/conf/.bazelrc.build b/bazel/conf/.bazelrc.build index 3d741e450d14..2f3ebb1e3c3e 100644 --- a/bazel/conf/.bazelrc.build +++ b/bazel/conf/.bazelrc.build @@ -68,9 +68,6 @@ test:precommit --build_tests_only --test_tag_filters="smoke" build:systest --build_tag_filters= test:systest --test_output=streamed --test_tag_filters= -build:testnet --build_tag_filters= -test:testnet --test_output=streamed --test_tag_filters= - # For sandboxed actions, mount an empty, writable directory at this absolute path # (if supported by the sandboxing implementation, ignored otherwise). test --sandbox_tmpfs_path=/tmp diff --git a/testnet/BUILD.bazel b/testnet/BUILD.bazel deleted file mode 100644 index cda0d1e0c3ac..000000000000 --- a/testnet/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@python_deps//:requirements.bzl", "requirement") -load("@rules_python//python:defs.bzl", "py_binary") - -package(default_visibility = ["//visibility:public"]) - -py_binary( - name = "inventory", - srcs = ["ansible/inventory/inventory.py"], - data = glob(["env/**/*"]), - deps = [ - requirement("ansible"), - ], -) diff --git a/testnet/README.md b/testnet/README.md deleted file mode 100644 index dacdac602f04..000000000000 --- a/testnet/README.md +++ /dev/null @@ -1,445 +0,0 @@ -# IC-OS Testnet - -## Index - -* [About](#about) - * [Overview](#overview) - * [Support](#support) - * [Dependencies](#dependencies) -* [Usage](#usage) - * [Run-Deployment](#run-deployment) - * [List-Inventory](#list-inventory) - * [SSH-Config](#ssh-config) - * [Delete-Deployment](#delete-deployment) -* [Inventory](#inventory) - * [Hosts](#hosts) - * [Resources](#resources) - * [Nodes](#nodes) - * [Mercury](#mercury) - * [Monitoring](#monitoring) -* [Blueprint](#blueprint) - * [HostOS](#hostos) - * [GuestOS](#guestos) - * [Ansible](#ansible) -* [Troubleshooting](#troubleshooting) -* [FAQ](#faq) -* [Appendix](#appendix) - -## About - -This document aims to provide an overview and understanding of the deployment -mechanism behind the new IC-OS based testnets. Please also read (and extend) the -FAQ at the end of this document. - -### Overview - -``` -testnet/ - ansible/ - ansible.cfg - The main Ansible configuration file. Tweak this file if you - happen to run into connectivity issues or timeouts. - - - roles/ - ic_guest/ - Ansible role taking care of redeploying a - testnet. - Functionality: - - pulls disk image on IC-OS hosts - - pushes media images to IC-OS hosts - - creates and destroys IC-OS guests - - starts and stops IC-OS guests - - - inventory/ - inventory.py - This Python script generates the Ansible - inventory, based on the hosts.ini input. - Functionality: - - Prepares IC-OS guest network config - - Defines MAC addresses - - Calculates IPv6 SLAAC addresses - - Prepares config for media images - - Generates SSH config per testnet - - Arguments: - --list - List inventory - --host - List variables of - --all - List all variables - --ssh-config - Configure local ssh client to access testnet hosts. - --verbose - Run Python tool with verbose output. - - - config/ - nftables.conf - This file holds the raw nftables ruleset. The default - ingress and egress (IPv4 and IPv6) policy is ACCEPT. - In other words, the firewall is not filtering any traffic - unless you explicitly change settings in this file. - :information_source: This configuration option might disappear in the near future. - - - ssh_authorized_keys/ - This folder holds the authorized_keys files being injected - into the GuestOS. 
Please add your public SSH key to the - respective file. - - admin - Adding your public SSH key to this - authorized_keys file will grant you admin - (root) permissions inside the GuestOS. - - ssh admin@ - - backup - Adding your public SSH key to this - authorized_keys file will grant you permissions - for the backup/subnet recovery tasks. - - ssh backup@ - - readonly - Adding your public SSH key to this - authorized_keys file will grant you read-only - permissions in the GuestOS. - - ssh readonly@ - - - env/ - / - hosts - This file is a symbolic link to the dynamic Ansible - inventory script. - - hosts -> ../../ansible/inventory/inventory.py - - When creating a new testnet, this link can be created with - `cd testnet/env/ && ln -sf ../../ansible/inventory/inventory.py hosts` - - hosts.ini - An abstracted, human readable version of the minimal - Ansible inventory for a deployment. Please define your - subnet here. - - For examples, look at the hosts.ini of the - 'small01' or 'medium01' environments. - - - shared-config.yml - Ansible inventory configuration shared across testnets. - This includes data center IPv6 prefixes, metrics scraping - parameters and default values for Ansible roles. - - - tools/ - icos_deploy.sh - This is the actual deployment script. It takes care of - building the removable media files and redeploying a - testnet. - - Command: - ./icos_deploy.sh --git-revision ${testnet} -``` - -### Dependencies - -In order to run the Ansible deployment from your own machine or any remote -server, the following dependencies have to be met: - - Operating System: -``` -Ubuntu 20.04 -``` -:warning: Deployments from MacOS are not supported at the moment. - - - Packages: -``` -apt -y install ansible coreutils jq mtools rclone tar util-linux unzip --no-install-recommends -``` - -If you are not working on a Ubuntu 20.04 based system, you can use the following -office builders. -Please make sure that you initialize the ssh agent before connecting, and to -forward the local ssh credentials. - -Check the ssh-agent keys -```bash -ssh-add -L -``` - -``` -# SSH to remote machine using your DFINITY SSH user -ssh -A zh1-spm22.zh1.dfinity.network -``` - -or -``` -ssh -A zh1-spm34.zh1.dfinity.network -``` - -## Usage - -### Understanding Ansible Playbooks - -**Note:** this section has been copied from the old README file, and may be slightly out of date. However, it is still useful to understand the naming convention for the ansible playbooks. - -The following provides a mental model for understanding the available playbooks. -Playbooks are the `*.yml` files in /dfinity/testnet. The file names are patterned -around adverbs, verbs and nouns as follows. - -```bash -ansible-playbook -i env/xyz/hosts ic_${ optional adverb or adjective }_${ noun }_${ verb }.yml -``` - -Nouns - - Subnet: A collection of nodes that host a set of canisters. - Nodes: All nodes in the IC that form subnets. - Node: ... - NNS: The NNS subnetwork that contains the registry. - Env: All of the above. - -Verbs - - Install: A one-time operation, the operator should only execute once for - the lifecycle of the noun. Performing multiple successive installs - will destroy the previous installation. - - Update: A repeatable operation, the operator may execute through the - life-cycle of a noun, for example to upgrade to new binaries - or configurations. - - Extend: A repeateable operation, just like Update. Usually refers to - membership or other qualities that are not replaced, but extended, - like disk space, memory, ... 
- - Destroy: A one-time operation, the operator should use this to end - the lifecycle of a noun - -Adjective - - independent: node or subnet installation without crypto material generation. Suitable for bootstrapping. - -***Additional Options*** - -Ansible accepts setting variables from the command line with -e $key=$value. - -### Run-Deployment - -To initiate a redeployment of an IC-OS based testnet, simply run the following -commands: - -``` -# Clone the DFINITY Git repository -git clone git@github.com:dfinity/ic.git -cd ic/ - - -# Run deployment to (e.g. small01, medium01, ...) -./testnet/tools/icos_deploy.sh --git-revision d53b551dc677a82c8420a939b5fee2d38f6f1e8b -``` - -You can get the latest git sha with disk image for a branch (e.g. master) by running: -``` -./ci/src/artifacts/newest_sha_with_disk_image.sh origin/master -``` - -### List-Inventory - -To gather all facts of a testnet, simply run the dynamic inventory script: - -``` -testnet/env//hosts --list -``` - -To list all nodes from a testnet, you can run: -``` -testnet/env//hosts --nodes -``` - -And to list only the IPv6 addresses: -``` -testnet/env//hosts --ipv6 -``` - -Be aware that these commands require python dependencies. If these are not pre-installed on the machine, you will need to start a python virtual environment and install them yourself. Alternatively you can run the same commands in bazel: - -``` -bazel run testnet//inventory -- --deployment --list -bazel run testnet//inventory -- --deployment --nodes -bazel run testnet//inventory -- --deployment --ipv6 -``` - -### Host-Variables - -To list variables for a specific node, you can run the dynamic inventory script: - -``` -testnet/env//hosts --host .1.2.testnet -``` - -### SSH-Config - -SSH can be used to login to the individual nodes. Since nodes do not have IPv4 -addresses or DNS records, you will need to use its public IPv6 address. - -``` -# Optional: Load your SSH private key to enable SSH agent forwarding -ssh-agent bash -ssh-add - -# Remember to use the 'admin' account -ssh admin@feed:f00d:beef:cafe::1 -``` - -Alternatively, generate the SSH configuration file from the dynamic inventory. -This allows you to use the inventory hostname in your SSH command. - -``` -testnet/env//hosts --ssh-config -``` - -The above adds the SSH config file into your SSH config directory -``` -ls -l ~/.ssh/config.d/ -``` - -So you should be able to connect to the testnet nodes with: -``` -ssh .1.2.testnet -``` -The above ssh configuration needs to be for each testnet, and needs to be -re-run whenever testnet configuration changes, for example when nodes are -added or removed. - -### Delete-Deployment - -## Inventory - -This section describes the static inventory, which holds the minimal testnet definition -from which a full dynamic inventory is built during the deployment. - -### Hosts - -The first section defines the physical hosts being used for this testnet. Make -sure to use valid fully qualified domain names (FQDNs). - -``` -[physical_hosts] -sf1-spm00.sf1.dfinity.network -zh1-spm00.zh1.dfinity.network -``` - -### Resources - -For large testnets it might make sense to adjust the default resources. The disk -size should not be smaller than 50 GB. 
- -(example of changed `env/small01/hosts.ini`) -``` -[physical_hosts] -zh1-spm19.zh1.dfinity.network ic_cores=2 ic_disk_gb=50 ic_memory_gb=8 -``` - -The default resource allocation is: -``` -ic_cores: 4 -ic_disk_gb: 100 -ic_memory_gb: 16 -``` - -Alternatively, to avoid changing the `hosts.ini` file, temporary testnet configuration can be provided on the command line. -For instance, a testnet with larger disks (300 GB in this case) can be deployed this way: -``` -./testnet/tools/icos_deploy.sh \ - --git-revision $(./ci/src/artifacts/newest_sha_with_disk_image.sh origin/master) \ - --ansible-args '-e ic_disk_gb=300' -``` - -### Nodes - -The second part defines all nodes. You can define as many nodes and subnets as -you wish. Please make sure to use a unique `node_index` per node. The `ic_host=` -parameter assigns a node to a physical host. - -`subnet_index` is extracted from the group name since some tests expect the -group name in a form of `subnet_X` where `X` is the `subnet_index`. If -provided explicitly as well for node, the `subnet_index` extracted from the -group name and the node name must match. If not, an error will be thrown. - -`node_index` is extracted from the node name, IFF the name has the format -`.`. The `node_index` can also be provided -explicitly but it must match the value extracted from the node name (if the -node is named in a compatible way). - -For instance, the following is valid configuration: -``` -[nns] -example.0.0 ic_host="zh1-spm00" -example.0.1 ic_host="zh1-spm00" -example.0.2 ic_host="sf1-spm00" -example.0.3 ic_host="sf1-spm00" - -[subnet_1] -example.1.4 ic_host="zh1-spm00" -example.1.5 ic_host="zh1-spm00" -example.1.6 ic_host="sf1-spm00" -example.1.7 ic_host="sf1-spm00" - -[subnet_2] -example.2.8 ic_host="zh1-spm00" -example.2.9 ic_host="zh1-spm00" -example.2.10 ic_host="sf1-spm00" -example.2.11 ic_host="sf1-spm00" - -[nodes:children] -nns -subnet_1 -subnet_2 -``` - -The following is also valid: -``` -[nns] -example.0.0 ic_host="zh1-spm00" - -[subnet_1] -example.1.4 ic_host="zh1-spm00" - -[nodes:children] -nns -subnet_1 -``` - - -The following is NOT valid because `example.2.4` should be in group name `subnet_2` but belongs to a group `subnet_1`: -``` -[nns] -example.0.0 ic_host="zh1-spm00" - -[subnet_1] -example.2.4 ic_host="zh1-spm00" - -[nodes:children] -nns -subnet_1 -``` - -The following is also NOT valid because `example.1.1` has an explicit `node_index` value set to value 4, whereas it should be `node_index=1`: -``` -[nns] -example.0.0 ic_host="zh1-spm00" - -[subnet_1] -example.1.1 node_index=4 ic_host="zh1-spm00" - -[nodes:children] -nns -subnet_1 -``` diff --git a/testnet/ansible.cfg b/testnet/ansible.cfg deleted file mode 100644 index db55c3159743..000000000000 --- a/testnet/ansible.cfg +++ /dev/null @@ -1,20 +0,0 @@ -# This provides some configuration for ansible. -# XXX: part of the configuration -- in particular everything related to mitogen -# -- is provided through environment variables; see our Ansible override in -# nix/overlays/default.nix. The Mitogen version pinned is defined in -# nix/sources.json. 
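-#
-# Orientation notes on the settings below:
-#   host_key_checking=False          - do not prompt for / verify SSH host keys of the guests
-#   roles_path=ansible/roles         - where playbook roles (e.g. ic_guest) are looked up
-#   callback_whitelist=profile_tasks - print per-task timing when a playbook run finishes
-#   stdout_callback=debug            - multi-line task output that is easier to read on failures
-#   pipelining=True                  - fewer SSH round-trips per task, noticeably faster on large inventories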
- -[defaults] -host_key_checking=False -inventory=unspecified -display_skipped_hosts=False -roles_path=ansible/roles -callback_whitelist = profile_tasks -stdout_callback = debug - -[ssh_connection] -pipelining = True - -[callback_profile_tasks] -output_limit=all -sort_order=none diff --git a/testnet/host_inventory.py b/testnet/host_inventory.py deleted file mode 100755 index 67a82554c89d..000000000000 --- a/testnet/host_inventory.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/env python3 -import itertools -import re -import sys -from os import path, walk - - -def main(argv): - testnet_dir = path.dirname(path.abspath(argv[0])) - env_dir = path.join(testnet_dir, "env") - - res = list(itertools.chain(*map(triples_from_file, host_files(env_dir)))) - - testnets = sorted(set(m[0] for m in res)) - hosts = sorted(set(m[3] for m in res)) - - table = {h: {tn: [] for tn in testnets} for h in hosts} - - for tn, typ, idx, host in res: - table[host][tn].append("{}.{}".format(typ, idx)) - - print("host;{}".format(";".join(sorted(testnets)))) - for k, v in sorted(table.items()): - line = ";".join([", ".join(v[tn]) for tn in testnets]) - print("{};{}".format(k, line)) - - -# print(sorted(testnets)) -# print(len(list(testnets))) -# print(sorted(hosts)) -# print(len(list(hosts))) -# print(table) - - -def host_files(p): - for root, dirs, files in walk(p): - for f in filter(lambda f: f.endswith(".ini"), files): - yield path.join(root, f) - - -NODE_DEF_RE = re.compile(r'^\s*([-a-zA-Z.0-9]+)\s+.*ic_host="([a-zA-Z-0-9.]+)"') -NODE_DEF_RE = re.compile(r'^\s*([-a-zA-Z0-9]+)\.([a-zA-Z0-9]+)\.([a-zA-Z0-9]+)\s+.*ic_host="([a-zA-Z-0-9.]+)"') - - -def triples_from_file(h): - with open(h, "r") as f: - matches = (NODE_DEF_RE.match(line) for line in f) - matches = (m for m in matches if m is not None) - return [(m[1], m[2], m[3], m[4]) for m in matches] - - -if __name__ == "__main__": - main(sys.argv) diff --git a/testnet/tools/BUILD.bazel b/testnet/tools/BUILD.bazel deleted file mode 100644 index fdeaba38298e..000000000000 --- a/testnet/tools/BUILD.bazel +++ /dev/null @@ -1,40 +0,0 @@ -genrule( - name = "icos_deploy", - testonly = True, - srcs = [ - "//ic-os/guestos/envs/dev:version.txt", - "//ic-os/guestos/envs/dev:upload_disk-img", - "//ic-os/guestos/envs/dev:upload_update-img", - "//ic-os/guestos/envs/prod:upload_update-img", - "//ic-os/boundary-guestos/envs/dev:upload_disk-img", - "//ic-os/boundary-guestos/envs/prod:upload_disk-img", - "build-guestos-config.sh", - "//publish/binaries:legacy_upload", - "//publish/canisters:upload", - ], - outs = ["icos_deploy_script"], - cmd = """ - VERSION="$$(cat $(location //ic-os/guestos/envs/dev:version.txt))" - - cat < $@ -#!/usr/bin/env bash -set -euo pipefail - -which ansible || sudo pip install ansible==6.6.0 - -PREFIX='exec' -if [ \\$$# -gt 0 ]; then - if [ "\\$$1" == "-n" ]; then - PREFIX='echo' - shift - fi -fi - -cd "\\$$BUILD_WORKSPACE_DIRECTORY" - -\\$$PREFIX ./testnet/tools/icos_deploy.sh --git-revision "$$VERSION" "\\$$@" -EOF - """, - executable = True, - tags = ["manual"], -) diff --git a/testnet/tools/README.md b/testnet/tools/README.md deleted file mode 100644 index d03cf5878d7e..000000000000 --- a/testnet/tools/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Tools to run static testnets - -## Running a static testnet from the local build - -The procedure currently works on a linux host only. 
* Reserve a static testnet using Dee
-* Build and deploy IC-OS to static testnet
-  ```bash
-  ./ci/container/container-run.sh -f
-  export ANSIBLE_REMOTE_USER=<sshuser>
-  bazel run //testnet/tools:icos_deploy --config=testnet -- <testnet>
-  ```
-
-**Note:** *It's important that `<sshuser>` matches the username that was set on the servers that run the testnet nodes! See [here](https://github.com/dfinity-lab/dcs/blob/master/ansible-internal/group_vars/development.yml).*
-
-To produce all required artifacts without starting a testnet, run the command with the `-n` flag:
-```bash
-bazel run //testnet/tools:icos_deploy --config=testnet -- -n
-```
diff --git a/testnet/tools/icos_collect_debug_info.py b/testnet/tools/icos_collect_debug_info.py
deleted file mode 100755
index 0b748535c05d..000000000000
--- a/testnet/tools/icos_collect_debug_info.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env python3
-"""
-Collect various debug information from a testnet test deployment.
-
-- collect system and "ic-replica" service logs using journalctl
-- collect replica endpoint status
-- collect netstat
-- store them in the $experiment_dir/debug_info
-The collected debug information can be downloaded from the GitLab Web UI or using the command line:
-
-${REPO_ROOT}/ci/src/artifacts/gitlab_artifacts_download.py --job-id <job-id>
-"""
-
-import argparse
-import json
-import logging
-import os
-import pathlib
-import subprocess
-import typing
-from multiprocessing import Pool
-
-import cbor
-import git
-import paramiko
-import requests
-import yaml
-
-git_repo = git.Repo(os.path.dirname(__file__), search_parent_directories=True)
-repo_root = pathlib.Path(git_repo.git.rev_parse("--show-toplevel"))
-
-
-def get_deployment_nodes(deployment_name: str):
-    """Get a list of nodes for a deployment, as a dictionary of {node_name: ipv6}."""
-    output = subprocess.check_output(
-        [
-            repo_root / "testnet/ansible/inventory/inventory.py",
-            "--deployment",
-            deployment_name,
-            "--nodes",
-        ]
-    )
-    return yaml.load(output, Loader=yaml.FullLoader)
-
-
-def _get_map_node_to_ic_host(deployment_name: str):
-    """Get the mapping {node: phy_node} between the nodes and the raw iron (physical host) behind it."""
-    hosts = subprocess.check_output(
-        [
-            repo_root / "testnet/ansible/inventory/inventory.py",
-            "--deployment",
-            deployment_name,
-            "--list",
-        ]
-    )
-    result = {}
-    for phy_host, host_vars in json.loads(hosts)["_meta"]["hostvars"].items():
-        # Try to get the list of ic_guests on every hostvars. If the list does not exist, fall back to an empty list.
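-        # For orientation: `--list` emits standard Ansible dynamic-inventory JSON; only the
-        # per-host `ic_guests` list is used here. Illustrative shape (other fields omitted):
-        #   {"_meta": {"hostvars": {"zh1-spm00.zh1.dfinity.network": {"ic_guests": ["example.0.0", ...]}}}}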
- for node in host_vars.get("ic_guests", []): - # There are some "ic_guests" on this physical node, so create a reverse mapping - result[node] = phy_host - return result - - -def collect_host_dominfo(nodes: typing.List[str], deployment_name: str): - """Iterate through the hosts collecting dominfo for each node and pull the libvirt dominfo for the deployment VMs.""" - node_to_ic_host = _get_map_node_to_ic_host(deployment_name) - for node_name, node_ipv6 in nodes.items(): - ichost = node_to_ic_host[node_name] - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - command = "sudo virsh dominfo " + node_name - - client.connect(ichost, port=22, username=os.environ.get("USER", "gitlab-runner"), timeout=10) - - (_stdin, _stdout, _stderr) = client.exec_command(f"timeout 10 bash -c '{command}'") - logging.info("-------") - logging.info("using command _ %s _ on host %s", command, ichost) - for line in iter(_stdout.readline, ""): - print(line, end="") - - -def _ssh_run_command(node: typing.List, out_dir: pathlib.Path, out_filename: str, command: str): - """SSH into a node, run the command, and store the result in a local file {outdir}/{out_filename}.""" - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - if isinstance(out_dir, str): - out_dir = pathlib.Path(out_dir) - - node_name, node_ipv6 = node - logging.info("Run for node %s: %s", node_name, command) - - client.connect(node_ipv6, port=22, username="admin", timeout=10) - - (_stdin, stdout, stderr) = client.exec_command(f"timeout 10 bash -c '{command}'") - node_log_dir = out_dir / node_name - node_log_dir.mkdir(exist_ok=True, parents=True) - with open(node_log_dir / out_filename, "wb") as f_stdout: - stdout.channel.settimeout(10) - stdout.channel.recv_exit_status() - f_stdout.write(stdout.read()) - with open(node_log_dir / out_filename, "ab") as f_stderr: - stderr.channel.settimeout(10) - stderr.channel.recv_exit_status() - f_stderr.write(stderr.read()) - - -def _parallel_ssh_run(nodes: typing.List[str], out_dir: pathlib.Path, out_filename: str, command: str): - """Parallel ssh into the `nodes` and run `command`, then store the output into `out_dir`/{node}/`out_filename`.""" - with Pool(16) as pool: - pool.starmap( - _ssh_run_command, - map(lambda n: (n, out_dir, out_filename, command), nodes.items()), - ) - - -def collect_journalctl_logs(nodes: typing.List[str], out_dir: pathlib.Path): - """Collect the system logs for all nodes in a deployment.""" - _parallel_ssh_run( - nodes, - out_dir, - "journalctl-system.txt", - "journalctl --since='-24h'", - ) - - -def collect_ic_replica_service_logs(nodes: typing.List[str], out_dir: pathlib.Path): - """Collect the "ic-replica" service logs for all nodes in a deployment.""" - _parallel_ssh_run( - nodes, - out_dir, - "journalctl-ic-replica.txt", - "journalctl -xu ic-replica --since='-24h'", - ) - - -def collect_netstat_listen_ports(nodes: typing.List[str], out_dir: pathlib.Path): - """Collect the netstat listen ports for all nodes in a deployment.""" - _parallel_ssh_run(nodes, out_dir, "ports-tcp-listen.txt", "sudo netstat -tulpn") - - -def collect_netstat_open_ports(nodes: typing.List[str], out_dir: pathlib.Path): - """Collect the netstat open ports for all nodes in a deployment.""" - _parallel_ssh_run(nodes, out_dir, "ports-tcp-open.txt", "sudo netstat -pn") - - -def collect_system_stats(nodes: typing.List[str], out_dir: pathlib.Path): - """Collect the netstat open ports for all nodes in a deployment.""" - 
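-    # Gathers load average (`uptime`), memory usage (`free -m`) and the process tree (`ps -faux`)
-    # from every node in parallel via the shared SSH helper.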
_parallel_ssh_run(nodes, out_dir, "system-stats.txt", "uptime; free -m; ps -faux") - - -def collect_replica_api_status(nodes: typing.List[str], out_dir: pathlib.Path): - """Collect the "replica" endpoint status for all nodes in a deployment.""" - for node_name, node_ipv6 in nodes.items(): - node_log_dir = out_dir / node_name - node_log_dir.mkdir(exist_ok=True, parents=True) - - replica_url = f"http://[{node_ipv6}]:8080/api/v2/status" - try: - req = requests.get(replica_url) - - with open(node_log_dir / "replica-status.cbor", "wb") as f_out: - f_out.write(req.content) - - status = cbor.loads(req.content) - with open(node_log_dir / "replica-status.txt", "w") as f_out: - f_out.write(str(status.value)) - - except requests.exceptions.ConnectionError as e: - with open(node_log_dir / "replica-status.txt", "w") as f_out: - f_out.write("ConnectionError: %s" % e) - - -def collect_all_debug_info( - deployment_name: str, - out_dir: pathlib.Path, -): - """Collect the debug info for a deployment and store it in out_dir.""" - nodes = get_deployment_nodes(deployment_name) - if isinstance(out_dir, str): - out_dir = pathlib.Path(out_dir) - - logging.info("Collecting debug info for the IC-OS deployment: %s", deployment_name) - out_dir.mkdir(exist_ok=True, parents=True) - paramiko.util.log_to_file(out_dir / "paramiko.log", level="WARN") - - collect_host_dominfo(nodes, deployment_name) - collect_journalctl_logs(nodes, out_dir) - collect_ic_replica_service_logs(nodes, out_dir) - collect_replica_api_status(nodes, out_dir) - collect_netstat_listen_ports(nodes, out_dir) - collect_netstat_open_ports(nodes, out_dir) - collect_system_stats(nodes, out_dir) - - logging.info("Debug info written to: %s", out_dir.absolute()) - - -def main(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "--deployment-name", - action="store", - help='Deployment name (e.g. "cdhourly")', - ) - - parser.add_argument( - "--out-dir", - action="store", - help="The directory where the debug information should be written.", - default=pathlib.Path("."), - ) - - parser.add_argument("--verbose", "-v", action="store_true", help="Verbose mode") - - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - collect_all_debug_info(args.deployment_name, out_dir=args.out_dir) - - -if __name__ == "__main__": - main() diff --git a/testnet/tools/icos_deploy.sh b/testnet/tools/icos_deploy.sh deleted file mode 100755 index 871bf50a8fc1..000000000000 --- a/testnet/tools/icos_deploy.sh +++ /dev/null @@ -1,395 +0,0 @@ -#!/usr/bin/env bash - -# Tear-down and re-deploy the IC using Ansible. -# -# This script takes one positional argument: -# : The deployment referenced in `/testnet/env/${deployment}` -# -# Before running for the first time, make sure you have all the dependencies: -# sudo apt install ansible jq rclone - -set -eEuo pipefail - -cd "$(dirname "$0")" -REPO_ROOT="$(git rev-parse --show-toplevel)" -find "$REPO_ROOT/testnet" -type d -exec chmod 0775 '{}' \; - -function err() { - echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $*" >&2 -} - -if [[ "${BASH_VERSINFO:-0}" -lt 4 ]]; then - err "Bash 4+ is required" - exit 1 -fi - -function exit_usage() { - if (($# < 1)); then - err 'Usage: icos_deploy.sh [--git-head ] [--git-revision ] [--dkg-interval-length ] [--max-ingress-bytes-per-message ] [--ansible-args ] [--hosts-ini ] [--no-boundary-nodes] ' - err ' --git-head Deploy the testnet from the current git head.' 
- err ' --git-revision Deploy the testnet from the given git revision.' - err ' --ansible-args Additional ansible args. Can be specified multiple times.' - err ' --dkg-interval-length Set DKG interval length (-1 if not provided explicitly, which means - default will be used)' - err ' --max-ingress-bytes-per-message Set maximum ingress size in bytes (-1 if not provided explicitly, which means - default will be used)' - err ' --hosts-ini Override the default ansible hosts.ini to set different testnet configuration' - err ' --no-api-nodes Do not deploy API boundary nodes even if they are declared in the hosts.ini file' - err ' --no-boundary-nodes Do not deploy boundary nodes even if they are declared in the hosts.ini file' - err ' --boundary-dev-image Use development image of the boundary node VM' - err ' --with-testnet-keys Initialize the registry with readonly and backup keys from testnet/config/ssh_authorized_keys' - err ' --allow-specified-ids Allow installing canisters at specified IDs' - err '' - err 'To get the latest branch revision that has a disk image pre-built, you can use ci/src/artifacts/newest_sha_with_disk_image.sh' - err 'Example (deploy latest master to small-a):' - err '' - err ' testnet/tools/icos_deploy.sh small-a --git-revision $(ci/src/artifacts/newest_sha_with_disk_image.sh master)' - err '' - exit 1 - fi -} - -function ansible() { - ansible-playbook ${ANSIBLE_ARGS[@]} "$@" -} - -# Helper function to convert times -function dateFromEpoch() { - if [[ "$(uname)" == "Darwin" ]]; then - date -j -f '%s' "$1" - else - date --date="@$1" - fi -} - -function disk_image_exists() { - curl --output /dev/null --silent --head --fail \ - "https://download.dfinity.systems/ic/${GIT_REVISION}/guest-os/disk-img-dev/disk-img.tar.zst" \ - || curl --output /dev/null --silent --head --fail \ - "https://download.dfinity.systems/ic/${GIT_REVISION}/guest-os/disk-img.tar.zst" -} - -ANSIBLE_ARGS=() -HOSTS_INI_FILENAME="${HOSTS_INI_FILENAME:-hosts.ini}" - -if [ -n "${ANSIBLE_REMOTE_USER:-}" ]; then - ANSIBLE_ARGS+=(-u $ANSIBLE_REMOTE_USER) -fi - -while [ $# -gt 0 ]; do - case "${1}" in - --git-head) - GIT_REVISION="$(git rev-parse --verify HEAD)" - ;; - -s | --git-revision) - GIT_REVISION="${2:-}" - if [[ -z "${GIT_REVISION}" ]]; then exit_usage; fi - shift - ;; - --ansible-args) - if [[ -z "${2:-}" ]]; then exit_usage; fi - ANSIBLE_ARGS+=($2) - shift - ;; - --dkg-interval-length) - DKG_INTERVAL_LENGTH="${2:-}" - if [[ -z "${DKG_INTERVAL_LENGTH}" ]]; then exit_usage; fi - shift - ;; - --max-ingress-bytes-per-message) - MAX_INGRESS_BYTES_PER_MESSAGE="${2:-}" - if [[ -z "${MAX_INGRESS_BYTES_PER_MESSAGE}" ]]; then exit_usage; fi - shift - ;; - --hosts-ini) - if [[ -z "${2:-}" ]]; then exit_usage; fi - HOSTS_INI_FILENAME="${2}" - shift - ;; - --boundary-dev-image) - BOUNDARY_IMAGE_TYPE="-dev" - ;; - --no-api-nodes) - USE_API_NODES="false" - ;; - --no-boundary-nodes) - USE_BOUNDARY_NODES="false" - ;; - --with-testnet-keys) - WITH_TESTNET_KEYS="--with-testnet-keys" - ;; - --allow-specified-ids) - ALLOW_SPECIFIED_IDS="--allow-specified-ids" - ;; - -?*) exit_usage ;; - *) deployment="$1" ;; - esac - shift -done - -if [[ -z "${GIT_REVISION:-}" ]]; then - echo "ERROR: GIT_REVISION not set." - echo "Please provide the GIT_REVISION as env. variable or the command line with --git-revision " - exit_usage -fi - -if [[ -z "${deployment:-}" ]]; then - echo "ERROR: No deployment specified." 
- exit_usage -fi - -# Negative DKG value means unset (default will be used) -DKG_INTERVAL_LENGTH="${DKG_INTERVAL_LENGTH:=-1}" -# Negative value means unset (default will be used) -MAX_INGRESS_BYTES_PER_MESSAGE="${MAX_INGRESS_BYTES_PER_MESSAGE:=-1}" -# This environment variable will be picked up by the Ansible inventory generation script. -# No further action is required to use the custom HOSTS_INI file. -export HOSTS_INI_FILENAME -hosts_ini_file_path="${REPO_ROOT}/testnet/env/${deployment}/${HOSTS_INI_FILENAME}" -if [[ ! -f ${hosts_ini_file_path} ]]; then - echo >&2 "The Ansible inventory file does not exist, aborting: ${hosts_ini_file_path}" - exit 1 -fi - -for i in {1..60}; do - if disk_image_exists; then - echo "Disk image found for ${GIT_REVISION}" - break - fi - echo "Disk image not available for ${GIT_REVISION}, waiting 30s for it to be built by the CI ($i/60)" - sleep 30 -done -if [[ $i -ge 60 ]]; then - echo "Disk image not found for ${GIT_REVISION}, giving up" - exit 1 -fi - -echo "Deploying to ${deployment} from git revision ${GIT_REVISION}" - -starttime="$(date '+%s')" -echo "**** Deployment start time: $(dateFromEpoch "${starttime}")" - -if command -v ip &>/dev/null; then - ipv4_info="$(ip -4 address show | grep -vE 'valid_lft')" - ipv6_info="$(ip -6 address show | grep -vE 'valid_lft|fe80::')" - - echo "------------------------------------------------------------------------------- - **** Local IPv4 address information: - - ${ipv4_info} - - ------------------------------------------------------------------------------- - **** Local IPv6 address information: - - ${ipv6_info} - - -------------------------------------------------------------------------------" -fi - -MEDIA_PATH="${REPO_ROOT}/artifacts/guestos/${deployment}/${GIT_REVISION}" -BN_MEDIA_PATH="${REPO_ROOT}/artifacts/boundary-guestos/${deployment}/${GIT_REVISION}" -INVENTORY="${REPO_ROOT}/testnet/env/${deployment}/hosts" -USE_API_NODES="${USE_API_NODES:-true}" -USE_BOUNDARY_NODES="${USE_BOUNDARY_NODES:-true}" - -rm -rf "${BN_MEDIA_PATH}" -mkdir -p "${BN_MEDIA_PATH}" -"${INVENTORY}" --list >"${BN_MEDIA_PATH}/list.json" - -# Check if hosts.ini has boundary nodes -if jq <"${BN_MEDIA_PATH}/list.json" -e '.boundary.hosts | length == 0' >/dev/null; then - USE_BOUNDARY_NODES="false" -fi -if jq <"${BN_MEDIA_PATH}/list.json" -e '.api.hosts | length == 0' >/dev/null; then - USE_API_NODES="false" -fi - -if [[ "${USE_BOUNDARY_NODES}" == "true" ]]; then - ANSIBLE_ARGS+=("-e" "bn_media_path=${BN_MEDIA_PATH}") -else - ANSIBLE_ARGS+=("--skip-tags" "boundary_node_vm") -fi - -if [[ "${USE_API_NODES}" == "true" ]]; then - ANSIBLE_ARGS+=("-e" "api_media_path=${API_MEDIA_PATH}") -else - ANSIBLE_ARGS+=("--skip-tags" "api_node_vm") -fi - -if ! [[ -z "${ALLOW_SPECIFIED_IDS+x}" ]]; then - ANSIBLE_ARGS+=("-e" "allow_specified_ids=true") -fi - -ANSIBLE_ARGS+=( - "-i" "${INVENTORY}" - "-e" "bn_image_type=${BOUNDARY_IMAGE_TYPE:-}" - "-e" "ic_git_revision=${GIT_REVISION}" - "-e" "ic_media_path=${MEDIA_PATH}" - "-e" "ic_boundary_node_image=boundary" -) - -# Ensure we kill these on CTRL+C or failure -trap 'echo "EXIT received, killing all jobs"; jobs -p | xargs -rn1 pkill -P >/dev/null 2>&1; exit 1' EXIT - -TMPDIR=$(mktemp -d /tmp/icos-deploy.sh.XXXXXX) - -DESTROY_OUT="${TMPDIR}/destroy.log" -echo "**** Start destroying old deployment (log ${DESTROY_OUT})" -COMMAND=$( - cat </dev/null 2>&1 & -DESTROY_PID=$! 
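-# Note on the pattern above (also used for the boundary-node media below): the long-running step is
-# wrapped in `script` so its output lands in a log file, started in the background, and its PID is
-# recorded so the main flow can `wait` for it and dump the log further down.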
- -echo "-------------------------------------------------------------------------------" - -echo "**** Build USB sticks for IC nodes - ($(dateFromEpoch "$(date '+%s')"))" -rm -rf "${MEDIA_PATH}" -mkdir -p "${MEDIA_PATH}" -"${INVENTORY}" --media-json >"${MEDIA_PATH}/${deployment}.json" - -"${REPO_ROOT}/testnet/tools/build-guestos-configs.sh" \ - --debug \ - --input="${MEDIA_PATH}/${deployment}.json" \ - --output="${MEDIA_PATH}" \ - --git-revision="${GIT_REVISION}" \ - --whitelist="${REPO_ROOT}/testnet/env/${deployment}/provisional_whitelist.json" \ - --dkg-interval-length=${DKG_INTERVAL_LENGTH} \ - --max-ingress-bytes-per-message=${MAX_INGRESS_BYTES_PER_MESSAGE} \ - --output-nns-public-key="${MEDIA_PATH}/nns-public-key.pem" \ - ${WITH_TESTNET_KEYS:-} \ - ${ALLOW_SPECIFIED_IDS:-} - -SCP_PREFIX="" -NNS_PUBLIC_KEY=$(sed '1d;$d' "${MEDIA_PATH}/nns-public-key.pem" | tr -d '\n\r') -if [ -n "${ANSIBLE_REMOTE_USER:-}" ]; then - SCP_PREFIX="${ANSIBLE_REMOTE_USER}@" -fi - -if [[ "${USE_BOUNDARY_NODES}" == "true" ]]; then - BOUNDARY_OUT="${TMPDIR}/build-boundary.log" - echo "**** Build USB sticks for boundary nodes - ($(dateFromEpoch "$(date '+%s')"))" - COMMAND=$( - cat <&2 "\$(date --rfc-3339=seconds): Copying \$CERT_NAME from server \$HOST" - if scp -B -o "ConnectTimeout 30" -o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no" -r "${SCP_PREFIX}\${HOST}:/etc/letsencrypt/live/\${CERT_NAME}/*" "${BN_MEDIA_PATH}/certs/"; then - SUCCESS=1 - break - fi - done - - if [[ \${SUCCESS} -eq 0 ]]; then - err "failed to find certificate \${CERT_NAME} on any designated server" - exit 1 - fi -fi - -echo >&2 "$(date --rfc-3339=seconds): Running build-deployment.sh" - -"${REPO_ROOT}"/ic-os/boundary-guestos/scripts/build-deployment.sh \ - --env=test \ - --input="${MEDIA_PATH}/${deployment}.json" \ - --output="${BN_MEDIA_PATH}" \ - --certdir="${BN_MEDIA_PATH}/certs" \ - --nns_public_key="${MEDIA_PATH}/nns-public-key.pem" -EOF - ) - echo ${COMMAND} - SHELL="${BASH}" script --quiet --return "${BOUNDARY_OUT}" --command "${COMMAND}" >/dev/null 2>&1 & - BOUNDARY_PID=$! -fi - -echo "-------------------------------------------------------------------------------" - -# In case someone wants to deploy with a locally built disk image the following lines contain -# the necessary commands. - -# echo "**** Remove previous disk image" -# rm -f ${HOME}/disk.* - -# echo "**** Build disk image" -# bazel build //ic-os/guestos/envs/dev/... 
-# cp bazel-bin/ic-os/guestos/envs/dev/disk.img "${MEDIA_PATH}/disk.img" - -# Wait on the destroy to finish -echo "**** Finishing destroy - ($(dateFromEpoch "$(date '+%s')"))" -DESTROY_STATUS=0 -wait ${DESTROY_PID} || DESTROY_STATUS=1 -cat "${DESTROY_OUT}" || true -if [[ ${DESTROY_STATUS} -ne 0 ]]; then - exit $(tail -1 "${DESTROY_OUT}" | sed -re "s/.*=\"([0-9]+).*/\1/") -fi - -# Wait on the boundary node image to finish -if [[ "${USE_BOUNDARY_NODES}" == "true" ]]; then - echo "**** Finishing boundary image - ($(dateFromEpoch "$(date '+%s')")) (${BOUNDARY_OUT})" - BOUNDARY_STATUS=0 - wait ${BOUNDARY_PID} || BOUNDARY_STATUS=1 - cat "${BOUNDARY_OUT}" || true - if [[ ${BOUNDARY_STATUS} -ne 0 ]]; then - exit $(tail -1 "${BOUNDARY_OUT}" | sed -re "s/.*=\"([0-9]+).*/\1/") - fi - - DOMAIN=$(jq <"${MEDIA_PATH}/${deployment}.json" -r '.bn_vars.domain // empty') -fi - -# Wait on the api node image to finish -if [[ "${USE_API_NODES}" == "true" ]]; then - echo "**** Finishing api image - ($(dateFromEpoch "$(date '+%s')"))" - API_STATUS=0 - wait ${API_PID} || API_STATUS=1 - cat "${API_OUT}" || true - if [[ ${API_STATUS} -ne 0 ]]; then - exit $(tail -1 "${API_OUT}" | sed -re "s/.*=\"([0-9]+).*/\1/") - fi -fi - -rm -rf "${TMPDIR}" -echo "-------------------------------------------------------------------------------" -cd "${REPO_ROOT}/testnet/ansible" - -echo "**** Remove eventual monitoring - ($(dateFromEpoch "$(date '+%s')"))" -ansible ic_p8s_service_discovery_destroy.yml - -echo "**** Create new IC instance - ($(dateFromEpoch "$(date '+%s')"))" -ansible icos_network_redeploy.yml -e ic_state="create" - -echo "**** Start VMs - ($(dateFromEpoch "$(date '+%s')"))" -ansible icos_network_redeploy.yml -e ic_state="start" - -echo "**** Install NNS canisters - ($(dateFromEpoch "$(date '+%s')"))" -ansible icos_network_redeploy.yml -e ic_state="install" - -echo "**** Start monitoring - ($(dateFromEpoch "$(date '+%s')"))" -ansible ic_p8s_service_discovery_install.yml -e nns_public_key="${NNS_PUBLIC_KEY}" - -endtime="$(date '+%s')" -echo "**** Completed deployment at $(dateFromEpoch "${endtime}") (start time was $(dateFromEpoch "${starttime}"))" -if [[ -n "${DOMAIN:-}" ]]; then - echo "Access through 'https://${DOMAIN}'" -fi -duration=$((endtime - starttime)) -echo "**** $((duration / 60)) minutes and $((duration % 60)) seconds elapsed." - -trap - EXIT diff --git a/testnet/tools/icos_destroy.sh b/testnet/tools/icos_destroy.sh deleted file mode 100755 index c364ed72c0b4..000000000000 --- a/testnet/tools/icos_destroy.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash - -# Tear-down an IC deployment. -# -# This script takes one positional argument: -# : The deployment referenced in `/testnet/env/${deployment}` -# - -set -eEuo pipefail - -cd "$(dirname "$0")" -REPO_ROOT="$(git rev-parse --show-toplevel)" - -# Collapse (hide) this script's output in the gitlab job log -echo -e "\e[0Ksection_start:$(date +%s):icos_destroy.sh[collapsed=true]\r\e[0KClick here to see details from the testnet destroy script: icos_destroy.sh." 
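-# The escape sequences above use GitLab CI's collapsible-section markers:
-# `section_start:<unix-timestamp>:<name>[collapsed=true]` opens the section and the matching
-# `section_end:<unix-timestamp>:<name>` emitted at the bottom of this script closes it.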
- -function exit_usage() { - if (($# < 1)); then - echo >&2 "Usage: icos_destroy.sh [--hosts-ini ] " - echo >&2 " --hosts-ini Override the default ansible hosts.ini to set different testnet configuration" - - exit 1 - fi -} - -deployment="" -ANSIBLE_ARGS="" -HOSTS_INI_FILENAME="${HOSTS_INI_FILENAME:-hosts.ini}" - -while [ $# -gt 0 ]; do - case "${1}" in - --ansible-args) - if [[ -z "${2:-}" ]]; then exit_usage; fi - ANSIBLE_ARGS="${ANSIBLE_ARGS} ${2:-}" - shift - ;; - --hosts-ini) - if [[ -z "${2:-}" ]]; then exit_usage; fi - HOSTS_INI_FILENAME="${2}" - shift - ;; - -?*) exit_usage ;; - *) deployment="$1" ;; - esac - shift -done - -if [[ -z "$deployment" ]]; then - echo "ERROR: No deployment specified." - exit_usage -fi - -# This environment variable will be picked up by the Ansible inventory generation script. -# No further action is required to use the custom HOSTS_INI file. -export HOSTS_INI_FILENAME -hosts_ini_file_path="$REPO_ROOT/testnet/env/$deployment/$HOSTS_INI_FILENAME" -if [[ ! -f $hosts_ini_file_path ]]; then - echo >&2 "The Ansible inventory file does not exist, aborting: $hosts_ini_file_path" - exit 1 -fi - -echo "Destroying the IC deployment $deployment" - -cd "$REPO_ROOT"/testnet/ansible - -INVENTORY=$REPO_ROOT/testnet/env/$deployment/hosts -ANSIBLE="ansible-playbook -i "$INVENTORY" $ANSIBLE_ARGS " - -$ANSIBLE icos_network_redeploy.yml -e ic_state="destroy" - -$ANSIBLE ic_p8s_service_discovery_destroy.yml - -echo -e "\e[0Ksection_end:$(date +%s):icos_destroy.sh\r\e[0K" diff --git a/testnet/tools/ipv6-calc.py b/testnet/tools/ipv6-calc.py deleted file mode 100755 index 5e00f228f8fb..000000000000 --- a/testnet/tools/ipv6-calc.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python3 -# -# This script will calculate the guests' SLAAC IPv6 addresses for a given -# datacenter in production like deployments. -# -# The SLAAC IPv6 address is generated based on the following input: -# - IPv6 prefix (e.g. "2607:f1d0:10:1") -# - Serial (e.g. "59HBR53") -# - Deployment name (e.g. "mercury") -# -# Example for "mercury" guests running on hosts in NY1. The --serials input -# is a file containing a new line separated list of serial numbers. 
-# -# ./ipv6-calc.py --prefix "2607:f1d0:10:1" --deployment "mercury" --serials ./serials.txt -# -import argparse -import ipaddress -import logging -import re -import sys -from random import Random - - -# TODO: find a way to import directly from ansible -# https://github.com/ansible-collections/community.general/blob/main/plugins/filter/random_mac.py -def ansible_random_mac(value: str, seed: str): - """Take string prefix, and return it completed with random bytes to get a complete 6 bytes MAC address.""" - if not isinstance(value, str): - raise ValueError("Invalid value type (%s) for random_mac (%s)" % (type(value), value)) - - value = value.lower() - mac_items = value.split(":") - - if len(mac_items) > 5: - raise ValueError("Invalid value (%s) for random_mac: 5 colon(:) separated" " items max" % value) - - err = "" - for mac in mac_items: - if not mac: - err += ",empty item" - continue - if not re.match("[a-f0-9]{2}", mac): - err += ",%s not hexa byte" % mac - err = err.strip(",") - - if err: - raise ValueError("Invalid value (%s) for random_mac: %s" % (value, err)) - - r = Random(seed) - # Generate random int between x1000000000 and xFFFFFFFFFF - v = r.randint(68719476736, 1099511627775) - # Select first n chars to complement input prefix - remain = 2 * (6 - len(mac_items)) - rnd = ("%x" % v)[:remain] - return value + re.sub(r"(..)", r":\1", rnd) - - -def mac2eui64(mac, prefix=None): - """Convert a MAC address to a EUI64 address or, with prefix provided, a full IPv6 address.""" - # http://tools.ietf.org/html/rfc4291#section-2.5.1 - eui64 = re.sub(r"[.:-]", "", mac).lower() - eui64 = eui64[0:6] + "fffe" + eui64[6:] - eui64 = hex(int(eui64[0:2], 16) ^ 2)[2:].zfill(2) + eui64[2:] - - if prefix is None: - return ":".join(re.findall(r".{4}", eui64)) - else: - try: - net = ipaddress.ip_network(prefix, strict=False) - euil = int("0x{0}".format(eui64), 16) - return str(net[euil]) - except ValueError: # pylint: disable=bare-except - return - - -def calc_mac_address(serial: str, deployment_name: str, node_index: str): - return ansible_random_mac("52:00", f"{serial} {deployment_name} {node_index}") - - -def ipv6_address_calculate_slaac(ipv6_prefix: str, ipv6_subnet: str, mac_address: str): - """Calculate the same IPv6 address as SLAAC does, based on the interface MAC address.""" - return mac2eui64(mac_address, f"{ipv6_prefix.strip()}::{ipv6_subnet.strip()}") - - -def ipv6_address_compressed(ipv6_address: str): - return ipaddress.IPv6Address(ipv6_address).compressed - - -def main(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "--prefix", - action="store", - help='IPv6 data center prefix (e.g. "2607:f1d0:10:1")', - ) - - parser.add_argument( - "--deployment-name", - action="store", - help='Deployment name (e.g. 
"mercury")', - ) - - parser.add_argument( - "--serials", - action="store", - help="File containing new line separated list of serial numbers", - ) - - parser.add_argument( - "--mac-address", - action="store", - help="Calculate IPv6 using SLAAC, based on this MAC address.", - ) - - parser.add_argument( - "--addr-to-compressed", - action="store", - help="Return the compressed format of the IPv6 address.", - ) - - parser.add_argument("--verbose", "-v", action="store_true", help="Verbose mode") - - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - if args.addr_to_compressed: - print(ipv6_address_compressed(args.addr_to_compressed)) - elif args.prefix: - if args.mac_address: - print(ipv6_address_calculate_slaac(args.prefix, "/64", args.mac_address)) - elif args.serials: - for serial in open(args.serials).read().split(): - mac_address = ansible_random_mac("52:00", f"{serial} {args.deployment_name} 1") - print(ipv6_address_calculate_slaac(args.prefix, "/64", mac_address)) - else: - print("ERROR: either --mac-address or --serials argument is required in addition to --prefix.") - parser.print_usage() - sys.exit(1) - else: - parser.print_usage() - sys.exit(1) - - -if __name__ == "__main__": - main()