From 5f3e7b8bd1d4dc2618dca47671272a1951c52c37 Mon Sep 17 00:00:00 2001 From: cdrappi Date: Wed, 15 Oct 2025 12:28:43 -0400 Subject: [PATCH 1/8] use ruff --- .gitignore | 39 +++ .python-version | 1 + README.md | 15 + azure_devbox/README.md | 43 +++ azure_devbox/devnet.conf | 43 +++ azure_devbox/setup.sh | 164 ++++++++++ datadisk/README.md | 27 ++ datadisk/datadisk.conf | 3 + datadisk/init_disk.sh | 48 +++ datadisk/mount_disk.sh | 40 +++ datadisk/unmount_disk.sh | 22 ++ deploy.sh | 137 ++++++++ deploy_metadata.json | 117 +++++++ devnet/README.md | 83 +++++ devnet/azure/parameters.json | 138 ++++++++ devnet/azure/template.json | 240 ++++++++++++++ devnet/clean-image.sh | 27 ++ devnet/nginx/README.md | 80 +++++ devnet/nginx/devnet.template | 39 +++ devnet/nginx/nginx.conf | 162 ++++++++++ devnet/supervisor/README.md | 22 ++ devnet/supervisor/devnet.conf | 48 +++ main.py | 6 + pyproject.toml | 39 +++ uv.lock | 239 ++++++++++++++ yocto/README.md | 111 +++++++ yocto/__init__.py | 0 yocto/artifact.py | 88 +++++ yocto/azure_common.py | 586 ++++++++++++++++++++++++++++++++++ yocto/azure_deploy.py | 62 ++++ yocto/build.py | 150 +++++++++ yocto/cfg.py | 143 +++++++++ yocto/cli.py | 67 ++++ yocto/conf/__init__.py | 0 yocto/conf/conf.py | 226 +++++++++++++ yocto/conf/logs.py | 8 + yocto/deploy.py | 185 +++++++++++ yocto/genesis_deploy.py | 143 +++++++++ yocto/git.py | 156 +++++++++ yocto/measurements.py | 58 ++++ yocto/metadata.py | 53 +++ yocto/parser.py | 95 ++++++ yocto/paths.py | 57 ++++ yocto/proxy.py | 120 +++++++ yocto/summit_client.py | 72 +++++ yocto/validators.py | 120 +++++++ 46 files changed, 4322 insertions(+) create mode 100644 .gitignore create mode 100644 .python-version create mode 100644 azure_devbox/README.md create mode 100644 azure_devbox/devnet.conf create mode 100644 azure_devbox/setup.sh create mode 100644 datadisk/README.md create mode 100644 datadisk/datadisk.conf create mode 100755 datadisk/init_disk.sh create mode 100755 datadisk/mount_disk.sh create 
mode 100755 datadisk/unmount_disk.sh create mode 100755 deploy.sh create mode 100644 deploy_metadata.json create mode 100644 devnet/README.md create mode 100644 devnet/azure/parameters.json create mode 100644 devnet/azure/template.json create mode 100755 devnet/clean-image.sh create mode 100644 devnet/nginx/README.md create mode 100644 devnet/nginx/devnet.template create mode 100644 devnet/nginx/nginx.conf create mode 100644 devnet/supervisor/README.md create mode 100644 devnet/supervisor/devnet.conf create mode 100644 main.py create mode 100644 pyproject.toml create mode 100644 uv.lock create mode 100644 yocto/README.md create mode 100644 yocto/__init__.py create mode 100644 yocto/artifact.py create mode 100644 yocto/azure_common.py create mode 100755 yocto/azure_deploy.py create mode 100644 yocto/build.py create mode 100644 yocto/cfg.py create mode 100644 yocto/cli.py create mode 100644 yocto/conf/__init__.py create mode 100644 yocto/conf/conf.py create mode 100644 yocto/conf/logs.py create mode 100644 yocto/deploy.py create mode 100755 yocto/genesis_deploy.py create mode 100644 yocto/git.py create mode 100644 yocto/measurements.py create mode 100644 yocto/metadata.py create mode 100644 yocto/parser.py create mode 100644 yocto/paths.py create mode 100644 yocto/proxy.py create mode 100644 yocto/summit_client.py create mode 100644 yocto/validators.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..03c48941 --- /dev/null +++ b/.gitignore @@ -0,0 +1,39 @@ +# Mac OS X +.DS_Store + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Virtual Environment +.env +.venv +env/ +venv/ +ENV/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + + diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..24ee5b1b --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.13 diff --git 
a/README.md b/README.md index 94413a5c..55389e86 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,17 @@ # deploy Seismic's home base for deploying + + +## Install python dependencies + +First install python3 + +Then create a virtualenv: + +`python3 -m venv venv` + +Source it: `source venv/bin/activate` + +Install `uv`: +`pip install uv` + diff --git a/azure_devbox/README.md b/azure_devbox/README.md new file mode 100644 index 00000000..2d9f8c12 --- /dev/null +++ b/azure_devbox/README.md @@ -0,0 +1,43 @@ +# Azure Devbox + +## Setting up the VM +* Go to the `Create a virtual machine` page + +### Basics +* Virtual machine name: [give it a name] +* Region: `EAST US 2` +* Availability Zone: `Zone 2` +* Security type: `Confidential virtual machine` +* Image: `Ubuntu Server 24.04 LTS (Confidential VM) - x64 Gen2`. It will nested under `Ubuntu 24.04 LTS - All Plans including Ubuntu Pro` +* Size: `Standard E8as v6 (8 vcpus, 64 GiB memory)`. Other sizes probably work too, but I haven't tested them. Smaller sizes may not be able to run Reth +* Select inbound ports: [I often pick allow all] + +### Disks +Turn on `Confidential OS disk encryption` + +For OS disk size, the default (30GiB) is usually fine. However, if Reth runs for a long time (or restores from a snapshot with a lot of state), the OS disk size should be large (e.x. 1 TiB) + +### Networking +Turn on `Delete public IP and NIC when VM is deleted` + +### Create +You are ready to click the blue `create` button + + +## Installing Dependencies +`setup.sh` is a script that installs all the necessary dependencies for the devbox. Copy `setup.sh` to the devbox. Then run it: +``` +chmod +x setup.sh +./setup.sh +``` + +### While running the script +Handle interactive prompts: You may need to press enter, type yes, etc. 
+If a purple prompt appears, press escape to accept the default provided + +## Post Installation +- You need to exit and re-enter the shell to get the environment variables to be set, particularly for cargo/rust to work +- On the azure machine, add your ssh pub key to `~/.ssh/authorized_keys` so that you can ssh into the machine + - `../ssh/authorized_keys` has a list of keys for the company if you intend to have others use the box +- Copy over the devnet.conf supervisorctl config to /etc/supervisor/conf.d/devnet.conf - it may need to be adjusted per your use case. e.g actions runners have a differnt conf becuase reth builds in a differnt spot, then reload supervisor so the conf is active +- (Optional) Generate a new ssh key for the machine itself with `ssh-keygen -t ed25519 -C "your_email@example.com"` and add it to github diff --git a/azure_devbox/devnet.conf b/azure_devbox/devnet.conf new file mode 100644 index 00000000..97bbf921 --- /dev/null +++ b/azure_devbox/devnet.conf @@ -0,0 +1,43 @@ +[supervisord] +environment=RUST_BACKTRACE="full", + RUST_LOG="info" + +[program:reth] +command=/home/azureuser/seismic-reth/target/release/seismic-reth + node + -vvvv + --dev + --dev.block-max-transactions 1 + --http + --http.addr 127.0.0.1 + --http.port 8545 + --http.api all + --ws + --ws.addr 127.0.0.1 + --ws.port 8546 + --ws.api all + --authrpc.addr 127.0.0.1 + --authrpc.port 8551 + --port 30303 + --discovery.port 30303 + --metrics 9001 + --datadir /home/azureuser/.reth + --log.file.directory /home/azureuser/.reth/logs +autostart=true +autorestart=true +startsecs=10 +stdout_logfile=/var/log/reth.log +stdout_logfile_maxbytes=0 +stderr_logfile=/var/log/reth.err +stderr_logfile_maxbytes=0 + + +[program:enclave-server] +command=/home/azureuser/enclave/target/release/seismic-enclave-server +autostart=true +autorestart=true +startsecs=10 +stdout_logfile=/var/log/enclave.log +stdout_logfile_maxbytes=0 +stderr_logfile=/var/log/enclave.err +stderr_logfile_maxbytes=0 \ No newline 
at end of file diff --git a/azure_devbox/setup.sh b/azure_devbox/setup.sh new file mode 100644 index 00000000..448aeae3 --- /dev/null +++ b/azure_devbox/setup.sh @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +# Exit immediately if any command fails, and treat unset variables as errors +set -euo pipefail + +echo "Updating package information..." +sudo apt-get -yq update + +# Install Basic Dev Tools +echo "Installing basic dev tools..." +sudo DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + build-essential \ + ocaml \ + ocamlbuild \ + automake \ + autoconf \ + libtool \ + wget \ + python-is-python3 \ + libssl-dev \ + git \ + cmake \ + perl \ + libcurl4-openssl-dev \ + protobuf-compiler \ + libprotobuf-dev \ + debhelper \ + reprepro \ + unzip \ + pkgconf \ + libboost-dev \ + libboost-system-dev \ + libboost-thread-dev \ + lsb-release \ + libsystemd0 \ + clang \ + tpm2-tools \ + libtss2-dev + +# Downgrade to Node.js 18 for compatibility +echo "Installing Compatible Node.js..." +sudo apt purge nodejs npm +wget https://nodejs.org/dist/v18.19.1/node-v18.19.1-linux-x64.tar.xz +sudo tar -xJf node-v18.19.1-linux-x64.tar.xz -C /usr/ --strip-components=1 +rm node-v18.19.1-linux-x64.tar.xz + +# Install SGX SDK +# Followed instructions from https://github.com/intel/SGXDataCenterAttestationPrimitives/tree/main/QuoteGeneration +# Note: The SGX driver is pre-installed by Azure, while the sdk is not. +## You can confirm the driver is installed by running +## 'grep CONFIG_X86_SGX /boot/config-$(uname -r)' and seeing 'CONFIG_X86_SGX=y' +# Note: the latest sgx sdk distro will change over time +## find the latest sdk distro here: https://download.01.org/intel-sgx/latest/linux-latest/distro/ubuntu24.04-server/ +SGX_SDK_BIN="sgx_linux_x64_sdk_2.26.100.0.bin" +echo "Installing SGX SDK..." +if [ ! 
-d "/opt/intel" ]; then + sudo mkdir /opt/intel +fi +cd /opt/intel +sudo wget -O ./"$SGX_SDK_BIN" "https://download.01.org/intel-sgx/latest/linux-latest/distro/ubuntu24.04-server/$SGX_SDK_BIN" +sudo chmod +x "$SGX_SDK_BIN" +echo "Current directory: $(pwd)" +echo "yes" | sudo ./"$SGX_SDK_BIN" +sudo chown "$USER:$USER" "/opt/intel/sgxsdk/environment" +export PKG_CONFIG_PATH=${PKG_CONFIG_PATH:-""} +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-""} +source "/opt/intel/sgxsdk/environment" +sudo rm -f "$SGX_SDK_BIN" +cd $HOME + +# Install SGX Software Packages +# See https://download.01.org/intel-sgx/latest/linux-latest/docs/Intel_SGX_SW_Installation_Guide_for_Linux.pdf +echo "Installing SGX Software Packages..." +wget -O sgx_debian_local_repo.tgz https://download.01.org/intel-sgx/latest/linux-latest/distro/ubuntu24.04-server/sgx_debian_local_repo.tgz +tar xzf sgx_debian_local_repo.tgz +echo 'deb [signed-by=/etc/apt/keyrings/intel-sgx-keyring.asc arch=amd64] https://download.01.org/intel-sgx/sgx_repo/ubuntu jammy main' | sudo tee /etc/apt/sources.list.d/intel-sgx.list +wget -O intel-sgx-deb.key https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key +cat intel-sgx-deb.key | sudo tee /etc/apt/keyrings/intel-sgx-keyring.asc > /dev/null +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq update +# sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install libsgx-epid libsgx-quote-ex libsgx-dcap-ql # necessary for 22.04 but not 24.04 +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install libsgx-dcap-ql-dev libsgx-dcap-quote-verify-dev # missing from installation guide, but necessary on some architectures? +sudo usermod -aG sgx "$USER" +sudo usermod -aG sgx_prv "$USER" +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install libsgx-dcap-default-qpl +rm sgx_debian_local_repo.tgz +rm -rf sgx_debian_local_repo +rm intel-sgx-deb.key + +# Build DCAP Quote Generation +echo "Building DCAP Quote Generation..." 
+git clone --recurse-submodules https://github.com/intel/SGXDataCenterAttestationPrimitives.git +cd SGXDataCenterAttestationPrimitives/QuoteGeneration/ +./download_prebuilt.sh +make +cd $HOME +rm -rf SGXDataCenterAttestationPrimitives + +# Setup qncl file +# based on https://github.com/intel/SGXDataCenterAttestationPrimitives/blob/main/QuoteGeneration/qcnl/linux/sgx_default_qcnl_azure.conf +# need to replace the /etc/sgx_default_qcnl.conf with this json for pccs to work +echo "Setting up qncl file..." +cat << 'EOF' | sudo tee /etc/sgx_default_qcnl.conf >/dev/null +{ + "pccs_url": "https://global.acccache.azure.net/sgx/certification/v4/", + "use_secure_cert": true, + "collateral_service": "https://api.trustedservices.intel.com/sgx/certification/v4/", + "pccs_api_version": "3.1", + "retry_times": 6, + "retry_delay": 5, + "local_pck_url": "http://169.254.169.254/metadata/THIM/sgx/certification/v4/", + "pck_cache_expire_hours": 48, + "verify_collateral_cache_expire_hours": 48, + "custom_request_options" : { + "get_cert" : { + "headers": { + "metadata": "true" + }, + "params": { + "api-version": "2021-07-22-preview" + } + } + } +} +EOF + +# Install Rust +# Note: you need to exit and the shell and re-enter to get the environment variables to be set +echo "Installing Rust..." +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + +# Install Docker +## Add Docker's official GPG key: +echo "Installing Docker..." +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq update +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install ca-certificates curl +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc +## Add the repository to Apt sources: +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. 
/etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq update +## Install Docker Packeges: +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +## set up docker group +sudo usermod -aG docker $USER +newgrp docker + +# Install supervisorctl +echo "Installing supervisorctl..." +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install supervisor + +# Install lz4 for tar compression +echo "Installing lz4..." +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install lz4 + +# restart services to make sure things are as updated as possible +echo "Restarting services..." +sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install needrestart +sudo needrestart + +echo "All done!" diff --git a/datadisk/README.md b/datadisk/README.md new file mode 100644 index 00000000..2903d0a9 --- /dev/null +++ b/datadisk/README.md @@ -0,0 +1,27 @@ +# Data Disk + +This directory contains scripts for setting up and managing a data disk for the enclave. This is particularly useful for saving encrypted snapshots of the enclave state, so that new nodes can snapsync with the existing nodes. + +## Usage + +### Configuration + +The disk configuration is stored in `disk.conf`. You can edit this file to change the disk, partition, and mount point. Scripts expect this file to be in the same directory as the scripts. + +### Initialize the disk + +To initialize the disk, run the `init_disk.sh` script. This will partition the disk, format it as ext4, and create a mount point at `/mnt/datadisk`. + +WARNING: This will erase all existing data on the disk. This script is intended for use on a new disk, not on an existing disk. + +```bash +sudo ./init_disk.sh --force +``` + +### Unmount the disk + +To unmount the disk, run the `unmount_disk.sh` script. 
+ +### Mount the disk + +To mount a previously initialized disk, run the `mount_disk.sh` script. diff --git a/datadisk/datadisk.conf b/datadisk/datadisk.conf new file mode 100644 index 00000000..06993c8a --- /dev/null +++ b/datadisk/datadisk.conf @@ -0,0 +1,3 @@ +DISK="/dev/sdb" +PARTITION="${DISK}1" +MOUNT_POINT="/mnt/datadisk" diff --git a/datadisk/init_disk.sh b/datadisk/init_disk.sh new file mode 100755 index 00000000..89d92bba --- /dev/null +++ b/datadisk/init_disk.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +set -e + +. "$(dirname "$0")/datadisk.conf" + +if [ "$1" != "--force" ]; then + echo "This script will ERASE all data on $DISK by partitioning and formatting it." + echo "If you really want to do this, run:" + echo " $0 --force" + exit 1 +fi + +echo "!!! WARNING: You are about to erase all data on $DISK !!!" +sleep 5 + +# Partition the disk (single primary partition) +# Note: whitespace is important here +echo "Partitioning the disk..." +fdisk "$DISK" <> /etc/fstab + +echo "Done! Disk initialized, formatted, mounted, and set for persistence." diff --git a/datadisk/mount_disk.sh b/datadisk/mount_disk.sh new file mode 100755 index 00000000..4021c61f --- /dev/null +++ b/datadisk/mount_disk.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +set -e + +. "$(dirname "$0")/datadisk.conf" + +# Check that partition exists +if [ ! -b "$PARTITION" ]; then + echo "Error: Partition $PARTITION not found." + echo "First check the disk is attached with lsblk" + echo "Then make sure the disk has been initialized (run init_disk.sh if it's a new disk)." + exit 1 +fi + +# Check that it has a valid ext4 filesystem +if ! blkid "$PARTITION" | grep -q 'TYPE="ext4"'; then + echo "Error: $PARTITION does not have an ext4 filesystem." + echo "Aborting to prevent data corruption. Please inspect the disk manually." + echo "This error may also occur when the script is not run with sudo." + exit 1 +fi + +# Create mount point if it doesn't exist +if [ ! 
-d "$MOUNT_POINT" ]; then + echo "Creating mount directory at $MOUNT_POINT..." + mkdir "$MOUNT_POINT" +fi + +# Mount the partition +echo "Mounting $PARTITION to $MOUNT_POINT..." +mount "$PARTITION" "$MOUNT_POINT" + +# Add to fstab if not already present +UUID=$(blkid -s UUID -o value "$PARTITION") +if ! grep -q "$MOUNT_POINT" /etc/fstab; then + echo "UUID=$UUID $MOUNT_POINT ext4 defaults,nofail 0 2" >> /etc/fstab + echo "Added to /etc/fstab for persistence." +fi + +echo "Done! Disk mounted at $MOUNT_POINT." diff --git a/datadisk/unmount_disk.sh b/datadisk/unmount_disk.sh new file mode 100755 index 00000000..0d1cc6e2 --- /dev/null +++ b/datadisk/unmount_disk.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# Exit if any command fails +set -e + +. "$(dirname "$0")/datadisk.conf" + +# Unmount the disk +echo "Unmounting $PARTITION from $MOUNT_POINT..." +umount "$MOUNT_POINT" + +# Remove fstab entry +echo "Removing entry from /etc/fstab..." +sed -i "\|$MOUNT_POINT|d" /etc/fstab + +# Remove mount directory +if [ -d "$MOUNT_POINT" ]; then + echo "Removing mount directory $MOUNT_POINT..." + rmdir "$MOUNT_POINT" +fi + +echo "Done! Disk $PARTITION unmounted and fstab cleaned." diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 00000000..9ef8c41b --- /dev/null +++ b/deploy.sh @@ -0,0 +1,137 @@ +#!/bin/bash +set -e + +# Check if 'az' exists +if ! command -v az &> /dev/null; then + echo "Error: 'az' command not found. Please install the Azure CLI." + exit 1 +fi + +# Check if 'azcopy' exists +if ! command -v azcopy &> /dev/null; then + echo "Error: 'azcopy' command not found. Please install AzCopy." + exit 1 +fi + +# Check if 'jq' exists +if ! command -v jq &> /dev/null; then + echo "Error: 'jq' command not found. Please install jq." 
+ exit 1 +fi + +# example usage: ./deploy.sh /path/to/disk.vhd resource_and_disk_and_vm_name region Standard_EC4eds_v5 dev_server_ip /path/to/config.json +# ./deploy.sh /home/azureuser/yocto-manifests/reproducible-build/artifacts/cvm-image-azure-tdx.rootfs-20241105164959.wic.vhd seismic-yocto-202 eastus2 Standard_EC4es_v5 74.249.93.93 /home/azureuser/yocto-manifests/config.json + +DISK_PATH=$1 +VM_NAME=$2 +REGION=$3 +VM_SIZE=$4 +SOURCE_IP=$5 + +RESOURCE_GROUP=${VM_NAME} +DISK_NAME=${VM_NAME} +NSG=${VM_NAME} + +DISK_SIZE=`wc -c < ${DISK_PATH}` + +RECORD_NAME=${6:-"yocto-0"} +DOMAIN_RESOURCE_GROUP=${7:-"devnet2"} +DOMAIN_NAME=${8:-"seismicdev.net"} +CERTBOT_EMAIL=${9:-"c@seismic.systems"} + +# Create a temporary cloud-init.yaml file +USER_DATA_FILE=$(mktemp --suffix=.yaml) +echo "Created temporary --user-data file: $USER_DATA_FILE" + +# Write cloud-init configuration to the temporary file +cat > "$USER_DATA_FILE" << EOF +CERTBOT_EMAIL="$CERTBOT_EMAIL" +RECORD_NAME="$RECORD_NAME" +DOMAIN="$DOMAIN_NAME" +EOF + +cat $USER_DATA_FILE + +echo "creating resource group" +az group create --name ${DISK_NAME} --location ${REGION} + +# TODO: write IP addresses to genesis +echo "creating static public ip address" +IP_ADDRESS=$(az network public-ip create --resource-group ${RESOURCE_GROUP} --name ${RESOURCE_GROUP} --version IPv4 --sku standard --zone 1 2 3 --query "publicIp.ipAddress" -o tsv) + +PREVIOUS_IPS=$(az network dns record-set a list --resource-group ${DOMAIN_RESOURCE_GROUP} --zone-name ${DOMAIN_NAME} --recordsetnamesuffix ${RECORD_NAME} --query "[].ARecords[].ipv4Address" -o tsv) + +for prev_ip in $PREVIOUS_IPS; do + echo "Removing ${prev_ip} from ${RECORD_NAME}.${DOMAIN_NAME} record set" + az network dns record-set a remove-record --resource-group ${DOMAIN_RESOURCE_GROUP} --zone-name ${DOMAIN_NAME} --record-set-name ${RECORD_NAME} --ipv4-address ${prev_ip} --keep-empty-record-set +done + +echo "Mapping ${RECORD_NAME}.${DOMAIN_NAME} to ${IP_ADDRESS}" +az network dns 
record-set a add-record --ttl 300 --resource-group ${DOMAIN_RESOURCE_GROUP} --zone-name ${DOMAIN_NAME} --record-set-name ${RECORD_NAME} --ipv4-address ${IP_ADDRESS} + +echo "creating disk" +az disk create -n ${DISK_NAME} -g ${RESOURCE_GROUP} -l ${REGION} --os-type Linux --upload-type Upload --upload-size-bytes ${DISK_SIZE} --sku standard_lrs --security-type ConfidentialVM_NonPersistedTPM --hyper-v-generation V2 + +echo "granting access" +SAS_REQ=`az disk grant-access -n ${DISK_NAME} -g ${RESOURCE_GROUP} --access-level Write --duration-in-seconds 86400` +echo ${SAS_REQ} +SAS_URI=`echo ${SAS_REQ} | jq -r '.accessSas'` + +echo "copying disk" +azcopy copy ${DISK_PATH} ${SAS_URI} --blob-type PageBlob + +echo "revoking access" +az disk revoke-access -n ${DISK_NAME} -g ${RESOURCE_GROUP} + +echo "creating network security group" +az network nsg create --name ${NSG} --resource-group ${RESOURCE_GROUP} --location ${REGION} + +echo "creating HTTP rule (TCP 80)" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name AllowAnyHTTPInbound --priority 101 --destination-port-ranges 80 --access Allow --protocol Tcp + +echo "creating HTTPS rule (TCP 443)" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name AllowAnyHTTPSInbound --priority 102 --destination-port-ranges 443 --access Allow --protocol Tcp + +echo "creating ssh rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name AllowSSH --priority 100 --source-address-prefixes ${SOURCE_IP} --destination-port-ranges 22 --access Allow --protocol Tcp + +echo "creating TCP 8545 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name TCP8545 --priority 110 --destination-port-ranges 8545 --access Allow --protocol Tcp + +echo "creating TCP 8551 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name TCP8551 --priority 111 --destination-port-ranges 8551 --access 
Allow --protocol Tcp + +echo "creating TCP 8645 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name TCP8645 --priority 112 --destination-port-ranges 8645 --access Allow --protocol Tcp + +echo "creating TCP 8745 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name TCP8745 --priority 113 --destination-port-ranges 8745 --access Allow --protocol Tcp + +echo "creating Any 30303 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name ANY30303 --priority 114 --destination-port-ranges 30303 --access Allow + +echo "creating TCP 7878 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name TCP7878 --priority 115 --destination-port-ranges 7878 --access Allow --protocol Tcp + +echo "creating TCP 7936 rule" +az network nsg rule create --nsg-name ${NSG} --resource-group ${RESOURCE_GROUP} --name TCP7936 --priority 116 --destination-port-ranges 7936 --access Allow --protocol Tcp + +echo "booting vm..." + +az vm create \ + --name ${VM_NAME} \ + --size ${VM_SIZE} \ + --resource-group ${RESOURCE_GROUP} \ + --attach-os-disk ${DISK_NAME} \ + --security-type ConfidentialVM \ + --enable-vtpm true \ + --enable-secure-boot false \ + --os-disk-security-encryption-type NonPersistedTPM \ + --os-type Linux \ + --nsg ${NSG} \ + --public-ip-address ${RESOURCE_GROUP} \ + --user-data "$USER_DATA_FILE" + +rm -f "$USER_DATA_FILE" +echo "Deleted temporary --user-data file: $USER_DATA_FILE" +echo "Deployment completed." 
diff --git a/deploy_metadata.json b/deploy_metadata.json new file mode 100644 index 00000000..22d0313a --- /dev/null +++ b/deploy_metadata.json @@ -0,0 +1,117 @@ +{ + "resources": { + "yocto-genesis-3": { + "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", + "public_ip": "4.152.153.129", + "domain": { + "url": "https://summit-genesis-3.seismictest.net", + "record": "summit-genesis-3", + "name": "seismictest.net", + "resource_group": "yocto-testnet" + }, + "vm": { + "resourceGroup": "yocto-testnet", + "name": "yocto-genesis-3", + "nsgName": "yocto-genesis-3", + "location": "eastus2", + "size": "Standard_EC4es_v5" + } + }, + "yocto-genesis-1": { + "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", + "public_ip": "135.222.187.137", + "domain": { + "url": "https://summit-genesis-1.seismictest.net", + "record": "summit-genesis-1", + "name": "seismictest.net", + "resource_group": "yocto-testnet" + }, + "vm": { + "resourceGroup": "yocto-testnet", + "name": "yocto-genesis-1", + "nsgName": "yocto-genesis-1", + "location": "eastus2", + "size": "Standard_EC4es_v5" + } + }, + "yocto-genesis-4": { + "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", + "public_ip": "172.175.19.226", + "domain": { + "url": "https://summit-genesis-4.seismictest.net", + "record": "summit-genesis-4", + "name": "seismictest.net", + "resource_group": "yocto-testnet" + }, + "vm": { + "resourceGroup": "yocto-testnet", + "name": "yocto-genesis-4", + "nsgName": "yocto-genesis-4", + "location": "eastus2", + "size": "Standard_EC4es_v5" + } + }, + "yocto-genesis-2": { + "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", + "public_ip": "4.152.152.179", + "domain": { + "url": "https://summit-genesis-2.seismictest.net", + "record": "summit-genesis-2", + "name": "seismictest.net", + "resource_group": "yocto-testnet" + }, + "vm": { + "resourceGroup": "yocto-testnet", + "name": "yocto-genesis-2", + "nsgName": "yocto-genesis-2", + "location": "eastus2", + 
"size": "Standard_EC4es_v5" + } + } + }, + "artifacts": { + "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd": { + "repos": { + "enclave": { + "branch": "seismic", + "commit": "b8b7ab3049b42c7de38b2641d28a30c711663d68" + }, + "sreth": { + "branch": "seismic", + "commit": "2072d1c68c3b8b32caa0491a9c982dedbfd1dc46" + }, + "summit": { + "branch": "main", + "commit": "a89f2f7e36f46b024fea43e4b3328c22eab921be" + } + }, + "image": { + "measurement_id": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", + "attestation_type": "azure-tdx", + "measurements": { + "11": { + "expected": "3d200a691a41f2efe24b9e550a30ef3bc717520ddef843e2f6183c014fcf3077" + }, + "12": { + "expected": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "13": { + "expected": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "15": { + "expected": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "4": { + "expected": "48cbc8cdda567abb0c3d5bdd573eaf817c24800bc81ba3c65c26425ec44bbdd1" + }, + "8": { + "expected": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "9": { + "expected": "dba2475a62abe5c70c9d7ec80e02061723f69c250431071f33f0a2d6124bb660" + } + } + } + } + } +} \ No newline at end of file diff --git a/devnet/README.md b/devnet/README.md new file mode 100644 index 00000000..8e0a3d28 --- /dev/null +++ b/devnet/README.md @@ -0,0 +1,83 @@ +# Devnet tools + +This repository contains tools for deploying infrastructure for the Seismic devnet. + +## Machine specs + +- Resource group: devnet-1 +- Image: devnet_gallery/devnet-image/1.0.2 +- Size: Standard EC4es v5 + - vCPUs: 4 + - RAM: 32GiB +- Availability zone: Self-selected, Zone 2 +- Key pair: make sure you use a keypair that you have access to. 
If you don't, you'll have to delete the VM and start over +- OS disk size: image default (256GB) + - OS disk type: Premium SSD LRS + - Use managed disks: Yes + - Delete OS disk with VM: disabled + - Data disks: 1 + - Delete data disk with VM: 0 disks enabled + - Ephemeral OS disk: No +- Security: + - Security type: Confidential virtual machines + - Enable secure boot: Yes + - Enable TPM: Yes + - Integrity monitoring: No +- Virtual machine name: node-${number} +- Networking: + - Virtual network: devnet-1-vnet + - Subnet: default + - Security group: Standard (SSH, HTTP, HTTPS) + - Static IP: node-${number}-ip + - Delete public IP & NIC when VM is deleted: Enabled + - Domain: node-${number}.seismicdev.net + - Domain resource group: devnet2 + - Domain name: seismicdev.net + - Record name: node-${number} + - Accelerated networking: off +- Username: azureuser +- Azure Spot: No + +## Installed on the image +- Code: + - seismic-reth + - enclave +- Tooling + - various SGX/TDX libraries + - cargo + - supervisor + - nginx + - certbot + - starship + - fzf + +## Setup machine from image + +- First, deploy image using specs above using Azure Portal. Note the image's Public IP address (`$VM_PUBLIC_IP`) + +- Decide what you want to call the node's domain record (`$RECORD_NAME`). For example, `node-0`. + +- On your local machine, add domain record to Azure DNS: +```sh +RECORD_NAME="" VM_PUBLIC_IP="" az network dns record-set a add-record --ttl 300 --resource-group devnet2 --zone-name seismicdev.net --record-set-name $RECORD_NAME --ipv4-address $VM_PUBLIC_IP +``` + +- Run this to set `server_name` in Nginx conf. Make sure to set the `SERVER_NAME` variable to the domain you want to use. +```sh +SERVER_NAME="node-0.seismicdev.net" sudo -E sh -c "envsubst '\$SERVER_NAME' < /etc/nginx/sites-enabled/devnet.template > /etc/nginx/sites-enabled/default.conf" +``` + +- Run this to set up SSL. Make sure to set the `--domain` and `--email` variables to the domain and email you want to use. 
+```sh +sudo certbot --nginx --non-interactive --renew-by-default --agree-tos --email "c@seismic.systems" --domain node-0.seismicdev.net +``` + +Restart Nginx: +```sh +sudo systemctl restart nginx +``` + +For good measure, reboot the machine: +```sh +sudo reboot +``` diff --git a/devnet/azure/parameters.json b/devnet/azure/parameters.json new file mode 100644 index 00000000..3de81d8b --- /dev/null +++ b/devnet/azure/parameters.json @@ -0,0 +1,138 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "value": "eastus2" + }, + "networkInterfaceName2": { + "value": "node-1832_z2" + }, + "networkSecurityGroupName": { + "value": "node1nsg261" + }, + "networkSecurityGroupRules": { + "value": [ + { + "name": "SSH", + "properties": { + "priority": 300, + "protocol": "TCP", + "access": "Allow", + "direction": "Inbound", + "sourceAddressPrefix": "*", + "sourcePortRange": "*", + "destinationAddressPrefix": "*", + "destinationPortRange": "22" + } + }, + { + "name": "HTTP", + "properties": { + "priority": 320, + "protocol": "TCP", + "access": "Allow", + "direction": "Inbound", + "sourceAddressPrefix": "*", + "sourcePortRange": "*", + "destinationAddressPrefix": "*", + "destinationPortRange": "80" + } + }, + { + "name": "HTTPS", + "properties": { + "priority": 340, + "protocol": "TCP", + "access": "Allow", + "direction": "Inbound", + "sourceAddressPrefix": "*", + "sourcePortRange": "*", + "destinationAddressPrefix": "*", + "destinationPortRange": "443" + } + } + ] + }, + "subnetName": { + "value": "default" + }, + "virtualNetworkId": { + "value": "/subscriptions/214887ea-51a7-4ca7-9cec-29b3cf3d311c/resourceGroups/devnet-1/providers/Microsoft.Network/virtualNetworks/devnet-1-vnet" + }, + "publicIpAddressName2": { + "value": "node-1-ip" + }, + "publicIpAddressType": { + "value": "Static" + }, + "publicIpAddressSku": { + "value": "Standard" + }, + "pipDeleteOption": 
{ + "value": "Delete" + }, + "virtualMachineName": { + "value": "node-1" + }, + "virtualMachineName2": { + "value": "node-1" + }, + "virtualMachineComputerName2": { + "value": "node-1" + }, + "virtualMachineRG": { + "value": "devnet-1" + }, + "osDiskType": { + "value": "Premium_LRS" + }, + "osDiskDeleteOption": { + "value": "Detach" + }, + "dataDisks2": { + "value": [ + { + "lun": 0, + "createOption": "fromImage", + "deleteOption": "Detach", + "caching": "None", + "writeAcceleratorEnabled": false, + "id": null, + "name": null, + "storageAccountType": null, + "diskSizeGB": null, + "tier": null, + "diskEncryptionSet": null + } + ] + }, + "virtualMachineSize": { + "value": "Standard_EC4es_v5" + }, + "nicDeleteOption": { + "value": "Delete" + }, + "hibernationEnabled": { + "value": false + }, + "adminUsername": { + "value": "azureuser" + }, + "adminPublicKey": { + "value": null + }, + "securityType": { + "value": "ConfidentialVM" + }, + "secureBoot": { + "value": true + }, + "vTPM": { + "value": true + }, + "virtualMachine2Zone": { + "value": "2" + } + } +} diff --git a/devnet/azure/template.json b/devnet/azure/template.json new file mode 100644 index 00000000..1e0d16a4 --- /dev/null +++ b/devnet/azure/template.json @@ -0,0 +1,240 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "string" + }, + "networkInterfaceName2": { + "type": "string" + }, + "networkSecurityGroupName": { + "type": "string" + }, + "networkSecurityGroupRules": { + "type": "array" + }, + "subnetName": { + "type": "string" + }, + "virtualNetworkId": { + "type": "string" + }, + "publicIpAddressName2": { + "type": "string" + }, + "publicIpAddressType": { + "type": "string" + }, + "publicIpAddressSku": { + "type": "string" + }, + "pipDeleteOption": { + "type": "string" + }, + "virtualMachineName": { + "type": "string" + }, + "virtualMachineName2": { + "type": "string" + }, + 
"virtualMachineComputerName2": { + "type": "string" + }, + "virtualMachineRG": { + "type": "string" + }, + "osDiskType": { + "type": "string" + }, + "osDiskDeleteOption": { + "type": "string" + }, + "dataDisks2": { + "type": "array" + }, + "virtualMachineSize": { + "type": "string" + }, + "nicDeleteOption": { + "type": "string" + }, + "hibernationEnabled": { + "type": "bool" + }, + "adminUsername": { + "type": "string" + }, + "adminPublicKey": { + "type": "secureString" + }, + "securityType": { + "type": "string" + }, + "secureBoot": { + "type": "bool" + }, + "vTPM": { + "type": "bool" + }, + "virtualMachine2Zone": { + "type": "string" + } + }, + "variables": { + "nsgId": "[resourceId(resourceGroup().name, 'Microsoft.Network/networkSecurityGroups', parameters('networkSecurityGroupName'))]", + "vnetId": "[parameters('virtualNetworkId')]", + "vnetName": "[last(split(variables('vnetId'), '/'))]", + "subnetRef": "[concat(variables('vnetId'), '/subnets/', parameters('subnetName'))]" + }, + "resources": [ + { + "name": "[parameters('networkInterfaceName2')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2022-11-01", + "location": "[parameters('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/networkSecurityGroups/', parameters('networkSecurityGroupName'))]", + "[concat('Microsoft.Network/publicIpAddresses/', parameters('publicIpAddressName2'))]" + ], + "properties": { + "ipConfigurations": [ + { + "name": "ipconfig1", + "properties": { + "subnet": { + "id": "[variables('subnetRef')]" + }, + "privateIPAllocationMethod": "Dynamic", + "publicIpAddress": { + "id": "[resourceId(resourceGroup().name, 'Microsoft.Network/publicIpAddresses', parameters('publicIpAddressName2'))]", + "properties": { + "deleteOption": "[parameters('pipDeleteOption')]" + } + } + } + } + ], + "networkSecurityGroup": { + "id": "[variables('nsgId')]" + } + } + }, + { + "name": "[parameters('networkSecurityGroupName')]", + "type": 
"Microsoft.Network/networkSecurityGroups", + "apiVersion": "2020-05-01", + "location": "[parameters('location')]", + "properties": { + "securityRules": "[parameters('networkSecurityGroupRules')]" + } + }, + { + "name": "[parameters('publicIpAddressName2')]", + "type": "Microsoft.Network/publicIpAddresses", + "apiVersion": "2020-08-01", + "location": "[parameters('location')]", + "properties": { + "publicIpAllocationMethod": "[parameters('publicIpAddressType')]" + }, + "sku": { + "name": "[parameters('publicIpAddressSku')]" + }, + "zones": [ + "[parameters('virtualMachine2Zone')]" + ] + }, + { + "name": "[parameters('virtualMachineName2')]", + "type": "Microsoft.Compute/virtualMachines", + "apiVersion": "2024-03-01", + "location": "[parameters('location')]", + "dependsOn": [ + "[concat('Microsoft.Network/networkInterfaces/', parameters('networkInterfaceName2'))]" + ], + "properties": { + "hardwareProfile": { + "vmSize": "[parameters('virtualMachineSize')]" + }, + "storageProfile": { + "osDisk": { + "createOption": "fromImage", + "managedDisk": { + "storageAccountType": "[parameters('osDiskType')]", + "securityProfile": { + "securityEncryptionType": "DiskWithVMGuestState" + } + }, + "deleteOption": "[parameters('osDiskDeleteOption')]" + }, + "imageReference": { + "id": "/subscriptions/214887ea-51a7-4ca7-9cec-29b3cf3d311c/resourceGroups/devnet-1/providers/Microsoft.Compute/galleries/devnet_gallery/images/devnet-image/versions/1.0.2" + }, + "copy": [ + { + "name": "dataDisks", + "count": "[length(parameters('dataDisks2'))]", + "input": { + "name": "[parameters('dataDisks2')[copyIndex('dataDisks')].name]", + "lun": "[parameters('dataDisks2')[copyIndex('dataDisks')].lun]", + "createOption": "[parameters('dataDisks2')[copyIndex('dataDisks')].createOption]", + "caching": "[parameters('dataDisks2')[copyIndex('dataDisks')].caching]", + "diskSizeGB": "[parameters('dataDisks2')[copyIndex('dataDisks')].diskSizeGB]", + "managedDisk": { + "storageAccountType": 
"[parameters('dataDisks2')[copyIndex('dataDisks')].storageAccountType]", + "id": "[coalesce(parameters('dataDisks2')[copyIndex('dataDisks')].id, if(equals(parameters('dataDisks2')[copyIndex('dataDisks')].name, json('null')), json('null'), resourceId('Microsoft.Compute/disks', parameters('dataDisks2')[copyIndex('dataDisks')].name)))]" + }, + "deleteOption": "[parameters('dataDisks2')[copyIndex('dataDisks')].deleteOption]", + "writeAcceleratorEnabled": "[parameters('dataDisks2')[copyIndex('dataDisks')].writeAcceleratorEnabled]" + } + } + ] + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "[resourceId('Microsoft.Network/networkInterfaces', parameters('networkInterfaceName2'))]", + "properties": { + "deleteOption": "[parameters('nicDeleteOption')]" + } + } + ] + }, + "securityProfile": { + "securityType": "[parameters('securityType')]", + "uefiSettings": { + "secureBootEnabled": "[parameters('secureBoot')]", + "vTpmEnabled": "[parameters('vTPM')]" + } + }, + "additionalCapabilities": { + "hibernationEnabled": false + }, + "osProfile": { + "computerName": "[parameters('virtualMachineComputerName2')]", + "adminUsername": "[parameters('adminUsername')]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "path": "[concat('/home/', parameters('adminUsername'), '/.ssh/authorized_keys')]", + "keyData": "[parameters('adminPublicKey')]" + } + ] + } + } + } + }, + "zones": [ + "[parameters('virtualMachine2Zone')]" + ] + } + ], + "outputs": { + "adminUsername": { + "type": "string", + "value": "[parameters('adminUsername')]" + } + } +} diff --git a/devnet/clean-image.sh b/devnet/clean-image.sh new file mode 100755 index 00000000..8b9a6984 --- /dev/null +++ b/devnet/clean-image.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +# stop all processes first +sudo supervisorctl stop all + +# reth data +sudo rm -rf ~/.reth + +# temp files +sudo rm -rf /var/tmp/* +sudo rm -rf /tmp/* + +# known hosts +sudo rm -rf ~/.ssh/known_hosts + +# 
nginx / letsencrypt +sudo rm -rf /etc/letsencrypt/* +sudo rm -rf /etc/nginx/sites-enabled/default.conf + +# logs +sudo find /var/log -type f -exec truncate --size=0 {} \; + +# cloud +sudo cloud-init clean -l -s --machine-id -c all + +# finally, bash history +rm -f ~/.bash_history diff --git a/devnet/nginx/README.md b/devnet/nginx/README.md new file mode 100644 index 00000000..ada5f655 --- /dev/null +++ b/devnet/nginx/README.md @@ -0,0 +1,80 @@ +# Nginx + +## Systemctl +Nginx is managed on our machine by `systemctl`. Some useful commands: + +Start it: +`sudo systemctl start nginx` + +Stop it: +`sudo systemctl stop nginx` + +Restart it: +`sudo systemctl restart nginx` + +Reload it (does not restart service and only apply changes in conf): +`sudo systemctl reload nginx` + +See if it's healthy: +`sudo systemctl status nginx` + +## Configuration +The conf files in `deploy/nginx/` tell Nginx what to do when requests come into the machine + +First replace the file at `/etc/nginx/nginx.conf` with the one in `deploy/nginx/nginx.conf`. + +Then copy `devnet.template` to `/etc/nginx/sites-enabled/devnet.template`. Then set the SERVER_NAME variable in the command below and run it: + +```sh +SERVER_NAME="node-0.seismicdev.net" sudo -E sh -c "envsubst '\$SERVER_NAME' < /etc/nginx/sites-enabled/devnet.template > /etc/nginx/sites-enabled/default" +``` + +## Editing the configuration +First make changes manually on the machine + +You can test that they are valid with: +`sudo nginx -t` + +To apply the changes: +`sudo systemctl reload nginx` + +Then copy the changes over to the `devnet.template` in this repo and make a PR. + +## SSL + +### Renewing SSL certificate + +We have to do this every 90 days. Simply run: +`sudo certbot renew` + +### Setting up SSL (One-time) + +After you have successfully set up the domain, you can set up SSL. We are running Nginx on Ubuntu. 
First install Nginx on the machine: + +```sh +sudo apt install nginx +``` + +Then follow the instructions for [Certbot](https://certbot.eff.org/instructions?ws=nginx&os=ubuntufocal). Copied here for convenience: + +Install certbot: +```sh +sudo snap install --classic certbot +``` + +Perpare certbot command: +```sh +sudo ln -s /snap/bin/certbot /usr/bin/certbot +``` + +Run certbot itself: + +Either: +```sh +sudo certbot --nginx +``` + +Or make sure to set the --domain and --email in the below command and run it: +```sh +sudo certbot --nginx --non-interactive --renew-by-default --agree-tos --email "c@seismic.systems" --domain node-0.seismicdev.net +``` diff --git a/devnet/nginx/devnet.template b/devnet/nginx/devnet.template new file mode 100644 index 00000000..863c8a52 --- /dev/null +++ b/devnet/nginx/devnet.template @@ -0,0 +1,39 @@ +server { + root /var/www/html; + + index index.html index.htm index.nginx-debian.html; + + server_name $SERVER_NAME; + + location / { + # First attempt to serve request as file, then + # as directory, then fall back to displaying a 404. 
+ try_files $uri $uri/ =404; + } + + # HTTPS RPC API + location /rpc { + proxy_pass http://localhost:8545/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + } + + # WS RPC API + location /ws { + proxy_pass http://localhost:8546/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + } +} diff --git a/devnet/nginx/nginx.conf b/devnet/nginx/nginx.conf new file mode 100644 index 00000000..9e030f1f --- /dev/null +++ b/devnet/nginx/nginx.conf @@ -0,0 +1,162 @@ +# Inspired by https://gist.github.com/v0lkan/90fcb83c86918732b894 +user www-data; + +pid /run/nginx.pid; + +worker_processes auto; +# Should be consistent with the limits in these files: +# /etc/security/limits.conf +# www-data soft nofile 65535 +# www-data hard nofile 65535 +# /etc/default/nginx +# ULIMIT="-n 65535" +worker_rlimit_nofile 65535; + +error_log /var/log/nginx/error.log; + +events { + # Determines how many clients will be served by each worker process. + # (Max clients = worker_connections * worker_processes) + # Should be equal to `ulimit -n / worker_processes` + worker_connections 8191; + + # Let each process accept multiple connections. Unclear if we should use this or not. + # May flood worker_connections, if that option is set too low. + multi_accept on; + + # Everywhere I read says it's better to use epoll on Linux + use epoll; +} + +http { + + ## + # Buffer and Request Limits + ## + # Allocates buffer size to store the client request body. 
+ # This helps manage memory and can mitigate certain types of + # denial-of-service attacks by limiting how much data is buffered. + client_body_buffer_size 10k; + + # Sets aside a 1 KB buffer for reading the client request headers. + # This buffer must be large enough to hold typical header sizes. + client_header_buffer_size 1k; + + # Specifies the maximum allowed size of a client request body. + # Requests exceeding this size are rejected, + # which can protect our server from excessively large uploads. + client_max_body_size 8m; + + # Provides two buffers of 1 KB each for handling larger-than-normal client headers. + # If headers exceed the size defined by client_header_buffer_size, + # these additional buffers are used. + large_client_header_buffers 2 1k; + + + ## + # Timeouts + ## + # Default is 60 for both of these. Server responds with 408 if timeout + client_body_timeout 32; + client_header_timeout 32; + + # Defines the idle timeout for keep-alive (persistent) connections. + # If no new request arrives within this amount of time, Nginx will close the connection. + # Default is 65 seconds + keepalive_timeout 90; + + # Close the connection if the client doesn't receive any data for this long + send_timeout 120; + + # Instructs Nginx to forcibly close connections that have timed out. + # This frees up socket resources and helps prevent lingering, dead connections. + reset_timedout_connection on; + + ## + # File Descriptor Caching + # + # NOTE: Unclear how relevant this is for us given reth isn't serving files + ## + # How many file descriptors Nginx will cache, and how long to keep them in cache + open_file_cache max=200000 inactive=20s; + + # Tells Nginx how often it should check cached file descriptors + # This helps ensure the cache reflects the current state of the filesystem. + open_file_cache_valid 30s; + + # How many times a file must be accessed before it’s cached. + # This prevents rarely used files from filling the cache unnecessarily. 
+ open_file_cache_min_uses 2; + + # Caches errors encountered when opening files (e.g., file not found) + # to prevent repeated, costly file system lookups for the same error. + open_file_cache_errors on; + + ## + # Data Transfer Optimizations + ## + # Enables the use of the sendfile system call, + # which allows data to be sent directly from disk to the network + # without being copied into user space. + # This improves performance and reduces CPU usage. + sendfile on; + + # Optimizes packet sending by attempting to send + # the HTTP response header and file content in one packet, + # rather than in multiple smaller packets. + # This can improve throughput, especially when serving static files. + tcp_nopush on; + + # Don't buffer data-sends (disable Nagle algorithm). + # Good for sending frequent small bursts of data in real time. + tcp_nodelay on; + + ## + # MIME Types + ## + # Increases the maximum size of the hash table used for MIME type lookups. + # This is useful if you have many MIME types configured, + # ensuring efficient mapping between file extensions and their corresponding types. + types_hash_max_size 2048; + + # Include the mime.types file, which contains the mappings of file extensions to MIME types. + include /etc/nginx/mime.types; + + # Default MIME type for files that don't have a specific type. + default_type application/octet-stream; + + ## + # SSL Settings + ## + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE + ssl_prefer_server_ciphers on; + + ## + # Logging Settings + ## + access_log /var/log/nginx/access.log; + + ## + # Gzip Settings + ## + gzip on; + # Old versions of IE don't handle compressed content well + gzip_disable "MSIE [1-6]\."; + + # Number between 1-9, default 6. 
+ # Higher levels compress data more efficiently but require more CPU + gzip_comp_level 9; + + # Only compress responses that are at least 500 bytes long + gzip_min_length 500; + + # Only compress these MIME types; these settings are the default + gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; + + ## + # Virtual Host Configs + ## + # Our devnet routing is in sites-enabled/default + # The template file (before certbot edits it) lives in sites-enabled/devnet.template + include /etc/nginx/sites-enabled/*; +} diff --git a/devnet/supervisor/README.md b/devnet/supervisor/README.md new file mode 100644 index 00000000..9e123f38 --- /dev/null +++ b/devnet/supervisor/README.md @@ -0,0 +1,22 @@ +# Supervisor + +## Install + +```sh +sudo apt-get install -y supervisor +``` + +## Configuration + +Copy `devnet/supervisor/devnet.conf` to `/etc/supervisor/conf.d/devnet.conf` +Then run `sudo supervisorctl reload` + +## Build + +Build the relevant `--release` binaries. 
You can see which binaries are expected with `sudo supervisorctl status` + +## Management + +```sh +sudo supervisorctl start all +``` diff --git a/devnet/supervisor/devnet.conf b/devnet/supervisor/devnet.conf new file mode 100644 index 00000000..eb5d5740 --- /dev/null +++ b/devnet/supervisor/devnet.conf @@ -0,0 +1,48 @@ +[supervisord] +environment=RUST_BACKTRACE="full", + RUST_LOG="info" + +[program:enclave] +command=/home/azureuser/enclave/target/release/seismic-enclave-server +autostart=true +autorestart=true +startsecs=3 +priority=100 +stdout_logfile=/var/log/enclave.log +stdout_logfile_maxbytes=0 +stderr_logfile=/var/log/enclave.err +stderr_logfile_maxbytes=0 + +[program:reth] +command=/home/azureuser/seismic-reth/target/release/seismic-reth + node -vvv + --dev + --chain dev-old + --dev.block-time 2s + --disable-discovery + --http + --http.addr 127.0.0.1 + --http.port 8545 + --http.api all + --http.corsdomain * + --ws + --ws.addr 127.0.0.1 + --ws.port 8546 + --ws.api all + --ws.origins * + --authrpc.addr 127.0.0.1 + --authrpc.port 8551 + --port 30303 + --discovery.port 30303 + --metrics 9001 + --datadir /home/azureuser/.reth + --log.file.directory /home/azureuser/.reth/logs + --gpo.maxprice 10000000000 +autostart=true +autorestart=true +startsecs=10 +priority=200 +stdout_logfile=/var/log/reth.log +stdout_logfile_maxbytes=0 +stderr_logfile=/var/log/reth.err +stderr_logfile_maxbytes=0 diff --git a/main.py b/main.py new file mode 100644 index 00000000..40797309 --- /dev/null +++ b/main.py @@ -0,0 +1,6 @@ +def main(): + print("Hello from deploy!") + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..4caf04da --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,39 @@ +[project] +name = "deploy" +version = "0.1.0" +description = "Add your description here" +requires-python = ">=3.13" +dependencies = [ + "ruff>=0.8.0", + "certifi>=2025.10.5", + "charset-normalizer>=3.4.4", + "click>=8.3.0", + "idna>=3.11", 
+ "mypy-extensions>=1.1.0", + "packaging>=25.0", + "pathspec>=0.12.1", + "platformdirs>=4.5.0", + "pyright>=1.1.406", + "requests>=2.32.5", + "urllib3>=2.5.0", +] + +[tool.ruff] +line-length = 88 +target-version = "py313" + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "UP", # pyupgrade +] +ignore = [] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..8a67a5f1 --- /dev/null +++ b/uv.lock @@ -0,0 +1,239 @@ +version = 1 +revision = 3 +requires-python = ">=3.13" + +[[package]] +name = "black" +version = "25.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pytokens" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/43/20b5c90612d7bdb2bdbcceeb53d588acca3bb8f0e4c5d5c751a2c8fdd55a/black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619", size = 648393, upload-time = "2025-09-19T00:27:37.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/99/3acfea65f5e79f45472c45f87ec13037b506522719cd9d4ac86484ff51ac/black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175", size = 1742165, upload-time = "2025-09-19T00:34:10.402Z" }, + { url = "https://files.pythonhosted.org/packages/3a/18/799285282c8236a79f25d590f0222dbd6850e14b060dfaa3e720241fd772/black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f", size = 1581259, upload-time = "2025-09-19T00:32:49.685Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/ce/883ec4b6303acdeca93ee06b7622f1fa383c6b3765294824165d49b1a86b/black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831", size = 1655583, upload-time = "2025-09-19T00:30:44.505Z" }, + { url = "https://files.pythonhosted.org/packages/21/17/5c253aa80a0639ccc427a5c7144534b661505ae2b5a10b77ebe13fa25334/black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357", size = 1343428, upload-time = "2025-09-19T00:32:13.839Z" }, + { url = "https://files.pythonhosted.org/packages/1b/46/863c90dcd3f9d41b109b7f19032ae0db021f0b2a81482ba0a1e28c84de86/black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae", size = 203363, upload-time = "2025-09-19T00:27:35.724Z" }, +] + +[[package]] +name = "certifi" +version = "2025.10.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = 
"sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, 
+ { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = 
"2025-09-18T17:32:23.696Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "deploy" +version = "0.1.0" +source = { virtual = "." 
} +dependencies = [ + { name = "black" }, + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "click" }, + { name = "idna" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "pyright" }, + { name = "requests" }, + { name = "urllib3" }, +] + +[package.metadata] +requires-dist = [ + { name = "black", specifier = ">=25.9.0" }, + { name = "certifi", specifier = ">=2025.10.5" }, + { name = "charset-normalizer", specifier = ">=3.4.4" }, + { name = "click", specifier = ">=8.3.0" }, + { name = "idna", specifier = ">=3.11" }, + { name = "mypy-extensions", specifier = ">=1.1.0" }, + { name = "packaging", specifier = ">=25.0" }, + { name = "pathspec", specifier = ">=0.12.1" }, + { name = "platformdirs", specifier = ">=4.5.0" }, + { name = "pyright", specifier = ">=1.1.406" }, + { name = "requests", specifier = ">=2.32.5" }, + { name = "urllib3", specifier = ">=2.5.0" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = 
"sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.406" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/16/6b4fbdd1fef59a0292cbb99f790b44983e390321eccbc5921b4d161da5d1/pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c", size = 4113151, upload-time = "2025-10-02T01:04:45.488Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = 
"sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" }, +] + +[[package]] +name = "pytokens" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/c2/dbadcdddb412a267585459142bfd7cc241e6276db69339353ae6e241ab2b/pytokens-0.2.0.tar.gz", hash = "sha256:532d6421364e5869ea57a9523bf385f02586d4662acbcc0342afd69511b4dd43", size = 15368, upload-time = "2025-10-15T08:02:42.738Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/5a/c269ea6b348b6f2c32686635df89f32dbe05df1088dd4579302a6f8f99af/pytokens-0.2.0-py3-none-any.whl", hash = "sha256:74d4b318c67f4295c13782ddd9abcb7e297ec5630ad060eb90abf7ebbefe59f8", size = 12038, upload-time = "2025-10-15T08:02:41.694Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = 
"sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] diff --git a/yocto/README.md b/yocto/README.md new file mode 100644 index 00000000..3f20271f --- /dev/null +++ b/yocto/README.md @@ -0,0 +1,111 @@ +# Yocto Build & Deploy Automation + +Automates the process of building and deploying Yocto images with Seismic Enclave integration for TDX instances. + +## Prerequisites + +Before running the automation script, ensure you have: +- Access to the build machine +- Python 3.8+ +- `az` CLI tool configured +- Git configured for pushing changes + +## Setup + +1. Set up Python virtual environment: +```bash +python3 -m venv .venv +source .venv/bin/activate +``` + +2. Install dependencies: +```bash +pip install -r requirements.txt +``` + +## Usage + +The script supports two modes of operation: + +### 1. 
Build New and Deploy +```bash +python3 cli.py \ + --build \ + --deploy -v \ + --resource-group devnet-yocto-1 \ + --domain-record yocto-1 +``` + +This mode: +- Builds new Yocto image +- Generates measurements +- Deploys newly built image to new resource group +- Starts proxy server to validate that the machine has deployed correctly + +### 2. Build an image +```bash +python3 cli.py --build -v +``` + +### 3. Deploy an image without rebuilding: +Look for the artifact number in `deploy_metadata.json`. For example, if the artifact is `cvm-image-azure-tdx.rootfs-20250307221436.wic.vhd`, you would run + +```bash +python3 cli.py \ + --deploy -v \ + --artifact 20250307221436 \ + --resource-group devnet-yocto-1 \ + --domain-record yocto-1 +``` + +This mode: +- Uses provided artifact & its existing measurements +- Deploys image to new resource group +- Starts proxy server to validate that the machine has deployed correctly + +## Output + +The script manages its artifact & resource information in the `deploy_metadata.json` file. After deploying an image, you should be able to make reth RPC requests to `https://.seismicdev.net/rpc/`. E.g. +``` +curl -X POST --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + -H "Content-Type: application/json" http://yocto-1.seismicdev.net:8545/ +``` +You should be able to make RPC requests to the enclave-server on port 7878, e.g. +``` +curl -X POST http://yocto-1.seismicdev.net:7878 \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","method":"healthCheck","params":[],"id":1}' +``` + +## SSH + +After an image is deployed, you can ssh into the machine with `ssh root@domain-record.DOMAIN`, e.g. `ssh root@yocto-1.seismicdev.net`. You must ssh in from the machine that deployed the image. + +## Deployment validation + +Upon successful deployment, the script will: +1. Start the proxy server and client processes +2. Verify attestation from the server on both the server and the client processes +3.
Stop the processes and exit + +## Arguments + +### Modes +- `--build` Build a new image +- `--deploy` Deploy an image +- `--delete-vm` Resource group to delete +- `--delete-artifact` Artifact to delete +- `--logs` If flagged, print build and/or deploy logs as they run + +### Build arguments +- `--enclave-branch` Seismic Enclave git branch name. Defaults to 'main' +- `--enclave-commit` Seismic Enclave git gommit hash. If not provided, does not change image +- `--sreth-branch` Seismic Reth git branch name. Defaults to 'seismic' +- `--sreth-commit` Seismic Reth git commit hash. If not provided, does not change image + +### Deploy arguments +- `--artifact` Required when running --deploy without --build (e.g. '20241203182636') +- `--resource-group` (required) For deploying: the name of the resource group to create +- `--domain-record` (required) Domain record name (e.g. xxx.seismicdev.net). Required if deploying +- `--domain-name` Domain name (e.g. seismicdev.net) +- `--domain-resource-group` Azure domain resource group name (e.g. devnet2) diff --git a/yocto/__init__.py b/yocto/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/yocto/artifact.py b/yocto/artifact.py new file mode 100644 index 00000000..5de5b989 --- /dev/null +++ b/yocto/artifact.py @@ -0,0 +1,88 @@ +import glob +import logging +import os +import re +from typing import Optional +import datetime +from yocto.metadata import load_metadata, remove_artifact_from_metadata +from yocto.paths import BuildPaths + +logger = logging.getLogger(__name__) + + +def _extract_timestamp(artifact: str): + """ + Extract timestamp from artifact filename + e.g. 'cvm-image-azure-tdx.rootfs-20241202202935.wic.vhd' + Returns the timestamp if found, None otherwise + """ + + pattern = r".*?(\d{14}).*" + match = re.search(pattern, artifact) + if not match: + raise ValueError( + f"Invalid artifact name: {artifact}. 
" + 'It should look like "cvm-image-azure-tdx.rootfs-20241202202935.wic.vhd"' + ) + return match.group(1) + + +def artifact_timestamp(artifact: str) -> int: + """ + Extract timestamp from artifact filename + e.g. 'cvm-image-azure-tdx.rootfs-20241202202935.wic.vhd' + Returns the timestamp if found, None otherwise + """ + ts_string = _extract_timestamp(artifact) + if not ts_string: + raise ValueError(f"Invalid artifact name: {artifact}") + dt = datetime.datetime.strptime(ts_string, "%Y%m%d%H%M%S") + return int(dt.timestamp()) + + +def _artifact_from_timestamp(timestamp: str) -> str: + return f"cvm-image-azure-tdx.rootfs-{timestamp}.wic.vhd" + + +def parse_artifact(artifact_arg: Optional[str]) -> Optional[str]: + if not artifact_arg: + return None + + if len(artifact_arg) == 14: + if all(a.isdigit() for a in artifact_arg): + return _artifact_from_timestamp(artifact_arg) + + # Validate that it's correctly named + timestamp = _extract_timestamp(artifact_arg) + return _artifact_from_timestamp(timestamp) + + +def delete_artifact(artifact: str, home: str): + resources = load_metadata(home).get("resources", {}) + deployed_to = [ + rg for rg, resource in resources.items() if resource["artifact"] == artifact + ] + if deployed_to: + confirm = input( + f'\nThe artifact "{artifact}" is deployed to {len(deployed_to)} resource group(s):' + f'\n - {"\n - ".join(deployed_to)}\n\n' + "Are you really sure you want to delete it? 
" + "This will not delete the resources (y/n): " + ) + if confirm.strip().lower() != "y": + logger.info(f"Not deleting artifact {artifact}") + return + + timestamp = _extract_timestamp(artifact) + artifacts_path = BuildPaths(home).artifacts + files_deleted = 0 + for filepath in glob.glob(f"{artifacts_path}/*{timestamp}*"): + os.remove(filepath) + files_deleted += 1 + + if not files_deleted: + logger.warning(f"Found no files associated with this artifact") + return + + logger.info(f"Deleted {files_deleted} files associated with artifact {artifact}") + remove_artifact_from_metadata(artifact, home) diff --git a/yocto/azure_common.py b/yocto/azure_common.py new file mode 100644 index 00000000..c7138a0b --- /dev/null +++ b/yocto/azure_common.py @@ -0,0 +1,586 @@ +#!/usr/bin/env python3 +""" +Common Azure deployment functionality. +Shared components for Azure VM deployment scripts. +""" + +import argparse +import json +import os +import subprocess +import tempfile +from typing import List, Optional +from pathlib import Path +import logging +from yocto.conf.conf import DeployConfigs, VmConfigs + +logger = logging.getLogger(__name__) + + +DEFAULT_RESOURCE_GROUP = "yocto-testnet" +DEFAULT_DOMAIN_NAME = "seismictest.net" +DEFAULT_CERTBOT_EMAIL = "c@seismic.systems" + +DEFAULT_REGION = "eastus2" +DEFAULT_VM_SIZE = "Standard_EC4es_v5" + +CONSENSUS_PORT = 18551 + + +# Disk Operations +def get_disk_size(disk_path: Path) -> int: + """Get disk size in bytes.""" + return disk_path.stat().st_size + + +class AzureCLI: + """Wrapper for Azure CLI commands.""" + + @staticmethod + def run_command( + cmd: List[str], + show_logs: bool = False, + ) -> subprocess.CompletedProcess: + """Execute an Azure CLI command.""" + try: + result = subprocess.run( + cmd, + capture_output=not show_logs, + text=True, + check=True, + ) + return result + except subprocess.CalledProcessError as e: + logger.info(f"Command failed: {' '.join(cmd)}") + logger.info(f"Error: {e.stderr}") + raise + + @staticmethod + 
def check_dependencies(): + """Check if required tools are installed.""" + tools = ["az", "azcopy"] + for tool in tools: + try: + subprocess.run([tool, "--version"], capture_output=True, check=True) + except (subprocess.CalledProcessError, FileNotFoundError): + raise RuntimeError( + f"Error: '{tool}' command not found. Please install {tool}." + ) + + @classmethod + def resource_group_exists(cls, name: str) -> bool: + """Check if resource group exists.""" + try: + cmd = ["az", "group", "show", "--name", name] + cls.run_command(cmd) + return True + except subprocess.CalledProcessError: + return False + + @classmethod + def create_resource_group(cls, name: str, location: str) -> None: + """Create a resource group.""" + logger.info(f"Creating resource group: {name} in {location}") + cmd = ["az", "group", "create", "--name", name, "--location", location] + cls.run_command(cmd) + + @classmethod + def ensure_created_resource_group(cls, name: str, location: str): + """Ensure genesis IP resource group exists.""" + if cls.resource_group_exists(name): + logger.info(f"Resource group {name} already exists") + else: + confirm(f"create genesis resource group: {name} in {location}") + logger.info(f"Creating genesis IP resource group: {name} in {location}") + cls.create_resource_group(name, location) + + @classmethod + def create_public_ip(cls, name: str, resource_group: str) -> str: + """Create a static public IP address and return it.""" + logger.info(f"Creating static public IP address: {name}") + cmd = [ + "az", + "network", + "public-ip", + "create", + "--resource-group", + resource_group, + "--name", + name, + "--version", + "IPv4", + "--sku", + "standard", + "--zone", + "1", + "2", + "3", + "--query", + "publicIp.ipAddress", + "-o", + "tsv", + ] + result = cls.run_command(cmd) + return result.stdout.strip() + + @classmethod + def get_existing_public_ip( + cls, + name: str, + resource_group: str, + ) -> Optional[str]: + """Get existing IP address if it exists.""" + try: + cmd 
= [ + "az", + "network", + "public-ip", + "show", + "--name", + name, + "--resource-group", + resource_group, + "--query", + "ipAddress", + "-o", + "tsv", + ] + result = cls.run_command(cmd) + ip = result.stdout.strip() + return ip if ip and ip != "None" else None + except subprocess.CalledProcessError: + return None + + @classmethod + def get_existing_dns_ips(cls, config: DeployConfigs) -> List[str]: + """Get existing DNS A record IPs.""" + cmd = [ + "az", + "network", + "dns", + "record-set", + "a", + "list", + "--resource-group", + config.domain.resource_group, + "--zone-name", + config.domain.name, + "--recordsetnamesuffix", + config.domain.record, + "--query", + "[].ARecords[].ipv4Address", + "-o", + "tsv", + ] + result = cls.run_command(cmd) + return result.stdout.strip().split("\n") if result.stdout.strip() else [] + + @classmethod + def remove_dns_ip(cls, config: DeployConfigs, ip_address: str) -> None: + """Remove IP from DNS A record.""" + logger.info( + f"Removing {ip_address} from {config.domain.record}.{config.domain.name} record set" + ) + cmd = [ + "az", + "network", + "dns", + "record-set", + "a", + "remove-record", + "--resource-group", + config.domain.resource_group, + "--zone-name", + config.domain.name, + "--record-set-name", + config.domain.record, + "--ipv4-address", + ip_address, + "--keep-empty-record-set", + ] + cls.run_command(cmd) + + @classmethod + def add_dns_ip(cls, config: DeployConfigs, ip_address: str) -> None: + """Add IP to DNS A record.""" + logger.info( + f"Mapping {config.domain.record}.{config.domain.name} to {ip_address}" + ) + cmd = [ + "az", + "network", + "dns", + "record-set", + "a", + "add-record", + "--ttl", + "300", + "--resource-group", + config.domain.resource_group, + "--zone-name", + config.domain.name, + "--record-set-name", + config.domain.record, + "--ipv4-address", + ip_address, + ] + cls.run_command(cmd) + + @classmethod + def update_dns_record( + cls, + config: DeployConfigs, + ip_address: str, + remove_old: 
bool = True, + ) -> None: + """Update DNS A record with new IP address.""" + if remove_old: + previous_ips = cls.get_existing_dns_ips(config) + for prev_ip in previous_ips: + if prev_ip: + cls.remove_dns_ip(config, prev_ip) + + cls.add_dns_ip(config, ip_address) + + @classmethod + def disk_exists(cls, config: DeployConfigs, image_path: Path) -> bool: + cmd = [ + "az", + "disk", + "list", + "-g", + config.vm.resource_group, + ] + result = cls.run_command(cmd, show_logs=False) + disks = json.loads(result.stdout) + return any(config.vm.disk_name(image_path) == d["name"] for d in disks) + + @classmethod + def create_disk(cls, config: DeployConfigs, image_path: Path) -> None: + """Create a managed disk for upload.""" + disk_size = get_disk_size(image_path) + + logger.info("Creating disk") + cmd = [ + "az", + "disk", + "create", + "-n", + config.vm.disk_name(image_path), + "-g", + config.vm.resource_group, + "-l", + config.vm.location, + "--os-type", + "Linux", + "--upload-type", + "Upload", + "--upload-size-bytes", + str(disk_size), + "--sku", + "standard_lrs", + "--security-type", + "ConfidentialVM_NonPersistedTPM", + "--hyper-v-generation", + "V2", + ] + cls.run_command(cmd, show_logs=config.show_logs) + + @classmethod + def grant_disk_access(cls, config: DeployConfigs, image_path: Path) -> str: + # Grant access + logger.info("Granting access") + cmd = [ + "az", + "disk", + "grant-access", + "-n", + config.vm.disk_name(image_path), + "-g", + config.vm.resource_group, + "--access-level", + "Write", + "--duration-in-seconds", + "86400", + "-o", + "json", + ] + result = cls.run_command(cmd, show_logs=False) + sas_data = json.loads(result.stdout) + return sas_data["accessSas"] + + @classmethod + def delete_disk(cls, resource_group: str, vm_name: str, artifact: str): + disk_name = VmConfigs.get_disk_name(vm_name, artifact) + logger.info(f"Deleting disk {disk_name} from resource group {resource_group}") + cmd = [ + "az", + "disk", + "delete", + "-g", + resource_group, + 
"-n", + disk_name, + "--yes", + ] + cls.run_command(cmd, show_logs=True) + + @classmethod + def copy_disk( + cls, + image_path: Path, + sas_uri: str, + show_logs: bool = False, + ) -> None: + # Copy disk + logger.info("Copying disk") + cmd = ["azcopy", "copy", image_path, sas_uri, "--blob-type", "PageBlob"] + cls.run_command(cmd, show_logs=show_logs) + + @classmethod + def revoke_disk_access(cls, config: DeployConfigs, image_path: Path) -> None: + # Revoke access + logger.info("Revoking access") + cmd = [ + "az", + "disk", + "revoke-access", + "-n", + config.vm.disk_name(image_path), + "-g", + config.vm.resource_group, + ] + cls.run_command(cmd, show_logs=config.show_logs) + + @classmethod + def upload_disk(cls, config: DeployConfigs, image_path: Path) -> None: + """Upload disk image to Azure.""" + sas_uri = cls.grant_disk_access(config, image_path) + cls.copy_disk(image_path, sas_uri, show_logs=config.show_logs) + cls.revoke_disk_access(config, image_path) + + @classmethod + def create_nsg(cls, config: DeployConfigs) -> None: + """Create network security group.""" + logger.info("Creating network security group") + cmd = [ + "az", + "network", + "nsg", + "create", + "--name", + config.vm.nsg_name, + "--resource-group", + config.vm.resource_group, + "--location", + config.vm.location, + ] + cls.run_command(cmd, show_logs=config.show_logs) + + @classmethod + def add_nsg_rule( + cls, + config: DeployConfigs, + name: str, + priority: str, + port: str, + protocol: str, + source: str, + ) -> None: + """Add a single NSG rule.""" + cmd = [ + "az", + "network", + "nsg", + "rule", + "create", + "--nsg-name", + config.vm.nsg_name, + "--resource-group", + config.vm.resource_group, + "--name", + name, + "--priority", + priority, + "--destination-port-ranges", + port, + "--access", + "Allow", + "--protocol", + protocol, + "--source-address-prefixes", + source, + ] + cls.run_command(cmd, show_logs=config.show_logs) + + @classmethod + def create_standard_nsg_rules(cls, config: 
DeployConfigs) -> None: + """Add all standard security rules.""" + rules = [ + ("AllowSSH", "100", "22", "Tcp", config.source_ip, "SSH rule"), + ("AllowAnyHTTPInbound", "101", "80", "Tcp", "*", "HTTP rule (TCP 80)"), + ("AllowAnyHTTPSInbound", "102", "443", "Tcp", "*", "HTTPS rule (TCP 443)"), + ("TCP7878", "115", "7878", "Tcp", "*", "TCP 7878 rule"), + ("TCP7936", "116", "7936", "Tcp", "*", "TCP 7936 rule"), + ("TCP8545", "110", "8545", "Tcp", "*", "TCP 8545 rule"), + ("TCP8551", "111", "8551", "Tcp", "*", "TCP 8551 rule"), + ("TCP8645", "112", "8645", "Tcp", "*", "TCP 8645 rule"), + ("TCP8745", "113", "8745", "Tcp", "*", "TCP 8745 rule"), + ( + f"ANY{CONSENSUS_PORT}", + "114", + f"{CONSENSUS_PORT}", + "*", + "*", + "Any 30303 rule", + ), + ] + + for name, priority, port, protocol, source, description in rules: + logger.info(f"Creating {description}") + cls.add_nsg_rule(config, name, priority, port, protocol, source) + + @classmethod + def create_user_data_file(cls, config: DeployConfigs) -> str: + """Create temporary user data file.""" + fd, temp_file = tempfile.mkstemp(suffix=".yaml") + try: + with os.fdopen(fd, "w") as f: + f.write(f'CERTBOT_EMAIL="{config.email}"\n') + f.write(f'RECORD_NAME="{config.domain.record}"\n') + f.write(f'DOMAIN="{config.domain.name}"\n') + + logger.info(f"Created temporary user-data file: {temp_file}") + with open(temp_file, "r") as f: + logger.info(f.read()) + + return temp_file + except: + os.close(fd) + raise + + @classmethod + def create_vm(cls, config: DeployConfigs, image_path: Path, ip_name: str) -> None: + """Create the virtual machine.""" + user_data_file = cls.create_user_data_file(config) + + try: + logger.info("Booting VM...") + cmd = [ + "az", + "vm", + "create", + "--name", + config.vm.name, + "--size", + config.vm.size, + "--resource-group", + config.vm.resource_group, + "--attach-os-disk", + config.vm.disk_name(image_path), + "--security-type", + "ConfidentialVM", + "--enable-vtpm", + "true", + "--enable-secure-boot", 
+ "false", + "--os-disk-security-encryption-type", + "NonPersistedTPM", + "--os-type", + "Linux", + "--nsg", + config.vm.nsg_name, + "--public-ip-address", + ip_name, + "--user-data", + user_data_file, + ] + cls.run_command(cmd, show_logs=False) + finally: + os.unlink(user_data_file) + logger.info(f"Deleted temporary user-data file: {user_data_file}") + + +# Common Argument Parser +def create_base_parser(description: str) -> argparse.ArgumentParser: + """Create base argument parser with common arguments.""" + parser = argparse.ArgumentParser(description=description) + + # Common optional arguments + parser.add_argument( + "-r", + "--region", + type=str, + default=DEFAULT_REGION, + help=f"Azure region (default: {DEFAULT_REGION})", + ) + parser.add_argument( + "--domain-resource-group", + type=str, + default=DEFAULT_RESOURCE_GROUP, + help="Domain resource group (default: devnet2)", + ) + parser.add_argument( + "--domain-name", + type=str, + default=DEFAULT_DOMAIN_NAME, + help="Domain name (default: seismicdev.net)", + ) + parser.add_argument( + "--certbot-email", + type=str, + default=DEFAULT_CERTBOT_EMAIL, + help=f"Certbot email (default: {DEFAULT_CERTBOT_EMAIL})", + ) + parser.add_argument( + "--source-ip", + type=str, + help="Source IP address for SSH access. 
Defaults to this machine's IP", + ) + parser.add_argument( + "--vm_size", + type=str, + # TODO: validate that it's a TDX machine + default=DEFAULT_VM_SIZE, + help=f"VM size (default: {DEFAULT_VM_SIZE})", + ) + parser.add_argument( + "-v", + "--logs", + action="store_true", + help="If flagged, print build and/or deploy logs as they run", + default=False, + ) + parser.add_argument( + "--code-path", + default="", + type=str, + help="Path to code relative to $HOME", + ) + + deploy_parser = parser.add_mutually_exclusive_group(required=True) + + # Only one of these two + deploy_parser.add_argument( + "-a", + "--artifact", + type=str, + help=( + "If not running with --build, " + "use this to specify an artifact to deploy, " + "e.g. 'cvm-image-azure-tdx.rootfs-20241203182636.wic.vhd'" + ), + ) + deploy_parser.add_argument( + "--ip-only", + action="store_true", + help="Only deploy genesis IPs", + ) + return parser + + +def confirm(what: str) -> bool: + inp = input(f"Are you sure you want to {what}? [y/N]\n") + if not inp.strip().lower() == "y": + raise ValueError(f"Aborting; will not {what}") + return True diff --git a/yocto/azure_deploy.py b/yocto/azure_deploy.py new file mode 100755 index 00000000..9b5b4c05 --- /dev/null +++ b/yocto/azure_deploy.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +""" +Azure VM Deployment Tool + +A modular Python replacement for deploy.sh that handles Azure VM deployment. 
+""" + +from yocto.azure_common import ( + AzureCLI, + create_base_parser, +) +from yocto.cfg import DeploymentConfig + + +def deploy_vm(config: DeploymentConfig) -> None: + """Execute full VM deployment pipeline.""" + print("Starting Azure VM deployment...") + + # Check dependencies + AzureCLI.check_dependencies() + + # Create resource group + AzureCLI.create_resource_group(config.resource_group, config.region) + + # Create and configure IP address + ip_address = AzureCLI.create_public_ip(config.resource_group, config.resource_group) + AzureCLI.update_dns_record(config, ip_address) + + # Create and upload disk + AzureCLI.create_disk(config) + AzureCLI.upload_disk(config) + + # Create network security group and rules + AzureCLI.create_nsg(config) + AzureCLI.create_standard_nsg_rules(config) + + # Create VM + AzureCLI.create_vm(config) + + print("Deployment completed.") + + +def main(): + """Main entry point.""" + try: + parser = create_base_parser("Azure VM Deployment Tool") + parser.add_argument( + "--node", + type=int, + required=True, + help="Node number. 
import datetime
import glob
import logging
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple

from yocto.artifact import artifact_timestamp
from yocto.conf.conf import BuildConfigs, Configs
from yocto.git import GitConfigs, update_git_bb
from yocto.measurements import Measurements, generate_measurements
from yocto.metadata import load_artifact_measurements, load_metadata, write_metadata
from yocto.paths import BuildPaths

logger = logging.getLogger(__name__)


_ONE_HOUR_IN_SECONDS = 3600
# Refuse to deploy an image whose mtime is older than this many hours.
_MAX_ARTIFACT_AGE = 5


def build_image(home: str, capture_output: bool = True) -> Path:
    """Build the Yocto Azure image and return the path of the produced .vhd.

    Raises RuntimeError if the build fails or the newest artifact is more
    than _MAX_ARTIFACT_AGE hours old, FileNotFoundError if no artifact or
    manifests directory can be located.
    """
    paths = BuildPaths(home)
    yocto_manifests_path = paths.yocto_manifests
    if not yocto_manifests_path.exists():
        raise FileNotFoundError(
            f"yocto-manifests path not found: {yocto_manifests_path}"
        )

    # Run the build command
    build_cmd = " && ".join(
        [f"cd {yocto_manifests_path}", "rm -rf build/", "make azure-image"]
    )
    build_result = subprocess.run(
        build_cmd,
        shell=True,
        capture_output=capture_output,
        text=True,
    )
    if build_result.returncode != 0:
        err = build_result.stderr.strip() if build_result.stderr else "Unknown error"
        raise RuntimeError(f"Image build failed: {err}")

    # Find the latest built image. Search under BuildPaths(home).artifacts
    # instead of a hardcoded ~/yocto-manifests path so a non-default
    # --code-path layout is honored — same logic as Deployer.find_latest_image.
    pattern = str(paths.artifacts / "cvm-image-azure-tdx.rootfs-*.wic.vhd")
    image_files = glob.glob(pattern)
    if not image_files:
        raise FileNotFoundError("No image file found in the expected directory")
    image_path_str = max(image_files, key=lambda p: Path(p).stat().st_mtime)

    ts = artifact_timestamp(image_path_str)
    max_age_seconds = _MAX_ARTIFACT_AGE * _ONE_HOUR_IN_SECONDS
    if ts < datetime.datetime.now().timestamp() - max_age_seconds:
        raise RuntimeError(
            f"Most recently built image more than {_MAX_ARTIFACT_AGE} hours old"
        )

    logger.info(f"Image built successfully at {image_path_str}")
    return Path(image_path_str)


@dataclass
class BuildOutput:
    """Result of a successful build: the image plus its provenance."""

    image_path: Path
    git_configs: GitConfigs
    measurements: Measurements
    home: str

    def update_artifacts_metadata(self):
        """Record this artifact's git revisions and measurements in metadata."""
        metadata = load_metadata(self.home)
        artifacts = metadata.get("artifacts", {})
        artifacts[self.image_path.name] = {
            "repos": self.git_configs.to_dict(),
            "image": self.measurements,
        }
        metadata["artifacts"] = artifacts
        write_metadata(metadata, self.home)


class Builder:
    """Drives a full image build: pin git revisions, build, measure."""

    def __init__(self, configs: BuildConfigs, home: str, show_logs: bool = True):
        self.configs = configs
        self.show_logs = show_logs
        self.home = home

    def update_git(self) -> GitConfigs:
        """Pin each repo's bb file to its configured branch/commit."""
        paths = BuildPaths(self.home)
        git = self.configs.git
        return GitConfigs(
            enclave=update_git_bb(paths.enclave_bb, git.enclave, self.home),
            sreth=update_git_bb(paths.sreth_bb, git.sreth, self.home),
            summit=update_git_bb(paths.summit_bb, git.summit, self.home),
        )

    def build(self) -> BuildOutput:
        """Build a new image and return it along with its measurements."""
        git_configs = self.update_git()
        image_path = build_image(
            self.home,
            capture_output=not self.show_logs,
        )
        measurements = generate_measurements(image_path, self.home)
        return BuildOutput(
            image_path=image_path,
            git_configs=git_configs,
            measurements=measurements,
            home=self.home,
        )


def maybe_build(configs: Configs) -> Optional[Tuple[Path, Measurements]]:
    """
    If --build was passed, build a fresh image (returned when also deploying).
    Otherwise, when deploying an existing --artifact, return its path and
    stored measurements. Returns None when there is nothing to deploy.
    """
    if configs.build:
        builder = Builder(configs.build, configs.home, configs.show_logs)
        build_output = builder.build()
        build_output.update_artifacts_metadata()
        if configs.deploy:
            return build_output.image_path, build_output.measurements
        return None

    if not configs.deploy:
        # Not going to deploy anything, so exit early
        return None

    if not configs.deploy.artifact:
        # Should never happen since we validate this in argument parsing
        return None

    return load_artifact_measurements(configs.deploy.artifact, configs.home)
import argparse
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional

logger = logging.getLogger(__name__)


DEFAULT_RESOURCE_GROUP = "yocto-testnet"
DEFAULT_DOMAIN_NAME = "seismictest.net"
DEFAULT_CERTBOT_EMAIL = "c@seismic.systems"

DEFAULT_REGION = "eastus2"
DEFAULT_VM_SIZE = "Standard_EC4es_v5"


# Disk Operations
def get_disk_size(disk_path: str) -> int:
    """Return the size of the file at disk_path, in bytes."""
    return Path(disk_path).stat().st_size


@dataclass
class DeploymentConfig:
    """Configuration for Azure VM deployment."""

    vm_name: str
    region: str
    vm_size: str
    node: int
    record_name: str
    source_ip: str
    ip_only: bool
    artifact: Optional[str]
    home: str
    domain_resource_group: str = DEFAULT_RESOURCE_GROUP
    domain_name: str = DEFAULT_DOMAIN_NAME
    certbot_email: str = DEFAULT_CERTBOT_EMAIL
    resource_group: Optional[str] = None
    nsg_name: Optional[str] = None
    show_logs: bool = True

    def __post_init__(self):
        """Set derived values after initialization."""
        # Default the deployment resource group to the domain's resource
        # group, and the NSG name to the VM name.
        if self.resource_group is None:
            self.resource_group = self.domain_resource_group
        if self.nsg_name is None:
            self.nsg_name = self.vm_name

    def to_configs(self) -> "Configs":
        """Convert this flat config into the nested Configs structure."""
        # Imported lazily so this module is importable (and testable) on
        # its own without pulling in the whole conf dependency chain.
        from yocto.conf.conf import (
            Configs,
            DeployConfigs,
            DomainConfig,
            Mode,
            VmConfigs,
        )

        return Configs(
            mode=Mode.deploy_only(),
            build=None,
            deploy=DeployConfigs(
                vm=VmConfigs(
                    resource_group=self.resource_group,
                    name=self.vm_name,
                    nsg_name=self.nsg_name,
                    location=self.region,
                    size=self.vm_size,
                ),
                domain=DomainConfig(
                    record=self.record_name,
                    resource_group=self.domain_resource_group,
                    name=self.domain_name,
                ),
                artifact=self.artifact or "",
                email=self.certbot_email,
                source_ip=self.source_ip,
                show_logs=self.show_logs,
            ),
            home=self.home,
            show_logs=self.show_logs,
        )

    @classmethod
    def parse_base_kwargs(cls, args: argparse.Namespace) -> Dict[str, Any]:
        """Extract the constructor kwargs shared by all deployment entry points."""
        from yocto.artifact import parse_artifact
        from yocto.conf.conf import get_host_ip

        source_ip = args.source_ip
        if source_ip is None:
            # get_host_ip curls ifconfig.me (the old message said ipify.org)
            logger.warning(
                "No --source-ip provided, so fetching IP from ifconfig.me..."
            )
            source_ip = get_host_ip()
            logger.info(f"Fetched public IP: {source_ip}")
        return {
            "home": str(
                Path.home() / args.code_path if args.code_path else Path.home()
            ),
            "artifact": parse_artifact(args.artifact),
            "ip_only": args.ip_only,
            "region": args.region,
            "vm_size": args.vm_size,
            "source_ip": source_ip,
            "domain_resource_group": args.domain_resource_group,
            "domain_name": args.domain_name,
            "certbot_email": args.certbot_email,
            "show_logs": args.logs,
        }

    @classmethod
    def parse_deploy_args(cls, args: argparse.Namespace) -> Dict[str, Any]:
        """Derive node-specific kwargs for a plain deploy.

        Returns a kwargs dict merged by from_deploy_args — NOT a
        DeploymentConfig (the previous return annotation was wrong).
        """
        if not args.node or args.node < 1:
            # The CLI flag is --node (the old message said -n)
            raise ValueError("Argument --node is required and cannot be less than 1")
        return {
            "node": args.node,
            "record_name": f"node-{args.node}",
            "vm_name": f"yocto-node-{args.node}",
        }

    @classmethod
    def configure_genesis_node(cls, node: int) -> Dict[str, Any]:
        """Derive node-specific kwargs for a genesis deploy (kwargs dict)."""
        if node < 1:
            raise ValueError("Argument --node is required and cannot be less than 1")
        return {
            "node": node,
            "record_name": f"summit-genesis-{node}",
            "vm_name": f"yocto-genesis-{node}",
        }

    @classmethod
    def from_deploy_args(cls, args: argparse.Namespace) -> "DeploymentConfig":
        """Create config from parsed arguments for a plain deploy."""
        config_kwargs = cls.parse_base_kwargs(args)
        config_kwargs.update(cls.parse_deploy_args(args))
        return cls(**config_kwargs)

    @classmethod
    def from_genesis_args(
        cls, args: argparse.Namespace, node: int
    ) -> "DeploymentConfig":
        """Create config from parsed arguments for genesis node `node`."""
        config_kwargs = cls.parse_base_kwargs(args)
        config_kwargs.update(cls.configure_genesis_node(node))
        return cls(**config_kwargs)
deployer.cleanup() + sys.exit(0) + + # Setup signal handlers for cleanup + signal.signal(signal.SIGINT, deploy_signal_handler) + signal.signal(signal.SIGTERM, deploy_signal_handler) + + try: + deploy_output = deployer.deploy() + deploy_output.update_deploy_metadata() + deployer.start_proxy_server(deploy_output.public_ip) + return 0 + except Exception as e: + logger.error(f"Failed: {str(e)}\n{traceback.format_exc()}") + return 1 + finally: + deployer.cleanup() + + +if __name__ == "__main__": + exit(main()) diff --git a/yocto/conf/__init__.py b/yocto/conf/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/yocto/conf/conf.py b/yocto/conf/conf.py new file mode 100644 index 00000000..fb78cf0a --- /dev/null +++ b/yocto/conf/conf.py @@ -0,0 +1,226 @@ +import argparse +import subprocess +from dataclasses import dataclass +from typing import Dict, Optional, Any + +from yocto.artifact import parse_artifact +from yocto.git import GitConfigs +from yocto.parser import parse_args +from pathlib import Path + + +def get_host_ip() -> str: + result = subprocess.run( + "curl -s ifconfig.me", shell=True, capture_output=True, text=True + ) + if result.returncode != 0: + raise RuntimeError("Failed to fetch host IP") + return result.stdout.strip() + + +@dataclass +class BuildConfigs: + git: GitConfigs + + @staticmethod + def from_args(args: argparse.Namespace) -> "BuildConfigs": + return BuildConfigs(git=GitConfigs.from_args(args)) + + @staticmethod + def default() -> "BuildConfigs": + return BuildConfigs( + git=GitConfigs.default(), + ) + + def to_dict(self) -> Dict[str, Any]: + return { + "git": self.git.to_dict(), + } + + +@dataclass +class VmConfigs: + resource_group: str + name: str + nsg_name: str + location: str = "eastus2" + size: str = "Standard_EC4es_v5" + api_port: int = 7878 + client_proxy_port: int = 8080 + + @staticmethod + def from_args(args: argparse.Namespace) -> "VmConfigs": + if not args.resource_group: + raise ValueError( + "If passing in --deploy, 
you must specify a --resource-group" + ) + return VmConfigs( + resource_group=args.resource_group, + name=args.resource_group, + nsg_name=args.resource_group, + # TODO: + # location=args.location, + # size=args.vm_size, + ) + + def to_dict(self): + return { + "resourceGroup": self.resource_group, + "name": self.name, + "nsgName": self.nsg_name, + "location": self.location, + "size": self.size, + } + + @staticmethod + def get_disk_name(vm_name: str, artifact: str) -> str: + return f"{vm_name}_{artifact}" + + def disk_name(self, image_path: Path) -> str: + return self.get_disk_name(self.name, image_path.name) + + +@dataclass +class DomainConfig: + record: str = "yocto-0" + resource_group: str = "devnet2" + name: str = "seismicdev.net" + + @staticmethod + def from_args(args: argparse.Namespace) -> "DomainConfig": + if not args.domain_record: + raise ValueError( + "If passing in --deploy, you must also provide a --domain-record" + ) + return DomainConfig( + record=args.domain_record, + resource_group=args.domain_resource_group, + name=args.domain_name, + ) + + def to_dict(self) -> Dict[str, str]: + return { + "url": f"https://{self.record}.{self.name}", + "record": self.record, + "name": self.name, + "resource_group": self.resource_group, + } + + +@dataclass +class DeployConfigs: + vm: VmConfigs + domain: DomainConfig + artifact: Optional[str] + email: str + source_ip: str + show_logs: bool = False + + @staticmethod + def from_args(args: argparse.Namespace) -> "DeployConfigs": + return DeployConfigs( + vm=VmConfigs.from_args(args), + domain=DomainConfig.from_args(args), + artifact=parse_artifact(args.artifact), + email=args.email, + source_ip=get_host_ip(), + show_logs=args.logs, + ) + + def to_dict(self) -> Dict[str, Any]: + kwargs = {} + if self.artifact: + kwargs["artifact"] = self.artifact + return { + "vm": self.vm.to_dict(), + "domain": self.domain.to_dict(), + **kwargs, + "email": self.email, + "sourceIp": self.source_ip, + "showLogs": self.show_logs, + } + + 
+@dataclass +class Mode: + build: bool + deploy: bool + delete_vm: Optional[str] + delete_artifact: Optional[str] + + @staticmethod + def from_args(args: argparse.Namespace) -> "Mode": + mode = Mode( + build=args.build, + deploy=args.deploy, + delete_vm=args.delete_vm, + delete_artifact=parse_artifact(args.delete_artifact), + ) + if not (mode.build or mode.deploy or mode.delete_vm or mode.delete_artifact): + raise ValueError( + "Invalid arguments. Must specify at least one of: " + "--build, " + "--deploy, " + "--delete-vm={{resource-group}}, or " + "--delete-artifact={{artifact}}" + ) + return mode + + @staticmethod + def deploy_only() -> "Mode": + return Mode( + build=False, + deploy=True, + delete_vm=None, + delete_artifact=None, + ) + + def to_dict(self) -> Dict[str, str | bool]: + delete_kwargs = {} + if self.delete_vm: + delete_kwargs["vm"] = self.delete_vm + if self.delete_artifact: + delete_kwargs["artifact"] = self.delete_artifact + kwargs = {"delete": delete_kwargs} if delete_kwargs else {} + return {"build": self.build, "deploy": self.deploy, **kwargs} + + +@dataclass +class Configs: + mode: Mode + build: Optional[BuildConfigs] + deploy: Optional[DeployConfigs] + show_logs: bool + home: str + + @staticmethod + def parse() -> "Configs": + args = parse_args() + mode = Mode.from_args(args) + build = BuildConfigs.from_args(args) if args.build else None + deploy = DeployConfigs.from_args(args) if args.deploy else None + show_logs = args.logs + if deploy and not build and not deploy.artifact: + raise ValueError( + "If running with --deploy and not --build, " + "you must provide an --artifact to deploy" + ) + return Configs( + mode=mode, + build=build, + deploy=deploy, + show_logs=show_logs, + home=Path.home() if not args.code_path else Path.home / args.code_path, + ) + + def to_dict(self) -> Dict[str, Any]: + kwargs = {} + if self.build: + kwargs["build"] = self.build.to_dict() + if self.deploy: + kwargs["deploy"] = self.deploy.to_dict() + return { + "mode": 
import glob
import json
import logging
import os
import subprocess
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

from yocto.measurements import Measurements, write_measurements_tmpfile
from yocto.metadata import (
    load_metadata,
    remove_vm_from_metadata,
    write_metadata,
)
from yocto.azure_common import AzureCLI, confirm
from yocto.conf.conf import DeployConfigs
from yocto.paths import BuildPaths
from yocto.proxy import ProxyClient

logger = logging.getLogger(__name__)


def get_ip_address(vm_name: str) -> str:
    """Get the public IP address of a deployed VM.

    Raises RuntimeError if the az CLI call fails or returns no addresses.
    """
    result = subprocess.run(
        ["az", "vm", "list-ip-addresses", "--name", vm_name],
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        raise RuntimeError(f"Failed to get IP address: {result.stderr.strip()}")

    # Parse and return the IP address; guard the empty-list case so callers
    # get a readable error instead of a bare IndexError.
    vm_info = json.loads(result.stdout)
    if not vm_info:
        raise RuntimeError(f"No IP information returned for VM {vm_name}")
    return vm_info[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"]


def delete_vm(vm_name: str, home: str) -> bool:
    """Delete a VM, its associated disk, and its metadata entry.

    Returns True if successful, False otherwise (including user abort).
    """
    metadata = load_metadata(home)
    meta = metadata["resources"][vm_name]
    resource_group = meta["vm"]["resourceGroup"]

    # confirm() builds the full "Are you sure you want to ...?" prompt itself
    # (the old code passed a complete question, doubling the text) and raises
    # ValueError on anything but 'y' — convert that into this function's
    # documented False return.
    try:
        confirm(f"delete VM {vm_name}")
    except ValueError:
        return False

    logger.info(
        f"Deleting VM {vm_name} in resource group {resource_group}. "
        "This takes a few minutes..."
    )
    # e.g. az vm delete -g yocto-testnet --name yocto-genesis-1 --yes
    # Argument-list form (no shell) — nothing here needs shell expansion.
    result = subprocess.run(
        ["az", "vm", "delete", "-g", resource_group, "--name", vm_name, "--yes"],
        capture_output=True,
        text=True,
    )

    if result.returncode != 0:
        logger.error(f"Error when deleting VM:\n{result.stderr.strip()}")
        return False

    logger.info(f"Successfully deleted {vm_name}:\n{result.stdout}")
    logger.info("Deleting associated disk...")
    AzureCLI.delete_disk(resource_group, vm_name, meta["artifact"])
    remove_vm_from_metadata(vm_name, home)
    return True


def deploy_image(
    image_path: Path,
    configs: DeployConfigs,
    ip_name: str,
) -> str:
    """Deploy an image and return the VM's public IP.

    Raises an error if deployment fails.
    """
    # Check if image_path exists
    if not image_path.exists():
        raise FileNotFoundError(f"Image path not found: {image_path}")

    # Disk
    if AzureCLI.disk_exists(configs, image_path):
        # NOTE(review): we log and still proceed to create/upload; confirm
        # whether an existing disk should instead abort or be reused.
        logger.error(f"Artifact {image_path.name} already exists for {configs.vm.name}")

    AzureCLI.create_disk(configs, image_path)
    AzureCLI.upload_disk(configs, image_path)

    # Security groups
    AzureCLI.create_nsg(configs)
    AzureCLI.create_standard_nsg_rules(configs)

    # Actually create the VM
    AzureCLI.create_vm(configs, image_path, ip_name)

    return get_ip_address(configs.vm.name)


@dataclass
class DeployOutput:
    """Result of a deployment, used to persist metadata."""

    configs: DeployConfigs
    artifact: str
    public_ip: str
    home: str

    def update_deploy_metadata(self):
        """Record this VM's artifact/IP/domain under metadata['resources']."""
        metadata = load_metadata(self.home)
        if "resources" not in metadata:
            metadata["resources"] = {}
        metadata["resources"][self.configs.vm.name] = {
            "artifact": self.artifact,
            "public_ip": self.public_ip,
            "domain": self.configs.domain.to_dict(),
            "vm": self.configs.vm.to_dict(),
        }
        write_metadata(metadata, self.home)


class Deployer:
    """Owns the lifecycle of one deployment: disk, VM, proxy, cleanup."""

    def __init__(
        self,
        configs: DeployConfigs,
        image_path: Path,
        measurements: Measurements,
        ip_name: str,
        home: str,
        show_logs: bool = True,
    ):
        self.configs = configs
        self.image_path = image_path
        self.ip_name = ip_name
        self.home = home
        self.show_logs = show_logs

        self.measurements_file = write_measurements_tmpfile(measurements)
        self.proxy: Optional[ProxyClient] = None

    def deploy(self) -> DeployOutput:
        """Deploy the configured image and return the deployment record."""
        public_ip = deploy_image(
            image_path=self.image_path,
            configs=self.configs,
            ip_name=self.ip_name,
        )
        if not public_ip:
            raise RuntimeError("Failed to obtain public IP during deployment")

        return DeployOutput(
            configs=self.configs,
            artifact=self.image_path.name,
            public_ip=public_ip,
            home=self.home,
        )

    def start_proxy_server(self, public_ip: str) -> None:
        """Start the measurement proxy against the freshly booted VM."""
        # Give 5 seconds to let the VM boot up
        time.sleep(5)
        self.proxy = ProxyClient(public_ip, self.measurements_file, self.home)
        if not self.proxy.start():
            raise RuntimeError("Failed to start proxy server")

    def find_latest_image(self) -> Path:
        """Find the most recently built image in the artifacts directory."""
        pattern = str(
            BuildPaths(self.home).artifacts / "cvm-image-azure-tdx.rootfs-*.wic.vhd"
        )
        image_files = glob.glob(pattern)
        if not image_files:
            raise FileNotFoundError("No existing images found in artifacts directory")

        latest_image = max(image_files, key=lambda x: Path(x).stat().st_mtime)
        logger.info(f"Found latest image: {latest_image}")
        return Path(latest_image)

    def cleanup(self) -> None:
        """Stop the proxy and remove the temporary measurements file."""
        if self.proxy:
            self.proxy.stop()
        if self.measurements_file.exists():
            os.remove(self.measurements_file)
+""" +import json +import logging +from typing import Tuple + +from yocto.conf.logs import setup_logging +from yocto.azure_common import ( + AzureCLI, + create_base_parser, + confirm, + DEFAULT_RESOURCE_GROUP, +) +from yocto.cfg import DeploymentConfig +from yocto.deploy import Deployer +from yocto.build import maybe_build + +logger = logging.getLogger(__name__) + + +class GenesisIPManager: + """Manages persistent IP addresses for genesis nodes.""" + + def __init__(self): + self.genesis_rg = DEFAULT_RESOURCE_GROUP + + def ensure_genesis_resource_group(self, region: str) -> None: + AzureCLI.ensure_created_resource_group(self.genesis_rg, region) + + def get_or_create_node_ip(self, node_number: int, region: str) -> Tuple[str, str]: + """Get or create persistent IP for a specific node number.""" + self.ensure_genesis_resource_group(region) + + ip_name = f"genesis-node-{node_number}" + + # Check if IP already exists + existing_ip = AzureCLI.get_existing_public_ip(ip_name, self.genesis_rg) + if existing_ip: + logger.info(f"Using existing IP {existing_ip} for node {node_number}") + return (existing_ip, ip_name) + + # Create new IP + logger.info(f"Creating new IP for node {node_number}") + confirm(f"create new IP for node {node_number} @ {ip_name}") + ip_address = AzureCLI.create_public_ip(ip_name, self.genesis_rg) + logger.info(f"Created IP {ip_address} for node {node_number}") + return (ip_address, ip_name) + + +def deploy_genesis_vm(args: DeploymentConfig) -> None: + """Execute genesis VM deployment pipeline.""" + logger.info("Starting Genesis Azure VM deployment...") + + if not args.artifact and not args.ip_only: + raise ValueError("Missing --artifact arg") + + node = args.node + cfg = args.to_configs() + deploy_cfg = cfg.deploy + print(f"Config:\n{json.dumps(cfg.to_dict(), indent=2)}") + + genesis_ip_manager = GenesisIPManager() + + # Check dependencies + AzureCLI.check_dependencies() + + # Create resource group + AzureCLI.ensure_created_resource_group( + 
name=deploy_cfg.vm.resource_group, + location=deploy_cfg.vm.location, + ) + + if node is None: + raise ValueError("Genesis deploy ran without --node arg") + + # Handle IP address allocation + (ip_address, ip_name) = genesis_ip_manager.get_or_create_node_ip( + node_number=node, + region=deploy_cfg.vm.location, + ) + AzureCLI.update_dns_record(deploy_cfg, ip_address, remove_old=False) + + if args.ip_only: + logger.info("Not creating machines (used --ip-only flag)") + return + + image_path, measurements = maybe_build(cfg) + deployer = Deployer( + configs=cfg.deploy, + image_path=image_path, + measurements=measurements, + home=cfg.home, + ip_name=ip_name, + show_logs=cfg.show_logs, + ) + deploy_output = deployer.deploy() + deploy_output.update_deploy_metadata() + + logger.info("Genesis deployment completed.") + + +def parse_genesis_args(): + """Parse genesis-specific command line arguments.""" + parser = create_base_parser("Genesis Azure VM Deployment Tool") + # Genesis-specific node arguments (mutually exclusive) + node_group = parser.add_mutually_exclusive_group(required=True) + node_group.add_argument( + "-c", + "--count", + type=int, + help="Number of nodes to deploy", + ) + node_group.add_argument( + "-n", + "--node", + type=int, + help="Specific node number to deploy", + ) + return parser.parse_args() + + +def main(): + setup_logging() + args = parse_genesis_args() + if args.node: + configs = [DeploymentConfig.from_genesis_args(args, args.node)] + elif args.count: + configs = [ + DeploymentConfig.from_genesis_args(args, n) + for n in range(1, args.count + 1) + ] + + for config in configs: + logger.info(f"Deploying genesis node {config.node}...") + deploy_genesis_vm(config) + + +if __name__ == "__main__": + main() diff --git a/yocto/git.py b/yocto/git.py new file mode 100644 index 00000000..de9e2858 --- /dev/null +++ b/yocto/git.py @@ -0,0 +1,156 @@ +import logging +import subprocess +from argparse import Namespace +from dataclasses import dataclass +from pathlib 
import Path +from typing import Dict, Optional + +from yocto.paths import BuildPaths + +logger = logging.getLogger(__name__) + + +@dataclass +class GitConfig: + commit: Optional[str] + branch: str + + @staticmethod + def from_args(args: Namespace, repo: str) -> "GitConfig": + values = vars(args) + return GitConfig( + commit=values[f"{repo}_commit"], branch=values[f"{repo}_branch"] + ) + + def to_dict(self) -> Dict[str, str]: + if not self.commit: + raise ValueError("Cannot call to_dict() on GitConfig without commit") + return { + "branch": self.branch, + "commit": self.commit, + } + + @staticmethod + def branch_only(branch: str) -> "GitConfig": + return GitConfig(commit=None, branch=branch) + + +@dataclass +class GitConfigs: + enclave: GitConfig + sreth: GitConfig + summit: GitConfig + + @staticmethod + def from_args(args: Namespace) -> "GitConfigs": + return GitConfigs( + enclave=GitConfig.from_args(args, "enclave"), + sreth=GitConfig.from_args(args, "sreth"), + summit=GitConfig.from_args(args, "summit"), + ) + + def to_dict(self): + return { + "enclave": self.enclave.to_dict(), + "sreth": self.sreth.to_dict(), + "summit": self.summit.to_dict(), + } + + @staticmethod + def default() -> "GitConfigs": + return GitConfigs( + enclave=GitConfig.branch_only("seismic"), + sreth=GitConfig.branch_only("seismic"), + summit=GitConfig.branch_only("main"), + ) + + +def run_command(cmd: str, cwd: Optional[Path] = None) -> subprocess.CompletedProcess: + result = subprocess.run(cmd, shell=True, capture_output=True, text=True, cwd=cwd) + + if result.returncode != 0: + raise RuntimeError(f"Command failed: {result.stderr.strip()}") + + return result + + +def _extract(cmd: str, field: str) -> str: + process = subprocess.Popen( + args=cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True + ) + stdout, stderr = process.communicate() + + if process.returncode == 0: + return stdout.strip() + else: + raise Exception(f"Failed to get {field}: {stderr}") + + +def 
_extract_srcrev(bb_path: Path) -> str: + pattern = "^[[:space:]]*SRCREV[[:space:]]*=" + cmd = f"grep '{pattern}' {bb_path} | awk -F'\"' '{{print $2}}'" + return _extract(cmd, "SRCREV") + + +def _extract_branch(bb_path: Path) -> str: + cmd = f"grep 'branch=' {bb_path} | sed 's/.*branch=\\([^;\"]*\\).*/\\1/'" + return _extract(cmd, "branch") + + +def update_git_bb( + bb_pathname: str, + git_config: GitConfig, + home: str, + commit_message: Optional[str] = None, +) -> GitConfig: + """ + Update the git commit and branch for a given Yocto bb file + """ + + paths = BuildPaths(home) + bb_path = paths.meta_seismic / bb_pathname + + if not commit_message: + commit_message = f"Update {bb_path.name} commit hash and branch" + + if not paths.meta_seismic.exists(): + raise FileNotFoundError(f"Meta seismic path not found: {paths.meta_seismic}") + + if not bb_path.exists(): + raise FileNotFoundError(f"{bb_path} not found") + + if git_config.commit is None: + current_git = GitConfig( + commit=_extract_srcrev(bb_path), + branch=_extract_branch(bb_path), + ) + logger.info( + f"No git commit provided for {bb_pathname}. 
" + f"Using current git state {current_git.branch}#{current_git.commit}" + ) + return current_git + + logger.info(f"Updating {bb_pathname}...") + update_cmd = f""" + sed -i 's|\\(branch=\\)[^;"]*|\\1{git_config.branch}|' {bb_path} && + sed -i 's|^\\s*SRCREV\\s*=.*|SRCREV = "{git_config.commit}"|' {bb_path} + """ + run_command(update_cmd, cwd=paths.meta_seismic) + logger.info(f"{bb_path.name} updated successfully") + + run_command(f"git add {bb_pathname}", cwd=paths.meta_seismic) + + # Check if there are changes to commit + status_result = run_command("git status --porcelain", cwd=paths.meta_seismic) + if status_result.stdout.strip(): + logger.info("Changes detected, committing...") + run_command(f'git commit -m "{commit_message}"', cwd=paths.meta_seismic) + logger.info("Committed changes") + + run_command("git push", cwd=paths.meta_seismic) + logger.info("Successfully pushed changes") + else: + logger.info("No changes to commit") + + logger.info(f"{bb_pathname} update completed successfully") + return git_config diff --git a/yocto/measurements.py b/yocto/measurements.py new file mode 100644 index 00000000..6e45bd2a --- /dev/null +++ b/yocto/measurements.py @@ -0,0 +1,58 @@ +import json +import logging +import os +import subprocess +import tempfile +from pathlib import Path +from typing import Any, Dict + +from yocto.paths import BuildPaths + +logger = logging.getLogger(__name__) + +Measurements = Dict[str, Any] + + +def write_measurements_tmpfile(measurements: Measurements) -> Path: + measurements_tmpfile = Path(tempfile.mktemp()) + with open(measurements_tmpfile, "w+") as f: + json.dump([measurements], f) + return measurements_tmpfile + + +def generate_measurements(image_path: Path, home: str) -> Measurements: + """Generate measurements for TDX boot process & write to tempfile""" + + paths = BuildPaths(home) + # Check if measured_boot_path and image_path exist + if not paths.measured_boot.exists(): + raise FileNotFoundError(f"Measured boot path not found: 
def generate_measurements(image_path: Path, home: str) -> Measurements:
    """Generate measurements for the TDX boot process of a built image.

    Builds and runs the measured-boot tool inside the Yocto build env,
    then reshapes its output with jq into the measurement document format.

    :raises FileNotFoundError: if the measured-boot checkout or image is missing
    :raises RuntimeError: if the measurement pipeline fails
    """

    paths = BuildPaths(home)
    # Check if measured_boot_path and image_path exist
    if not paths.measured_boot.exists():
        raise FileNotFoundError(f"Measured boot path not found: {paths.measured_boot}")
    if not image_path.exists():
        raise FileNotFoundError(f"Image path not found: {image_path}")

    jq_format = f'{{"measurement_id": "{image_path.name}", "attestation_type": "azure-tdx", "measurements": .measurements}}'
    # mkstemp (not the deprecated, race-prone tempfile.mktemp) creates the
    # file securely up front; the shell pipeline below writes to it by name.
    fd, tmp_name = tempfile.mkstemp(suffix=".json")
    os.close(fd)
    measurements_tmpfile = Path(tmp_name)
    try:
        # Command to generate measurements
        measure_cmd = f"""
        cd {paths.source_env} && . ./oe-init-build-env &&
        cd {paths.measured_boot} &&
        go build -o measured-boot &&
        ./measured-boot {image_path} ../output.json &&
        cd ~ &&
        jq '{jq_format}' {paths.measured_boot.parent}/output.json > {measurements_tmpfile}
        """

        # Run without check=True so we can surface stderr in the error message.
        result = subprocess.run(measure_cmd, shell=True, capture_output=True, text=True)
        if result.returncode != 0:
            raise RuntimeError(
                f"Measurement generation command failed: {result.stderr.strip()}"
            )

        with open(measurements_tmpfile) as f:
            return json.load(f)
    finally:
        # Remove the temp file on success AND failure (previously it leaked
        # on the error path).
        os.remove(measurements_tmpfile)


def load_metadata(home: str) -> Dict[str, Dict]:
    """Load deploy_metadata.json for the given home directory."""
    with open(BuildPaths(home).deploy_metadata) as f:
        return json.load(f)


def write_metadata(metadata: Dict[str, Dict], home: str):
    """Persist the deploy metadata (pretty-printed) back to disk."""
    with open(BuildPaths(home).deploy_metadata, "w+") as f:
        json.dump(metadata, f, indent=2)


def remove_vm_from_metadata(name: str, home: str):
    """Drop `name` from metadata["resources"]; no-op if absent."""
    metadata = load_metadata(home)
    resources = metadata.get("resources", {})
    if name not in resources:
        return
    resources.pop(name)
    metadata["resources"] = resources
    write_metadata(metadata, home)
def remove_artifact_from_metadata(name: str, home: str):
    """Drop `name` from metadata["artifacts"]; no-op if absent."""
    metadata = load_metadata(home)
    artifacts = metadata.get("artifacts", {})
    if name not in artifacts:
        return
    artifacts.pop(name)
    metadata["artifacts"] = artifacts
    write_metadata(metadata, home)


def load_artifact_measurements(artifact: str, home: str) -> Tuple[Path, Measurements]:
    """Resolve an artifact name to its on-disk image path and measurements.

    :raises ValueError: if the artifact is not in the deploy metadata
    :raises FileNotFoundError: if the metadata entry exists but the image
        file is missing from the artifacts directory
    """
    artifacts = load_metadata(home).get("artifacts", {})
    if artifact not in artifacts:
        raise ValueError(
            f"Could not find artifact {artifact} "
            f"in {BuildPaths(home).deploy_metadata}"
        )
    image_path = BuildPaths(home).artifacts / artifact
    if not image_path.exists():
        # Keep `artifact` bound to the NAME here: the previous code rebound
        # it to the metadata dict before formatting, garbling this message.
        raise FileNotFoundError(
            f"Artifact {artifact} is defined in the deploy metadata, "
            "but the corresponding file was not found on the machine"
        )
    return image_path, artifacts[artifact]["image"]
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the Yocto build-and-deploy tool."""
    parser = argparse.ArgumentParser(description="Yocto Build and Deploy Automation")

    parser.add_argument(
        "--build",
        action="store_true",
        help="Build a new image",
    )
    parser.add_argument("--deploy", action="store_true", help="Deploy an image")
    parser.add_argument("--delete-vm", type=str, help="VM name to delete")
    parser.add_argument("--delete-artifact", type=str, help="Artifact to delete")

    parser.add_argument(
        "--artifact",
        type=str,
        help=(
            "If not running with --build, "
            "use this to specify an artifact to deploy, "
            "e.g. 'cvm-image-azure-tdx.rootfs-20241203182636.wic.vhd'"
        ),
    )
    parser.add_argument(
        "--resource-group",
        type=str,
        help="For deploying: the name of the resource group to create",
    )
    parser.add_argument(
        "-v",
        "--logs",
        action="store_true",
        help="If flagged, print build and/or deploy logs as they run",
        default=False,
    )

    # Git args
    parser.add_argument(
        "--enclave-branch",
        default="main",
        help="Seismic Enclave git branch name. Defaults to 'main'. Only used if --enclave-commit is provided too",
    )
    parser.add_argument(
        "--enclave-commit",
        # typo fixed: "gommit" -> "commit"
        help="Seismic Enclave git commit hash. If not provided, does not change image",
    )

    parser.add_argument(
        "--sreth-branch",
        default="seismic",
        help="Seismic Reth git branch name. Defaults to 'seismic'. Only used if --sreth-commit is provided too",
    )
    parser.add_argument(
        "--sreth-commit",
        help="Seismic Reth git commit hash. If not provided, does not change image",
    )

    parser.add_argument(
        "--summit-branch",
        # Was default="seismic", contradicting both this help text and
        # GitConfigs.default() (which uses 'main' for summit); aligned to
        # the documented behavior.
        default="main",
        help="Summit git branch name. Defaults to 'main'. Only used if --summit-commit is provided too",
    )
    parser.add_argument(
        "--summit-commit",
        help="Summit git commit hash. If not provided, does not change image",
    )

    # Domain args
    parser.add_argument(
        "--domain-record",
        help="Domain record name (e.g. xxx.seismicdev.net). Required if deploying",
    )
    parser.add_argument(
        "--domain-name",
        default="seismicdev.net",
        help="Domain name (e.g. seismicdev.net)",
    )
    parser.add_argument(
        "--domain-resource-group",
        default="devnet2",
        help="Azure domain resource group name (e.g. devnet2)",
    )
    parser.add_argument(
        "--email",
        default="c@seismic.systems",
        help="Email for certbot (e.g. c@seismic.systems)",
    )
    parser.add_argument(
        "--code-path",
        type=str,
        default="",
        help="path of code relative to $HOME",
    )
    return parser.parse_args()
@dataclass
class BuildPaths:
    """Well-known filesystem locations under a given home directory.

    `home` may be passed as a str or Path; it is normalized to a Path.
    Declaring it as a real dataclass field also gives BuildPaths a
    meaningful generated __eq__/__repr__ — previously a handwritten
    __init__ bypassed the dataclass machinery, so no fields were declared
    and ANY two BuildPaths instances compared equal.
    """

    home: Path

    def __post_init__(self):
        # Normalize: callers pass plain strings.
        self.home = Path(self.home)

    @property
    def yocto_manifests(self) -> Path:
        return self.home / "yocto-manifests"

    @property
    def artifacts(self) -> Path:
        return self.yocto_manifests / "reproducible-build/artifacts"

    @property
    def meta_seismic(self) -> Path:
        return self.home / "meta-seismic"

    @property
    def measured_boot(self) -> Path:
        return self.home / "measured-boot"

    # Recipe paths below are relative to the meta-seismic checkout.
    @property
    def enclave_bb(self) -> str:
        return "recipes-nodes/enclave/enclave.bb"

    @property
    def sreth_bb(self) -> str:
        return "recipes-nodes/reth/reth.bb"

    @property
    def summit_bb(self) -> str:
        return "recipes-nodes/summit/summit.bb"

    @property
    def repo_root(self) -> Path:
        return self.home / "deploy"

    @property
    def deploy_script(self) -> Path:
        return self.repo_root / "deploy.sh"

    @property
    def deploy_metadata(self) -> Path:
        return self.repo_root / "deploy_metadata.json"

    @property
    def proxy_client(self) -> Path:
        return self.home / "cvm-reverse-proxy/build/proxy-client"

    @property
    def source_env(self) -> Path:
        return self.home / "yocto-manifests/build/srcs/poky"
class ProxyClient:
    """Drives the cvm-reverse-proxy client binary against a deployed VM to
    verify its TDX attestation end-to-end (start it, hit it with an HTTP
    request, watch its output for the validation message)."""

    def __init__(self, public_ip: str, measurements_file: Path, home: str):
        # Public IP of the deployed VM to attest against.
        self.public_ip = public_ip
        # JSON file of expected measurements passed to --server-measurements.
        self.measurements_file = measurements_file
        # Pre-built proxy-client binary located under $HOME.
        self.executable_path = BuildPaths(home).proxy_client
        self.process: Optional[subprocess.Popen] = None

    def start(self) -> bool:
        """Start the proxy client, make an HTTP request, and verify attestation."""

        proxy_cmd = [
            self.executable_path,
            "--target-addr",
            f"https://{self.public_ip}:7936",
            "--server-attestation-type",
            "azure-tdx",
            "--server-measurements",
            str(self.measurements_file),
        ]

        # Start the proxy client process
        try:
            self.process = subprocess.Popen(
                proxy_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            logger.info(f"Starting proxy client to https://{self.public_ip}:7936")

            # Wait for the process to confirm startup or timeout after 5 seconds.
            # NOTE: a timeout here is the GOOD case — it means the proxy is
            # still running; wait() returning means it exited immediately.
            try:
                self.process.wait(timeout=5)
                if self.process.returncode is not None:
                    if self.process.stderr is None:
                        raise RuntimeError(f"{proxy_cmd} failed with no stderr")
                    stderr_output = self.process.stderr.read().decode()
                    raise RuntimeError(
                        f"Proxy process terminated immediately: {stderr_output}"
                    )
            except subprocess.TimeoutExpired:
                logger.info("Proxy client has started successfully")

            # Start a thread to perform the HTTP request after a short delay
            request_thread = threading.Thread(target=self.perform_http_request)
            request_thread.start()

            # Monitor proxy output for successful attestation message
            return self._monitor_attestation(request_thread)

        except FileNotFoundError:
            logger.error("Proxy client binary not found at specified path.")
            raise FileNotFoundError("Proxy client binary not found.")
        except RuntimeError as e:
            logger.error(f"Failed to start proxy: {e}")
            raise
        finally:
            # Always tear the proxy down, success or failure.
            self.stop()

    def _monitor_attestation(self, request_thread: threading.Thread) -> bool:
        """Monitor proxy output for successful attestation validation message."""
        start_time = time.time()
        while True:
            # Read output line by line from stdout
            if self.process and self.process.stdout:
                output = self.process.stdout.readline().decode().strip()
                if output:
                    logger.info(f"Proxy stdout: {output}")

                # Look for attestation validation message
                if "Successfully validated attestation document" in output:
                    logger.info("Proxy server validated attestation successfully")
                    request_thread.join()  # Ensure HTTP request thread has completed
                    return True

            # Timeout after 30 seconds if no validation message is found
            if time.time() - start_time > 30:
                logger.error("Timeout: Attestation validation message not found")
                self.stop()
                raise TimeoutError(
                    "Timeout: Attestation validation message not found."
                )

            time.sleep(1)  # Slight delay to avoid CPU overuse
if self.process and self.process.stdout: + output = self.process.stdout.readline().decode().strip() + if output: + logger.info(f"Proxy stdout: {output}") + + # Look for attestation validation message + if "Successfully validated attestation document" in output: + logger.info("Proxy server validated attestation successfully") + request_thread.join() # Ensure HTTP request thread has completed + return True + + # Timeout after 30 seconds if no validation message is found + if time.time() - start_time > 30: + logger.error("Timeout: Attestation validation message not found") + self.stop() + raise TimeoutError( + "Timeout: Attestation validation message not found." + ) + + time.sleep(1) # Slight delay to avoid CPU overuse + + def perform_http_request(self): + """Simulate an external HTTP request to the proxy server""" + # Wait a moment before sending the request to ensure proxy client is running + time.sleep(5) + try: + response = requests.get( + "http://localhost:8080/genesis/data", headers={"Host": "localhost"} + ) + response.raise_for_status() + logger.info( + f"HTTP request succeeded with output:\n" + f"{json.dumps(response.json())}" + ) + except requests.RequestException as e: + logger.error(f"HTTP request failed: {e}") + raise ConnectionError(f"HTTP request to proxy server failed: {e}") + + def stop(self): + """Stop the proxy client""" + if self.process: + self.process.terminate() + logger.info("Proxy client stopped") + self.process = None diff --git a/yocto/summit_client.py b/yocto/summit_client.py new file mode 100644 index 00000000..c0c68717 --- /dev/null +++ b/yocto/summit_client.py @@ -0,0 +1,72 @@ +import logging +import tomllib +import requests +from typing import Dict, Any +from pathlib import Path + +logger = logging.getLogger(__name__) + +GenesisText = str +Json = Any + + +class SummitClient: + def __init__(self, url: str): + self.url = url + + def _get(self, path: str) -> str: + response = requests.get(f"{self.url}/{path}") + response.raise_for_status() 
+ return response.text + + def _post_text(self, path: str, body: str) -> str: + response = requests.post( + f"{self.url}/{path}", + data=body, + headers={"Content-Type": "text/plain"}, + ) + response.raise_for_status() + return response.text + + def health(self) -> str: + return self._get("health") + + def get_public_key(self) -> str: + return self._get("get_public_key") + + def send_share(self, share: str) -> str: + return self._post_text("send_share", share) + + def send_genesis(self, genesis: GenesisText) -> str: + self.validate_genesis_text(genesis) + return self._post_text("send_genesis", genesis) + + def post_genesis_filepath(self, path: Path): + text = self.load_genesis_file(path) + self.send_genesis(text) + + @staticmethod + def load_genesis_file(path: Path) -> GenesisText: + with open(path) as f: + return f.read() + + @staticmethod + def validate_genesis_text(genesis: GenesisText) -> Dict[str, Any]: + try: + return tomllib.loads(genesis) + except tomllib.TOMLDecodeError as e: + logger.error( + "\n".join( + [ + f"Failed to parse genesis as toml: {e}", + "File contents:", + genesis, + ] + ) + ) + raise e + + @classmethod + def load_genesis_toml(cls, path: Path) -> Dict[str, Any]: + text = cls.load_genesis_file(path) + return cls.validate_genesis_text(text) diff --git a/yocto/validators.py b/yocto/validators.py new file mode 100644 index 00000000..290a87d4 --- /dev/null +++ b/yocto/validators.py @@ -0,0 +1,120 @@ +import argparse +import json +import tempfile +from pathlib import Path +from typing import List, Dict, Tuple + +from yocto.summit_client import SummitClient +from yocto.metadata import load_metadata +from yocto.azure_common import AzureCLI, CONSENSUS_PORT + + +def _genesis_vm_name(node: int) -> str: + return f"yocto-genesis-{node}" + + +def _genesis_client(node: int) -> SummitClient: + return SummitClient(f"https://summit-genesis-{node}.seismictest.net/summit") + + +def _parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + 
parser.add_argument("-n", "--nodes", type=int, default=4) + parser.add_argument( + "--code-path", + default="", + type=str, + help="path to code relative to $HOME", + ) + return parser.parse_args() + + +def _get_pubkeys( + home: Path, + node_clients: List[Tuple[int, SummitClient]], +) -> Tuple[List[Dict[str, str]], Dict[int, str]]: + resources = load_metadata(str(home))["resources"] + + validators = [] + node_to_pubkey = {} + for node, client in node_clients: + meta = resources[_genesis_vm_name(node)] + ip_address = meta["public_ip"] + try: + pubkey = client.get_public_key() + validators.append( + { + "public_key": pubkey, + "ip_address": f"{ip_address}:{CONSENSUS_PORT}", + } + ) + node_to_pubkey[node] = pubkey + except Exception as e: + print(f"Error: {e}") + raise e + return validators, node_to_pubkey + + +def _post_shares( + tmpdir: str, + node_clients: List[Tuple[int, SummitClient]], + node_to_pubkey: Dict[int, str], +): + genesis_file = f"{tmpdir}/genesis.toml" + genesis_toml = SummitClient.load_genesis_toml(genesis_file) + validators = genesis_toml["validators"] + + for node, client in node_clients: + share_index = next( + i + for i, v in enumerate(validators) + if v["public_key"] == node_to_pubkey[node] + ) + ip = validators[share_index]["ip_address"] + share_file = f"{tmpdir}/node{share_index}/share.pem" + with open(share_file, "r") as f: + share = f.read() + print( + f"Posting share {share} to node {node} @ {ip} / {node_to_pubkey[node]}" + ) + client.send_share(share) + + +def main(): + args = _parse_args() + node_clients = [(n, _genesis_client(n)) for n in range(1, args.nodes + 1)] + + tmpdir = tempfile.mkdtemp() + home = Path.home() if not args.code_path else Path.home() / args.code_path + + summit_path = str(home / "summit") + summit_genesis_target = f"{summit_path}/target/debug/genesis" + summit_example_genesis = f"{summit_path}/example_genesis.toml" + + validators, node_to_pubkey = _get_pubkeys(home, node_clients) + + tmp_validators = 
def main():
    """Generate a genesis config from live node pubkeys and distribute shares."""
    args = _parse_args()
    node_clients = [(n, _genesis_client(n)) for n in range(1, args.nodes + 1)]

    tmpdir = tempfile.mkdtemp()
    home = Path.home() if not args.code_path else Path.home() / args.code_path

    summit_path = str(home / "summit")
    summit_genesis_target = f"{summit_path}/target/debug/genesis"
    summit_example_genesis = f"{summit_path}/example_genesis.toml"

    validators, node_to_pubkey = _get_pubkeys(home, node_clients)

    tmp_validators = f"{tmpdir}/validators.json"
    with open(tmp_validators, "w+") as f:
        json.dump(validators, f, indent=2)
    # Log AFTER the dump so the message is only printed once the file exists
    # (previously printed before writing anything).
    print(f"Wrote validators to {tmp_validators}")

    AzureCLI.run_command(
        cmd=[
            summit_genesis_target,
            "-o",
            f"{tmpdir}",
            "-i",
            summit_example_genesis,
            "-v",
            tmp_validators,
        ],
        show_logs=True,
    )

    _post_shares(tmpdir, node_clients, node_to_pubkey)
    # Pass a Path, matching post_genesis_filepath's annotated signature.
    genesis_path = Path(tmpdir) / "genesis.toml"
    for _, client in node_clients:
        client.post_genesis_filepath(genesis_path)
"https://files.pythonhosted.org/packages/3a/18/799285282c8236a79f25d590f0222dbd6850e14b060dfaa3e720241fd772/black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f", size = 1581259, upload-time = "2025-09-19T00:32:49.685Z" }, - { url = "https://files.pythonhosted.org/packages/f1/ce/883ec4b6303acdeca93ee06b7622f1fa383c6b3765294824165d49b1a86b/black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831", size = 1655583, upload-time = "2025-09-19T00:30:44.505Z" }, - { url = "https://files.pythonhosted.org/packages/21/17/5c253aa80a0639ccc427a5c7144534b661505ae2b5a10b77ebe13fa25334/black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357", size = 1343428, upload-time = "2025-09-19T00:32:13.839Z" }, - { url = "https://files.pythonhosted.org/packages/1b/46/863c90dcd3f9d41b109b7f19032ae0db021f0b2a81482ba0a1e28c84de86/black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae", size = 203363, upload-time = "2025-09-19T00:27:35.724Z" }, -] - [[package]] name = "certifi" version = "2025.10.5" @@ -99,7 +78,6 @@ name = "deploy" version = "0.1.0" source = { virtual = "." 
} dependencies = [ - { name = "black" }, { name = "certifi" }, { name = "charset-normalizer" }, { name = "click" }, @@ -110,12 +88,12 @@ dependencies = [ { name = "platformdirs" }, { name = "pyright" }, { name = "requests" }, + { name = "ruff" }, { name = "urllib3" }, ] [package.metadata] requires-dist = [ - { name = "black", specifier = ">=25.9.0" }, { name = "certifi", specifier = ">=2025.10.5" }, { name = "charset-normalizer", specifier = ">=3.4.4" }, { name = "click", specifier = ">=8.3.0" }, @@ -126,6 +104,7 @@ requires-dist = [ { name = "platformdirs", specifier = ">=4.5.0" }, { name = "pyright", specifier = ">=1.1.406" }, { name = "requests", specifier = ">=2.32.5" }, + { name = "ruff", specifier = ">=0.8.0" }, { name = "urllib3", specifier = ">=2.5.0" }, ] @@ -196,15 +175,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" }, ] -[[package]] -name = "pytokens" -version = "0.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d4/c2/dbadcdddb412a267585459142bfd7cc241e6276db69339353ae6e241ab2b/pytokens-0.2.0.tar.gz", hash = "sha256:532d6421364e5869ea57a9523bf385f02586d4662acbcc0342afd69511b4dd43", size = 15368, upload-time = "2025-10-15T08:02:42.738Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/89/5a/c269ea6b348b6f2c32686635df89f32dbe05df1088dd4579302a6f8f99af/pytokens-0.2.0-py3-none-any.whl", hash = "sha256:74d4b318c67f4295c13782ddd9abcb7e297ec5630ad060eb90abf7ebbefe59f8", size = 12038, upload-time = "2025-10-15T08:02:41.694Z" }, -] - [[package]] name = "requests" version = "2.32.5" @@ -220,6 +190,32 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] +[[package]] +name = "ruff" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/b9/9bd84453ed6dd04688de9b3f3a4146a1698e8faae2ceeccce4e14c67ae17/ruff-0.14.0.tar.gz", hash = "sha256:62ec8969b7510f77945df916de15da55311fade8d6050995ff7f680afe582c57", size = 5452071, upload-time = "2025-10-07T18:21:55.763Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/4e/79d463a5f80654e93fa653ebfb98e0becc3f0e7cf6219c9ddedf1e197072/ruff-0.14.0-py3-none-linux_armv6l.whl", hash = "sha256:58e15bffa7054299becf4bab8a1187062c6f8cafbe9f6e39e0d5aface455d6b3", size = 12494532, upload-time = "2025-10-07T18:21:00.373Z" }, + { url = "https://files.pythonhosted.org/packages/ee/40/e2392f445ed8e02aa6105d49db4bfff01957379064c30f4811c3bf38aece/ruff-0.14.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:838d1b065f4df676b7c9957992f2304e41ead7a50a568185efd404297d5701e8", size = 13160768, upload-time = "2025-10-07T18:21:04.73Z" }, + { url = "https://files.pythonhosted.org/packages/75/da/2a656ea7c6b9bd14c7209918268dd40e1e6cea65f4bb9880eaaa43b055cd/ruff-0.14.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:703799d059ba50f745605b04638fa7e9682cc3da084b2092feee63500ff3d9b8", size = 12363376, upload-time = "2025-10-07T18:21:07.833Z" }, + { url = "https://files.pythonhosted.org/packages/42/e2/1ffef5a1875add82416ff388fcb7ea8b22a53be67a638487937aea81af27/ruff-0.14.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ba9a8925e90f861502f7d974cc60e18ca29c72bb0ee8bfeabb6ade35a3abde7", size = 12608055, upload-time = "2025-10-07T18:21:10.72Z" }, + { url = 
"https://files.pythonhosted.org/packages/4a/32/986725199d7cee510d9f1dfdf95bf1efc5fa9dd714d0d85c1fb1f6be3bc3/ruff-0.14.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e41f785498bd200ffc276eb9e1570c019c1d907b07cfb081092c8ad51975bbe7", size = 12318544, upload-time = "2025-10-07T18:21:13.741Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ed/4969cefd53315164c94eaf4da7cfba1f267dc275b0abdd593d11c90829a3/ruff-0.14.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30a58c087aef4584c193aebf2700f0fbcfc1e77b89c7385e3139956fa90434e2", size = 14001280, upload-time = "2025-10-07T18:21:16.411Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ad/96c1fc9f8854c37681c9613d825925c7f24ca1acfc62a4eb3896b50bacd2/ruff-0.14.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f8d07350bc7af0a5ce8812b7d5c1a7293cf02476752f23fdfc500d24b79b783c", size = 15027286, upload-time = "2025-10-07T18:21:19.577Z" }, + { url = "https://files.pythonhosted.org/packages/b3/00/1426978f97df4fe331074baf69615f579dc4e7c37bb4c6f57c2aad80c87f/ruff-0.14.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eec3bbbf3a7d5482b5c1f42d5fc972774d71d107d447919fca620b0be3e3b75e", size = 14451506, upload-time = "2025-10-07T18:21:22.779Z" }, + { url = "https://files.pythonhosted.org/packages/58/d5/9c1cea6e493c0cf0647674cca26b579ea9d2a213b74b5c195fbeb9678e15/ruff-0.14.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16b68e183a0e28e5c176d51004aaa40559e8f90065a10a559176713fcf435206", size = 13437384, upload-time = "2025-10-07T18:21:25.758Z" }, + { url = "https://files.pythonhosted.org/packages/29/b4/4cd6a4331e999fc05d9d77729c95503f99eae3ba1160469f2b64866964e3/ruff-0.14.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb732d17db2e945cfcbbc52af0143eda1da36ca8ae25083dd4f66f1542fdf82e", size = 13447976, upload-time = "2025-10-07T18:21:28.83Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/c0/ac42f546d07e4f49f62332576cb845d45c67cf5610d1851254e341d563b6/ruff-0.14.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:c958f66ab884b7873e72df38dcabee03d556a8f2ee1b8538ee1c2bbd619883dd", size = 13682850, upload-time = "2025-10-07T18:21:31.842Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c4/4b0c9bcadd45b4c29fe1af9c5d1dc0ca87b4021665dfbe1c4688d407aa20/ruff-0.14.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7eb0499a2e01f6e0c285afc5bac43ab380cbfc17cd43a2e1dd10ec97d6f2c42d", size = 12449825, upload-time = "2025-10-07T18:21:35.074Z" }, + { url = "https://files.pythonhosted.org/packages/4b/a8/e2e76288e6c16540fa820d148d83e55f15e994d852485f221b9524514730/ruff-0.14.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4c63b2d99fafa05efca0ab198fd48fa6030d57e4423df3f18e03aa62518c565f", size = 12272599, upload-time = "2025-10-07T18:21:38.08Z" }, + { url = "https://files.pythonhosted.org/packages/18/14/e2815d8eff847391af632b22422b8207704222ff575dec8d044f9ab779b2/ruff-0.14.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:668fce701b7a222f3f5327f86909db2bbe99c30877c8001ff934c5413812ac02", size = 13193828, upload-time = "2025-10-07T18:21:41.216Z" }, + { url = "https://files.pythonhosted.org/packages/44/c6/61ccc2987cf0aecc588ff8f3212dea64840770e60d78f5606cd7dc34de32/ruff-0.14.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a86bf575e05cb68dcb34e4c7dfe1064d44d3f0c04bbc0491949092192b515296", size = 13628617, upload-time = "2025-10-07T18:21:44.04Z" }, + { url = "https://files.pythonhosted.org/packages/73/e6/03b882225a1b0627e75339b420883dc3c90707a8917d2284abef7a58d317/ruff-0.14.0-py3-none-win32.whl", hash = "sha256:7450a243d7125d1c032cb4b93d9625dea46c8c42b4f06c6b709baac168e10543", size = 12367872, upload-time = "2025-10-07T18:21:46.67Z" }, + { url = "https://files.pythonhosted.org/packages/41/77/56cf9cf01ea0bfcc662de72540812e5ba8e9563f33ef3d37ab2174892c47/ruff-0.14.0-py3-none-win_amd64.whl", hash = 
"sha256:ea95da28cd874c4d9c922b39381cbd69cb7e7b49c21b8152b014bd4f52acddc2", size = 13464628, upload-time = "2025-10-07T18:21:50.318Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2a/65880dfd0e13f7f13a775998f34703674a4554906167dce02daf7865b954/ruff-0.14.0-py3-none-win_arm64.whl", hash = "sha256:f42c9495f5c13ff841b1da4cb3c2a42075409592825dada7c5885c2c844ac730", size = 12565142, upload-time = "2025-10-07T18:21:53.577Z" }, +] + [[package]] name = "typing-extensions" version = "4.15.0" diff --git a/yocto/artifact.py b/yocto/artifact.py index 5de5b989..b4d6377d 100644 --- a/yocto/artifact.py +++ b/yocto/artifact.py @@ -65,7 +65,7 @@ def delete_artifact(artifact: str, home: str): if deployed_to: confirm = input( f'\nThe artifact "{artifact}" is deployed to {len(deployed_to)} resource group(s):' - f'\n - {"\n - ".join(deployed_to)}\n\n' + f"\n - {'\n - '.join(deployed_to)}\n\n" "Are you really sure you want to delete it? " "This will not delete the resources (y/n): " ) diff --git a/yocto/build.py b/yocto/build.py index 92fb1712..fd5bc781 100644 --- a/yocto/build.py +++ b/yocto/build.py @@ -67,7 +67,7 @@ def build_image(home: str, capture_output: bool = True) -> Path: < datetime.datetime.now().timestamp() - _MAX_ARTIFACT_AGE * _ONE_HOUR_IN_SECONDS ): raise RuntimeError( - "Most recently built image more than " f"{_MAX_ARTIFACT_AGE} hours old" + f"Most recently built image more than {_MAX_ARTIFACT_AGE} hours old" ) logger.info(f"Image built successfully at {image_path_str}") diff --git a/yocto/genesis_deploy.py b/yocto/genesis_deploy.py index b6a82b32..b123bb6d 100755 --- a/yocto/genesis_deploy.py +++ b/yocto/genesis_deploy.py @@ -4,6 +4,7 @@ Genesis mode deployment with persistent IP addresses and node-specific allocation. 
""" + import json import logging from typing import Tuple diff --git a/yocto/metadata.py b/yocto/metadata.py index f629a633..42355055 100644 --- a/yocto/metadata.py +++ b/yocto/metadata.py @@ -40,8 +40,7 @@ def load_artifact_measurements(artifact: str, home: str) -> Tuple[Path, Measurem artifacts = load_metadata(home).get("artifacts", {}) if artifact not in artifacts: raise ValueError( - f"Could not find artifact {artifact} " - f"in {BuildPaths(home).deploy_metadata}" + f"Could not find artifact {artifact} in {BuildPaths(home).deploy_metadata}" ) image_path = BuildPaths(home).artifacts / artifact artifact = artifacts[artifact] diff --git a/yocto/paths.py b/yocto/paths.py index 335d2672..daf634b2 100644 --- a/yocto/paths.py +++ b/yocto/paths.py @@ -4,7 +4,6 @@ @dataclass class BuildPaths: - def __init__(self, home: str): self.home = Path(home) diff --git a/yocto/proxy.py b/yocto/proxy.py index 1f639599..d002e835 100644 --- a/yocto/proxy.py +++ b/yocto/proxy.py @@ -105,8 +105,7 @@ def perform_http_request(self): ) response.raise_for_status() logger.info( - f"HTTP request succeeded with output:\n" - f"{json.dumps(response.json())}" + f"HTTP request succeeded with output:\n{json.dumps(response.json())}" ) except requests.RequestException as e: logger.error(f"HTTP request failed: {e}") From 9128e8db93fdd5287d7a35e1284a78c9e43a1c75 Mon Sep 17 00:00:00 2001 From: cdrappi Date: Wed, 15 Oct 2025 12:30:28 -0400 Subject: [PATCH 3/8] run ruff as linter --- yocto/artifact.py | 8 ++++---- yocto/azure_common.py | 12 ++++++------ yocto/build.py | 12 +++++------- yocto/cfg.py | 20 ++++++++++---------- yocto/cli.py | 2 +- yocto/conf/conf.py | 24 ++++++++++++------------ yocto/deploy.py | 9 ++++----- yocto/genesis_deploy.py | 11 +++++------ yocto/git.py | 9 ++++----- yocto/measurements.py | 4 ++-- yocto/metadata.py | 9 ++++----- yocto/proxy.py | 3 +-- yocto/summit_client.py | 9 +++++---- yocto/validators.py | 15 +++++++-------- 14 files changed, 70 insertions(+), 77 deletions(-) diff 
--git a/yocto/artifact.py b/yocto/artifact.py index b4d6377d..b25149db 100644 --- a/yocto/artifact.py +++ b/yocto/artifact.py @@ -1,9 +1,9 @@ +import datetime import glob import logging import os import re -from typing import Optional -import datetime + from yocto.metadata import load_metadata, remove_artifact_from_metadata from yocto.paths import BuildPaths @@ -44,7 +44,7 @@ def _artifact_from_timestamp(timestamp: str) -> str: return f"cvm-image-azure-tdx.rootfs-{timestamp}.wic.vhd" -def parse_artifact(artifact_arg: Optional[str]) -> Optional[str]: +def parse_artifact(artifact_arg: str | None) -> str | None: if not artifact_arg: return None @@ -81,7 +81,7 @@ def delete_artifact(artifact: str, home: str): files_deleted += 1 if not files_deleted: - logger.warning(f"Found no files associated with this artifact") + logger.warning("Found no files associated with this artifact") return logger.info(f"Deleted {files_deleted} files associated with artifact {artifact}") diff --git a/yocto/azure_common.py b/yocto/azure_common.py index c7138a0b..37272899 100644 --- a/yocto/azure_common.py +++ b/yocto/azure_common.py @@ -6,12 +6,12 @@ import argparse import json +import logging import os import subprocess import tempfile -from typing import List, Optional from pathlib import Path -import logging + from yocto.conf.conf import DeployConfigs, VmConfigs logger = logging.getLogger(__name__) @@ -38,7 +38,7 @@ class AzureCLI: @staticmethod def run_command( - cmd: List[str], + cmd: list[str], show_logs: bool = False, ) -> subprocess.CompletedProcess: """Execute an Azure CLI command.""" @@ -128,7 +128,7 @@ def get_existing_public_ip( cls, name: str, resource_group: str, - ) -> Optional[str]: + ) -> str | None: """Get existing IP address if it exists.""" try: cmd = [ @@ -152,7 +152,7 @@ def get_existing_public_ip( return None @classmethod - def get_existing_dns_ips(cls, config: DeployConfigs) -> List[str]: + def get_existing_dns_ips(cls, config: DeployConfigs) -> list[str]: """Get 
existing DNS A record IPs.""" cmd = [ "az", @@ -451,7 +451,7 @@ def create_user_data_file(cls, config: DeployConfigs) -> str: f.write(f'DOMAIN="{config.domain.name}"\n') logger.info(f"Created temporary user-data file: {temp_file}") - with open(temp_file, "r") as f: + with open(temp_file) as f: logger.info(f.read()) return temp_file diff --git a/yocto/build.py b/yocto/build.py index fd5bc781..08928934 100644 --- a/yocto/build.py +++ b/yocto/build.py @@ -1,17 +1,15 @@ +import datetime import logging import subprocess -import datetime -from typing import Optional, Tuple - from dataclasses import dataclass from pathlib import Path +from yocto.artifact import artifact_timestamp +from yocto.conf.conf import BuildConfigs, Configs from yocto.git import GitConfigs, update_git_bb from yocto.measurements import Measurements, generate_measurements -from yocto.metadata import load_metadata, write_metadata, load_artifact_measurements -from yocto.conf.conf import BuildConfigs, Configs +from yocto.metadata import load_artifact_measurements, load_metadata, write_metadata from yocto.paths import BuildPaths -from yocto.artifact import artifact_timestamp logger = logging.getLogger(__name__) @@ -126,7 +124,7 @@ def build(self) -> BuildOutput: ) -def maybe_build(configs: Configs) -> Optional[Tuple[Path, Measurements]]: +def maybe_build(configs: Configs) -> tuple[Path, Measurements] | None: """ if --build was passed in, build a fresh image if --deploy was passed in, return the path to the image to deploy diff --git a/yocto/cfg.py b/yocto/cfg.py index 291539c0..b550d1e0 100644 --- a/yocto/cfg.py +++ b/yocto/cfg.py @@ -1,17 +1,17 @@ import argparse +import logging from dataclasses import dataclass -from typing import Dict, Optional, Any from pathlib import Path -import logging +from typing import Any -from yocto.conf.conf import get_host_ip from yocto.artifact import parse_artifact from yocto.conf.conf import ( Configs, - Mode, DeployConfigs, - VmConfigs, DomainConfig, + Mode, + VmConfigs, 
+ get_host_ip, ) logger = logging.getLogger(__name__) @@ -42,13 +42,13 @@ class DeploymentConfig: record_name: str source_ip: str ip_only: bool - artifact: Optional[str] + artifact: str | None home: str domain_resource_group: str = DEFAULT_RESOURCE_GROUP domain_name: str = DEFAULT_DOMAIN_NAME certbot_email: str = DEFAULT_CERTBOT_EMAIL - resource_group: Optional[str] = None - nsg_name: Optional[str] = None + resource_group: str | None = None + nsg_name: str | None = None show_logs: bool = True def __post_init__(self): @@ -85,10 +85,10 @@ def to_configs(self) -> Configs: ) @classmethod - def parse_base_kwargs(cls, args: argparse.Namespace) -> Dict[str, Any]: + def parse_base_kwargs(cls, args: argparse.Namespace) -> dict[str, Any]: source_ip = args.source_ip if source_ip is None: - logger.warning(f"No --source-ip provided, so fetching IP from ipify.org...") + logger.warning("No --source-ip provided, so fetching IP from ipify.org...") source_ip = get_host_ip() logger.info(f"Fetched public IP: {source_ip}") return { diff --git a/yocto/cli.py b/yocto/cli.py index 6d97251a..d46156fa 100644 --- a/yocto/cli.py +++ b/yocto/cli.py @@ -5,9 +5,9 @@ from yocto.artifact import delete_artifact from yocto.build import maybe_build -from yocto.deploy import Deployer, delete_vm from yocto.conf.conf import Configs from yocto.conf.logs import setup_logging +from yocto.deploy import Deployer, delete_vm logger = logging.getLogger(__name__) diff --git a/yocto/conf/conf.py b/yocto/conf/conf.py index fb78cf0a..fe91a0c9 100644 --- a/yocto/conf/conf.py +++ b/yocto/conf/conf.py @@ -1,12 +1,12 @@ import argparse import subprocess from dataclasses import dataclass -from typing import Dict, Optional, Any +from pathlib import Path +from typing import Any from yocto.artifact import parse_artifact from yocto.git import GitConfigs from yocto.parser import parse_args -from pathlib import Path def get_host_ip() -> str: @@ -32,7 +32,7 @@ def default() -> "BuildConfigs": git=GitConfigs.default(), ) - def 
to_dict(self) -> Dict[str, Any]: + def to_dict(self) -> dict[str, Any]: return { "git": self.git.to_dict(), } @@ -98,7 +98,7 @@ def from_args(args: argparse.Namespace) -> "DomainConfig": name=args.domain_name, ) - def to_dict(self) -> Dict[str, str]: + def to_dict(self) -> dict[str, str]: return { "url": f"https://{self.record}.{self.name}", "record": self.record, @@ -111,7 +111,7 @@ def to_dict(self) -> Dict[str, str]: class DeployConfigs: vm: VmConfigs domain: DomainConfig - artifact: Optional[str] + artifact: str | None email: str source_ip: str show_logs: bool = False @@ -127,7 +127,7 @@ def from_args(args: argparse.Namespace) -> "DeployConfigs": show_logs=args.logs, ) - def to_dict(self) -> Dict[str, Any]: + def to_dict(self) -> dict[str, Any]: kwargs = {} if self.artifact: kwargs["artifact"] = self.artifact @@ -145,8 +145,8 @@ def to_dict(self) -> Dict[str, Any]: class Mode: build: bool deploy: bool - delete_vm: Optional[str] - delete_artifact: Optional[str] + delete_vm: str | None + delete_artifact: str | None @staticmethod def from_args(args: argparse.Namespace) -> "Mode": @@ -175,7 +175,7 @@ def deploy_only() -> "Mode": delete_artifact=None, ) - def to_dict(self) -> Dict[str, str | bool]: + def to_dict(self) -> dict[str, str | bool]: delete_kwargs = {} if self.delete_vm: delete_kwargs["vm"] = self.delete_vm @@ -188,8 +188,8 @@ def to_dict(self) -> Dict[str, str | bool]: @dataclass class Configs: mode: Mode - build: Optional[BuildConfigs] - deploy: Optional[DeployConfigs] + build: BuildConfigs | None + deploy: DeployConfigs | None show_logs: bool home: str @@ -213,7 +213,7 @@ def parse() -> "Configs": home=Path.home() if not args.code_path else Path.home / args.code_path, ) - def to_dict(self) -> Dict[str, Any]: + def to_dict(self) -> dict[str, Any]: kwargs = {} if self.build: kwargs["build"] = self.build.to_dict() diff --git a/yocto/deploy.py b/yocto/deploy.py index 59341a4a..9c640437 100644 --- a/yocto/deploy.py +++ b/yocto/deploy.py @@ -6,16 +6,15 @@ 
import time from dataclasses import dataclass from pathlib import Path -from typing import Optional +from yocto.azure_common import AzureCLI, confirm +from yocto.conf.conf import DeployConfigs from yocto.measurements import Measurements, write_measurements_tmpfile from yocto.metadata import ( load_metadata, remove_vm_from_metadata, write_metadata, ) -from yocto.azure_common import AzureCLI, confirm -from yocto.conf.conf import DeployConfigs from yocto.paths import BuildPaths from yocto.proxy import ProxyClient @@ -68,7 +67,7 @@ def delete_vm(vm_name: str, home: str) -> bool: return False logger.info(f"Successfully deleted {vm_name}:\n{stdout}") - logger.info(f"Deleting associated disk...") + logger.info("Deleting associated disk...") AzureCLI.delete_disk(resource_group, vm_name, meta["artifact"]) remove_vm_from_metadata(vm_name, home) return True @@ -139,7 +138,7 @@ def __init__( self.show_logs = show_logs self.measurements_file = write_measurements_tmpfile(measurements) - self.proxy: Optional[ProxyClient] = None + self.proxy: ProxyClient | None = None def deploy(self) -> DeployOutput: public_ip = deploy_image( diff --git a/yocto/genesis_deploy.py b/yocto/genesis_deploy.py index b123bb6d..d0096613 100755 --- a/yocto/genesis_deploy.py +++ b/yocto/genesis_deploy.py @@ -7,18 +7,17 @@ import json import logging -from typing import Tuple -from yocto.conf.logs import setup_logging from yocto.azure_common import ( + DEFAULT_RESOURCE_GROUP, AzureCLI, - create_base_parser, confirm, - DEFAULT_RESOURCE_GROUP, + create_base_parser, ) +from yocto.build import maybe_build from yocto.cfg import DeploymentConfig +from yocto.conf.logs import setup_logging from yocto.deploy import Deployer -from yocto.build import maybe_build logger = logging.getLogger(__name__) @@ -32,7 +31,7 @@ def __init__(self): def ensure_genesis_resource_group(self, region: str) -> None: AzureCLI.ensure_created_resource_group(self.genesis_rg, region) - def get_or_create_node_ip(self, node_number: int, region: 
str) -> Tuple[str, str]: + def get_or_create_node_ip(self, node_number: int, region: str) -> tuple[str, str]: """Get or create persistent IP for a specific node number.""" self.ensure_genesis_resource_group(region) diff --git a/yocto/git.py b/yocto/git.py index de9e2858..eb24ac0d 100644 --- a/yocto/git.py +++ b/yocto/git.py @@ -3,7 +3,6 @@ from argparse import Namespace from dataclasses import dataclass from pathlib import Path -from typing import Dict, Optional from yocto.paths import BuildPaths @@ -12,7 +11,7 @@ @dataclass class GitConfig: - commit: Optional[str] + commit: str | None branch: str @staticmethod @@ -22,7 +21,7 @@ def from_args(args: Namespace, repo: str) -> "GitConfig": commit=values[f"{repo}_commit"], branch=values[f"{repo}_branch"] ) - def to_dict(self) -> Dict[str, str]: + def to_dict(self) -> dict[str, str]: if not self.commit: raise ValueError("Cannot call to_dict() on GitConfig without commit") return { @@ -65,7 +64,7 @@ def default() -> "GitConfigs": ) -def run_command(cmd: str, cwd: Optional[Path] = None) -> subprocess.CompletedProcess: +def run_command(cmd: str, cwd: Path | None = None) -> subprocess.CompletedProcess: result = subprocess.run(cmd, shell=True, capture_output=True, text=True, cwd=cwd) if result.returncode != 0: @@ -101,7 +100,7 @@ def update_git_bb( bb_pathname: str, git_config: GitConfig, home: str, - commit_message: Optional[str] = None, + commit_message: str | None = None, ) -> GitConfig: """ Update the git commit and branch for a given Yocto bb file diff --git a/yocto/measurements.py b/yocto/measurements.py index 6e45bd2a..24de0da3 100644 --- a/yocto/measurements.py +++ b/yocto/measurements.py @@ -4,13 +4,13 @@ import subprocess import tempfile from pathlib import Path -from typing import Any, Dict +from typing import Any from yocto.paths import BuildPaths logger = logging.getLogger(__name__) -Measurements = Dict[str, Any] +Measurements = dict[str, Any] def write_measurements_tmpfile(measurements: Measurements) -> Path: 
diff --git a/yocto/metadata.py b/yocto/metadata.py index 42355055..2b9b6893 100644 --- a/yocto/metadata.py +++ b/yocto/metadata.py @@ -1,17 +1,16 @@ import json -from typing import Dict, Tuple from pathlib import Path -from yocto.paths import BuildPaths from yocto.measurements import Measurements +from yocto.paths import BuildPaths -def load_metadata(home: str) -> Dict[str, Dict]: +def load_metadata(home: str) -> dict[str, dict]: with open(BuildPaths(home).deploy_metadata) as f: return json.load(f) -def write_metadata(metadata: Dict[str, Dict], home: str): +def write_metadata(metadata: dict[str, dict], home: str): with open(BuildPaths(home).deploy_metadata, "w+") as f: json.dump(metadata, f, indent=2) @@ -36,7 +35,7 @@ def remove_artifact_from_metadata(name: str, home: str): write_metadata(metadata, home) -def load_artifact_measurements(artifact: str, home: str) -> Tuple[Path, Measurements]: +def load_artifact_measurements(artifact: str, home: str) -> tuple[Path, Measurements]: artifacts = load_metadata(home).get("artifacts", {}) if artifact not in artifacts: raise ValueError( diff --git a/yocto/proxy.py b/yocto/proxy.py index d002e835..177d024c 100644 --- a/yocto/proxy.py +++ b/yocto/proxy.py @@ -4,7 +4,6 @@ import threading import time from pathlib import Path -from typing import Optional import requests @@ -18,7 +17,7 @@ def __init__(self, public_ip: str, measurements_file: Path, home: str): self.public_ip = public_ip self.measurements_file = measurements_file self.executable_path = BuildPaths(home).proxy_client - self.process: Optional[subprocess.Popen] = None + self.process: subprocess.Popen | None = None def start(self) -> bool: """Start the proxy client, make an HTTP request, and verify attestation.""" diff --git a/yocto/summit_client.py b/yocto/summit_client.py index c0c68717..5fcedd75 100644 --- a/yocto/summit_client.py +++ b/yocto/summit_client.py @@ -1,8 +1,9 @@ import logging import tomllib -import requests -from typing import Dict, Any from pathlib 
import Path +from typing import Any + +import requests logger = logging.getLogger(__name__) @@ -51,7 +52,7 @@ def load_genesis_file(path: Path) -> GenesisText: return f.read() @staticmethod - def validate_genesis_text(genesis: GenesisText) -> Dict[str, Any]: + def validate_genesis_text(genesis: GenesisText) -> dict[str, Any]: try: return tomllib.loads(genesis) except tomllib.TOMLDecodeError as e: @@ -67,6 +68,6 @@ def validate_genesis_text(genesis: GenesisText) -> Dict[str, Any]: raise e @classmethod - def load_genesis_toml(cls, path: Path) -> Dict[str, Any]: + def load_genesis_toml(cls, path: Path) -> dict[str, Any]: text = cls.load_genesis_file(path) return cls.validate_genesis_text(text) diff --git a/yocto/validators.py b/yocto/validators.py index 290a87d4..5893d3eb 100644 --- a/yocto/validators.py +++ b/yocto/validators.py @@ -2,11 +2,10 @@ import json import tempfile from pathlib import Path -from typing import List, Dict, Tuple -from yocto.summit_client import SummitClient +from yocto.azure_common import CONSENSUS_PORT, AzureCLI from yocto.metadata import load_metadata -from yocto.azure_common import AzureCLI, CONSENSUS_PORT +from yocto.summit_client import SummitClient def _genesis_vm_name(node: int) -> str: @@ -31,8 +30,8 @@ def _parse_args() -> argparse.Namespace: def _get_pubkeys( home: Path, - node_clients: List[Tuple[int, SummitClient]], -) -> Tuple[List[Dict[str, str]], Dict[int, str]]: + node_clients: list[tuple[int, SummitClient]], +) -> tuple[list[dict[str, str]], dict[int, str]]: resources = load_metadata(str(home))["resources"] validators = [] @@ -57,8 +56,8 @@ def _get_pubkeys( def _post_shares( tmpdir: str, - node_clients: List[Tuple[int, SummitClient]], - node_to_pubkey: Dict[int, str], + node_clients: list[tuple[int, SummitClient]], + node_to_pubkey: dict[int, str], ): genesis_file = f"{tmpdir}/genesis.toml" genesis_toml = SummitClient.load_genesis_toml(genesis_file) @@ -72,7 +71,7 @@ def _post_shares( ) ip = 
validators[share_index]["ip_address"] share_file = f"{tmpdir}/node{share_index}/share.pem" - with open(share_file, "r") as f: + with open(share_file) as f: share = f.read() print( f"Posting share {share} to node {node} @ {ip} / {node_to_pubkey[node]}" From 5fcde12dd1ce4574ac43ef77b799204a9193f414 Mon Sep 17 00:00:00 2001 From: cdrappi Date: Wed, 15 Oct 2025 12:35:06 -0400 Subject: [PATCH 4/8] fix ruff checls --- yocto/artifact.py | 3 ++- yocto/azure_common.py | 7 ++++--- yocto/deploy.py | 5 ++++- yocto/measurements.py | 8 ++++++-- yocto/parser.py | 30 ++++++++++++++++++++++++------ yocto/proxy.py | 6 +++--- 6 files changed, 43 insertions(+), 16 deletions(-) diff --git a/yocto/artifact.py b/yocto/artifact.py index b25149db..1376ef4c 100644 --- a/yocto/artifact.py +++ b/yocto/artifact.py @@ -64,7 +64,8 @@ def delete_artifact(artifact: str, home: str): ] if deployed_to: confirm = input( - f'\nThe artifact "{artifact}" is deployed to {len(deployed_to)} resource group(s):' + f'\nThe artifact "{artifact}" is deployed to ' + f'{len(deployed_to)} resource group(s):' f"\n - {'\n - '.join(deployed_to)}\n\n" "Are you really sure you want to delete it? " "This will not delete the resources (y/n): " diff --git a/yocto/azure_common.py b/yocto/azure_common.py index 37272899..577f43eb 100644 --- a/yocto/azure_common.py +++ b/yocto/azure_common.py @@ -62,10 +62,10 @@ def check_dependencies(): for tool in tools: try: subprocess.run([tool, "--version"], capture_output=True, check=True) - except (subprocess.CalledProcessError, FileNotFoundError): + except (subprocess.CalledProcessError, FileNotFoundError) as e: raise RuntimeError( f"Error: '{tool}' command not found. Please install {tool}." 
- ) + ) from e @classmethod def resource_group_exists(cls, name: str) -> bool: @@ -179,7 +179,8 @@ def get_existing_dns_ips(cls, config: DeployConfigs) -> list[str]: def remove_dns_ip(cls, config: DeployConfigs, ip_address: str) -> None: """Remove IP from DNS A record.""" logger.info( - f"Removing {ip_address} from {config.domain.record}.{config.domain.name} record set" + f"Removing {ip_address} from " + f"{config.domain.record}.{config.domain.name} record set" ) cmd = [ "az", diff --git a/yocto/deploy.py b/yocto/deploy.py index 9c640437..3b86852b 100644 --- a/yocto/deploy.py +++ b/yocto/deploy.py @@ -38,7 +38,10 @@ def get_ip_address(vm_name: str) -> str: def delete_vm(vm_name: str, home: str) -> bool: - """Delete existing resource group if provided. Returns True if successful, False otherwise.""" + """ + Delete existing resource group if provided. + Returns True if successful, False otherwise. + """ metadata = load_metadata(home) resources = metadata["resources"] meta = resources[vm_name] diff --git a/yocto/measurements.py b/yocto/measurements.py index 24de0da3..1e8cf528 100644 --- a/yocto/measurements.py +++ b/yocto/measurements.py @@ -30,13 +30,17 @@ def generate_measurements(image_path: Path, home: str) -> Measurements: if not image_path.exists(): raise FileNotFoundError(f"Image path not found: {image_path}") - jq_format = f'{{"measurement_id": "{image_path.name}", "attestation_type": "azure-tdx", "measurements": .measurements}}' + jq_format = f'''{{ + "measurement_id": "{image_path.name}", + "attestation_type": "azure-tdx", + "measurements": .measurements + }}''' measurements_tmpfile = Path(tempfile.mktemp()) # Command to generate measurements measure_cmd = f""" cd {paths.source_env} && . 
./oe-init-build-env && cd {paths.measured_boot} && - go build -o measured-boot && + go build -o measured-boot && ./measured-boot {image_path} ../output.json && cd ~ && jq '{jq_format}' {paths.measured_boot.parent}/output.json > {measurements_tmpfile} diff --git a/yocto/parser.py b/yocto/parser.py index 9a05d117..b179e988 100644 --- a/yocto/parser.py +++ b/yocto/parser.py @@ -39,17 +39,26 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--enclave-branch", default="main", - help="Seismic Enclave git branch name. Defaults to 'main'. Only used if --enclave-commit is provided too", + help=( + "Seismic Enclave git branch name. Defaults to 'main'. " + "Only used if --enclave-commit is provided too" + ), ) parser.add_argument( "--enclave-commit", - help="Seismic Enclave git gommit hash. If not provided, does not change image", + help=( + "Seismic Enclave git gommit hash. " + "If not provided, does not change image" + ), ) parser.add_argument( "--sreth-branch", default="seismic", - help="Seismic Reth git branch name. Defaults to 'seismic'. Only used if --sreth-commit is provided too", + help=( + "Seismic Reth git branch name. Defaults to 'seismic'. " + "Only used if --sreth-commit is provided too" + ), ) parser.add_argument( "--sreth-commit", @@ -59,17 +68,26 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--summit-branch", default="seismic", - help="Summit git branch name. Defaults to 'main'. Only used if --summit-commit is provided too", + help=( + "Summit git branch name. Defaults to 'main'. " + "Only used if --summit-commit is provided too" + ), ) parser.add_argument( "--summit-commit", - help="Summit git commit hash. If not provided, does not change image", + help=( + "Summit git commit hash. " + "If not provided, does not change image" + ), ) # Domain args parser.add_argument( "--domain-record", - help="Domain record name (e.g. xxx.seismicdev.net). Required if deploying", + help=( + "Domain record name (e.g. xxx.seismicdev.net). 
" + "Required if deploying" + ), ) parser.add_argument( "--domain-name", diff --git a/yocto/proxy.py b/yocto/proxy.py index 177d024c..db7332e5 100644 --- a/yocto/proxy.py +++ b/yocto/proxy.py @@ -59,9 +59,9 @@ def start(self) -> bool: # Monitor proxy output for successful attestation message return self._monitor_attestation(request_thread) - except FileNotFoundError: + except FileNotFoundError as e: logger.error("Proxy client binary not found at specified path.") - raise FileNotFoundError("Proxy client binary not found.") + raise FileNotFoundError("Proxy client binary not found.") from e except RuntimeError as e: logger.error(f"Failed to start proxy: {e}") raise @@ -108,7 +108,7 @@ def perform_http_request(self): ) except requests.RequestException as e: logger.error(f"HTTP request failed: {e}") - raise ConnectionError(f"HTTP request to proxy server failed: {e}") + raise ConnectionError(f"HTTP request to proxy server failed: {e}") from e def stop(self): """Stop the proxy client""" From 84b50494bf0b3b6f18525f14ed8bd54e1f46c4aa Mon Sep 17 00:00:00 2001 From: cdrappi Date: Wed, 15 Oct 2025 12:56:52 -0400 Subject: [PATCH 5/8] add .ruff_cache to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 03c48941..e1eaec8a 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ wheels/ *.egg-info/ .installed.cfg *.egg +.ruff_cache # Virtual Environment .env From c01253bb72d0ed99ea3044543927dc1517607ffa Mon Sep 17 00:00:00 2001 From: cdrappi Date: Wed, 15 Oct 2025 17:38:53 +0000 Subject: [PATCH 6/8] delete artifacts --- deploy_metadata.json | 117 +------------------------------------------ 1 file changed, 2 insertions(+), 115 deletions(-) diff --git a/deploy_metadata.json b/deploy_metadata.json index 22d0313a..febae4ee 100644 --- a/deploy_metadata.json +++ b/deploy_metadata.json @@ -1,117 +1,4 @@ { - "resources": { - "yocto-genesis-3": { - "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", - "public_ip": 
"4.152.153.129", - "domain": { - "url": "https://summit-genesis-3.seismictest.net", - "record": "summit-genesis-3", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "yocto-testnet", - "name": "yocto-genesis-3", - "nsgName": "yocto-genesis-3", - "location": "eastus2", - "size": "Standard_EC4es_v5" - } - }, - "yocto-genesis-1": { - "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", - "public_ip": "135.222.187.137", - "domain": { - "url": "https://summit-genesis-1.seismictest.net", - "record": "summit-genesis-1", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "yocto-testnet", - "name": "yocto-genesis-1", - "nsgName": "yocto-genesis-1", - "location": "eastus2", - "size": "Standard_EC4es_v5" - } - }, - "yocto-genesis-4": { - "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", - "public_ip": "172.175.19.226", - "domain": { - "url": "https://summit-genesis-4.seismictest.net", - "record": "summit-genesis-4", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "yocto-testnet", - "name": "yocto-genesis-4", - "nsgName": "yocto-genesis-4", - "location": "eastus2", - "size": "Standard_EC4es_v5" - } - }, - "yocto-genesis-2": { - "artifact": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", - "public_ip": "4.152.152.179", - "domain": { - "url": "https://summit-genesis-2.seismictest.net", - "record": "summit-genesis-2", - "name": "seismictest.net", - "resource_group": "yocto-testnet" - }, - "vm": { - "resourceGroup": "yocto-testnet", - "name": "yocto-genesis-2", - "nsgName": "yocto-genesis-2", - "location": "eastus2", - "size": "Standard_EC4es_v5" - } - } - }, - "artifacts": { - "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd": { - "repos": { - "enclave": { - "branch": "seismic", - "commit": "b8b7ab3049b42c7de38b2641d28a30c711663d68" - }, - "sreth": { - "branch": "seismic", - "commit": 
"2072d1c68c3b8b32caa0491a9c982dedbfd1dc46" - }, - "summit": { - "branch": "main", - "commit": "a89f2f7e36f46b024fea43e4b3328c22eab921be" - } - }, - "image": { - "measurement_id": "cvm-image-azure-tdx.rootfs-20250909192317.wic.vhd", - "attestation_type": "azure-tdx", - "measurements": { - "11": { - "expected": "3d200a691a41f2efe24b9e550a30ef3bc717520ddef843e2f6183c014fcf3077" - }, - "12": { - "expected": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "13": { - "expected": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "15": { - "expected": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "4": { - "expected": "48cbc8cdda567abb0c3d5bdd573eaf817c24800bc81ba3c65c26425ec44bbdd1" - }, - "8": { - "expected": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "9": { - "expected": "dba2475a62abe5c70c9d7ec80e02061723f69c250431071f33f0a2d6124bb660" - } - } - } - } - } + "resources": {}, + "artifacts": {} } \ No newline at end of file From 1a0d0b0328ac1f7bf03b5a18ddf53cd5b474e43f Mon Sep 17 00:00:00 2001 From: cdrappi Date: Wed, 15 Oct 2025 14:11:17 -0400 Subject: [PATCH 7/8] commits --- yocto/parser.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yocto/parser.py b/yocto/parser.py index b179e988..8278fd16 100644 --- a/yocto/parser.py +++ b/yocto/parser.py @@ -38,7 +38,7 @@ def parse_args() -> argparse.Namespace: # Git args parser.add_argument( "--enclave-branch", - default="main", + default="seismic", help=( "Seismic Enclave git branch name. Defaults to 'main'. " "Only used if --enclave-commit is provided too" @@ -67,7 +67,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--summit-branch", - default="seismic", + default="main", help=( "Summit git branch name. Defaults to 'main'. 
" "Only used if --summit-commit is provided too" From 2d2ae19d33b43aeb8ecd55db667b288697eea313 Mon Sep 17 00:00:00 2001 From: cdrappi Date: Thu, 16 Oct 2025 09:21:35 -0400 Subject: [PATCH 8/8] node modules --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index e1eaec8a..b4ab23d7 100644 --- a/.gitignore +++ b/.gitignore @@ -37,4 +37,5 @@ ENV/ *.swp *.swo - +# NPM +node_modules/