diff --git a/.gitignore b/.gitignore index d8507411..80ed4e72 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,8 @@ venv/* .env.local .gitsigners + +# Devtools +devtools/bin +devtools/programs +devtools/built_programs \ No newline at end of file diff --git a/devtools/0_install_env.sh b/devtools/0_install_env.sh new file mode 100755 index 00000000..0e669f03 --- /dev/null +++ b/devtools/0_install_env.sh @@ -0,0 +1,69 @@ +#!/bin/sh +set -euf + +arch=$(uname -m) +image_bucket_url="https://s3.amazonaws.com/spec.ccfc.min/img/quickstart_guide/$arch" +bin_dir=bin +firecracker=firecracker +kernel=vmlinux.bin +mkdir -p ${bin_dir} + +# Check architecture +if [ ${arch} = "x86_64" ]; then + kernel_url="${image_bucket_url}/kernels/vmlinux.bin" +elif [ ${arch} = "aarch64" ]; then + kernel_url="${image_bucket_url}/kernels/vmlinux.bin" +else + echo "Cannot run firecracker on $arch architecture." + exit 1 +fi + +# Dependencies +apt update -y +apt upgrade -y +apt install squashfs-tools debootstrap -y + +# Get the Firecracker binary +if [ ! -f ${bin_dir}/${firecracker} ]; then + echo "Installing Firecracker..." + release_url="https://github.com/firecracker-microvm/firecracker/releases" + latest=$(basename $(curl -fsSLI -o /dev/null -w %{url_effective} ${release_url}/latest)) + arch=$(uname -m) + if curl -L ${release_url}/download/${latest}/firecracker-${latest}-${arch}.tgz | tar -xz; then + bin_folder="release-${latest}-$(uname -m)" + mv "${bin_folder}/firecracker-${latest}-$(uname -m)" ${bin_dir}/${firecracker} + rm -r "${bin_folder}" + echo "Firecracker installation complete." + else + echo "Failed to install Firecracker. Please check your network connection and try again." + exit 1 + fi +else + echo "Firecracker is already installed." +fi + +# Get the vmlinux.bin +if [ ! -f ${bin_dir}/${kernel} ]; then + echo "Installing kernel..." + if curl -L -o ${bin_dir}/${kernel} $kernel_url; then + echo "${kernel} installation complete." + else + echo "Failed to install ${kernel}. 
Please check your network connection and try again." + exit 1 + fi +else + echo "${kernel} is already installed." +fi + +# Docker installation +if docker --version >/dev/null 2>&1; then + echo "Docker is already installed." +else + apt install docker.io -y +fi +# Docker without sudo +groupadd -f docker +usermod -aG docker $(whoami) +docker context use default + +echo "Environment setup complete." diff --git a/devtools/1a_build_custom_runtime.sh b/devtools/1a_build_custom_runtime.sh new file mode 100755 index 00000000..a722d882 --- /dev/null +++ b/devtools/1a_build_custom_runtime.sh @@ -0,0 +1,53 @@ +#!/bin/sh +set -euf + +runtime=$1 + +# Utils +setup=utils/setup.sh +inittab=utils/inittab +init=utils/init0.sh +init_py=utils/init1.py +loading_html=utils/loading.html + +# Temp Files +dest=/mnt/rootfs + +# Target +rootfs_dir=runtimes +target=$rootfs_dir/$runtime.squashfs +mkdir -p $rootfs_dir + +# Cleanup previous run +rm -rf $target +rm -rf $dest +mkdir $dest + +echo "Downloading Debian Bullseye minimal" +debootstrap --variant=minbase bullseye $dest http://deb.debian.org/debian/ + +echo "Run setup script" +chmod +x $setup +cp $setup $dest/setup.sh +chroot $dest /bin/sh -c "./setup.sh && rm -f setup.sh" + +# Reduce size +rm -fr $dest/root/.cache +rm -fr $dest/var/cache +mkdir -p $dest/var/cache/apt/archives/partial +rm -fr $dest/usr/share/doc +rm -fr $dest/usr/share/man +rm -fr $dest/var/lib/apt/lists + +echo "Install init scripts" +cp $inittab $dest/etc/inittab +chmod +x $init +chmod +x $init_py +cp $init $dest/sbin/init +cp $init_py $dest/root/init1.py +cp $loading_html $dest/root/loading.html + +echo "Creating squashfs image" +mksquashfs $dest $target -noappend -quiet +rm -rf $dest +echo "Done" diff --git a/devtools/1b_download_runtime.sh b/devtools/1b_download_runtime.sh new file mode 100755 index 00000000..09ab8aad --- /dev/null +++ b/devtools/1b_download_runtime.sh @@ -0,0 +1,33 @@ +#!/bin/sh +set -euf + 
+default_runtime=63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696 +get_message_url=https://api1.aleph.im/api/v0/messages/ +ipfs_gateway=https://ipfs.aleph.im/ipfs/ +rootfs_dir=runtimes +mkdir -p $rootfs_dir + +# Ask for runtime item hash +read -p "Enter a runtime item hash (leave blank for default): " runtime +if [ -z "${runtime}" ]; then + runtime=$default_runtime + echo "Using default runtime: ${runtime}" +else + echo "Using provided runtime: ${runtime}" +fi + +# Get item hash message +message=$(curl -s -X GET "$get_message_url$runtime") +ipfs_cid=$(echo "$message" | jq -r '.message.content.item_hash') +#echo "IPFS CID: $ipfs_cid" + +# Get runtime file +runtime_file="${runtime}.squashfs" +download_url="$ipfs_gateway$ipfs_cid" +if [ ! -f "${rootfs_dir}/${runtime_file}" ]; then + echo "Downloading ${runtime_file}..." + curl -L $download_url -o ${rootfs_dir}/${runtime_file} + echo "Download complete." +else + echo "${runtime_file} is already available." +fi diff --git a/devtools/1b_edit_runtime.sh b/devtools/1b_edit_runtime.sh new file mode 100755 index 00000000..fe8fd135 --- /dev/null +++ b/devtools/1b_edit_runtime.sh @@ -0,0 +1,16 @@ +#!/bin/sh +set -euf + +apt install squashfs-tools -y + +rootfs_dir=runtimes +runtime=63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696.squashfs +mount_dir=vm +mkdir -p ${mount_dir}/ro +mkdir -p ${mount_dir}/new + +mount -o loop,ro -t squashfs ${rootfs_dir}/${runtime} ${mount_dir}/ro +cp -ar ${mount_dir}/ro ${mount_dir}/new +umount ${mount_dir}/ro + +#mksquashfs ${mount_dir}/new new.squashfs -noappend -quiet diff --git a/devtools/2_pack_program.sh b/devtools/2_pack_program.sh new file mode 100755 index 00000000..f3ff953d --- /dev/null +++ b/devtools/2_pack_program.sh @@ -0,0 +1,9 @@ +#!/bin/sh +set -euf + +in=programs/$1 +out_dir=built_programs/$1 +mkdir -p $out_dir +out=$out_dir/program.squashfs +rm -f $out +mksquashfs $in $out -noappend -quiet diff --git a/devtools/3_pack_dependencies.sh 
b/devtools/3_pack_dependencies.sh new file mode 100755 index 00000000..047a164c --- /dev/null +++ b/devtools/3_pack_dependencies.sh @@ -0,0 +1,44 @@ +#!/bin/sh +set -euf + +if docker --version >/dev/null 2>&1; then + echo "Docker is already installed." +else + apt install docker.io -y +fi + +if [ $# -ne 1 ]; then + echo "Error: Please provide the application directory as an argument." + echo "Usage: $0 " + exit 1 +fi + +app_dir=programs/$1 +deps_file="${app_dir}/requirements.txt" +output_file="built_${app_dir}/packages.squashfs" + +echo "Packing dependencies for ${app_dir} into ${output_file} using ${deps_file}" + +if [ ! -d "$app_dir" ]; then + echo "Error: The application directory '$app_dir' does not exist." + exit 1 +fi +if [ ! -f "$deps_file" ]; then + echo "Error: The requirements file '$deps_file' does not exist." + exit 1 +fi + +rm -f "$output_file" +touch "$output_file" + +docker run --rm -t --platform linux/amd64 \ + -v "$(pwd)/${deps_file}:/opt/requirements.txt" \ + -v "$(pwd)/${output_file}:/opt/packages.squashfs" \ + debian:bookworm /bin/bash \ + -c "apt-get update -y; +apt-get install python3-pip squashfs-tools -y; +python3 -V; +pip install -t /opt/packages -r /opt/requirements.txt; +mksquashfs /opt/packages /opt/packages.squashfs -noappend -quiet" + +echo "Dependencies packed into ${output_file} successfully." 
diff --git a/devtools/5_run.sh b/devtools/5_run.sh new file mode 100755 index 00000000..bd488ede --- /dev/null +++ b/devtools/5_run.sh @@ -0,0 +1,8 @@ +#!/bin/sh +set -euf + +rm -f /tmp/v.sock +rm -f /tmp/firecracker.socket +./bin/firecracker --api-sock /tmp/firecracker.socket --config-file ./utils/vm_config_base.json +rm -f /tmp/v.sock +rm -f /tmp/firecracker.socket diff --git a/devtools/config.py b/devtools/config.py new file mode 100644 index 00000000..714640a5 --- /dev/null +++ b/devtools/config.py @@ -0,0 +1,64 @@ +from pathlib import Path + +from pydantic import BaseModel, PositiveInt + +ROOTFS_PATH = "./runtimes/test.squashfs" +KERNEL_PATH = "./bin/vmlinux.bin" +VSOCK_PATH = "/tmp/v.sock" + + +class BootSource(BaseModel): + kernel_image_path: Path = Path(KERNEL_PATH) + boot_args: str = "console=ttyS0 reboot=k panic=1 pci=off ro noapic nomodules random.trust_cpu=on" + + @staticmethod + def args(enable_console: bool = True, writable: bool = False): + default = "reboot=k panic=1 pci=off noapic nomodules random.trust_cpu=on" + if writable: + default = default + " rw" + else: + default = default + " ro" + if enable_console: + return "console=ttyS0 " + default + else: + return default + + +class Drive(BaseModel): + drive_id: str = "rootfs" + path_on_host: Path = Path(ROOTFS_PATH) + is_root_device: bool = True + is_read_only: bool = True + + +class MachineConfig(BaseModel): + vcpu_count: PositiveInt = 1 + mem_size_mib: PositiveInt = 512 + smt: bool = False + + +class Vsock(BaseModel): + vsock_id: str = "1" + guest_cid: PositiveInt = 3 + uds_path: str = VSOCK_PATH + + +class NetworkInterface(BaseModel): + iface_id: str = "eth0" + guest_mac: str = "AA:FC:00:00:00:01" + host_dev_name: str + + +class FirecrackerConfig(BaseModel): + boot_source: BootSource + drives: list[Drive] + machine_config: MachineConfig + vsock: Vsock | None + network_interfaces: list[NetworkInterface] | None + + class Config: + allow_population_by_field_name = True + + @staticmethod + def 
alias_generator(x: str): + return x.replace("_", "-") diff --git a/devtools/draft.py b/devtools/draft.py new file mode 100755 index 00000000..11831eef --- /dev/null +++ b/devtools/draft.py @@ -0,0 +1,24 @@ +import asyncio +import logging +from pathlib import Path + +from run import MicroVM + +logging.basicConfig( + level=logging.DEBUG, + format="%(relativeCreated)4f |V %(levelname)s | %(message)s", +) +logger = logging.getLogger(__name__) + +vm = MicroVM( + vm_id=0, + firecracker_bin_path=Path("./bin/firecracker"), + jailer_base_directory=Path("/tmp/jail"), + use_jailer=False, + jailer_bin_path=Path("./bin/release-v1.10.1-x86_64/jailer-v1.10.1-x86_64"), + init_timeout=5.0, + enable_log=True, +) + +config_path = Path("utils/vm_config_base.json") +asyncio.run(vm.start(config_path)) diff --git a/devtools/reqs.txt b/devtools/reqs.txt new file mode 100644 index 00000000..dac59beb --- /dev/null +++ b/devtools/reqs.txt @@ -0,0 +1,2 @@ +msgpack==1.0.7 +systemd-python==235 \ No newline at end of file diff --git a/devtools/run.py b/devtools/run.py new file mode 100755 index 00000000..0a3f5f35 --- /dev/null +++ b/devtools/run.py @@ -0,0 +1,516 @@ +#!/usr/bin/python3 +import asyncio +import errno +import json +import logging +import os.path +import shutil +import string +import traceback +from asyncio import Task +from asyncio.base_events import Server +from dataclasses import dataclass +from os import getuid +from pathlib import Path +from pwd import getpwnam +from tempfile import NamedTemporaryFile +from typing import Any, BinaryIO + +import msgpack +from config import Drive, FirecrackerConfig +from systemd import journal + +logger = logging.getLogger(__name__) +journal_stdout_name = "vm-stdout" +journal_stderr_name = "vm-stderr" + +VSOCK_PATH = "/tmp/v.sock" +DEVICE_BASE_DIRECTORY = "/dev/mapper" + + +class MicroVMFailedInitError(Exception): + pass + + +# extend the json.JSONEncoder class to support bytes +class JSONBytesEncoder(json.JSONEncoder): + # overload method 
default + def default(self, obj): + # Match all the types you want to handle in your converter + if isinstance(obj, bytes): + return obj.decode() + return json.JSONEncoder.default(self, obj) + + +def system(command): + logger.debug(f"shell {command}") + ret = os.system(command) + if ret != 0: + logger.warning(f"Failed shell `{command}`: return code {ret}") + # print trace so we know who called this + traceback.print_stack() + return ret + + +async def setfacl(): + """Give current user permission to access /dev/kvm via acl""" + if os.access("/dev/kvm", os.R_OK | os.W_OK): + return + + user = getuid() + cmd = f"sudo setfacl -m u:{user}:rw /dev/kvm" + proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) + stdout, stderr = await proc.communicate() + + if proc.returncode == 0: + return + logger.warning(f"[{cmd!r} exited with {[proc.returncode]}]") + if stdout: + logger.warning(f"[stdout]\n{stdout.decode()}") + if stderr: + logger.warning(f"[stderr]\n{stderr.decode()}") + + +@dataclass +class RuntimeConfiguration: + version: str + + def supports_ipv6(self) -> bool: + return self.version != "1.0.0" + + +class MicroVM: + vm_id: int + use_jailer: bool + firecracker_bin_path: Path + jailer_bin_path: Path | None + proc: asyncio.subprocess.Process | None = None + stdout_task: Task | None = None + stderr_task: Task | None = None + config_file_path: Path | None = None + drives: list[Drive] + init_timeout: float + runtime_config: RuntimeConfiguration | None + mounted_rootfs: Path | None = None + _unix_socket: Server | None = None + enable_log: bool + journal_stdout: BinaryIO | int | None = None + journal_stderr: BinaryIO | int | None = None + + def __repr__(self): + return f"" + + def __str__(self): + return f"vm-{self.vm_id}" + + @property + def namespace_path(self) -> str: + firecracker_bin_name = os.path.basename(self.firecracker_bin_path) + return str(self.jailer_base_directory / firecracker_bin_name / 
str(self.vm_id)) + + @property + def jailer_path(self) -> str: + return os.path.join(self.namespace_path, "root") + + @property + def socket_path(self) -> str: + if self.use_jailer: + return f"{self.jailer_path}/run/firecracker.socket" + else: + return f"/tmp/firecracker-{self.vm_id}.socket" + + @property + def vsock_path(self) -> str: + if self.use_jailer: + return f"{self.jailer_path}{VSOCK_PATH}" + else: + return f"{VSOCK_PATH}" + + def __init__( + self, + vm_id: int, + firecracker_bin_path: Path, + jailer_base_directory: Path, + use_jailer: bool = True, + jailer_bin_path: Path | None = None, + init_timeout: float = 5.0, + enable_log: bool = True, + ): + self.vm_id = vm_id + self.use_jailer = use_jailer + self.jailer_base_directory = jailer_base_directory + self.firecracker_bin_path = firecracker_bin_path + self.jailer_bin_path = jailer_bin_path + self.drives = [] + self.init_timeout = init_timeout + self.runtime_config = None + self.enable_log = enable_log + + def to_dict(self) -> dict: + return { + "jailer_path": self.jailer_path, + "socket_path": self.socket_path, + "vsock_path": self.vsock_path, + **self.__dict__, + } + + def prepare_jailer(self) -> None: + if not self.use_jailer: + return + system(f"rm -fr {self.jailer_path}") + + # system(f"rm -fr {self.jailer_path}/run/") + # system(f"rm -fr {self.jailer_path}/dev/") + # system(f"rm -fr {self.jailer_path}/opt/") + # + # if os.path.exists(path=self.vsock_path): + # os.remove(path=self.vsock_path) + # + system(f"mkdir -p {self.jailer_path}/tmp/") + system(f"chown jailman:jailman {self.jailer_path}/tmp/") + # + system(f"mkdir -p {self.jailer_path}/opt") + system(f"mkdir -p {self.jailer_path}/dev/mapper") + + # system(f"cp disks/rootfs.ext4 {self.jailer_path}/opt") + # system(f"cp hello-vmlinux.bin {self.jailer_path}/opt") + + def prepare_start(self): + if not self.use_jailer: + return False + + system(f"rm -fr {self.jailer_path}/dev/net/") + system(f"rm -fr {self.jailer_path}/dev/kvm") + system(f"rm -fr 
{self.jailer_path}/dev/urandom") + system(f"rm -fr {self.jailer_path}/dev/userfaultfd") + system(f"rm -fr {self.jailer_path}/run/") + + if os.path.exists(path=self.vsock_path): + os.remove(path=self.vsock_path) + + async def save_configuration_file(self, config: FirecrackerConfig) -> Path: + with ( + NamedTemporaryFile(delete=False) + if not self.use_jailer + else open(f"{self.jailer_path}/tmp/config.json", "wb") + ) as config_file: + config_file.write(config.json(by_alias=True, exclude_none=True, indent=4).encode()) + config_file.flush() + config_file_path = Path(config_file.name) + config_file_path.chmod(0o644) + return config_file_path + + async def start(self, config_path: Path) -> asyncio.subprocess.Process: + if self.use_jailer: + return await self.start_jailed_firecracker(config_path) + else: + return await self.start_firecracker(config_path) + + async def start_firecracker(self, config_path: Path) -> asyncio.subprocess.Process: + if os.path.exists(VSOCK_PATH): + os.remove(VSOCK_PATH) + if os.path.exists(self.socket_path): + os.remove(self.socket_path) + + options = ( + str(self.firecracker_bin_path), + "--api-sock", + str(self.socket_path), + "--config-file", + str(config_path), + ) + if self.enable_log: + self.journal_stdout = journal.stream(journal_stdout_name) + self.journal_stderr = journal.stream(journal_stderr_name) + else: + self.journal_stdout = asyncio.subprocess.DEVNULL + self.journal_stderr = asyncio.subprocess.DEVNULL + + logger.debug(" ".join(options)) + + self.proc = await asyncio.create_subprocess_exec( + *options, + stdin=asyncio.subprocess.PIPE, + stdout=self.journal_stdout, + stderr=self.journal_stderr, + ) + return self.proc + + async def start_jailed_firecracker(self, config_path: Path) -> asyncio.subprocess.Process: + if not self.jailer_bin_path: + msg = "Jailer binary path is missing" + raise ValueError(msg) + uid = str(getpwnam("jailman").pw_uid) + gid = str(getpwnam("jailman").pw_gid) + + self.config_file_path = config_path + if 
self.enable_log: + self.journal_stdout = journal.stream(journal_stdout_name) + self.journal_stderr = journal.stream(journal_stderr_name) + else: + self.journal_stdout = asyncio.subprocess.DEVNULL + self.journal_stderr = asyncio.subprocess.DEVNULL + + options = ( + str(self.jailer_bin_path), + "--id", + str(self.vm_id), + "--exec-file", + str(self.firecracker_bin_path), + "--uid", + uid, + "--gid", + gid, + "--chroot-base-dir", + str(self.jailer_base_directory), + "--", + "--config-file", + "/tmp/" + str(self.config_file_path.name), + ) + + logger.debug(" ".join(options)) + + self.proc = await asyncio.create_subprocess_exec( + *options, + stdin=asyncio.subprocess.PIPE, + stdout=self.journal_stdout, + stderr=self.journal_stderr, + ) + return self.proc + + def enable_kernel(self, kernel_image_path: Path) -> Path: + """Make a kernel available to the VM. + + Creates a symlink to the kernel file if jailer is in use. + """ + if self.use_jailer: + kernel_filename = kernel_image_path.name + jailer_kernel_image_path = f"/opt/{kernel_filename}" + + try: + Path(f"{self.jailer_path}{jailer_kernel_image_path}").hardlink_to(kernel_image_path) + except FileExistsError: + logger.debug(f"File {jailer_kernel_image_path} already exists") + + return Path(jailer_kernel_image_path) + else: + return kernel_image_path + + def enable_rootfs(self, path_on_host: Path) -> Path: + if path_on_host.is_file(): + return self.enable_file_rootfs(path_on_host) + elif path_on_host.is_block_device(): + return self.enable_device_mapper_rootfs(path_on_host) + else: + msg = f"Not a file or a block device: {path_on_host}" + raise ValueError(msg) + + def enable_file_rootfs(self, path_on_host: Path) -> Path: + """Make a rootfs available to the VM. + + If jailer is in use, try to create a hardlink + If it is not possible to create a link because the dir are in separate device made a copy. 
+ """ + if self.use_jailer: + rootfs_filename = Path(path_on_host).name + jailer_path_on_host = f"/opt/{rootfs_filename}" + try: + os.link(path_on_host, f"{self.jailer_path}/{jailer_path_on_host}") + except FileExistsError: + logger.debug(f"File {jailer_path_on_host} already exists") + except OSError as err: + if err.errno == errno.EXDEV: + # Invalid cross-device link: cannot make hard link between partition. + # In this case, copy the file instead: + shutil.copyfile(path_on_host, f"{self.jailer_path}/{jailer_path_on_host}") + else: + raise + return Path(jailer_path_on_host) + else: + return path_on_host + + def enable_device_mapper_rootfs(self, path_on_host: Path) -> Path: + """Mount a rootfs to the VM.""" + self.mounted_rootfs = path_on_host + if not self.use_jailer: + return path_on_host + + rootfs_filename = path_on_host.name + device_jailer_path = Path(DEVICE_BASE_DIRECTORY) / rootfs_filename + final_path = Path(self.jailer_path) / str(device_jailer_path).strip("/") + if not final_path.is_block_device(): + jailer_device_vm_path = Path(f"{self.jailer_path}/{DEVICE_BASE_DIRECTORY}") + jailer_device_vm_path.mkdir(exist_ok=True, parents=True) + rootfs_device = path_on_host.resolve() + # Copy the /dev/dm-{device_id} special block file that is the real mapping destination on Jailer + system(f"cp -vap {rootfs_device} {self.jailer_path}/dev/") + path_to_mount = jailer_device_vm_path / rootfs_filename + if not path_to_mount.is_symlink(): + path_to_mount.symlink_to(rootfs_device) + system(f"chown -Rh jailman:jailman {self.jailer_path}/dev") + + return device_jailer_path + + @staticmethod + def compute_device_name(index: int) -> str: + return f"vd{string.ascii_lowercase[index + 1]}" + + def enable_drive(self, drive_path: Path, read_only: bool = True) -> Drive: + """Make a volume available to the VM. + + Creates a hardlink or a copy to the volume file if jailer is in use. 
+ """ + index = len(self.drives) + device_name = self.compute_device_name(index) + + if self.use_jailer: + drive_filename = drive_path.name + jailer_path_on_host = f"/opt/{drive_filename}" + + try: + Path(f"{self.jailer_path}/{jailer_path_on_host}").hardlink_to(drive_path) + except OSError as err: + if err.errno == errno.EXDEV: + # Invalid cross-device link: cannot make hard link between partition. + # In this case, copy the file instead: + shutil.copyfile(drive_path, f"{self.jailer_path}/{jailer_path_on_host}") + except FileExistsError: + logger.debug(f"File {jailer_path_on_host} already exists") + drive_path = Path(jailer_path_on_host) + + drive = Drive( + drive_id=device_name, + path_on_host=drive_path, + is_root_device=False, + is_read_only=read_only, + ) + self.drives.append(drive) + return drive + + async def wait_for_init(self) -> None: + """Wait for a connection from the init in the VM""" + logger.debug("Waiting for init...") + queue: asyncio.Queue[RuntimeConfiguration] = asyncio.Queue() + + async def unix_client_connected(reader: asyncio.StreamReader, _writer: asyncio.StreamWriter): + data = await reader.read(1_000_000) + if data: + config_dict: dict[str, Any] = msgpack.loads(data) + runtime_config = RuntimeConfiguration(version=config_dict["version"]) + else: + # Older runtimes do not send a config. Use a default. 
+ runtime_config = RuntimeConfiguration(version="1.0.0") + + logger.debug("Runtime version: %s", runtime_config) + await queue.put(runtime_config) + + self._unix_socket = await asyncio.start_unix_server(unix_client_connected, path=f"{self.vsock_path}_52") + if self.use_jailer: + system(f"chown jailman:jailman {self.vsock_path}_52") + try: + self.runtime_config = await asyncio.wait_for(queue.get(), timeout=self.init_timeout) + logger.debug("...signal from init received") + except asyncio.TimeoutError as error: + logger.warning("Never received signal from init") + raise MicroVMFailedInitError() from error + + async def shutdown(self) -> None: + logger.debug(f"Shutdown vm={self.vm_id}") + try: + reader, writer = await asyncio.open_unix_connection(path=self.vsock_path) + except ( + FileNotFoundError, + ConnectionResetError, + ConnectionRefusedError, + ) as error: + logger.warning(f"VM={self.vm_id} cannot receive shutdown signal: {error.args}") + return + + try: + payload = b"halt" + writer.write(b"CONNECT 52\n" + payload) + + await writer.drain() + + ack: bytes = await reader.readline() + logger.debug(f"ack={ack.decode()}") + + msg: bytes = await reader.readline() + logger.debug(f"msg={msg!r}") + + msg2: bytes = await reader.readline() + logger.debug(f"msg2={msg2!r}") + + if msg2 != b"STOPZ\n": + logger.warning(f"Unexpected response from VM: {msg2[:20]!r}") + except ConnectionResetError as error: + logger.warning(f"ConnectionResetError in shutdown of {self.vm_id}: {error.args}") + + async def stop(self): + if self.proc: + logger.debug("Stopping firecracker process") + try: + self.proc.terminate() + self.proc.kill() + except ProcessLookupError: + logger.debug(f"Firecracker process pid={self.proc.pid} not found") + self.proc = None + else: + logger.debug("No firecracker process to stop") + + async def teardown(self): + """Stop the VM, cleanup network interface and remove data directory.""" + try: + await asyncio.wait_for(self.shutdown(), timeout=5) + except 
asyncio.TimeoutError: + logger.exception(f"Timeout during VM shutdown vm={self.vm_id}") + logger.debug("Waiting for one second for the process to shutdown") + await asyncio.sleep(1) + await self.stop() + + if self.stdout_task: + self.stdout_task.cancel() + if self.stderr_task: + self.stderr_task.cancel() + + if ( + self.journal_stdout + and self.journal_stdout != asyncio.subprocess.DEVNULL + and hasattr(self.journal_stdout, "close") + ): + self.journal_stdout.close() + if ( + self.journal_stderr + and self.journal_stderr != asyncio.subprocess.DEVNULL + and hasattr(self.journal_stderr, "close") + ): + self.journal_stderr.close() + + # Clean mounted block devices + if self.mounted_rootfs: + logger.debug("Waiting for one second for the VM to shutdown") + await asyncio.sleep(1) + if self.mounted_rootfs.is_block_device(): + root_fs = self.mounted_rootfs.name + system(f"dmsetup remove {root_fs}") + base_device = Path(self.mounted_rootfs.name.replace("_rootfs", "_base")) + if base_device.is_block_device(): + system(f"dmsetup remove {base_device}") + if self.use_jailer and Path(self.jailer_path).is_dir(): + shutil.rmtree(self.jailer_path) + + if self._unix_socket: + logger.debug("Closing unix socket") + self._unix_socket.close() + try: + await asyncio.wait_for(self._unix_socket.wait_closed(), 2) + except asyncio.TimeoutError: + # In Python < 3.11 wait_closed() was broken and returned immediatly + # It is supposedly fixed in Python 3.12.1, but it hangs indefinitely during tests. 
+ logger.info(f"{self} unix socket closing timeout") + + logger.debug("Removing files") + if self.config_file_path: + self.config_file_path.unlink(missing_ok=True) + if Path(self.namespace_path).exists(): + system(f"rm -fr {self.namespace_path}") diff --git a/devtools/utils/__init0.sh b/devtools/utils/__init0.sh new file mode 100755 index 00000000..9434d2a1 --- /dev/null +++ b/devtools/utils/__init0.sh @@ -0,0 +1,57 @@ +#!/bin/sh +set -euf + +mount -t proc proc /proc -o nosuid,noexec,nodev + +log() { + echo "$(awk '{print $1}' /proc/uptime)" '|S' "$@" +} +log "init0.sh is launching" + +# Switch root from read-only ext4 to read-write overlay +mkdir -p /overlay +/bin/mount -t tmpfs -o noatime,mode=0755 tmpfs /overlay +mkdir -p /overlay/root /overlay/work +/bin/mount -o noatime,lowerdir=/,upperdir=/overlay/root,workdir=/overlay/work -t overlay "overlayfs:/overlay/root" /mnt +mkdir -p /mnt/rom +pivot_root /mnt /mnt/rom + +mount --move /rom/proc /proc +mount --move /rom/dev /dev + +mkdir -p /dev/pts +mkdir -p /dev/shm + +mount -t sysfs sys /sys -o nosuid,noexec,nodev +mount -t tmpfs run /run -o mode=0755,nosuid,nodev +#mount -t devtmpfs dev /dev -o mode=0755,nosuid +mount -t devpts devpts /dev/pts -o mode=0620,gid=5,nosuid,noexec +mount -t tmpfs shm /dev/shm -omode=1777,nosuid,nodev + +# Required by Docker +cgroupfs-mount +update-alternatives --set iptables /usr/sbin/iptables-legacy +update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy + +# Enable the following to force the storage driver used by Docker. 
+# See https://docs.docker.com/storage/storagedriver/select-storage-driver/ +#echo '{\n"storage-driver": "overlay2"\n}\n' > /etc/docker/daemon.json + +# List block devices +lsblk + +#cat /proc/sys/kernel/random/entropy_avail + +# TODO: Move in init1 +mkdir -p /run/sshd +/usr/sbin/sshd & +log "SSH UP" + +log "Setup socat" +socat UNIX-LISTEN:/tmp/socat-socket,fork,reuseaddr VSOCK-CONNECT:2:53 & +log "Socat ready" + +export PYTHONPATH=/opt/aleph/libs + +# Replace this script with the manager +exec /root/init1.py diff --git a/devtools/utils/__init1.py b/devtools/utils/__init1.py new file mode 100755 index 00000000..5af37585 --- /dev/null +++ b/devtools/utils/__init1.py @@ -0,0 +1,624 @@ +#!/usr/bin/python3 -OO +import base64 +import logging +from pathlib import Path + +logging.basicConfig( + level=logging.DEBUG, + format="%(relativeCreated)4f |V %(levelname)s | %(message)s", +) +logger = logging.getLogger(__name__) + +logger.debug("Imports starting") + +import asyncio +import ctypes +import os +import socket +import subprocess +import sys +import traceback +from collections.abc import AsyncIterable +from contextlib import redirect_stdout +from dataclasses import dataclass, field +from enum import Enum +from io import StringIO +from os import system +from shutil import make_archive +from typing import Any, Literal, NewType, Optional, Union, cast + +import aiohttp +import msgpack + +logger.debug("Imports finished") + +__version__ = "2.0.0" +ASGIApplication = NewType("ASGIApplication", Any) # type: ignore + + +class Encoding(str, Enum): + plain = "plain" + zip = "zip" + squashfs = "squashfs" + + +class Interface(str, Enum): + asgi = "asgi" + executable = "executable" + + +class ShutdownException(Exception): + pass + + +@dataclass +class Volume: + mount: str + device: str + read_only: bool + + +@dataclass +class ConfigurationPayload: + input_data: bytes + interface: Interface + vm_hash: str + code: bytes + encoding: Encoding + entrypoint: str + ip: Optional[str] = None + 
ipv6: Optional[str] = None + route: Optional[str] = None + ipv6_gateway: Optional[str] = None + dns_servers: list[str] = field(default_factory=list) + volumes: list[Volume] = field(default_factory=list) + variables: Optional[dict[str, str]] = None + authorized_keys: Optional[list[str]] = None + + +@dataclass +class RunCodePayload: + scope: dict + + +# Open a socket to receive instructions from the host +s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) +s.bind((socket.VMADDR_CID_ANY, 52)) +s.listen() + +# Send the host that we are ready +s0 = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) +s0.connect((2, 52)) +s0.sendall(msgpack.dumps({"version": __version__})) +s0.close() + +# Configure aleph-client to use the guest API +os.environ["ALEPH_INIT_VERSION"] = __version__ +os.environ["ALEPH_API_HOST"] = "http://localhost" +os.environ["ALEPH_API_UNIX_SOCKET"] = "/tmp/socat-socket" +os.environ["ALEPH_REMOTE_CRYPTO_HOST"] = "http://localhost" +os.environ["ALEPH_REMOTE_CRYPTO_UNIX_SOCKET"] = "/tmp/socat-socket" + +logger.debug("init1.py is launching") + + +def setup_hostname(hostname: str): + os.environ["ALEPH_ADDRESS_TO_USE"] = hostname + system(f"hostname {hostname}") + + +def setup_variables(variables: Optional[dict[str, str]]): + if variables is None: + return + for key, value in variables.items(): + os.environ[key] = value + + +def setup_network( + ipv4: Optional[str], + ipv6: Optional[str], + ipv4_gateway: Optional[str], + ipv6_gateway: Optional[str], + dns_servers: Optional[list[str]] = None, +): + """Setup the system with info from the host.""" + dns_servers = dns_servers or [] + if not os.path.exists("/sys/class/net/eth0"): + logger.error("No network interface eth0") + return + + # Configure loopback networking + system("ip addr add 127.0.0.1/8 dev lo brd + scope host") + system("ip addr add ::1/128 dev lo") + system("ip link set lo up") + + # Forward compatibility with future supervisors that pass the mask with the IP. 
+ if ipv4 and ("/" not in ipv4): + logger.warning("Not passing the mask with the IP is deprecated and will be unsupported") + ipv4 = f"{ipv4}/24" + + addresses = [ip for ip in [ipv4, ipv6] if ip] + gateways = [gateway for gateway in [ipv4_gateway, ipv6_gateway] if gateway] + + for address in addresses: + system(f"ip addr add {address} dev eth0") + + # Interface must be up before a route can use it + if addresses: + system("ip link set eth0 up") + else: + logger.debug("No ip address provided") + + for gateway in gateways: + system(f"ip route add default via {gateway} dev eth0") + + if not gateways: + logger.debug("No ip gateway provided") + + with open("/etc/resolv.conf", "wb") as resolvconf_fd: + for server in dns_servers: + resolvconf_fd.write(f"nameserver {server}\n".encode()) + + +def setup_input_data(input_data: bytes): + logger.debug("Extracting data") + if input_data: + # Unzip in /data + if not os.path.exists("/opt/input.zip"): + open("/opt/input.zip", "wb").write(input_data) + os.makedirs("/data", exist_ok=True) + os.system("unzip -q /opt/input.zip -d /data") + + +def setup_authorized_keys(authorized_keys: list[str]) -> None: + path = Path("/root/.ssh/authorized_keys") + path.parent.mkdir(exist_ok=True) + path.write_text("\n".join(key for key in authorized_keys)) + + +def setup_volumes(volumes: list[Volume]): + for volume in volumes: + logger.debug(f"Mounting /dev/{volume.device} on {volume.mount}") + os.makedirs(volume.mount, exist_ok=True) + if volume.read_only: + system(f"mount -t squashfs -o ro /dev/{volume.device} {volume.mount}") + else: + system(f"mount -o rw /dev/{volume.device} {volume.mount}") + + system("mount") + + +async def wait_for_lifespan_event_completion( + application: ASGIApplication, event: Union[Literal["startup", "shutdown"]] +): + """ + Send the startup lifespan signal to the ASGI app. 
def _import_entrypoint(entrypoint: str) -> Any:
    """Import a `module:attr` entrypoint and return the attribute.

    Shared by the squashfs and zip code paths below, which previously
    duplicated this import-and-walk logic.
    """
    module_name, app_name = entrypoint.split(":", 1)
    logger.debug("import module")
    module = __import__(module_name)
    # __import__ returns the top-level package; walk down to the submodule.
    for level in module_name.split(".")[1:]:
        module = getattr(module, level)
    logger.debug("import done")
    return getattr(module, app_name)


async def setup_code_asgi(code: bytes, encoding: Encoding, entrypoint: str) -> ASGIApplication:
    """Load the user program as an ASGI application and run its startup event.

    The code arrives either pre-mounted (squashfs), as a zip archive, or as
    plain source to exec. Returns the ASGI application callable.
    """
    # Allow importing packages from /opt/packages, give it priority
    sys.path.insert(0, "/opt/packages")

    logger.debug("Extracting code")
    app: ASGIApplication
    if encoding == Encoding.squashfs:
        # The code volume is already mounted on /opt/code.
        sys.path.insert(0, "/opt/code")
        app = _import_entrypoint(entrypoint)
    elif encoding == Encoding.zip:
        # Unzip in /opt and import the entrypoint from there
        if not os.path.exists("/opt/archive.zip"):
            # Close the handle before unzip reads the file (original leaked it).
            with open("/opt/archive.zip", "wb") as archive_fd:
                archive_fd.write(code)
        logger.debug("Run unzip")
        os.system("unzip -q /opt/archive.zip -d /opt")
        sys.path.insert(0, "/opt")
        app = _import_entrypoint(entrypoint)
    elif encoding == Encoding.plain:
        # Execute the code and extract the entrypoint
        locals: dict[str, Any] = {}
        exec(code, globals(), locals)
        app = locals[entrypoint]
    else:
        raise ValueError(f"Unknown encoding '{encoding}'")
    await wait_for_lifespan_event_completion(application=app, event="startup")
    return ASGIApplication(app)
def setup_code_executable(code: bytes, encoding: Encoding, entrypoint: str) -> subprocess.Popen:
    """Install the user program as an executable file and start it.

    Raises FileNotFoundError when the expected file is missing after
    extraction (listing /opt/code first, to aid debugging from the logs).
    """
    logger.debug("Extracting code")
    if encoding == Encoding.squashfs:
        path = f"/opt/code/{entrypoint}"
        if not os.path.isfile(path):
            os.system("find /opt/code/")
            raise FileNotFoundError(f"No such file: {path}")
        os.system(f"chmod +x {path}")
    elif encoding == Encoding.zip:
        # Close the handle before unzip reads the file (original leaked it).
        with open("/opt/archive.zip", "wb") as archive_fd:
            archive_fd.write(code)
        logger.debug("Run unzip")
        os.makedirs("/opt/code", exist_ok=True)
        os.system("unzip /opt/archive.zip -d /opt/code")
        path = f"/opt/code/{entrypoint}"
        if not os.path.isfile(path):
            os.system("find /opt/code")
            raise FileNotFoundError(f"No such file: {path}")
        os.system(f"chmod +x {path}")
    elif encoding == Encoding.plain:
        os.makedirs("/opt/code", exist_ok=True)
        # NOTE(review): the space makes the filename `executable <entrypoint>`,
        # and the unquoted chmod below will split it into two arguments —
        # confirm this is intentional and not a transcription artifact.
        path = f"/opt/code/executable {entrypoint}"
        with open(path, "wb") as executable_fd:
            executable_fd.write(code)
        os.system(f"chmod +x {path}")
    else:
        raise ValueError(f"Unknown encoding '{encoding}'. This should never happen.")

    process = subprocess.Popen(path)
    return process


async def setup_code(
    code: bytes,
    encoding: Encoding,
    entrypoint: str,
    interface: Interface,
) -> Union[ASGIApplication, subprocess.Popen]:
    """Dispatch program setup on the requested interface (ASGI or executable)."""
    if interface == Interface.asgi:
        return await setup_code_asgi(code=code, encoding=encoding, entrypoint=entrypoint)
    elif interface == Interface.executable:
        return setup_code_executable(code=code, encoding=encoding, entrypoint=entrypoint)
    else:
        raise ValueError("Invalid interface. This should never happen.")
async def run_python_code_http(application: ASGIApplication, scope: dict) -> tuple[dict, dict, str, Optional[bytes]]:
    """Run one request through the user's ASGI app, in this same process.

    Returns (response headers, response body, captured stdout, zipped /data
    contents or b"" when /data is absent or empty).
    """
    logger.debug("Running code")
    with StringIO() as stdout_buffer, redirect_stdout(stdout_buffer):
        # Execute in the same process, saves ~20ms than a subprocess

        # The body should not be part of the ASGI scope itself
        scope_body: bytes = scope.pop("body")

        async def receive():
            type_ = "http.request" if scope["type"] in ("http", "websocket") else "aleph.message"
            return {"type": type_, "body": scope_body, "more_body": False}

        send_queue: asyncio.Queue = asyncio.Queue()

        async def send(message):
            await send_queue.put(message)

        # TODO: Better error handling
        logger.debug("Awaiting application...")
        await application(scope, receive, send)

        logger.debug("Waiting for headers")
        headers: dict
        # HTTP apps send a response-start message first; other scopes do not.
        headers = await send_queue.get() if scope["type"] == "http" else {}

        logger.debug("Waiting for body")
        response_body: dict = await send_queue.get()

        logger.debug("Waiting for buffer")
        output = stdout_buffer.getvalue()

        logger.debug(f"Headers {headers}")
        logger.debug(f"Body {response_body}")
        logger.debug(f"Output {output}")

        logger.debug("Getting output data")
        output_data: bytes
        if os.path.isdir("/data") and os.listdir("/data"):
            make_archive("/opt/output", "zip", "/data")
            with open("/opt/output.zip", "rb") as output_zipfile:
                output_data = output_zipfile.read()
        else:
            output_data = b""

        logger.debug("Returning result")
        return headers, response_body, output, output_data


async def make_request(session, scope):
    """Proxy an ASGI-style scope to the executable listening on localhost:8080."""
    async with session.request(
        scope["method"],
        url="http://localhost:8080{}".format(scope["path"]),
        params=scope["query_string"],
        headers=[(a.decode("utf-8"), b.decode("utf-8")) for a, b in scope["headers"]],
        data=scope.get("body", None),
    ) as resp:
        headers = {
            "headers": [(a.encode("utf-8"), b.encode("utf-8")) for a, b in resp.headers.items()],
            "status": resp.status,
        }
        body = {"body": await resp.content.read()}
    return headers, body
def show_loading():
    """Build a 503 'still loading' HTML response used before the program listens."""
    body = {"body": Path("/root/loading.html").read_text()}
    headers = {
        "headers": [
            [b"Content-Type", b"text/html"],
            [b"Connection", b"keep-alive"],
            [b"Keep-Alive", b"timeout=5"],
            [b"Transfer-Encoding", b"chunked"],
        ],
        "status": 503,
    }
    return headers, body


async def run_executable_http(scope: dict) -> tuple[dict, dict, str, Optional[bytes]]:
    """Forward the request to the executable program, retrying until it listens.

    Falls back to the loading page after more than 20 failed connections.
    """
    logger.debug("Calling localhost")

    tries = 0
    headers = None
    body = None

    timeout = aiohttp.ClientTimeout(total=5)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        while not body:
            try:
                tries += 1
                headers, body = await make_request(session, scope)
            except aiohttp.ClientConnectorError:
                if tries > 20:
                    headers, body = show_loading()
                await asyncio.sleep(0.05)

    output = ""  # Process stdout is not captured per request
    output_data = None
    logger.debug("Returning result")
    return headers, body, output, output_data


async def process_instruction(
    instruction: bytes,
    interface: Interface,
    application: Union[ASGIApplication, subprocess.Popen],
) -> AsyncIterable[bytes]:
    """Handle one instruction from the supervisor and yield response chunks.

    `halt` stops the app and raises ShutdownException, `!cmd` runs a shell
    command, anything else is a msgpack-encoded RunCodePayload to execute.
    """
    if instruction == b"halt":
        logger.info("Received halt command")
        system("sync")
        logger.debug("Filesystems synced")
        if isinstance(application, subprocess.Popen):
            application.terminate()
            logger.debug("Application terminated")
            # application.communicate()
        else:
            await wait_for_lifespan_event_completion(application=application, event="shutdown")
        yield b"STOP\n"
        logger.debug("Supervisor informed of halt")
        raise ShutdownException
    elif instruction.startswith(b"!"):
        # Execute shell commands in the form `!ls /`
        # NOTE(review): these come from the supervisor over vsock; they are run
        # with shell=True and must never carry untrusted end-user input.
        msg = instruction[1:].decode()
        try:
            process_output = subprocess.check_output(msg, stderr=subprocess.STDOUT, shell=True)
            yield process_output
        except subprocess.CalledProcessError as error:
            yield str(error).encode() + b"\n" + error.output
    else:
        # Python
        logger.debug("msgpack.loads (")
        msg_ = msgpack.loads(instruction, raw=False)
        logger.debug("msgpack.loads )")
        payload = RunCodePayload(**msg_)

        output: Optional[str] = None
        try:
            headers: dict
            body: dict
            output_data: Optional[bytes]

            if interface == Interface.asgi:
                application = cast(ASGIApplication, application)
                headers, body, output, output_data = await run_python_code_http(
                    application=application, scope=payload.scope
                )
            elif interface == Interface.executable:
                headers, body, output, output_data = await run_executable_http(scope=payload.scope)
            else:
                raise ValueError("Unknown interface. This should never happen")

            result = {
                "headers": headers,
                "body": body,
                "output": output,
                "output_data": output_data,
            }
            yield msgpack.dumps(result, use_bin_type=True)
        except Exception as error:
            # use_bin_type=True added for consistency with the success path above.
            yield msgpack.dumps(
                {
                    "error": str(error),
                    "traceback": str(traceback.format_exc()),
                    "output": output,
                },
                use_bin_type=True,
            )
def receive_data_length(client) -> int:
    """Receive the length of the data to follow: up to 9 ASCII digits
    terminated by a newline."""
    buffer = b""
    for _ in range(9):
        byte = client.recv(1)
        if byte == b"\n":
            break
        else:
            buffer += byte
    return int(buffer)


def load_configuration(data: bytes) -> "ConfigurationPayload":
    """Decode a msgpack configuration message into a ConfigurationPayload."""
    msg_ = msgpack.loads(data, raw=False)
    # Default to no volumes instead of crashing when the key is absent.
    msg_["volumes"] = [Volume(**volume_dict) for volume_dict in msg_.get("volumes", [])]
    return ConfigurationPayload(**msg_)


def receive_config(client) -> "ConfigurationPayload":
    """Receive a length-prefixed configuration message from the host socket."""
    length = receive_data_length(client)
    data = b""
    while len(data) < length:
        chunk = client.recv(1024 * 1024)
        if not chunk:
            # Peer closed the socket early: fail loudly instead of looping
            # forever on empty reads (the original spun indefinitely here).
            raise EOFError("Socket closed before the full configuration was received")
        data += chunk
    return load_configuration(data)


def setup_system(config: "ConfigurationPayload"):
    """Apply the host-provided configuration to the guest system."""
    # Linux host names are limited to 63 characters. We therefore use the base32
    # representation of the item_hash instead of its common base16 representation.
    item_hash_binary: bytes = base64.b16decode(config.vm_hash.encode().upper())
    hostname = base64.b32encode(item_hash_binary).decode().strip("=").lower()
    setup_hostname(hostname)

    setup_variables(config.variables)
    setup_volumes(config.volumes)
    setup_network(
        ipv4=config.ip,
        ipv6=config.ipv6,
        ipv4_gateway=config.route,
        ipv6_gateway=config.ipv6_gateway,
        dns_servers=config.dns_servers,
    )
    setup_input_data(config.input_data)
    if authorized_keys := config.authorized_keys:
        setup_authorized_keys(authorized_keys)
    logger.debug("Setup finished")
def umount_volumes(volumes: list[Volume]):
    """Sync filesystems, then unmount every user volume."""
    system("sync")
    for volume in volumes:
        logger.debug(f"Umounting /dev/{volume.device} on {volume.mount}")
        system(f"umount {volume.mount}")


async def main() -> None:
    """Receive the configuration over vsock, start the user program and serve
    supervisor instructions until a halt is received."""
    client, addr = s.accept()

    logger.debug("Receiving setup...")
    config = receive_config(client)
    setup_system(config)

    try:
        app: Union[ASGIApplication, subprocess.Popen] = await setup_code(
            config.code, config.encoding, config.entrypoint, config.interface
        )
        client.send(msgpack.dumps({"success": True}))
    except Exception as error:
        client.send(
            msgpack.dumps(
                {
                    "success": False,
                    "error": str(error),
                    "traceback": str(traceback.format_exc()),
                }
            )
        )
        logger.exception("Program could not be started")
        raise

    class ServerReference:
        "Reference used to close the server from within `handle_instruction`"

        server: asyncio.AbstractServer

    server_reference = ServerReference()

    async def handle_instruction(reader, writer):
        # 1000_1000 == 10,001,000 bytes (~10 MB) — the previous "Max 1 Mo"
        # comment was inaccurate.
        data = await reader.read(1000_1000)

        logger.debug("Init received msg")
        if logger.level <= logging.DEBUG:
            data_to_print = f"{data[:500]}..." if len(data) > 500 else data
            logger.debug(f"<<<\n\n{data_to_print}\n\n>>>")

        try:
            async for result in process_instruction(instruction=data, interface=config.interface, application=app):
                writer.write(result)
                await writer.drain()

            logger.debug("Instruction processed")
        except ShutdownException:
            logger.info("Initiating shutdown")
            writer.write(b"STOPZ\n")
            await writer.drain()
            logger.debug("Shutdown confirmed to supervisor")
            server_reference.server.close()
            logger.debug("Supervisor socket server closed")
        finally:
            writer.close()

    server = await asyncio.start_server(handle_instruction, sock=s)
    server_reference.server = server

    addr = server.sockets[0].getsockname()
    print(f"Serving on {addr}")

    try:
        async with server:
            await server.serve_forever()
    except asyncio.CancelledError:
        logger.debug("Server was properly cancelled")
    finally:
        logger.warning("System shutdown")
        server.close()
        logger.debug("Server closed")
        umount_volumes(config.volumes)
        logger.debug("User volumes unmounted")


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    asyncio.run(main())

    logger.info("Unmounting system filesystems")
    system("umount /dev/shm")
    system("umount /dev/pts")
    system("umount -a")

    logger.info("Sending reboot syscall")
    # Send reboot syscall, see man page
    # https://man7.org/linux/man-pages/man2/reboot.2.html
    # NOTE(review): 169 is __NR_reboot on x86_64; aarch64 uses a different
    # number — confirm if aarch64 guests must run this init.
    libc = ctypes.CDLL(None)
    libc.syscall(169, 0xFEE1DEAD, 672274793, 0x1234567, None)
    # The exit should not happen due to system halt.
    sys.exit(0)
#!/bin/sh
set -euf

# Mount useful pseudo filesystems before handing control to the Python init.
mount -t proc proc /proc -o nosuid,noexec,nodev
mkdir -p /dev/pts
mkdir -p /dev/shm
mount -t sysfs sys /sys -o nosuid,noexec,nodev
mount -t tmpfs run /run -o mode=0755,nosuid,nodev
mount -t devpts devpts /dev/pts -o mode=0620,gid=5,nosuid,noexec
# "-o mode=..." spelled like the other mount lines (was "-omode=...").
mount -t tmpfs shm /dev/shm -o mode=1777,nosuid,nodev

# Replace this shell with the Python stage-1 init (PID 1 is preserved).
exec /root/init1.py
# /etc/inittab — BusyBox init configuration for the microVM.

::sysinit:/sbin/init sysinit
::sysinit:/sbin/init boot
::wait:/sbin/init default

# Spawn login prompts on the six virtual terminals.
tty1::respawn:/sbin/getty 38400 tty1
tty2::respawn:/sbin/getty 38400 tty2
tty3::respawn:/sbin/getty 38400 tty3
tty4::respawn:/sbin/getty 38400 tty4
tty5::respawn:/sbin/getty 38400 tty5
tty6::respawn:/sbin/getty 38400 tty6

# Login prompt on the serial console used by Firecracker.
ttyS0::respawn:/sbin/getty -L ttyS0 115200 vt100

# Reboot on Ctrl-Alt-Del.
::ctrlaltdel:/sbin/reboot

# Commands to run before rebooting.
::shutdown:/sbin/init shutdown
+
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
Whoops!
+
Seems like your VM is still loading, please wait...
+
+ + Refresh! +
+ +
#!/bin/bash
set -euf

# Install the Debian packages needed inside the runtime image.
apt-get install -y --no-install-recommends --no-install-suggests \
  python3-minimal \
  openssh-server \
  socat libsecp256k1-dev \
  python3-aiohttp python3-msgpack \
  python3-setuptools python3-venv \
  python3-pip python3-cytoolz python3-pydantic \
  iproute2 unzip \
  nodejs npm \
  build-essential python3-dev \
  python3-fastapi \
  docker.io \
  cgroupfs-mount \
  nftables \
  iputils-ping curl \
  locales

# Update locale settings to en_US UTF-8
echo "en_US.UTF-8 UTF-8" >/etc/locale.gen
locale-gen en_US.UTF-8

# Install Python packages
pip3 install --upgrade pip
mkdir -p /opt/aleph/libs
pip3 install --target /opt/aleph/libs aleph-sdk-python aleph-message fastapi

# Pre-compile Python bytecode for faster startup. "-o 2" selects optimization
# level 2, matching how init1.py is launched ("python -OO"); without it the
# cached .pyc files would not be used at that level.
# NOTE(review): the python3.11 path is hard-coded — confirm it matches the
# Python version this Debian release actually ships.
python3 -m compileall -o 2 -f /usr/local/lib/python3.11
python3 -m compileall -o 2 -f /opt/aleph/libs

# SSH: key-based root login only.
echo "PubkeyAuthentication yes" >>/etc/ssh/sshd_config
echo "PasswordAuthentication no" >>/etc/ssh/sshd_config
echo "ChallengeResponseAuthentication no" >>/etc/ssh/sshd_config
echo "PermitRootLogin yes" >>/etc/ssh/sshd_config

mkdir -p /overlay

# Set up a login terminal on the serial console (ttyS0):
ln -s agetty /etc/init.d/agetty.ttyS0
echo ttyS0 >/etc/securetty
null + }, + "drives": [ + { + "drive_id": "rootfs", + "path_on_host": "runtimes/test.squashfs", + "is_root_device": true, + "partuuid": null, + "is_read_only": false, + "cache_type": "Unsafe", + "io_engine": "Sync", + "rate_limiter": null + } + ], + "machine-config": { + "vcpu_count": 1, + "mem_size_mib": 512, + "smt": false, + "track_dirty_pages": false + }, + "balloon": null, + "network-interfaces": [ + { + "iface_id": "eth0", + "guest_mac": "AA:FC:00:00:00:01", + "host_dev_name": "" + } + ], + "vsock": { + "vsock_id": "1", + "guest_cid": 3, + "uds_path": "/tmp/v.sock" + }, + "logger": null, + "metrics": null, + "mmds-config": null +}