diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml new file mode 100644 index 0000000..ef755d8 --- /dev/null +++ b/.github/workflows/rust.yml @@ -0,0 +1,53 @@ +name: Rust Teus CI +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + +env: + CARGO_TERM_COLOR: always + +jobs: + teus-build-checks: + runs-on: ubuntu-latest + steps: + # 1. Checkout the repository code + - name: Checkout repository + uses: actions/checkout@v4 + + # 2. toolchain + - name: Setup Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + components: clippy, rustfmt + + # 3. cache + - name: Cache Cargo dependencies + uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + # 4. formatting + - name: Check formatting + run: cargo fmt -- --check + + # 5. clippy (is it necessary?) + - name: Run Clippy + run: cargo clippy -- -D warnings + + # 6. tests + - name: Run tests + run: cargo test --verbose + + # 7. release + - name: Build in release mode + run: cargo build --verbose --release diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..d58197c --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "dashboard"] + path = dashboard + url = https://github.com/imggion/teus-dashboard diff --git a/.zed/tasks.json b/.zed/tasks.json new file mode 100644 index 0000000..80fd286 --- /dev/null +++ b/.zed/tasks.json @@ -0,0 +1,32 @@ +[ + { + "label": "Run Teus - Debug", + "command": "cargo run", + //"args": [], + "env": {}, + "use_new_terminal": false, + // Whether to allow multiple instances of the same task to be run, or rather wait for the existing ones to finish, defaults to `false`. 
+ "allow_concurrent_runs": false, + "reveal": "always", + "hide": "never", + "shell": "system", + "show_summary": true, + "show_output": true, + "tags": [] + }, + { + "label": "Test workspace", + "command": "cargo test --workspace", + //"args": [], + "env": {}, + "use_new_terminal": false, + // Whether to allow multiple instances of the same task to be run, or rather wait for the existing ones to finish, defaults to `false`. + "allow_concurrent_runs": false, + "reveal": "always", + "hide": "never", + "shell": "system", + "show_summary": true, + "show_output": true, + "tags": [] + } +] diff --git a/Cargo.toml b/Cargo.toml index 0bdde6e..7a6e42c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,14 +1,25 @@ [workspace] # This file is part of the Teus project. +members = ["crates/docker"] +# members = ["crates/docker", "crates/sysd"] # add this when sysd is in development default-members = ["."] [package] name = "teus" version = "0.1.0" -edition = "2024" -authors = ['gdjohn4s'] +edition = "2021" +authors = ['immgion'] -[dependencies] +[lib] +name = "teus" +path = "teus/lib.rs" + +[[bin]] +name = "teus" +path = "./teus/main.rs" + +[dependencies] # we need to clean something here +docker = { path = "crates/docker"} diesel = { version = "2.2.0", features = ["sqlite", "returning_clauses_for_sqlite_3_35"] } dotenvy = "0.15" actix-cors = "0.7.0" @@ -19,6 +30,22 @@ derive_more = { version = "2.0.1", features = ["display", "error", "from"] } jsonwebtoken = "9.3.1" rusqlite = "0.34.0" serde = { version = "1.0.218", features = ["derive"] } +serde_json = "1.0" +serde_qs = "0.13" sysinfo = "0.33.1" toml = "0.8.20" argon2 = "0.5.3" + +[dev-dependencies] +tempfile = "3.8" +tokio-test = "0.4" + +[profile.release] +opt-level = "z" # Optimize for size +lto = true # Enable link-time optimization +strip = true # Strip symbols from binary +panic = "abort" # Abort on panic instead of unwinding +codegen-units = 1 # Reduce parallel code generation units + 
+[target.x86_64-unknown-linux-musl] +linker = "x86_64-linux-musl-gcc" diff --git a/REFERENCES.md b/REFERENCES.md new file mode 100644 index 0000000..1cf41e8 --- /dev/null +++ b/REFERENCES.md @@ -0,0 +1,2 @@ +### Documentations +- [Docker API](https://docs.docker.com/reference/api/engine/version/v1.50/) \ No newline at end of file diff --git a/crates/docker/Cargo.toml b/crates/docker/Cargo.toml new file mode 100644 index 0000000..be594eb --- /dev/null +++ b/crates/docker/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "docker" +version = "0.1.0" +edition = "2021" + +[dependencies] +reqwest = { version = "0.12", features = ["json"] } # Or latest version, "json" feature is handy +tokio = { version = "1", features = ["macros", "rt-multi-thread"] } +serde = { version = "1.0.218", features = ["derive"] } +serde_json = "1.0" diff --git a/crates/docker/src/container.rs b/crates/docker/src/container.rs new file mode 100644 index 0000000..a0025d6 --- /dev/null +++ b/crates/docker/src/container.rs @@ -0,0 +1,1127 @@ +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::collections::HashMap; + +/// Response from Container Inspect operation +/// Based on Docker Engine API v1.50 ContainerInspectResponse +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default, rename_all = "PascalCase")] +pub struct ContainerInspectResponse { + /// The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes) + #[serde(rename = "Id")] + pub id: String, + + /// Date and time at which the container was created, formatted in RFC 3339 format with nano-seconds + #[serde(rename = "Created")] + pub created: Option, + + /// The path to the command being run + #[serde(rename = "Path")] + pub path: String, + + /// The arguments to the command being run + #[serde(rename = "Args")] + pub args: Vec, + + /// Container state information + #[serde(rename = "State")] + pub state: Option, + + /// The ID (digest) of the image that this container 
was created from + #[serde(rename = "Image")] + pub image: String, + + /// Location of the /etc/resolv.conf generated for the container on the host + #[serde(rename = "ResolvConfPath")] + pub resolv_conf_path: String, + + /// Location of the /etc/hostname generated for the container on the host + #[serde(rename = "HostnamePath")] + pub hostname_path: String, + + /// Location of the /etc/hosts generated for the container on the host + #[serde(rename = "HostsPath")] + pub hosts_path: String, + + /// Location of the file used to buffer the container's logs + #[serde(rename = "LogPath")] + pub log_path: Option, + + /// The name associated with this container + #[serde(rename = "Name")] + pub name: String, + + /// Number of times the container was restarted since it was created + #[serde(rename = "RestartCount")] + pub restart_count: i64, + + /// The storage-driver used for the container's filesystem + #[serde(rename = "Driver")] + pub driver: String, + + /// The platform (operating system) for which the container was created + #[serde(rename = "Platform")] + pub platform: String, + + /// OCI descriptor of the platform-specific manifest of the image + #[serde(rename = "ImageManifestDescriptor")] + pub image_manifest_descriptor: Option, + + /// SELinux mount label set for the container + #[serde(rename = "MountLabel")] + pub mount_label: String, + + /// SELinux process label set for the container + #[serde(rename = "ProcessLabel")] + pub process_label: String, + + /// The AppArmor profile set for the container + #[serde(rename = "AppArmorProfile")] + pub app_armor_profile: String, + + /// IDs of exec instances that are running in the container + #[serde(rename = "ExecIDs")] + pub exec_ids: Option>, + + /// Host configuration for this container + #[serde(rename = "HostConfig")] + pub host_config: Option, + + /// Information about the container's graph driver + #[serde(rename = "GraphDriver")] + pub graph_driver: Option, + + /// The size of files that have been created or 
changed by this container + #[serde(rename = "SizeRw")] + pub size_rw: Option, + + /// The total size of all files in the read-only layers from the image + #[serde(rename = "SizeRootFs")] + pub size_root_fs: Option, + + /// List of mounts used by the container + #[serde(rename = "Mounts")] + pub mounts: Option>, + + /// Container configuration + #[serde(rename = "Config")] + pub config: Option, + + /// Network settings for the container + #[serde(rename = "NetworkSettings")] + pub network_settings: Option, +} + +/// ContainerState stores container's running state +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct ContainerState { + /// String representation of the container state + #[serde(rename = "Status")] + pub status: Option, + + /// Whether this container is running + #[serde(rename = "Running")] + pub running: Option, + + /// Whether this container is paused + #[serde(rename = "Paused")] + pub paused: Option, + + /// Whether this container is restarting + #[serde(rename = "Restarting")] + pub restarting: Option, + + /// Whether a process within this container has been killed because it ran out of memory + #[serde(rename = "OOMKilled")] + pub oom_killed: Option, + + /// Whether the container is dead + #[serde(rename = "Dead")] + pub dead: Option, + + /// The process ID of this container + #[serde(rename = "Pid")] + pub pid: Option, + + /// The last exit code of this container + #[serde(rename = "ExitCode")] + pub exit_code: Option, + + /// Error message if container failed + #[serde(rename = "Error")] + pub error: Option, + + /// The time when this container was last started + #[serde(rename = "StartedAt")] + pub started_at: Option, + + /// The time when this container last exited + #[serde(rename = "FinishedAt")] + pub finished_at: Option, + + /// Health check information + #[serde(rename = "Health")] + pub health: Option, +} + +/// Health check information +#[derive(Default, Debug, Clone, PartialEq, 
Serialize, Deserialize)] +#[serde(default)] +pub struct Health { + /// Health status + #[serde(rename = "Status")] + pub status: Option, + + /// Number of consecutive failures + #[serde(rename = "FailingStreak")] + pub failing_streak: Option, + + /// Health check logs + #[serde(rename = "Log")] + pub log: Option>, +} + +/// Health check log entry +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct HealthLog { + /// Start time of the health check + #[serde(rename = "Start")] + pub start: Option, + + /// End time of the health check + #[serde(rename = "End")] + pub end: Option, + + /// Exit code of the health check + #[serde(rename = "ExitCode")] + pub exit_code: Option, + + /// Output of the health check + #[serde(rename = "Output")] + pub output: Option, +} + +/// OCI descriptor for image manifests +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct OciDescriptor { + /// Media type of the descriptor + #[serde(rename = "mediaType")] + pub media_type: Option, + + /// Digest of the content + #[serde(rename = "digest")] + pub digest: Option, + + /// Size in bytes + #[serde(rename = "size")] + pub size: Option, + + /// List of URLs + #[serde(rename = "urls")] + pub urls: Option>, + + /// Annotations + #[serde(rename = "annotations")] + pub annotations: Option>, + + /// Data + #[serde(rename = "data")] + pub data: Option, + + /// Platform information + #[serde(rename = "platform")] + pub platform: Option, + + /// Artifact type + #[serde(rename = "artifactType")] + pub artifact_type: Option, +} + +/// Platform information +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct Platform { + /// Architecture + #[serde(rename = "architecture")] + pub architecture: Option, + + /// Operating system + #[serde(rename = "os")] + pub os: Option, + + /// OS version + #[serde(rename = "os.version")] + pub os_version: Option, + + /// OS features 
+ #[serde(rename = "os.features")] + pub os_features: Option>, + + /// Architecture variant + #[serde(rename = "variant")] + pub variant: Option, +} + +/// Container host configuration +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct HostConfig { + /// Volume bindings for this container + #[serde(rename = "Binds")] + pub binds: Option>, + + /// Path to a file where the container ID is written + #[serde(rename = "ContainerIDFile")] + pub container_id_file: Option, + + /// The logging configuration for this container + #[serde(rename = "LogConfig")] + pub log_config: Option, + + /// Network mode to use for this container + #[serde(rename = "NetworkMode")] + pub network_mode: Option, + + /// Port bindings + #[serde(rename = "PortBindings")] + pub port_bindings: Option>>>, + + /// Restart policy + #[serde(rename = "RestartPolicy")] + pub restart_policy: Option, + + /// Automatically remove the container when the container's process exits + #[serde(rename = "AutoRemove")] + pub auto_remove: Option, + + /// Driver that this container uses to mount volumes + #[serde(rename = "VolumeDriver")] + pub volume_driver: Option, + + /// List of volumes to inherit from another container + #[serde(rename = "VolumesFrom")] + pub volumes_from: Option>, + + /// Specification for mounts to be added to the container + #[serde(rename = "Mounts")] + pub mounts: Option>, + + /// Initial console size, as an [height, width] array + #[serde(rename = "ConsoleSize")] + pub console_size: Option>, + + /// Arbitrary non-identifying metadata attached to container + #[serde(rename = "Annotations")] + pub annotations: Option>, + + /// CPU shares (relative weight) + #[serde(rename = "CpuShares")] + pub cpu_shares: Option, + + /// Memory limit in bytes + #[serde(rename = "Memory")] + pub memory: Option, + + /// Total memory limit (memory + swap) + #[serde(rename = "MemorySwap")] + pub memory_swap: Option, + + /// Memory soft limit in bytes + 
#[serde(rename = "MemoryReservation")] + pub memory_reservation: Option, + + /// Tune a container's memory swappiness behavior + #[serde(rename = "MemorySwappiness")] + pub memory_swappiness: Option, + + /// CPU quota in units of 10^-9 CPUs + #[serde(rename = "NanoCpus")] + pub nano_cpus: Option, + + /// Cgroup parent + #[serde(rename = "CgroupParent")] + pub cgroup_parent: Option, + + /// Block IO weight (relative weight) + #[serde(rename = "BlkioWeight")] + pub blkio_weight: Option, + + /// Block IO weight device + #[serde(rename = "BlkioWeightDevice")] + pub blkio_weight_device: Option>, + + /// Limit read rate (bytes per second) from a device + #[serde(rename = "BlkioDeviceReadBps")] + pub blkio_device_read_bps: Option>, + + /// Limit write rate (bytes per second) to a device + #[serde(rename = "BlkioDeviceWriteBps")] + pub blkio_device_write_bps: Option>, + + /// Limit read rate (IO per second) from a device + #[serde(rename = "BlkioDeviceReadIOps")] + pub blkio_device_read_iops: Option>, + + /// Limit write rate (IO per second) to a device + #[serde(rename = "BlkioDeviceWriteIOps")] + pub blkio_device_write_iops: Option>, + + /// The length of a CPU period in microseconds + #[serde(rename = "CpuPeriod")] + pub cpu_period: Option, + + /// Microseconds of CPU time that the container can get in a CPU period + #[serde(rename = "CpuQuota")] + pub cpu_quota: Option, + + /// The length of a CPU real-time period in microseconds + #[serde(rename = "CpuRealtimePeriod")] + pub cpu_realtime_period: Option, + + /// The length of a CPU real-time runtime in microseconds + #[serde(rename = "CpuRealtimeRuntime")] + pub cpu_realtime_runtime: Option, + + /// CPUs in which to allow execution (0-3, 0,1) + #[serde(rename = "CpusetCpus")] + pub cpuset_cpus: Option, + + /// Memory nodes (MEMs) in which to allow execution (0-3, 0,1) + #[serde(rename = "CpusetMems")] + pub cpuset_mems: Option, + + /// A list of devices to add to the container + #[serde(rename = "Devices")] + pub 
devices: Option>, + + /// A list of cgroup rules to apply to the container + #[serde(rename = "DeviceCgroupRules")] + pub device_cgroup_rules: Option>, + + /// A list of requests for devices to be sent to device drivers + #[serde(rename = "DeviceRequests")] + pub device_requests: Option>, + + /// Kernel memory TCP limit in bytes + #[serde(rename = "KernelMemoryTCP")] + pub kernel_memory_tcp: Option, + + /// Disable OOM Killer for the container + #[serde(rename = "OomKillDisable")] + pub oom_kill_disable: Option, + + /// Run an init inside the container that forwards signals and reaps processes + #[serde(rename = "Init")] + pub init: Option, + + /// Tune a container's PIDs limit + #[serde(rename = "PidsLimit")] + pub pids_limit: Option, + + /// A list of resource limits to set in the container + #[serde(rename = "Ulimits")] + pub ulimits: Option>, + + /// The number of usable CPUs (Windows only) + #[serde(rename = "CpuCount")] + pub cpu_count: Option, + + /// The usable percentage of the available CPUs (Windows only) + #[serde(rename = "CpuPercent")] + pub cpu_percent: Option, + + /// Maximum IOps for the container system drive (Windows only) + #[serde(rename = "IOMaximumIOps")] + pub io_maximum_iops: Option, + + /// Maximum IO in bytes per second for the container system drive (Windows only) + #[serde(rename = "IOMaximumBandwidth")] + pub io_maximum_bandwidth: Option, + + /// A list of kernel capabilities to add to the container + #[serde(rename = "CapAdd")] + pub cap_add: Option>, + + /// A list of kernel capabilities to drop from the container + #[serde(rename = "CapDrop")] + pub cap_drop: Option>, + + /// Cgroup namespace mode for the container + #[serde(rename = "CgroupnsMode")] + pub cgroupns_mode: Option, + + /// A list of DNS servers for the container to use + #[serde(rename = "Dns")] + pub dns: Option>, + + /// A list of DNS options + #[serde(rename = "DnsOptions")] + pub dns_options: Option>, + + /// A list of DNS search domains + #[serde(rename = 
"DnsSearch")] + pub dns_search: Option>, + + /// A list of hostnames/IP mappings to add to the container's /etc/hosts file + #[serde(rename = "ExtraHosts")] + pub extra_hosts: Option>, + + /// A list of additional groups that the container process will run as + #[serde(rename = "GroupAdd")] + pub group_add: Option>, + + /// IPC sharing mode for the container + #[serde(rename = "IpcMode")] + pub ipc_mode: Option, + + /// Cgroup to use for the container + #[serde(rename = "Cgroup")] + pub cgroup: Option, + + /// A list of links for the container in the form container_name:alias + #[serde(rename = "Links")] + pub links: Option>, + + /// An integer value containing the score given to the container in order to tune OOM killer preferences + #[serde(rename = "OomScoreAdj")] + pub oom_score_adj: Option, + + /// Set the PID (Process) Namespace mode for the container + #[serde(rename = "PidMode")] + pub pid_mode: Option, + + /// Gives the container full access to the host + #[serde(rename = "Privileged")] + pub privileged: Option, + + /// Allocates an ephemeral host port for all of a container's exposed ports + #[serde(rename = "PublishAllPorts")] + pub publish_all_ports: Option, + + /// Mount the container's root filesystem as read only + #[serde(rename = "ReadonlyRootfs")] + pub readonly_rootfs: Option, + + /// A list of string values to customize labels for MLS systems + #[serde(rename = "SecurityOpt")] + pub security_opt: Option>, + + /// Storage driver options per container + #[serde(rename = "StorageOpt")] + pub storage_opt: Option>, + + /// A map of container directories which should be replaced by tmpfs mounts + #[serde(rename = "Tmpfs")] + pub tmpfs: Option>, + + /// UTS namespace to use for the container + #[serde(rename = "UTSMode")] + pub uts_mode: Option, + + /// Sets the usernamespace mode for the container when usernamespace remapping option is enabled + #[serde(rename = "UsernsMode")] + pub userns_mode: Option, + + /// Size of /dev/shm in bytes + 
#[serde(rename = "ShmSize")] + pub shm_size: Option, + + /// A list of kernel parameters (sysctls) to set in the container + #[serde(rename = "Sysctls")] + pub sysctls: Option>, + + /// Runtime to use with this container + #[serde(rename = "Runtime")] + pub runtime: Option, + + /// Isolation technology of the container + #[serde(rename = "Isolation")] + pub isolation: Option, + + /// The list of paths to be masked inside the container (this overrides the default set of paths) + #[serde(rename = "MaskedPaths")] + pub masked_paths: Option>, + + /// The list of paths to be set as read-only inside the container (this overrides the default set of paths) + #[serde(rename = "ReadonlyPaths")] + pub readonly_paths: Option>, +} + +/// Port binding configuration +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct PortBinding { + /// Host IP address + #[serde(rename = "HostIp")] + pub host_ip: Option, + + /// Host port + #[serde(rename = "HostPort")] + pub host_port: Option, +} + +/// Logging configuration for a container +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct LogConfig { + /// Name of the logging driver + #[serde(rename = "Type")] + pub log_type: Option, + + /// Driver-specific configuration options + #[serde(rename = "Config")] + pub config: Option>, +} + +/// Restart policy for a container +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct RestartPolicy { + /// Restart policy name + #[serde(rename = "Name")] + pub name: Option, + + /// Maximum number of retries + #[serde(rename = "MaximumRetryCount")] + pub maximum_retry_count: Option, +} + +/// A mount for the container +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct Mount { + /// Container path + #[serde(rename = "Target")] + pub target: Option, + + /// Mount source (e.g. 
a volume name, a host path) + #[serde(rename = "Source")] + pub source: Option, + + /// The mount type + #[serde(rename = "Type")] + pub mount_type: Option, + + /// Whether the mount should be read-only + #[serde(rename = "ReadOnly")] + pub read_only: Option, + + /// The consistency requirement for the mount + #[serde(rename = "Consistency")] + pub consistency: Option, + + /// Optional configuration for the bind type + #[serde(rename = "BindOptions")] + pub bind_options: Option, + + /// Optional configuration for the volume type + #[serde(rename = "VolumeOptions")] + pub volume_options: Option, + + /// Optional configuration for the tmpfs type + #[serde(rename = "TmpfsOptions")] + pub tmpfs_options: Option, +} + +/// Bind mount options +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct BindOptions { + /// A propagation mode + #[serde(rename = "Propagation")] + pub propagation: Option, + + /// Disable recursive bind mount + #[serde(rename = "NonRecursive")] + pub non_recursive: Option, + + /// Create mount point on host if missing + #[serde(rename = "CreateMountpoint")] + pub create_mountpoint: Option, + + /// Make the mount non-recursively read-only + #[serde(rename = "ReadOnlyNonRecursive")] + pub read_only_non_recursive: Option, + + /// Make the mount recursively read-only + #[serde(rename = "ReadOnlyForceRecursive")] + pub read_only_force_recursive: Option, +} + +/// Volume mount options +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct VolumeOptions { + /// Populate volume with data from the target + #[serde(rename = "NoCopy")] + pub no_copy: Option, + + /// User-defined key/value metadata + #[serde(rename = "Labels")] + pub labels: Option>, + + /// Map of driver specific options + #[serde(rename = "DriverConfig")] + pub driver_config: Option, + + /// Source path inside the volume + #[serde(rename = "Subpath")] + pub subpath: Option, +} + +/// Driver configuration 
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct DriverConfig { + /// Name of the driver + #[serde(rename = "Name")] + pub name: Option, + + /// Driver options + #[serde(rename = "Options")] + pub options: Option>, +} + +/// Tmpfs mount options +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct TmpfsOptions { + /// The size for the tmpfs mount in bytes + #[serde(rename = "SizeBytes")] + pub size_bytes: Option, + + /// The permission mode for the tmpfs mount in an integer + #[serde(rename = "Mode")] + pub mode: Option, + + /// Options to be passed to the tmpfs mount + #[serde(rename = "Options")] + pub options: Option>, +} + +/// Throttle device configuration +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct ThrottleDevice { + /// Device path + #[serde(rename = "Path")] + pub path: Option, + + /// Rate + #[serde(rename = "Rate")] + pub rate: Option, +} + +/// Device mapping between the host and container +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct DeviceMapping { + /// Path on the host + #[serde(rename = "PathOnHost")] + pub path_on_host: Option, + + /// Path in the container + #[serde(rename = "PathInContainer")] + pub path_in_container: Option, + + /// Cgroup permissions + #[serde(rename = "CgroupPermissions")] + pub cgroup_permissions: Option, +} + +/// A request for devices to be sent to device drivers +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct DeviceRequest { + /// Device driver name + #[serde(rename = "Driver")] + pub driver: Option, + + /// Number of devices to request + #[serde(rename = "Count")] + pub count: Option, + + /// List of device IDs + #[serde(rename = "DeviceIDs")] + pub device_ids: Option>, + + /// A list of capabilities; an OR list of AND lists of capabilities + #[serde(rename = 
"Capabilities")] + pub capabilities: Option>>, + + /// Driver-specific options + #[serde(rename = "Options")] + pub options: Option>, +} + +/// Resource limits for a container process +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct Ulimit { + /// Name of the ulimit + #[serde(rename = "Name")] + pub name: Option, + + /// Soft limit + #[serde(rename = "Soft")] + pub soft: Option, + + /// Hard limit + #[serde(rename = "Hard")] + pub hard: Option, +} + +/// Information about the container's graph driver +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct DriverData { + /// Name of the storage driver + #[serde(rename = "Name")] + pub name: Option, + + /// Low-level storage metadata + #[serde(rename = "Data")] + pub data: Option>, +} + +/// MountPoint represents a mount point configuration inside the container +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct MountPoint { + /// The mount type + #[serde(rename = "Type")] + pub mount_type: Option, + + /// Name reference to the underlying data defined by Source + #[serde(rename = "Name")] + pub name: Option, + + /// Source location of the mount + #[serde(rename = "Source")] + pub source: Option, + + /// Destination is the path relative to the container root where the Source is mounted + #[serde(rename = "Destination")] + pub destination: Option, + + /// Driver is the volume driver used to create the volume + #[serde(rename = "Driver")] + pub driver: Option, + + /// Mode is a comma separated list of options supplied by the user when creating the bind/volume mount + #[serde(rename = "Mode")] + pub mode: Option, + + /// Whether the mount is mounted writable (read-write) + #[serde(rename = "RW")] + pub rw: Option, + + /// Propagation describes how mounts are propagated from the host into the mount point + #[serde(rename = "Propagation")] + pub propagation: Option, +} + +/// 
Configuration for a container that is portable between hosts +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct ContainerConfig { + /// The hostname to use for the container + #[serde(rename = "Hostname")] + pub hostname: Option, + + /// The domain name to use for the container + #[serde(rename = "Domainname")] + pub domainname: Option, + + /// The user that commands are run as inside the container + #[serde(rename = "User")] + pub user: Option, + + /// Whether to attach to stdin + #[serde(rename = "AttachStdin")] + pub attach_stdin: Option, + + /// Whether to attach to stdout + #[serde(rename = "AttachStdout")] + pub attach_stdout: Option, + + /// Whether to attach to stderr + #[serde(rename = "AttachStderr")] + pub attach_stderr: Option, + + /// An object mapping ports to an empty object + #[serde(rename = "ExposedPorts")] + pub exposed_ports: Option>, + + /// Attach standard streams to a TTY + #[serde(rename = "Tty")] + pub tty: Option, + + /// Open stdin + #[serde(rename = "OpenStdin")] + pub open_stdin: Option, + + /// Close stdin after one attached client disconnects + #[serde(rename = "StdinOnce")] + pub stdin_once: Option, + + /// A list of environment variables to set inside the container + #[serde(rename = "Env")] + pub env: Option>, + + /// Command to run specified as a string or an array of strings + #[serde(rename = "Cmd")] + pub cmd: Option>, + + /// Health check configuration + #[serde(rename = "Healthcheck")] + pub healthcheck: Option, + + /// Command is already escaped (Windows only) + #[serde(rename = "ArgsEscaped")] + pub args_escaped: Option, + + /// The name of the image to use when creating the container + #[serde(rename = "Image")] + pub image: Option, + + /// An object mapping mount point paths inside the container to empty objects + #[serde(rename = "Volumes")] + pub volumes: Option>, + + /// The working directory for commands to run in + #[serde(rename = "WorkingDir")] + pub working_dir: 
Option, + + /// The entry point for the container as a string or an array of strings + #[serde(rename = "Entrypoint")] + pub entrypoint: Option>, + + /// Disable networking for the container + #[serde(rename = "NetworkDisabled")] + pub network_disabled: Option, + + /// MAC address of the container + #[serde(rename = "MacAddress")] + pub mac_address: Option, + + /// ONBUILD metadata that were defined in the image's Dockerfile + #[serde(rename = "OnBuild")] + pub on_build: Option>, + + /// User-defined key/value metadata + #[serde(rename = "Labels")] + pub labels: Option>, + + /// Signal to stop a container as a string or unsigned integer + #[serde(rename = "StopSignal")] + pub stop_signal: Option, + + /// Timeout to stop a container in seconds + #[serde(rename = "StopTimeout")] + pub stop_timeout: Option, + + /// Shell for when RUN, CMD, and ENTRYPOINT uses a shell + #[serde(rename = "Shell")] + pub shell: Option>, +} + +/// Health check configuration +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(default)] +pub struct HealthConfig { + /// The test to perform + #[serde(rename = "Test")] + pub test: Option>, + + /// The time to wait between checks in nanoseconds + #[serde(rename = "Interval")] + pub interval: Option, + + /// The time to wait before considering the check to have hung in nanoseconds + #[serde(rename = "Timeout")] + pub timeout: Option, + + /// The number of consecutive failures needed to consider a container as unhealthy + #[serde(rename = "Retries")] + pub retries: Option, + + /// Start period for the container to initialize before starting health-retries countdown in nanoseconds + #[serde(rename = "StartPeriod")] + pub start_period: Option, + + /// The time to wait between checks in nanoseconds during the start period + #[serde(rename = "StartInterval")] + pub start_interval: Option, +} + +/// NetworkSettings exposes the network settings in the API +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 
+#[serde(default)]
+pub struct NetworkSettings {
+    /// Name of the default bridge interface when dockerd's --bridge flag is set
+    #[serde(rename = "Bridge")]
+    pub bridge: Option<String>,
+
+    /// SandboxID uniquely represents a container's network stack
+    #[serde(rename = "SandboxID")]
+    pub sandbox_id: Option<String>,
+
+    /// Indicates if hairpin NAT should be enabled on the virtual interface
+    #[serde(rename = "HairpinMode")]
+    pub hairpin_mode: Option<bool>,
+
+    /// IPv6 unicast address using the link-local prefix
+    #[serde(rename = "LinkLocalIPv6Address")]
+    pub link_local_ipv6_address: Option<String>,
+
+    /// Prefix length of the IPv6 unicast address
+    #[serde(rename = "LinkLocalIPv6PrefixLen")]
+    pub link_local_ipv6_prefix_len: Option<i64>,
+
+    /// Port mapping
+    #[serde(rename = "Ports")]
+    pub ports: Option<HashMap<String, Option<Vec<PortBinding>>>>,
+
+    /// SandboxKey is the full path of the netns handle
+    #[serde(rename = "SandboxKey")]
+    pub sandbox_key: Option<String>,
+
+    /// Secondary IP addresses
+    #[serde(rename = "SecondaryIPAddresses")]
+    pub secondary_ip_addresses: Option<Vec<Address>>,
+
+    /// Secondary IPv6 addresses
+    #[serde(rename = "SecondaryIPv6Addresses")]
+    pub secondary_ipv6_addresses: Option<Vec<Address>>,
+
+    /// EndpointID uniquely represents a service endpoint in a Sandbox
+    #[serde(rename = "EndpointID")]
+    pub endpoint_id: Option<String>,
+
+    /// Gateway address for the default "bridge" network
+    #[serde(rename = "Gateway")]
+    pub gateway: Option<String>,
+
+    /// Global IPv6 address for the default "bridge" network
+    #[serde(rename = "GlobalIPv6Address")]
+    pub global_ipv6_address: Option<String>,
+
+    /// Mask length of the global IPv6 address
+    #[serde(rename = "GlobalIPv6PrefixLen")]
+    pub global_ipv6_prefix_len: Option<i64>,
+
+    /// IPv4 address for the default "bridge" network
+    #[serde(rename = "IPAddress")]
+    pub ip_address: Option<String>,
+
+    /// Mask length of the IPv4 address
+    #[serde(rename = "IPPrefixLen")]
+    pub ip_prefix_len: Option<i64>,
+
+    /// IPv6 gateway address for this network
+    #[serde(rename = "IPv6Gateway")]
+    pub ipv6_gateway: Option<String>,
+
+    /// MAC address for the container on the default "bridge" network
+    #[serde(rename = "MacAddress")]
+    pub mac_address: Option<String>,
+
+    /// Information about all networks that the container is connected to
+    #[serde(rename = "Networks")]
+    pub networks: Option<HashMap<String, EndpointSettings>>,
+}
+
+/// Address represents an IPv4 or IPv6 IP address
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(default)]
+pub struct Address {
+    /// IP address
+    #[serde(rename = "Addr")]
+    pub addr: Option<String>,
+
+    /// Mask length of the IP address
+    #[serde(rename = "PrefixLen")]
+    pub prefix_len: Option<i64>,
+}
+
+/// EndpointSettings stores the network endpoint details
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(default)]
+pub struct EndpointSettings {
+    /// Unique ID of the network
+    #[serde(rename = "NetworkID")]
+    pub network_id: Option<String>,
+
+    /// Unique ID of the service endpoint in a Sandbox
+    #[serde(rename = "EndpointID")]
+    pub endpoint_id: Option<String>,
+
+    /// Gateway address for this network
+    #[serde(rename = "Gateway")]
+    pub gateway: Option<String>,
+
+    /// IPv4 address for this network
+    #[serde(rename = "IPAddress")]
+    pub ip_address: Option<String>,
+
+    /// Mask length of the IPv4 address
+    #[serde(rename = "IPPrefixLen")]
+    pub ip_prefix_len: Option<i64>,
+
+    /// IPv6 gateway address for this network
+    #[serde(rename = "IPv6Gateway")]
+    pub ipv6_gateway: Option<String>,
+
+    /// Global IPv6 address for this network
+    #[serde(rename = "GlobalIPv6Address")]
+    pub global_ipv6_address: Option<String>,
+
+    /// Mask length of the global IPv6 address
+    #[serde(rename = "GlobalIPv6PrefixLen")]
+    pub global_ipv6_prefix_len: Option<i64>,
+
+    /// MAC address for the container on this network
+    #[serde(rename = "MacAddress")]
+    pub mac_address: Option<String>,
+
+    /// List of container aliases for this network
+    #[serde(rename = "Aliases")]
+    pub aliases: Option<Vec<String>>,
+
+    /// List of network driver options
+    #[serde(rename = "DriverOpts")]
+    pub driver_opts: Option<HashMap<String, String>>,
+
+    /// List of links to other containers
+
+    #[serde(rename = "Links")]
+    pub links: Option<Vec<String>>,
+
+    /// IPAM configuration for this endpoint
+    #[serde(rename = "IPAMConfig")]
+    pub ipam_config: Option<EndpointIPAMConfig>,
+
+    /// List of DNS names assigned to this endpoint
+    #[serde(rename = "DNSNames")]
+    pub dns_names: Option<Vec<String>>,
+}
+
+/// IPAM configuration for an endpoint
+#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(default)]
+pub struct EndpointIPAMConfig {
+    /// IPv4 address
+    #[serde(rename = "IPv4Address")]
+    pub ipv4_address: Option<String>,
+
+    /// IPv6 address
+    #[serde(rename = "IPv6Address")]
+    pub ipv6_address: Option<String>,
+
+    /// List of link-local IP addresses
+    #[serde(rename = "LinkLocalIPs")]
+    pub link_local_ips: Option<Vec<String>>,
+}
diff --git a/crates/docker/src/docker.rs b/crates/docker/src/docker.rs
new file mode 100644
index 0000000..0f21634
--- /dev/null
+++ b/crates/docker/src/docker.rs
@@ -0,0 +1,1413 @@
+use crate::{
+    container::ContainerInspectResponse,
+    requests::{DockerApi, DockerRequestMethod, TeusRequestBuilder},
+};
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+use std::collections::HashMap;
+
+#[cfg(not(target_os = "macos"))]
+pub const DOCKER_SOCK: &str = "/var/run/docker.sock";
+
+// For testing purposes, do not forget to replace with your actual Colima or docker path
+#[cfg(target_os = "macos")]
+pub const DOCKER_SOCK: &str = "/Users/homeerr/.colima/default/docker.sock";
+
+pub type Containers = Vec<Container>;
+
+// A custom error enum for our Docker operations
+#[derive(Debug)]
+pub enum DockerError {
+    Generic(String),           // A catch-all for any unexpected errors
+    ContainerNotFound(String), // We can store the container name/ID
+    NetworkError(String),      // Store a generic network error message
+    DockerDaemonDown,          // A specific state with no extra data
+}
+
+#[derive(Debug, Deserialize)]
+struct DockerErrorResponse {
+    message: String,
+}
+
+/* -------------------------
+ * Docker Container
+ * ----------------------- */
+#[derive(Default, Debug, Clone, PartialEq, Serialize,
Deserialize)] +#[serde(rename_all = "camelCase")] +/// Docker container information structure. +/// +/// This structure represents comprehensive information about a Docker container +/// as returned by the Docker daemon API. It includes container metadata, +/// runtime state, network configuration, and resource bindings. +/// +/// # API Mapping +/// +/// Maps to the Docker API `/containers/json` endpoint response format. +/// Field names use serde rename attributes to match Docker's JSON field naming. +/// +/// # Usage +/// +/// Used for: +/// - Container listing and inventory +/// - Container status monitoring +/// - Runtime configuration inspection +/// - Network and storage analysis +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::Container; +/// +/// // Typically populated from Docker API response +/// let container = Container { +/// id: "abc123def456".to_string(), +/// names: vec!["/my-app".to_string()], +/// image: "nginx:latest".to_string(), +/// state: "running".to_string(), +/// // ... other fields +/// }; +/// ``` +/// +/// # JSON Response Format +/// +/// ```json +/// { +/// "Id": "abc123def456", +/// "Names": ["/my-app"], +/// "Image": "nginx:latest", +/// "State": "running", +/// "Status": "Up 2 hours" +/// } +/// ``` +pub struct Container { + /// Unique container identifier (full SHA256 hash). + /// + /// This is the complete container ID as assigned by Docker. + /// Used for all container operations and API calls. + #[serde(rename = "Id")] + pub id: String, + + /// Container names as assigned by Docker. + /// + /// Typically includes the primary name with leading slash + /// (e.g., "/my-container") and any aliases. Multiple names + /// are possible when containers are linked. + #[serde(rename = "Names")] + pub names: Vec, + + /// Docker image name and tag used to create this container. 
+ /// + /// Format: `repository:tag` or `repository@digest` + /// Examples: "nginx:latest", "ubuntu:22.04", "redis:alpine" + #[serde(rename = "Image")] + pub image: String, + + /// Unique identifier of the Docker image. + /// + /// SHA256 hash of the image used to create this container. + /// Used for image management and container-to-image relationships. + #[serde(rename = "ImageID")] + pub image_id: String, + + /// Command executed when the container starts. + /// + /// The primary process command line that runs inside the container. + /// This is either the default command from the image or the + /// command specified when the container was created. + #[serde(rename = "Command")] + pub command: String, + + /// Container creation timestamp (Unix timestamp). + /// + /// When the container was created (not started). Used for + /// sorting, filtering, and lifecycle management. + #[serde(rename = "Created")] + pub created: i64, + + /// Network port mappings and exposed ports. + /// + /// Contains information about ports exposed by the container + /// and any host port mappings. Essential for network connectivity + /// and service discovery. + #[serde(rename = "Ports")] + pub ports: Vec, + + /// Container labels as key-value pairs. + /// + /// Metadata labels assigned to the container, including + /// Docker Compose labels, custom application labels, and + /// orchestration system labels. Uses HashMap for flexible + /// label handling when some labels may be missing. + #[serde(rename = "Labels", default)] + pub labels: HashMap, + + /// Current container state. + /// + /// Basic container state: "created", "restarting", "running", + /// "removing", "paused", "exited", or "dead". + #[serde(rename = "State")] + pub state: String, + + /// Human-readable container status description. + /// + /// Detailed status information including uptime for running + /// containers or exit information for stopped containers. 
+ /// Examples: "Up 2 hours", "Exited (0) 5 minutes ago" + #[serde(rename = "Status")] + pub status: String, + + /// Host system configuration for the container. + /// + /// Contains resource limits, networking mode, and other + /// host-level configuration that affects container runtime behavior. + #[serde(rename = "HostConfig")] + pub host_config: HostConfig, + + /// Network configuration and connectivity information. + /// + /// Details about networks the container is connected to, + /// IP addresses, and network-related settings. + #[serde(rename = "NetworkSettings")] + pub network_settings: NetworkSettings, + + /// Volume and bind mount information. + /// + /// Contains details about storage volumes, bind mounts, and + /// tmpfs mounts attached to the container. Critical for + /// data persistence and sharing. + #[serde(rename = "Mounts")] + pub mounts: Vec, +} + +/// Container port mapping and exposure information. +/// +/// This structure represents network port configuration for Docker containers, +/// including both exposed ports within the container and any mappings to +/// host system ports. Essential for understanding container network accessibility. 
+/// +/// # Port Types +/// +/// - **Private Port**: Port exposed inside the container +/// - **Public Port**: Port mapped on the host system (if any) +/// - **IP Address**: Host IP address for the port binding +/// - **Protocol**: Network protocol (tcp, udp, sctp) +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::Port; +/// +/// // HTTP port exposed but not mapped to host +/// let exposed_port = Port { +/// ip: None, +/// private_port: 80, +/// public_port: None, +/// type_field: "tcp".to_string(), +/// }; +/// +/// // HTTPS port mapped to host port 8443 +/// let mapped_port = Port { +/// ip: Some("0.0.0.0".to_string()), +/// private_port: 443, +/// public_port: Some(8443), +/// type_field: "tcp".to_string(), +/// }; +/// ``` +/// +/// # JSON Format +/// +/// ```json +/// { +/// "IP": "0.0.0.0", +/// "PrivatePort": 80, +/// "PublicPort": 8080, +/// "Type": "tcp" +/// } +/// ``` +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Port { + /// Host IP address where the port is bound. + /// + /// - `None`: Port is exposed but not bound to host + /// - `Some("0.0.0.0")`: Bound to all host interfaces + /// - `Some("127.0.0.1")`: Bound only to localhost + /// - `Some("192.168.1.100")`: Bound to specific IP + #[serde(rename = "IP")] + pub ip: Option, + + /// Port number inside the container. + /// + /// This is the port that the application inside the container + /// is listening on. Always present for exposed ports. + #[serde(rename = "PrivatePort")] + pub private_port: i64, + + /// Port number on the host system. + /// + /// - `None`: Port is exposed but not published to host + /// - `Some(port)`: Port is mapped to this host port number + /// + /// When present, external traffic to this host port will be + /// forwarded to the container's private port. + #[serde(rename = "PublicPort")] + pub public_port: Option, + + /// Network protocol type. 
+ /// + /// Common values: + /// - "tcp": Transmission Control Protocol + /// - "udp": User Datagram Protocol + /// - "sctp": Stream Control Transmission Protocol + #[serde(rename = "Type")] + pub type_field: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Labels { + #[serde(rename = "com.docker.compose.config-hash")] + pub com_docker_compose_config_hash: Option, + #[serde(rename = "com.docker.compose.container-number")] + pub com_docker_compose_container_number: Option, + #[serde(rename = "com.docker.compose.depends_on")] + pub com_docker_compose_depends_on: Option, + #[serde(rename = "com.docker.compose.image")] + pub com_docker_compose_image: Option, + #[serde(rename = "com.docker.compose.oneoff")] + pub com_docker_compose_oneoff: Option, + #[serde(rename = "com.docker.compose.project")] + pub com_docker_compose_project: Option, + #[serde(rename = "com.docker.compose.project.config_files")] + pub com_docker_compose_project_config_files: Option, + #[serde(rename = "com.docker.compose.project.working_dir")] + pub com_docker_compose_project_working_dir: Option, + #[serde(rename = "com.docker.compose.service")] + pub com_docker_compose_service: Option, + #[serde(rename = "com.docker.compose.version")] + pub com_docker_compose_version: Option, + #[serde(rename = "io.portainer.agent")] + pub io_portainer_agent: Option, + #[serde(rename = "com.docker.desktop.extension.api.version")] + pub com_docker_desktop_extension_api_version: Option, + #[serde(rename = "com.docker.desktop.extension.icon")] + pub com_docker_desktop_extension_icon: Option, + #[serde(rename = "com.docker.extension.additional-urls")] + pub com_docker_extension_additional_urls: Option, + #[serde(rename = "com.docker.extension.detailed-description")] + pub com_docker_extension_detailed_description: Option, + #[serde(rename = "com.docker.extension.publisher-url")] + pub com_docker_extension_publisher_url: Option, + 
#[serde(rename = "com.docker.extension.screenshots")] + pub com_docker_extension_screenshots: Option, + #[serde(rename = "io.portainer.server")] + pub io_portainer_server: Option, + #[serde(rename = "org.opencontainers.image.description")] + pub org_opencontainers_image_description: Option, + #[serde(rename = "org.opencontainers.image.title")] + pub org_opencontainers_image_title: Option, + #[serde(rename = "org.opencontainers.image.vendor")] + pub org_opencontainers_image_vendor: Option, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +/// Container host system configuration. +/// +/// This structure represents the host-level configuration settings +/// that affect how the container interacts with the host system. +/// Currently focused on network configuration but can be extended +/// for other host-level settings. +/// +/// # Network Modes +/// +/// Common network mode values: +/// - "bridge": Default Docker bridge network +/// - "host": Use host network stack directly +/// - "none": No network access +/// - "container:": Share network with another container +/// - "": Connect to a specific Docker network +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::HostConfig; +/// +/// let bridge_config = HostConfig { +/// network_mode: "bridge".to_string(), +/// }; +/// +/// let host_config = HostConfig { +/// network_mode: "host".to_string(), +/// }; +/// ``` +pub struct HostConfig { + /// Network mode configuration for the container. + /// + /// Determines how the container connects to networks and + /// interacts with the host system's network stack. This + /// setting affects container isolation and connectivity. + #[serde(rename = "NetworkMode")] + pub network_mode: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +/// Container network configuration and connectivity details. 
+/// +/// This structure contains information about the networks that a container +/// is connected to, including IP addresses, network configuration, and +/// connectivity details for each network. +/// +/// # Network Information +/// +/// Each network connection includes: +/// - IP address assignments +/// - Network aliases and DNS names +/// - MAC address configuration +/// - Driver-specific options +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::{NetworkSettings, NetworkDetails}; +/// use std::collections::HashMap; +/// +/// let mut networks = HashMap::new(); +/// networks.insert("bridge".to_string(), NetworkDetails { +/// // ... network details +/// }); +/// +/// let network_settings = NetworkSettings { networks }; +/// ``` +pub struct NetworkSettings { + /// Map of network names to detailed network configuration. + /// + /// Each entry represents a network that the container is + /// connected to, with the key being the network name and + /// the value containing detailed connection information. + #[serde(rename = "Networks")] + pub networks: HashMap, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +/// Detailed network connection information for a container. +/// +/// This structure provides comprehensive details about how a container +/// is connected to a specific Docker network, including IP addressing, +/// DNS configuration, and network-specific settings. +/// +/// # Network Configuration +/// +/// Contains both IPv4 and IPv6 addressing information, gateway settings, +/// DNS names, and network driver configuration. Essential for understanding +/// container network connectivity and troubleshooting network issues. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::docker::NetworkDetails; +/// use serde_json::Value; +/// +/// let network_details = NetworkDetails { +/// network_id: "bridge123".to_string(), +/// endpoint_id: "endpoint456".to_string(), +/// gateway: "172.17.0.1".to_string(), +/// ipaddress: "172.17.0.2".to_string(), +/// ipprefix_len: 16, +/// mac_address: Some("02:42:ac:11:00:02".to_string()), +/// // ... other fields +/// }; +/// ``` +pub struct NetworkDetails { + /// IP Address Management configuration. + /// + /// Contains static IP configuration and IPAM-specific settings + /// for this network connection. Structure varies by network driver. + #[serde(rename = "IPAMConfig")] + pub ipamconfig: Value, + + /// Container links configuration. + /// + /// Legacy Docker links to other containers on the same network. + /// Generally replaced by custom networks and service discovery. + #[serde(rename = "Links")] + pub links: Value, + + /// Network aliases for this container. + /// + /// DNS names by which this container can be reached on this network. + /// Used for service discovery within Docker networks. + #[serde(rename = "Aliases")] + pub aliases: Value, + + /// MAC address assigned to the container's network interface. + /// + /// Hardware address used for this network connection. May be + /// automatically assigned or explicitly configured. + #[serde(rename = "MacAddress")] + pub mac_address: Option, + + /// Network driver-specific options. + /// + /// Configuration options specific to the network driver being used. + /// Content varies depending on the network driver (bridge, overlay, etc.). + #[serde(rename = "DriverOpts")] + pub driver_opts: Value, + + /// Unique identifier of the Docker network. + /// + /// Internal network ID used by Docker to identify the network + /// this container is connected to. + #[serde(rename = "NetworkID")] + pub network_id: String, + + /// Unique identifier of the network endpoint. 
+ /// + /// Internal endpoint ID representing this container's connection + /// point to the network. + #[serde(rename = "EndpointID")] + pub endpoint_id: String, + + /// IPv4 gateway address for this network. + /// + /// Default route gateway that the container uses to reach + /// addresses outside this network. + #[serde(rename = "Gateway")] + pub gateway: String, + + /// IPv4 address assigned to the container on this network. + /// + /// Primary IP address used for communication on this network. + /// Used by other containers and external systems to reach this container. + #[serde(rename = "IPAddress")] + pub ipaddress: String, + + /// IPv4 subnet prefix length. + /// + /// Number of bits in the network portion of the IP address. + /// Used to determine the network range and subnet mask. + #[serde(rename = "IPPrefixLen")] + pub ipprefix_len: i64, + + /// IPv6 gateway address for this network. + /// + /// Default IPv6 gateway for traffic leaving this network. + /// Empty string if IPv6 is not configured. + #[serde(rename = "IPv6Gateway")] + pub ipv6gateway: String, + + /// Global IPv6 address assigned to the container. + /// + /// Routable IPv6 address if IPv6 networking is enabled. + /// Empty string if IPv6 is not configured. + #[serde(rename = "GlobalIPv6Address")] + pub global_ipv6address: String, + + /// IPv6 subnet prefix length. + /// + /// Number of bits in the IPv6 network portion. + /// Zero if IPv6 is not configured. + #[serde(rename = "GlobalIPv6PrefixLen")] + pub global_ipv6prefix_len: i64, + + /// DNS names associated with this container on the network. + /// + /// Additional DNS names that can be used to resolve to this + /// container within the network. + #[serde(rename = "DNSNames")] + pub dnsnames: Value, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +/// Container mount and volume information. 
+/// +/// This structure represents storage mounts attached to a Docker container, +/// including volume mounts, bind mounts, and tmpfs mounts. Essential for +/// understanding data persistence and storage configuration. +/// +/// # Mount Types +/// +/// - **bind**: Bind mount from host filesystem +/// - **volume**: Docker-managed volume +/// - **tmpfs**: Temporary filesystem in memory +/// +/// # Access Modes +/// +/// - **rw**: Read-write access (default) +/// - **ro**: Read-only access +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::Mount; +/// +/// // Bind mount from host directory +/// let bind_mount = Mount { +/// type_field: "bind".to_string(), +/// source: "/host/data".to_string(), +/// destination: "/app/data".to_string(), +/// mode: "rw".to_string(), +/// rw: true, +/// propagation: "rprivate".to_string(), +/// }; +/// +/// // Docker volume mount +/// let volume_mount = Mount { +/// type_field: "volume".to_string(), +/// source: "app-data".to_string(), +/// destination: "/app/storage".to_string(), +/// mode: "rw".to_string(), +/// rw: true, +/// propagation: "".to_string(), +/// }; +/// ``` +pub struct Mount { + /// Type of mount (bind, volume, tmpfs). + /// + /// Determines how the storage is provided: + /// - "bind": Direct mount from host filesystem + /// - "volume": Docker-managed named volume + /// - "tmpfs": Temporary memory-based filesystem + #[serde(rename = "Type")] + pub type_field: String, + + /// Source path or volume name. + /// + /// For bind mounts: absolute path on host filesystem + /// For volumes: volume name as managed by Docker + /// For tmpfs: not applicable (empty string) + #[serde(rename = "Source")] + pub source: String, + + /// Mount destination path inside the container. + /// + /// Absolute path where the mount appears within the + /// container's filesystem namespace. + #[serde(rename = "Destination")] + pub destination: String, + + /// Access mode string representation. 
+ /// + /// Common values: + /// - "rw": Read-write access + /// - "ro": Read-only access + /// - May include additional options + #[serde(rename = "Mode")] + pub mode: String, + + /// Read-write access flag. + /// + /// - `true`: Mount allows write access + /// - `false`: Mount is read-only + #[serde(rename = "RW")] + pub rw: bool, + + /// Mount propagation setting. + /// + /// Controls how mount events propagate between host and container: + /// - "rprivate": Private (default) + /// - "shared": Shared propagation + /// - "slave": Slave propagation + /// - "rshared": Recursive shared + /// - "rslave": Recursive slave + #[serde(rename = "Propagation")] + pub propagation: String, + + /// Optional name for named volumes. + /// + /// For volume mounts, this is the name of the Docker volume. + /// For bind mounts, this field is typically not used. + #[serde(rename = "Name")] + pub name: Option, + + /// Volume driver name (for volume mounts). + /// + /// Specifies the volume driver used for volume mounts. + /// Common drivers include "local" for local storage. + #[serde(rename = "Driver")] + pub driver: Option, +} + +/* ------------------------- + * Docker Version + * ----------------------- */ + +/// Docker daemon version and build information. +/// +/// This structure contains comprehensive version information about the Docker +/// daemon, including API versions, build details, and platform information. +/// Used for compatibility checking and system information reporting. +/// +/// # API Compatibility +/// +/// The API version fields are crucial for ensuring compatibility between +/// client and server. The `min_apiversion` field indicates the oldest +/// API version supported by this Docker daemon. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::docker::DockerVersion; +/// +/// // Typically populated from Docker API /version endpoint +/// let version_info = DockerVersion { +/// version: "24.0.5".to_string(), +/// api_version: "1.43".to_string(), +/// min_apiversion: "1.12".to_string(), +/// go_version: "go1.20.6".to_string(), +/// os: "linux".to_string(), +/// arch: "amd64".to_string(), +/// // ... other fields +/// }; +/// ``` +/// +/// # JSON Response Format +/// +/// ```json +/// { +/// "Version": "24.0.5", +/// "ApiVersion": "1.43", +/// "MinAPIVersion": "1.12", +/// "GitCommit": "ced0996", +/// "GoVersion": "go1.20.6", +/// "Os": "linux", +/// "Arch": "amd64" +/// } +/// ``` +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DockerVersion { + /// Platform information for the Docker daemon. + /// + /// Contains details about the platform where Docker is running, + /// including operating system and architecture information. + #[serde(rename = "Platform")] + pub platform: Platform, + + /// List of Docker components and their versions. + /// + /// Includes information about Docker Engine, containerd, runc, + /// and other components that make up the Docker runtime stack. + #[serde(rename = "Components")] + pub components: Vec, + + /// Docker daemon version string. + /// + /// The main version identifier for the Docker daemon. + /// Format typically follows semantic versioning (e.g., "24.0.5"). + #[serde(rename = "Version")] + pub version: String, + + /// Current Docker API version supported. + /// + /// The API version that this Docker daemon currently supports. + /// Used by clients to determine available features and endpoints. + #[serde(rename = "ApiVersion")] + pub api_version: String, + + /// Minimum Docker API version supported. + /// + /// The oldest API version that this Docker daemon still supports. + /// Critical for backward compatibility with older Docker clients. 
+ #[serde(rename = "MinAPIVersion")] + pub min_apiversion: String, + + /// Git commit hash of the Docker build. + /// + /// Short commit hash identifying the exact source code version + /// used to build this Docker daemon. Useful for debugging and + /// exact version identification. + #[serde(rename = "GitCommit")] + pub git_commit: String, + + /// Go language version used to build Docker. + /// + /// Version of the Go programming language used to compile + /// the Docker daemon. Important for compatibility and + /// performance characteristics. + #[serde(rename = "GoVersion")] + pub go_version: String, + + /// Operating system where Docker is running. + /// + /// The host operating system (e.g., "linux", "windows"). + /// Affects available features and container capabilities. + #[serde(rename = "Os")] + pub os: String, + + /// System architecture where Docker is running. + /// + /// The CPU architecture (e.g., "amd64", "arm64", "386"). + /// Determines container image compatibility and performance. + #[serde(rename = "Arch")] + pub arch: String, + + /// Host kernel version. + /// + /// Version of the operating system kernel. Important for + /// container feature support and security capabilities. + #[serde(rename = "KernelVersion")] + pub kernel_version: String, + + /// Docker daemon build timestamp. + /// + /// When this version of Docker was compiled and built. + /// Useful for age assessment and update planning. + #[serde(rename = "BuildTime")] + pub build_time: String, +} + +/// Docker platform information. +/// +/// This structure represents the platform where the Docker daemon +/// is running, providing basic identification of the host environment. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::docker::Platform; +/// +/// let platform = Platform { +/// name: "Docker Engine - Community".to_string(), +/// }; +/// ``` +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Platform { + #[serde(rename = "Name")] + pub name: String, +} + +/// Docker component version information. +/// +/// This structure represents version and build information for individual +/// components that make up the Docker runtime stack, such as the Docker +/// engine, containerd, and runc. +/// +/// # Component Types +/// +/// Common components include: +/// - "Engine": Docker daemon itself +/// - "containerd": Container runtime +/// - "runc": OCI runtime +/// - "docker-init": Init process for containers +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::Component; +/// +/// let engine_component = Component { +/// name: "Engine".to_string(), +/// version: "24.0.5".to_string(), +/// details: Details { +/// git_commit: "ced0996".to_string(), +/// // ... other details +/// }, +/// }; +/// ``` +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Component { + /// Name of the Docker component. + /// + /// Identifies which part of the Docker stack this component represents. + /// Examples: "Engine", "containerd", "runc", "docker-init" + #[serde(rename = "Name")] + pub name: String, + + /// Version string for this component. + /// + /// The specific version of this component in the Docker installation. + /// May follow different versioning schemes depending on the component. + #[serde(rename = "Version")] + pub version: String, + + /// Detailed build and version information for this component. + /// + /// Contains additional metadata about the component build, + /// including commit hashes, build times, and platform details. 
+ #[serde(rename = "Details")] + pub details: Details, +} + +/// Detailed build information for Docker components. +/// +/// This structure contains comprehensive build and version metadata +/// for individual Docker components. Not all fields are present for +/// every component, hence the use of `Option` types. +/// +/// # Build Information +/// +/// Includes git commit information, build timestamps, Go version used, +/// and platform-specific details that help identify the exact build +/// of each component. +/// +/// # Examples +/// +/// ```rust +/// use teus::docker::Details; +/// +/// let details = Details { +/// git_commit: "de40ad0".to_string(), +/// api_version: Some("1.43".to_string()), +/// go_version: Some("go1.20.6".to_string()), +/// build_time: Some("2023-07-06T19:33:28.000000000+00:00".to_string()), +/// // ... other optional fields +/// }; +/// ``` +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Details { + /// API version supported by this component (if applicable). + /// + /// For components that expose APIs, this indicates the + /// API version they support. Not all components have APIs. + #[serde(rename = "ApiVersion")] + pub api_version: Option, + + /// Architecture this component was built for. + /// + /// CPU architecture (e.g., "amd64", "arm64") that this + /// component binary targets. + #[serde(rename = "Arch")] + pub arch: Option, + + /// Timestamp when this component was built. + /// + /// ISO 8601 formatted timestamp indicating when this + /// specific build of the component was created. + #[serde(rename = "BuildTime")] + pub build_time: Option, + + /// Whether experimental features are enabled. + /// + /// Indicates if this component build includes experimental + /// or preview features. Values typically "true" or "false". + #[serde(rename = "Experimental")] + pub experimental: Option, + + /// Git commit hash for this component build. 
+ /// + /// Short commit hash identifying the exact source code + /// version used to build this component. Always present. + #[serde(rename = "GitCommit")] + pub git_commit: String, + + /// Go language version used to build this component. + /// + /// Version of Go used for compilation, if the component + /// is written in Go (most Docker components are). + #[serde(rename = "GoVersion")] + pub go_version: Option, + + /// Host kernel version (if relevant to this component). + /// + /// Operating system kernel version, included for components + /// that interact closely with kernel features. + #[serde(rename = "KernelVersion")] + pub kernel_version: Option, + + /// Minimum API version supported (if applicable). + /// + /// For API-exposing components, the oldest API version + /// still supported for backward compatibility. + #[serde(rename = "MinAPIVersion")] + pub min_apiversion: Option, + + /// Operating system this component was built for. + /// + /// Target operating system (e.g., "linux", "windows") + /// for this component build. 
+ #[serde(rename = "Os")] + pub os: Option, +} + +/* ------------------------- + * Docker Info + * ----------------------- */ +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DockerInfo { + #[serde(rename = "ID")] + pub id: String, + #[serde(rename = "Containers")] + pub containers: i64, + #[serde(rename = "ContainersRunning")] + pub containers_running: i64, + #[serde(rename = "ContainersPaused")] + pub containers_paused: i64, + #[serde(rename = "ContainersStopped")] + pub containers_stopped: i64, + #[serde(rename = "Images")] + pub images: i64, + #[serde(rename = "Driver")] + pub driver: String, + #[serde(rename = "DriverStatus")] + pub driver_status: Vec>, + #[serde(rename = "Plugins")] + pub plugins: Plugins, + #[serde(rename = "MemoryLimit")] + pub memory_limit: bool, + #[serde(rename = "SwapLimit")] + pub swap_limit: bool, + #[serde(rename = "CpuCfsPeriod")] + pub cpu_cfs_period: bool, + #[serde(rename = "CpuCfsQuota")] + pub cpu_cfs_quota: bool, + #[serde(rename = "CPUShares")] + pub cpushares: bool, + #[serde(rename = "CPUSet")] + pub cpuset: bool, + #[serde(rename = "PidsLimit")] + pub pids_limit: bool, + #[serde(rename = "IPv4Forwarding")] + pub ipv4forwarding: bool, + #[serde(rename = "BridgeNfIptables")] + pub bridge_nf_iptables: bool, + #[serde(rename = "BridgeNfIp6tables")] + pub bridge_nf_ip6tables: bool, + #[serde(rename = "Debug")] + pub debug: bool, + #[serde(rename = "NFd")] + pub nfd: i64, + #[serde(rename = "OomKillDisable")] + pub oom_kill_disable: bool, + #[serde(rename = "NGoroutines")] + pub ngoroutines: i64, + #[serde(rename = "SystemTime")] + pub system_time: String, + #[serde(rename = "LoggingDriver")] + pub logging_driver: String, + #[serde(rename = "CgroupDriver")] + pub cgroup_driver: String, + #[serde(rename = "CgroupVersion")] + pub cgroup_version: String, + #[serde(rename = "NEventsListener")] + pub nevents_listener: i64, + #[serde(rename = "KernelVersion")] + pub 
kernel_version: String, + #[serde(rename = "OperatingSystem")] + pub operating_system: String, + #[serde(rename = "OSVersion")] + pub osversion: String, + #[serde(rename = "OSType")] + pub ostype: String, + #[serde(rename = "Architecture")] + pub architecture: String, + #[serde(rename = "IndexServerAddress")] + pub index_server_address: String, + #[serde(rename = "RegistryConfig")] + pub registry_config: RegistryConfig, + #[serde(rename = "NCPU")] + pub ncpu: i64, + #[serde(rename = "MemTotal")] + pub mem_total: i64, + #[serde(rename = "GenericResources")] + pub generic_resources: Value, + #[serde(rename = "DockerRootDir")] + pub docker_root_dir: String, + #[serde(rename = "HttpProxy")] + pub http_proxy: String, + #[serde(rename = "HttpsProxy")] + pub https_proxy: String, + #[serde(rename = "NoProxy")] + pub no_proxy: String, + #[serde(rename = "Name")] + pub name: String, + #[serde(rename = "Labels")] + pub labels: Vec, + #[serde(rename = "ExperimentalBuild")] + pub experimental_build: bool, + #[serde(rename = "ServerVersion")] + pub server_version: String, + #[serde(rename = "Runtimes")] + pub runtimes: Runtimes, + #[serde(rename = "DefaultRuntime")] + pub default_runtime: String, + #[serde(rename = "Swarm")] + pub swarm: Swarm, + #[serde(rename = "LiveRestoreEnabled")] + pub live_restore_enabled: bool, + #[serde(rename = "Isolation")] + pub isolation: String, + #[serde(rename = "InitBinary")] + pub init_binary: String, + #[serde(rename = "ContainerdCommit")] + pub containerd_commit: ContainerdCommit, + #[serde(rename = "RuncCommit")] + pub runc_commit: RuncCommit, + #[serde(rename = "InitCommit")] + pub init_commit: InitCommit, + #[serde(rename = "SecurityOptions")] + pub security_options: Vec, + #[serde(rename = "CDISpecDirs")] + pub cdispec_dirs: Vec, + #[serde(rename = "Containerd")] + pub containerd: Containerd, + #[serde(rename = "Warnings")] + pub warnings: Value, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all 
= "camelCase")] +pub struct Plugins { + #[serde(rename = "Volume")] + pub volume: Vec, + #[serde(rename = "Network")] + pub network: Vec, + #[serde(rename = "Authorization")] + pub authorization: Value, + #[serde(rename = "Log")] + pub log: Vec, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RegistryConfig { + #[serde(rename = "AllowNondistributableArtifactsCIDRs")] + pub allow_nondistributable_artifacts_cidrs: Value, + #[serde(rename = "AllowNondistributableArtifactsHostnames")] + pub allow_nondistributable_artifacts_hostnames: Value, + #[serde(rename = "InsecureRegistryCIDRs")] + pub insecure_registry_cidrs: Vec, + #[serde(rename = "IndexConfigs")] + pub index_configs: IndexConfigs, + #[serde(rename = "Mirrors")] + pub mirrors: Value, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IndexConfigs { + #[serde(rename = "docker.io")] + pub docker_io: DockerIo, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DockerIo { + #[serde(rename = "Name")] + pub name: String, + #[serde(rename = "Mirrors")] + pub mirrors: Vec, + #[serde(rename = "Secure")] + pub secure: bool, + #[serde(rename = "Official")] + pub official: bool, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Runtimes { + #[serde(rename = "io.containerd.runc.v2")] + pub io_containerd_runc_v2: IoContainerdRuncV2, + pub runc: Runc, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IoContainerdRuncV2 { + pub path: String, + pub status: Status, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Status { + #[serde(rename = "org.opencontainers.runtime-spec.features")] + pub 
org_opencontainers_runtime_spec_features: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Runc { + pub path: String, + pub status: Status2, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Status2 { + #[serde(rename = "org.opencontainers.runtime-spec.features")] + pub org_opencontainers_runtime_spec_features: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Swarm { + #[serde(rename = "NodeID")] + pub node_id: String, + #[serde(rename = "NodeAddr")] + pub node_addr: String, + #[serde(rename = "LocalNodeState")] + pub local_node_state: String, + #[serde(rename = "ControlAvailable")] + pub control_available: bool, + #[serde(rename = "Error")] + pub error: String, + #[serde(rename = "RemoteManagers")] + pub remote_managers: Value, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ContainerdCommit { + #[serde(rename = "ID")] + pub id: String, + #[serde(rename = "Expected")] + pub expected: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RuncCommit { + #[serde(rename = "ID")] + pub id: String, + #[serde(rename = "Expected")] + pub expected: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct InitCommit { + #[serde(rename = "ID")] + pub id: String, + #[serde(rename = "Expected")] + pub expected: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Containerd { + #[serde(rename = "Address")] + pub address: String, + #[serde(rename = "Namespaces")] + pub namespaces: Namespaces, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, 
Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Namespaces { + #[serde(rename = "Containers")] + pub containers: String, + #[serde(rename = "Plugins")] + pub plugins: String, +} + +/* ------------------------- + * Docker Volumes + * ----------------------- */ +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DockerVolumes { + #[serde(rename = "Volumes")] + pub volumes: Vec, + #[serde(rename = "Warnings")] + pub warnings: Value, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Volume { + #[serde(rename = "CreatedAt")] + pub created_at: String, + #[serde(rename = "Driver")] + pub driver: String, + #[serde(rename = "Labels")] + pub labels: Option, + #[serde(rename = "Mountpoint")] + pub mountpoint: String, + #[serde(rename = "Name")] + pub name: String, + #[serde(rename = "Options")] + pub options: Value, + #[serde(rename = "Scope")] + pub scope: String, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct VolumeLabels { + #[serde(rename = "com.docker.compose.project")] + pub com_docker_compose_project: Option, + #[serde(rename = "com.docker.compose.version")] + pub com_docker_compose_version: Option, + #[serde(rename = "com.docker.compose.volume")] + pub com_docker_compose_volume: Option, + #[serde(rename = "com.docker.volume.anonymous")] + pub com_docker_volume_anonymous: Option, +} + +#[derive(Debug)] +pub struct DockerClient { + pub request_builder: TeusRequestBuilder, // i think this can be initialized outside of the struct +} + +// #[derive(Debug, Deserialize)] +// pub struct ContainersQuery { +// all: Option, +// } + +impl DockerClient { + /// Creates a new DockerClient. + /// + /// If `socket_path` is `None`, it defaults to the standard Unix socket path + /// "/var/run/docker.sock". 
+ pub fn new(socket_path: Option) -> Self { + // If socket_path is Some(path), use it. + // If socket_path is None, execute the closure to get the default path. + let path = socket_path.unwrap_or_else(|| DOCKER_SOCK.to_string()); + + DockerClient { + // We now pass the guaranteed-to-be-valid path to the builder + request_builder: TeusRequestBuilder::new(path, "localhost".to_string()) + .expect("Are you sure docker is up and running?"), + } + } + + /// Helper method to parse Docker responses that might contain errors + fn parse_docker_response(&self, response: &str) -> Result + where + T: for<'de> Deserialize<'de>, + { + // First, try to deserialize as the expected type + match serde_json::from_str::(response) { + Ok(data) => Ok(data), + Err(_) => { + // If that fails, try to deserialize as a Docker error response + match serde_json::from_str::(response) { + Ok(error_response) => Err(DockerError::Generic(error_response.message)), + Err(_) => { + // If both fail, return the raw response as a generic error + Err(DockerError::Generic(format!( + "Failed to parse Docker response: {}", + response + ))) + } + } + } + } + } + + pub fn get_containers(&mut self, query: Option) -> Result { + let response = self.request_builder.make_request( + DockerRequestMethod::Get, + DockerApi::Containers, + query, + ); + self.parse_docker_response(&response) + } + + pub fn get_container_details( + &mut self, + container_id: String, + ) -> Result { + let response = self.request_builder.make_request( + DockerRequestMethod::Get, + DockerApi::ContainerDetails(container_id), + None, + ); + self.parse_docker_response(&response) + } + + pub fn get_version(&mut self) -> Result { + let response = + self.request_builder + .make_request(DockerRequestMethod::Get, DockerApi::Version, None); + self.parse_docker_response(&response) + } + + pub fn get_volumes(&mut self) -> Result { + let response = + self.request_builder + .make_request(DockerRequestMethod::Get, DockerApi::Volumes, None); + 
self.parse_docker_response(&response) + } + + pub fn get_volume_details(&mut self, volume_name: String) -> Result { + let response = self.request_builder.make_request( + DockerRequestMethod::Get, + DockerApi::VolumeDetails(volume_name), + None, + ); + self.parse_docker_response(&response) + } +} + +mod tests { + #[allow(unused_imports)] + use super::*; + use std::env; + + // For MacOS + // TODO: Try to get the home directory from the Env + #[allow(dead_code)] + #[cfg(target_os = "macos")] + fn get_test_socket_path() -> Option { + // Path for Colima or Docker Desktop on macOS + let home_dir = env::var("HOME").unwrap(); + Some(format!("{home_dir}/.colima/default/docker.sock")) + } + + // This covers Linux, Windows (via WSL), etc. + #[cfg(not(target_os = "macos"))] + fn get_test_socket_path() -> Option { + // The standard path for Linux. `None` might also be an option if + // DockerClient's `new` method already defaults to this. + Some("/var/run/docker.sock".to_string()) + } + + #[test] + fn test_get_containers() { + // Our test now calls the correct helper function automatically. + let test_socket = get_test_socket_path(); + let mut client = DockerClient::new(test_socket); + println!("{:?}", client); + + let containers = client.get_containers(None).unwrap(); + println!("{:?}", containers); + assert!(!containers.is_empty()); + } + + #[test] + fn test_get_version() { + // Our test now calls the correct helper function automatically. + let test_socket = get_test_socket_path(); + let mut client = DockerClient::new(test_socket); + println!("{:?}", client); + + let version = client.get_version().unwrap(); + println!("{:?}", version); + assert!(!version.version.is_empty()); + } + + #[test] + fn test_get_volumes() { + // Our test now calls the correct helper function automatically. 
+ let test_socket = get_test_socket_path(); + let mut client = DockerClient::new(test_socket); + println!("{:?}", client); + + let volumes = client.get_volumes().unwrap(); + println!("{:?}", volumes); + assert!(!volumes.volumes.is_empty()); + } + + #[test] + fn test_get_volume_details() { + // Our test now calls the correct helper function automatically. + let test_socket = get_test_socket_path(); + let mut client = DockerClient::new(test_socket); + println!("{:?}", client); + + let volume_name = + "84146ce4581849ab32389b4fa709e47ce80f2a78075f9a32dbb2f6f8b19456de".to_string(); + let volume_details = client.get_volume_details(volume_name).unwrap(); + println!("{:?}", volume_details); + assert!(!volume_details.name.is_empty()); + } +} diff --git a/crates/docker/src/lib.rs b/crates/docker/src/lib.rs new file mode 100644 index 0000000..e32f10d --- /dev/null +++ b/crates/docker/src/lib.rs @@ -0,0 +1,149 @@ +mod container; +pub mod docker; +pub mod requests; + +#[cfg(test)] +mod tests { + use std::io::{Read, Write}; + use std::os::unix::net::UnixStream; + + // Replace with your actual Docker socket path + // const DOCKER_SOCKET_PATH: &str = "/var/run/docker.sock"; // Standard Linux + #[cfg(target_os = "linux")] + const DOCKER_SOCKET_PATH: &str = "/var/run/docker.sock"; // Standard Linux + #[cfg(target_os = "macos")] + // For testing purposes, do not forget to replace with your actual Colima or docker path + const DOCKER_SOCKET_PATH: &str = "/Users/homeerr/.colima/default/docker.sock"; // Your Colima path + + #[test] + fn docker_sock_connection() { + let docker_sock = UnixStream::connect(DOCKER_SOCKET_PATH).unwrap(); + + println!("{:?}", docker_sock); + } + + #[test] + fn docker_sock_request() { + let mut stream = match UnixStream::connect(DOCKER_SOCKET_PATH) { + Ok(sock) => sock, + Err(e) => { + eprintln!("Failed to connect: {}", e); + return; + } + }; + + // GET /version HTTP/1.1 + // Host: localhost (required for HTTP/1.1) + // Connection: close (optional, but good for 
simplicity here) + // (empty line) + let request = "GET /version HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n"; + + match stream.write_all(request.as_bytes()) { + Ok(_) => println!("Sent request:\n{}", request), + Err(e) => { + eprintln!("Failed to write to socket: {}", e); + return; + } + } + + let mut response = String::new(); + match stream.read_to_string(&mut response) { + Ok(_) => { + println!("Received response:\n{}", response); + + // --- Basic Parsing --- + // This just separates headers and body. + if let Some((_, _)) = response.split_once("\r\n\r\n") { + println!("\n--- Headers ---"); + // println!("{}", headers); + println!("\n--- Body ---"); + // println!("{}", body); + } else { + println!("\nCould not parse HTTP response."); + } + } + Err(e) => { + eprintln!("Failed to read from socket: {}", e); + } + } + } + + // #[tokio::test] + // async fn docker_reqwest_uds_connection() { + // // 1. Create a Unix Domain Socket transport + // let transport = match Transport::unix(DOCKER_SOCKET_PATH) { + // Ok(t) => t, + // Err(e) => { + // eprintln!("Failed to create Unix transport: {}", e); + // // Optionally, check if the socket file exists + // if !std::path::Path::new(DOCKER_SOCKET_PATH).exists() { + // eprintln!("Docker socket not found at {}. Please ensure Docker is running and the path is correct.", DOCKER_SOCKET_PATH); + // } + // panic!("Transport creation failed."); // Fail the test + // } + // }; + + // // 2. Build a reqwest client with this transport + // // Here, we use reqwest::Client::builder() and provide our custom transport. + // let client = Client::builder() + // .build(transport) + // .expect("Failed to build reqwest client with UDS transport"); + + // // 3. Make requests using a dummy HTTP base URL. + // // The hostname ("localhost" or "docker" or anything) is ignored by the Unix socket transport; + // // it only cares that it's an HTTP request going to the pre-configured socket. 
+ // // The path ("/version", "/containers/json", etc.) is what Docker API uses. + // let version_url = "http://localhost/version"; // Hostname is a placeholder + + // println!("Attempting to GET: {}", version_url); + + // match client.get(version_url).send().await { + // Ok(response) => { + // println!("Status: {}", response.status()); + // println!("Headers:\n{:#?}", response.headers()); + + // // Example: Parse the response body as JSON + // match response.json::().await { + // Ok(json_body) => { + // println!("Body (JSON):\n{:#?}", json_body); + // // You can now access parts of the JSON, e.g., json_body["ApiVersion"] + // assert!( + // json_body["ApiVersion"].is_string(), + // "Expected ApiVersion to be a string" + // ); + // } + // Err(e) => { + // eprintln!("Failed to parse JSON body: {}", e); + // // Fallback to text if JSON parsing fails + // // match response.text().await { + // // Ok(text_body) => println!("Body (Text):\n{}", text_body), + // // Err(e_text) => eprintln!("Failed to read text body: {}", e_text), + // // } + // } + // } + // } + // Err(e) => { + // eprintln!("Request to {} failed: {}", version_url, e); + // panic!("Request failed."); // Fail the test + // } + // } + + // // Example: Listing containers (GET /containers/json) + // let containers_url = "http://localhost/containers/json"; + // println!("\nAttempting to GET: {}", containers_url); + // match client.get(containers_url).send().await { + // Ok(response) => { + // println!("Status for {}: {}", containers_url, response.status()); + // match response.json::>().await { + // // Expecting an array of container objects + // Ok(json_body) => { + // println!("Containers (JSON):\n{:#?}", json_body); + // println!("Found {} containers.", json_body.len()); + // } + // Err(e) => eprintln!("Failed to parse containers JSON: {}", e), + // } + // } + // Err(e) => eprintln!("Request to {} failed: {}", containers_url, e), + // } + // } +} diff --git a/crates/docker/src/requests.rs 
b/crates/docker/src/requests.rs new file mode 100644 index 0000000..4a6bf04 --- /dev/null +++ b/crates/docker/src/requests.rs @@ -0,0 +1,218 @@ +use std::{ + io::{Read, Write}, + os::unix::net::UnixStream, +}; + +pub enum DockerApi { + Version, + Info, + Containers, + ContainerDetails(String), + Volumes, + Images, + Networks, + Ping, + VolumeDetails(String), +} + +impl DockerApi { + pub fn endpoint(&self) -> String { + match self { + DockerApi::Version => "version".to_string(), + DockerApi::Info => "info".to_string(), + DockerApi::Containers => "containers/json".to_string(), + DockerApi::ContainerDetails(container_id) => { + format!("containers/{}/json", container_id) + } + DockerApi::Volumes => "volumes".to_string(), + DockerApi::Images => "images".to_string(), + DockerApi::Networks => "networks".to_string(), + DockerApi::Ping => "_ping".to_string(), + DockerApi::VolumeDetails(volume_name) => format!("volumes/{}", volume_name), + } + } +} + +pub enum DockerRequestMethod { + Get, + Post, + Put, + Delete, +} + +impl DockerRequestMethod { + pub fn method(&self) -> &str { + match self { + DockerRequestMethod::Get => "GET", + DockerRequestMethod::Post => "POST", + DockerRequestMethod::Put => "PUT", + DockerRequestMethod::Delete => "DELETE", + } + } +} + +// TODO: Create a HashMap with DockerApi and DockerRequestMethod +// DockerApiConfig +// Maybe in the future, use a Complex configuration struct if needed. 
+ +#[derive(Debug)] +pub struct TeusRequestBuilder { + pub socket: String, + pub host: String, + socket_stream: UnixStream, +} + +impl TeusRequestBuilder { + // "GET /version HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n"; + pub fn new(socket: String, host: String) -> Result> { + let stream = match UnixStream::connect(&socket) { + Ok(sock) => sock, + Err(e) => { + eprintln!("Failed to connect: {}", e); + return Err(Box::new(e)); + } + }; + + Ok(TeusRequestBuilder { + socket, + host, + socket_stream: stream, + }) + } + + #[inline] + fn format_url_request( + &self, + method: DockerRequestMethod, + api: DockerApi, + query: Option, + ) -> String { + let query_str = query.map(|q| format!("?{}", q)).unwrap_or_default(); + println!("QUERY STR: {}", query_str); + format!( + "{} /{}{} HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n", + method.method(), + api.endpoint(), + query_str, + self.host, + ) + } + + /// Helper method to parse the response buffer into a string. + fn parse_buffer_to_string(&self, response_buffer: String) -> String { + println!("RESPONSE BUFFER:\n{}", response_buffer); + + // Keep the headers and body parts separately. + let (headers_str, body_part) = match response_buffer.split_once("\r\n\r\n") { + Some((headers, body)) => (headers, body), + None => { + eprintln!("Could not parse HTTP response: No header/body separator found."); + return "Error: Invalid HTTP response".to_string(); + } + }; + + // Check the headers to see if the response is chunked. + let is_chunked = headers_str + .lines() + .any(|line| line.to_lowercase().contains("transfer-encoding: chunked")); + + if is_chunked { + println!("--> Detected chunked response. Parsing chunks..."); + let mut lines = body_part.trim().lines(); + lines.next(); // Skip the hex size + lines.next_back(); // Skip the trailing "0" + lines.collect::() + } else { + println!("--> Detected Content-Length response. Body is raw JSON."); + // If not chunked, the body is already the complete JSON. 
No processing needed. + body_part.to_string() + } + } + + // TODO: Fill the method by matching the Docker API + // Ex: Version => GET + // StartService => POST + // DeleteContainer => DELETE + // etc.. + // TODO: Implement query params in the request builder + pub fn make_request( + &mut self, + method: DockerRequestMethod, + api: DockerApi, + query: Option, + ) -> String { + let request = self.format_url_request(method, api, query); + println!("REQUEST:\n{}", request); + if let Err(e) = self.socket_stream.write_all(request.as_bytes()) { + eprintln!("Failed to write to socket: {}", e); + return String::new(); // Return early on error + } + + // Flush the write buffer to ensure the request is sent + if let Err(e) = self.socket_stream.flush() { + eprintln!("Failed to flush socket: {}", e); + return String::new(); + } + + let mut response_buffer = String::new(); + if let Err(e) = self.socket_stream.read_to_string(&mut response_buffer) { + eprintln!("Failed to read from socket: {}", e); + return "Error: Failed to read response".to_string(); + } + + let response = self.parse_buffer_to_string(response_buffer); + response + } +} + +mod tests { + use super::*; + + // Helper function to avoid repeating setup code + fn _setup_builder() -> TeusRequestBuilder { + let socket = "/Users/homeerr/.colima/default/docker.sock".to_string(); + let host = "localhost".to_string(); + TeusRequestBuilder::new(socket, host).unwrap() + } + + #[test] + fn builds_get_request_correctly() { + let builder = _setup_builder(); + let get = builder.format_url_request(DockerRequestMethod::Get, DockerApi::Version, None); + assert_eq!( + get, + "GET /version HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n" + ); + } + + #[test] + fn builds_put_request_correctly() { + let builder = _setup_builder(); + let put = builder.format_url_request(DockerRequestMethod::Put, DockerApi::Version, None); + assert_eq!( + put, + "PUT /version HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n" + ); + } + + 
#[test] + fn builds_post_request_correctly() { + let builder = _setup_builder(); + let post = builder.format_url_request(DockerRequestMethod::Post, DockerApi::Version, None); + assert_eq!( + post, + "POST /version HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n" + ); + } + + #[test] + fn builds_delete_request_correctly() { + let builder = _setup_builder(); + let delete = + builder.format_url_request(DockerRequestMethod::Delete, DockerApi::Version, None); + assert_eq!( + delete, + "DELETE /version HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n" + ); + } +} diff --git a/dashboard b/dashboard new file mode 160000 index 0000000..6723615 --- /dev/null +++ b/dashboard @@ -0,0 +1 @@ +Subproject commit 672361506c6bb21c5944258a31c628ca595249f3 diff --git a/docs/docker-openapi.yaml b/docs/docker-openapi.yaml new file mode 100644 index 0000000..d42a507 --- /dev/null +++ b/docs/docker-openapi.yaml @@ -0,0 +1,13431 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.50" +info: + title: "Docker Engine API" + version: "1.50" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. 
`docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.50) is used. + For example, calling `/info` is the same as calling `/v1.50/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. 
These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. 
+ + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. 
+ This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `image` a docker image + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. 
Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). 
These are **not** removed when the container is removed. + - `image` Mounts an image. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." 
+
+ type: "boolean"
+ default: false
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ DriverConfig:
+ description: "Map of driver specific options"
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the driver to use to create the volume."
+ type: "string"
+ Options:
+ description: "key/value map of driver specific options."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Subpath:
+ description: "Source path inside the volume. Must be relative without any back traversals."
+ type: "string"
+ example: "dir-inside-volume/subdirectory"
+ ImageOptions:
+ description: "Optional configuration for the `image` type."
+ type: "object"
+ properties:
+ Subpath:
+ description: "Source path inside the image. Must be relative without any back traversals."
+ type: "string"
+ example: "dir-inside-image/subdirectory"
+ TmpfsOptions:
+ description: "Optional configuration for the `tmpfs` type."
+ type: "object"
+ properties:
+ SizeBytes:
+ description: "The size for the tmpfs mount in bytes."
+ type: "integer"
+ format: "int64"
+ Mode:
+ description: "The permission mode for the tmpfs mount in an integer."
+ type: "integer"
+ Options:
+ description: |
+ The options to be passed to the tmpfs mount. An array of arrays.
+ Flag options should be provided as 1-length arrays. Other types
+ should be provided as 2-length arrays, where the first item is
+ the key and the second the value.
+ type: "array"
+ items:
+ type: "array"
+ minItems: 1
+ maxItems: 2
+ items:
+ type: "string"
+ example:
+ [["noexec"]]
+
+ RestartPolicy:
+ description: |
+ The behavior to apply when the container exits. The default is not to
+ restart.
+
+ An ever increasing delay (double the previous delay, starting at 100ms) is
+ added before each restart to prevent flooding the server.
+ type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). 
+
+ type: "string"
+ example: "0-3"
+ CpusetMems:
+ description: |
+ Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
+ effective on NUMA systems.
+ type: "string"
+ Devices:
+ description: "A list of devices to add to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceMapping"
+ DeviceCgroupRules:
+ description: "a list of cgroup rules to apply to the container"
+ type: "array"
+ items:
+ type: "string"
+ example: "c 13:* rwm"
+ DeviceRequests:
+ description: |
+ A list of requests for devices to be sent to device drivers.
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceRequest"
+ KernelMemoryTCP:
+ description: |
+ Hard limit for kernel TCP buffer memory (in bytes). Depending on the
+ OCI runtime in use, this option may be ignored. It is no longer supported
+ by the default (runc) runtime.
+
+ This field is omitted when empty.
+ type: "integer"
+ format: "int64"
+ MemoryReservation:
+ description: "Memory soft limit in bytes."
+ type: "integer"
+ format: "int64"
+ MemorySwap:
+ description: |
+ Total memory limit (memory + swap). Set as `-1` to enable unlimited
+ swap.
+ type: "integer"
+ format: "int64"
+ MemorySwappiness:
+ description: |
+ Tune a container's memory swappiness behavior. Accepts an integer
+ between 0 and 100.
+ type: "integer"
+ format: "int64"
+ minimum: 0
+ maximum: 100
+ NanoCpus:
+ description: "CPU quota in units of 10^-9 CPUs."
+ type: "integer"
+ format: "int64"
+ OomKillDisable:
+ description: "Disable OOM Killer for the container."
+ type: "boolean"
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ PidsLimit:
+ description: |
+ Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
+ to not change.
+ type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. 
+ type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. 
+ type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+
+ type: "string"
+ format: "date-time"
+ example: "2020-01-04T10:45:21.364524523Z"
+ ExitCode:
+ description: |
+ ExitCode meanings:
+
+ - `0` healthy
+ - `1` unhealthy
+ - `2` reserved (considered unhealthy)
+ - other values: error running probe
+ type: "integer"
+ example: 0
+ Output:
+ description: "Output from last check"
+ type: "string"
+
+ HostConfig:
+ description: "Container configuration that depends on the host we are running on"
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ # Applicable to all platforms
+ Binds:
+ type: "array"
+ description: |
+ A list of volume bindings for this container. Each volume binding
+ is a string in one of these forms:
+
+ - `host-src:container-dest[:options]` to bind-mount a host path
+ into the container. Both `host-src`, and `container-dest` must
+ be an _absolute_ path.
+ - `volume-name:container-dest[:options]` to bind-mount a volume
+ managed by a volume driver into the container. `container-dest`
+ must be an _absolute_ path.
+
+ `options` is an optional, comma-delimited list of:
+
+ - `nocopy` disables automatic copying of data from the container
+ path to the volume. The `nocopy` flag only applies to named volumes.
+ - `[ro|rw]` mounts a volume read-only or read-write, respectively.
+ If omitted or set to `rw`, volumes are mounted read-write.
+ - `[z|Z]` applies SELinux labels to allow or deny multiple containers
+ to read and write to the same volume.
+ - `z`: a _shared_ content label is applied to the content. This
+ label indicates that multiple containers can share the volume
+ content, for both reading and writing.
+ - `Z`: a _private unshared_ label is applied to the content.
+ This label indicates that only the current container can use
+ a private volume. Labeling systems such as SELinux require
+ proper labels to be placed on volume content that is mounted
+ into a container. Without a label, the security system can
+ prevent a container's processes from using the content.
By
+ default, the labels set by the host operating system are not
+ modified.
+ - `[[r]shared|[r]slave|[r]private]` specifies mount
+ [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
+ This only applies to bind-mounted volumes, not internal volumes
+ or named volumes. Mount propagation requires the source mount
+ point (the location where the source directory is mounted in the
+ host operating system) to have the correct propagation properties.
+ For shared volumes, the source mount point must be set to `shared`.
+ For slave volumes, the mount must be set to either `shared` or
+ `slave`.
+ items:
+ type: "string"
+ ContainerIDFile:
+ type: "string"
+ description: "Path to a file where the container ID is written"
+ example: ""
+ LogConfig:
+ type: "object"
+ description: "The logging configuration for this container"
+ properties:
+ Type:
+ description: |-
+ Name of the logging driver used for the container or "none"
+ if logging is disabled.
+ type: "string"
+ enum:
+ - "local"
+ - "json-file"
+ - "syslog"
+ - "journald"
+ - "gelf"
+ - "fluentd"
+ - "awslogs"
+ - "splunk"
+ - "etwlogs"
+ - "none"
+ Config:
+ description: |-
+ Driver-specific configuration options for the logging driver.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "5"
+ "max-size": "10m"
+ NetworkMode:
+ type: "string"
+ description: |
+ Network mode to use for this container. Supported standard values
+ are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
+ other value is taken as a custom network's name to which this
+ container should connect to.
+ PortBindings:
+ $ref: "#/definitions/PortMap"
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ AutoRemove:
+ type: "boolean"
+ description: |
+ Automatically remove the container when the container's process
+ exits. This has no effect if `RestartPolicy` is set.
+ VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `[:]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." 
+ items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. 
+ + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. + additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). 
+ items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. 
+ type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. 
+ + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. 
+ type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. 
+ example: + "User": "web:web" + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. 
+ type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. + type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `/`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." 
+ type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. 
+ type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + Manifests: + description: | + Manifests is a list of image manifests available in this image. It + provides a more detailed view of the platform-specific image manifests or + other image-attached data like build attestations. + + Only available if the daemon provides a multi-platform image store + and the `manifests` option is set in the inspect request. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: true + items: + $ref: "#/definitions/ImageManifestSummary" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. 
+ type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if present in the image, + and omitted otherwise. + type: "string" + format: "dateTime" + x-nullable: true + example: "2022-02-04T21:20:12.497794809Z" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. 
+ type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. 
+ + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. 
+ type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: |- + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 172064416 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. + x-nullable: false + type: "integer" + example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. 
+ + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" + Descriptor: + description: | + Descriptor is an OCI descriptor of the image target. + In case of a multi-platform image, this descriptor points to the OCI index + or a manifest list. + + This field is only present if the daemon provides a multi-platform image store. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. 
+ + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. 
+ type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). 
+ type: "string" + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + example: false + IPAM: + $ref: "#/definitions/IPAM" + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + description: | + Network-specific options uses when creating the network. 
+ type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." 
+ type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + example: "container_1" + EndpointID: + type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + example: "02:42:ac:13:00:02" + IPv4Address: + type: "string" + example: "172.19.0.2/16" + IPv6Address: + type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. 
+ errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). 
+ type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. 
+ errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. 
+ type: "string" + example: "02:42:ac:11:00:04" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "number" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. 
This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." 
+ Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. 
+ type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - 
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." 
+ type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. 
+ type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. 
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +


+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +


+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +


+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. 
The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. 
+ example: 0 + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls that + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. 
For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resources limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resources reservation." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Windows is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). 
Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + label descriptor, such as `engine.labels.az`. + type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. 
+ If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." 
+ properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." 
+ type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. 
+ type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +


+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. 
+ type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + infer the number of times the job has been executed. + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. 
+ type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. 
+ type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. 
+ type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. 
+ + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. + type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. 
+ type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. 
+ type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. + Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. 
`Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." 
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." 
+ type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. 
+ + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. 
+ items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). + + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. 
+ x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. + type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. when using cgroups v2. + + This field is Linux-specific and omitted for Windows containers. 
+ type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. 
+ type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + 
properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. 
+ + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +


+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." 
+ type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + +


+ + > **Deprecated**: netfilter module is now loaded on-demand and no longer + > during daemon startup, making this field obsolete. This field is always + > `false` and will be removed in API v1.49. + type: "boolean" + example: false + BridgeNfIp6tables: + description: | + Indicates if `bridge-nf-call-ip6tables` is available on the host. + +


+ + > **Deprecated**: netfilter module is now loaded on-demand, and no longer + > during daemon startup, making this field obsolete. This field is always + > `false` and will be removed in API v1.49. + type: "boolean" + example: false + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. 
+ type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + +


+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. 
+ Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +


+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. 
+ type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + - "" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. 
+ type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. 
These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon. Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +


+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration overrides this behavior, allowing insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`.
+ + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. 
Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of IDs and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm."
+ type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains.
+ type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. 
+ type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. 
+ type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. 
+ additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. 
Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. 
+ items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. 
+ default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. 
+ type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". 
+ type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. 
+ + Available filters: + + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `expose`=(`[/]`|`/[]`) + - `exited=` containers with exit code of `` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=` a container's name + - `network`=(`` or ``) + - `publish`=(`[/]`|`/[]`) + - `since`=(`` or ``) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`` or ``) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + 
MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." 
+ operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. 
+ type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. 
+ + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. 
+ type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+ type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. 
With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. 
+ + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. 
Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. 
+ type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." 
+ type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." 
+ schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + - `until=` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." 
+ type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. 
If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). 
For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.49. 
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." 
+ operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." 
+ operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." + operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER 
Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. 
For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. + type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. 
+ operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + +


+ + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
+ 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. 
+ default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: 
null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. 
If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. 
`ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be load if the image is + multi-platform. 
+ If not provided, the full multi-platform image will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." 
+ items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. + - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. 
+ type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. 
Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." 
+ operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. 
For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. 
When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - 
"application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. 
+ type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. 
A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from 
the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. 
+ + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. 
+ type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. 
+ required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `node.label=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - 
name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. 
This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. 
+ + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." 
+ type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + 
Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." + type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." 
+ required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. 
You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." 
+ type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + 
IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. 
+ + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." 
+ type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + 
delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: 
"ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. 
All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. 
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/src/config/parser.rs b/src/config/parser.rs deleted file mode 100644 index 338db47..0000000 --- a/src/config/parser.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::config::types::Config; -use std::error::Error; -use std::{fs, path::Path}; - -#[allow(dead_code)] -type GeneralError = Box; -#[allow(dead_code)] -type ConfigResult = Result; - -pub fn load_config>(path: P) -> ConfigResult { - let content = fs::read_to_string(path)?; - let config: Config = toml::from_str(&content)?; - - Ok(config) -} diff --git a/src/config/types.rs b/src/config/types.rs deleted file mode 100644 index e71263c..0000000 --- a/src/config/types.rs +++ /dev/null @@ -1,62 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::str::FromStr; - -#[derive(Debug, Clone, Deserialize)] -pub enum Environment { - Development, - Test, - Production, -} - -impl Environment { - pub fn as_str(&self) -> &'static str { - match self { - Environment::Development => "dev", - Environment::Test => "test", - Environment::Production => "prod", - } - } -} - -impl FromStr for Environment { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "dev" => Ok(Environment::Development), - "test" => Ok(Environment::Test), - "prod" => Ok(Environment::Production), - _ => Err(format!("Unknown environment: {}", s)), - } - } -} - -#[derive(Debug, Deserialize, 
Clone)] -pub struct Config { - pub server: ServerConfig, - pub database: DatabaseConfig, - pub monitor: MonitorConfig, -} - -#[derive(Debug, Deserialize, Clone)] -pub struct ServerConfig { - pub host: String, - pub port: u16, - pub secret: String, - pub environment: Environment, -} - -#[derive(Debug, Deserialize, Clone)] -pub struct DatabaseConfig { - pub path: String, -} - -#[derive(Debug, Deserialize, Clone)] -pub struct MonitorConfig { - pub interval_secs: u64, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct IsFirstVisitResponse { - pub first_visit: bool, -} diff --git a/src/monitor/schema.rs b/src/monitor/schema.rs deleted file mode 100644 index 2020939..0000000 --- a/src/monitor/schema.rs +++ /dev/null @@ -1,85 +0,0 @@ -// src/monitor/schema.rs -use crate::schema::{diskinfo, sysinfo}; -use diesel::prelude::*; -use serde::{Deserialize, Serialize}; - -#[derive(Insertable, Debug, Serialize, Deserialize)] -#[diesel(table_name = sysinfo)] -pub struct SchemaSysInfo { - pub timestamp: String, - pub cpu_usage: f32, // Changed to f32 to match schema - pub ram_usage: f32, // Changed to f32 to match schema - pub total_ram: f32, // Changed to f32 to match schema - pub free_ram: f32, // Changed to f32 to match schema - pub used_swap: f32, // Changed to f32 to match schema - // pub user_id: i32, // Assuming user_id is i32 -} - -#[derive(Insertable, Debug, Serialize, Deserialize)] -#[diesel(table_name = diskinfo)] -pub struct SchemaDiskInfo { - pub sysinfo_id: i32, - pub filesystem: String, - pub size: i32, // Changed to i32 to match schema (Integer maps to i32 often) - pub used: i32, // Changed to i32 - pub available: i32, // Changed to i32 - pub used_percentage: i32, // Changed to i32 - pub mounted_path: String, -} - -// You might also want structs for querying data later -#[derive(Queryable, Selectable, Identifiable, Debug, Serialize, Deserialize)] -#[diesel(table_name = sysinfo)] -pub struct SysInfo { - #[diesel(column_name = id)] // Explicitly map id 
if needed, depends on schema generation - pub id: Option, - pub timestamp: String, - pub cpu_usage: f32, - pub ram_usage: f32, - pub total_ram: f32, - pub free_ram: f32, - pub used_swap: f32, - // pub user_id: i32, -} - -#[derive(Queryable, Selectable, Identifiable, Debug, Serialize, Deserialize)] -#[diesel(table_name = diskinfo)] -pub struct DiskInfo { - #[diesel(column_name = id)] // Explicitly map id - pub id: Option, - pub sysinfo_id: i32, - pub filesystem: String, - pub size: i32, // Changed to i32 to match schema (Integer maps to i32 often) - pub used: i32, // Changed to i32 - pub available: i32, // Changed to i32 - pub used_percentage: i32, - pub mounted_path: String, -} - -impl Default for SchemaSysInfo { - fn default() -> Self { - Self { - timestamp: "".to_string(), - cpu_usage: 0.0, - ram_usage: 0.0, - total_ram: 0.0, - free_ram: 0.0, - used_swap: 0.0, - // user_id: 0, - } - } -} - -impl Default for SchemaDiskInfo { - fn default() -> Self { - Self { - sysinfo_id: 0, - filesystem: "".to_string(), - size: 0, - used: 0, - available: 0, - used_percentage: 0, - mounted_path: "".to_string(), - } - } -} diff --git a/src/monitor/storage.rs b/src/monitor/storage.rs deleted file mode 100644 index a857fc5..0000000 --- a/src/monitor/storage.rs +++ /dev/null @@ -1,53 +0,0 @@ -use diesel::{Connection as ConnectionDiesel, SqliteConnection}; -use diesel::connection::SimpleConnection; // Added -use std::path::Path; -use std::sync::{Arc, Mutex}; -use std::error::Error; // Added for Box - -// Removed: use rusqlite::{Connection, Result}; -// Removed: use std::time::Duration; - -#[derive(Clone)] -pub struct Storage { - // pub conn: Arc, // @Info: old Arc reference to don't break the code - pub diesel_conn: Arc>, // @Info: use to test diesel for now -} - -mod storage_utils { - use std::{fs, io, path::Path}; - - pub fn ensure_directory_exists(path: &str) -> io::Result<()> { - let dir_path = Path::new(path); - if !dir_path.exists() { - fs::create_dir_all(dir_path)?; - // 
Consider using log crate for messages instead of println! - // println!("Directory '{}' created.", path); - } - Ok(()) - } -} - -// TODO: Migrate Connection -> SqliteConnection // This TODO can be removed after this refactor -impl Storage { - pub fn new(db_path: &str) -> Result> { // Changed return type - if let Some(parent) = Path::new(db_path).parent() { - if let Some(parent_str) = parent.to_str() { - storage_utils::ensure_directory_exists(parent_str)?; // Changed from expect - } - } - - // Removed rusqlite connection logic - - let mut conn_new = SqliteConnection::establish(&db_path)?; // Changed from unwrap_or_else - - // Apply PRAGMAs to Diesel connection - // Note: busy_timeout is set in milliseconds for SQLite PRAGMA - conn_new.batch_execute("PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA busy_timeout = 5000;")?; - - Ok(Self { - diesel_conn: Arc::new(Mutex::new(conn_new)), - }) - } - - // Removed _init_db method -} diff --git a/src/monitor/sys.rs b/src/monitor/sys.rs deleted file mode 100644 index 3c88479..0000000 --- a/src/monitor/sys.rs +++ /dev/null @@ -1,176 +0,0 @@ -use super::mutation; -use super::schema::{SchemaDiskInfo, SchemaSysInfo}; // Import the Diesel insertable structs -use crate::{config::types::Config, monitor::storage::Storage}; -use chrono::Utc; -use diesel::SqliteConnection; // Import SqliteConnection -use std::{thread, time::Duration}; // Import Mutex -use sysinfo::{Disks, MemoryRefreshKind, System}; - -#[derive(Clone, Debug)] -pub struct DiskInfo { - pub available: usize, - // Add disk-related fields here - pub filesystem: String, - pub mounted_path: String, - pub size: usize, - pub used: usize, - pub used_percentage: usize, -} - -#[derive(Clone, Debug)] -pub struct SysInfo { - #[allow(dead_code)] - pub id: i64, - pub timestamp: String, - pub cpu_usage: f64, - pub ram_usage: f64, - pub total_ram: f64, - pub free_ram: f64, - pub used_swap: f64, - pub disks: Vec, -} - -#[allow(dead_code)] -impl SysInfo { - pub fn new( - 
cpu_usage: f64, - ram_usage: f64, - total_ram: f64, - free_ram: f64, - used_swap: f64, - disks: Vec, - ) -> Self { - Self { - id: 0, - timestamp: "".to_string(), - cpu_usage, - ram_usage, - total_ram, - free_ram, - used_swap, - disks, - } - } - - // Default constructor - pub fn default() -> Self { - Self { - id: 0, - timestamp: Utc::now().to_rfc3339(), - cpu_usage: 0.0, - ram_usage: 0.0, - total_ram: 0.0, - free_ram: 0.0, - used_swap: 0.0, - disks: vec![DiskInfo { - filesystem: String::new(), - size: 0, - used: 0, - available: 0, - used_percentage: 0, - mounted_path: String::new(), - }], - } - } - - pub fn run_monitor(mut self, config: &Config) { - let storage = match Storage::new(&config.database.path) { - Ok(storage) => storage, - Err(e) => { - eprintln!("Failed to create storage: {}", e); - return; - } - }; - - // Get a mutable connection from the Arc> - let mut conn_guard = match storage.diesel_conn.lock() { - Ok(guard) => guard, - Err(poisoned) => { - eprintln!("Failed to acquire lock on DB connection: {}", poisoned); - // Handle the poisoned mutex appropriately, maybe panic or return - return; - } - }; - // Dereference the guard to get the &mut SqliteConnection - let conn: &mut SqliteConnection = &mut *conn_guard; - - let mut sys = System::new_all(); - let disks_sysinfo = Disks::new_with_refreshed_list(); // Renamed to avoid conflict - - sys.refresh_all(); - sys.refresh_memory_specifics(MemoryRefreshKind::nothing().with_ram()); - - self.total_ram = sys.total_memory() as f64; - self.free_ram = sys.free_memory() as f64; - self.used_swap = sys.used_swap() as f64; - self.ram_usage = sys.used_memory() as f64; // Use used_memory for ram_usage - - thread::sleep(Duration::from_millis(250)); - sys.refresh_cpu_all(); - - let cpu_count = sys.cpus().len(); - let total_cpu_usage: f64 = sys.cpus().iter().map(|cpu| cpu.cpu_usage() as f64).sum(); - self.cpu_usage = if cpu_count > 0 { - total_cpu_usage / cpu_count as f64 - } else { - 0.0 - }; - - // --- Prepare data for 
Diesel insertion --- - self.timestamp = Utc::now().to_rfc3339(); // Ensure timestamp is current - - // Create the SchemaSysInfo struct for insertion - let new_sys_info_to_insert = SchemaSysInfo { - timestamp: self.timestamp, - cpu_usage: self.cpu_usage as f32, // Cast f64 to f32 - ram_usage: self.ram_usage as f32, // Cast f64 to f32 - total_ram: self.total_ram as f32, // Cast f64 to f32 - free_ram: self.free_ram as f32, // Cast f64 to f32 - used_swap: self.used_swap as f32, // Cast f64 to f32 - }; - - // Insert system info using the SchemaSysInfo struct - let sysinfo_id = match mutation::insert_sysinfo(conn, &new_sys_info_to_insert) { - Ok(id) => id, - Err(e) => { - eprintln!("Failed to insert system info: {}", e); - // Drop the lock before returning - drop(conn_guard); - return; - } - }; - - // Prepare disk info data for batch insertion - let mut disk_infos_to_insert: Vec = Vec::new(); - for disk in disks_sysinfo.list() { - let space_used = disk.total_space() - disk.available_space(); - // Calculate usage percentage correctly - let usage_percentage = if disk.total_space() > 0 { - (space_used as f64 / disk.total_space() as f64 * 100.0) as i32 - } else { - 0 - }; - - let fs_name = disk.name().to_string_lossy().to_string(); - let mount_point = disk.mount_point().to_string_lossy().to_string(); - - disk_infos_to_insert.push(SchemaDiskInfo { - sysinfo_id, // Use the ID from the inserted sysinfo - filesystem: fs_name, - size: (disk.total_space() / 1024 / 1024) as i32, // Convert bytes to MB (adjust if needed) and cast usize to i32 - used: (space_used / 1024 / 1024) as i32, // Convert bytes to MB and cast usize to i32 - available: (disk.available_space() / 1024 / 1024) as i32, // Convert bytes to MB and cast usize to i32 - used_percentage: usage_percentage, // Use calculated percentage - mounted_path: mount_point, - }); - } - - // Insert disk info using the SchemaDiskInfo structs - if !disk_infos_to_insert.is_empty() { - if let Err(e) = 
mutation::insert_multiple_diskinfo(conn, &disk_infos_to_insert) { - eprintln!("Failed to insert disk info batch: {}", e); - } - } - // Lock is automatically dropped here when conn_guard goes out of scope - } -} diff --git a/src/utils.rs b/src/utils.rs deleted file mode 100644 index 5c5e603..0000000 --- a/src/utils.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[allow(dead_code)] -pub trait SysUtils { - // Default implementation to convert bytes to gigabytes - fn to_gb(&self, bytes: u64) -> f64 { - bytes as f64 / 1024.0 / 1024.0 / 1024.0 - } -} diff --git a/src/webserver/auth/handlers.rs b/src/webserver/auth/handlers.rs deleted file mode 100644 index c85dc0a..0000000 --- a/src/webserver/auth/handlers.rs +++ /dev/null @@ -1,180 +0,0 @@ -use crate::{ - config::{schema::TeusConfig, types::Config}, - monitor::storage::Storage, - webserver::auth::{middleware::Claims, schema::User}, -}; -use actix_web::{HttpResponse, Responder, post, web}; -use argon2::{ - Argon2, - password_hash::{PasswordHash, PasswordVerifier}, -}; -use chrono::{Duration, Utc}; -use jsonwebtoken::{EncodingKey, Header, encode}; -use serde::{Deserialize, Serialize}; - -#[derive(Deserialize)] -pub struct LoginRequest { - username: String, - password: String, -} - -#[derive(Deserialize)] -pub struct SignupRequest { - username: String, - password: String, -} - -#[derive(Serialize)] -pub struct TokenResponse { - access: String, - refresh: String, - expires_in: i64, -} - -pub struct JwtConfig { - pub secret: String, - pub expiration_hours: i64, -} - -#[derive(Serialize)] -// TODO: Move this to a separate file for every kind of response -struct GenericResponse { - message: String, -} - -#[derive(Serialize)] -struct NewUserResponse { - id: i32, - username: String, -} - -// Verify the password against the hash -fn is_same_password(password_hash: &str, clear_pass: &str, _salt: &str) -> bool { - // Try to parse the password hash - let parsed_hash = match PasswordHash::new(password_hash) { - Ok(hash) => hash, - Err(_) => 
return false, - }; - - // Verify the password against the hash - Argon2::default() - .verify_password(clear_pass.as_bytes(), &parsed_hash) - .is_ok() -} - -// Handler di login -#[post("/login")] -pub async fn login( - login_data: web::Json, - jwt_config: web::Data, - config: actix_web::web::Data, -) -> impl Responder { - let storage = Storage::new(&config.database.path).unwrap(); - let mut conn = storage.diesel_conn.lock().unwrap(); - let user = User::find_by_username(&mut *conn, &login_data.username).unwrap(); - let user_id: i32; - - match user { - Some(user) => { - println!("User found: {:?}", user); - let is_password_correct = - is_same_password(&user.password, &login_data.password, &user.salt); - - if !is_password_correct { - let response = GenericResponse { - message: "Invalid Credentials".to_string(), - }; - return HttpResponse::Unauthorized().json(response); - } - - user_id = user.id.unwrap(); - } - None => { - println!("User not found"); - let response = GenericResponse { - message: "Invalid Credentials".to_string(), - }; - return HttpResponse::Unauthorized().json(response); - } - } - - // Calcola la scadenza per access token - let access_expiration = Utc::now() - .checked_add_signed(Duration::hours(jwt_config.expiration_hours)) - .expect("Valid timestamp") - .timestamp() as usize; - - // Calcola la scadenza per refresh token (più lunga, ad esempio 7 giorni) - let refresh_expiration = Utc::now() - .checked_add_signed(Duration::hours(24 * 7)) // 7 days - .expect("Valid timestamp") - .timestamp() as usize; - - // Crea i claims per access token - let access_claims = Claims { - sub: login_data.username.clone(), - exp: access_expiration, - iat: Utc::now().timestamp() as usize, - id: user_id, - }; - - // Crea i claims per refresh token - let refresh_claims = Claims { - sub: login_data.username.clone(), - exp: refresh_expiration, - iat: Utc::now().timestamp() as usize, - id: user_id, - }; - - // Genera l'access token - let access_token = encode( - 
&Header::default(), - &access_claims, - &EncodingKey::from_secret(jwt_config.secret.as_bytes()), - ) - .unwrap(); - - // Genera il refresh token - let refresh_token = encode( - &Header::default(), - &refresh_claims, - &EncodingKey::from_secret(jwt_config.secret.as_bytes()), - ) - .unwrap(); - - // Restituisci i token - let response = TokenResponse { - access: access_token, - refresh: refresh_token, - expires_in: jwt_config.expiration_hours * 3600, - }; - - HttpResponse::Ok().json(response) -} - -#[post("/signup")] -pub async fn signup( - signup_data: web::Json, - config: actix_web::web::Data, -) -> impl Responder { - let storage = Storage::new(&config.database.path).unwrap(); - let mut conn = storage.diesel_conn.lock().unwrap(); - - let existing_user = User::find_by_username(&mut *conn, &signup_data.username).unwrap(); - if existing_user.is_some() { - let response = GenericResponse { - message: "Username already exists".to_string(), - }; - return HttpResponse::Conflict().json(response); - } - let user = User::create(&mut *conn, &signup_data.username, &signup_data.password).unwrap(); - TeusConfig::set_first_visit(&mut *conn, false).unwrap(); - - // Create a response without the sensitive data - let user_response = NewUserResponse { - id: user.id.unwrap(), - username: user.username, - }; - - HttpResponse::Created().json(user_response) -} diff --git a/src/webserver/auth/middleware.rs b/src/webserver/auth/middleware.rs deleted file mode 100644 index aee3638..0000000 --- a/src/webserver/auth/middleware.rs +++ /dev/null @@ -1,103 +0,0 @@ -use actix_web::{ - Error, HttpMessage, - dev::{Service, ServiceRequest, ServiceResponse, Transform, forward_ready}, - error::ErrorUnauthorized, -}; -use jsonwebtoken::{Algorithm, DecodingKey, Validation, decode}; -use serde::{Deserialize, Serialize}; -use std::{ - future::{Future, Ready, ready}, - pin::Pin, - rc::Rc, -}; - -// JWT claims structure -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct Claims { - pub sub: String, 
// Subject (user ID) - pub exp: usize, // Expiration time - pub iat: usize, // Issued at - pub id: i32, // User ID -} - -pub struct AuthMiddlewareFactory { - jwt_secret: String, -} - -impl AuthMiddlewareFactory { - pub fn new(jwt_secret: String) -> Self { - Self { jwt_secret } - } -} - -pub struct AuthMiddleware { - service: Rc, - jwt_secret: String, -} - -impl Transform for AuthMiddlewareFactory -where - S: Service, Error = Error> + 'static, - S::Future: 'static, - B: 'static, -{ - type Response = ServiceResponse; - type Error = Error; - type Transform = AuthMiddleware; - type InitError = (); - type Future = Ready>; - - fn new_transform(&self, service: S) -> Self::Future { - ready(Ok(AuthMiddleware { - service: Rc::new(service), - jwt_secret: self.jwt_secret.clone(), - })) - } -} - -impl Service for AuthMiddleware -where - S: Service, Error = Error> + 'static, - S::Future: 'static, - B: 'static, -{ - type Response = ServiceResponse; - type Error = Error; - type Future = Pin>>>; - - forward_ready!(service); - - fn call(&self, req: ServiceRequest) -> Self::Future { - let service = self.service.clone(); - let jwt_secret = self.jwt_secret.clone(); - - Box::pin(async move { - let auth_header = req.headers().get("Authorization"); - let token = match auth_header { - Some(header) => { - let header_str = header - .to_str() - .map_err(|_| ErrorUnauthorized("Invalid Authorization header format"))?; - - if !header_str.starts_with("Bearer ") { - return Err(ErrorUnauthorized("Invalid Authorization header format")); - } - - header_str[7..].trim() - } - None => return Err(ErrorUnauthorized("Authorization header missing")), - }; - - let token_data = decode::( - token, - &DecodingKey::from_secret(jwt_secret.as_bytes()), - &Validation::new(Algorithm::HS256), - ) - .map_err(|_| ErrorUnauthorized("Invalid token"))?; - - let claims = token_data.claims; - req.extensions_mut().insert(claims.clone()); - service.call(req).await - }) - } -} diff --git a/src/webserver/auth/schema.rs 
b/src/webserver/auth/schema.rs deleted file mode 100644 index 1cad080..0000000 --- a/src/webserver/auth/schema.rs +++ /dev/null @@ -1,12 +0,0 @@ -use diesel::prelude::*; -use serde::Serialize; - -#[derive(Insertable, Queryable, Selectable, Serialize, Debug)] -#[diesel(table_name = crate::schema::user)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct User { - pub id: Option, - pub username: String, - pub password: String, - pub salt: String, -} \ No newline at end of file diff --git a/src/webserver/models/sysmodels.rs b/src/webserver/models/sysmodels.rs deleted file mode 100644 index 9de8a0c..0000000 --- a/src/webserver/models/sysmodels.rs +++ /dev/null @@ -1,89 +0,0 @@ -use serde::Serialize; - -/// Represents information about an IP address (private). -#[derive(serde::Serialize, Debug)] -pub struct IpInfo { - /// The network interface name. - pub interface: String, - /// The IP address. - pub addr: String, - /// The subnet prefix length. - pub prefix: u8, -} - -/// Represents information about a MAC address (private). -#[derive(serde::Serialize, Debug)] -pub struct MACInfo { - /// The network interface name. - pub interface: String, - /// The MAC address. - pub mac: String, -} - -/// Represents generic system information (private). -#[derive(serde::Serialize, Debug)] -pub struct GenericSysInfoResponse { - /// The system's hostname. - pub hostname: String, - /// The operating system name. - pub os: String, - /// The system uptime. - pub uptime: String, - /// The kernel version. - pub kernel_version: String, - /// The primary IPv4 address. - pub ipv4: String, - /// List of network interfaces with IP information. - pub networks: Vec, - /// List of MAC addresses. 
- pub mac_addresses: Vec, -} - -impl GenericSysInfoResponse {} -impl Default for GenericSysInfoResponse { - fn default() -> Self { - Self { - hostname: "No Info".to_string(), - os: "No Info".to_string(), - uptime: "No Info".to_string(), - kernel_version: "No Info".to_string(), - ipv4: "No Info".to_string(), - networks: vec![], - mac_addresses: vec![], - } - } -} - -/// Represents the overall system information. -#[derive(Serialize)] -pub struct SysInfoResponse { - /// The timestamp of the system information snapshot. - pub timestamp: String, - /// Percentage of CPU usage. - pub cpu_usage: f32, - /// Percentage of RAM usage. - pub ram_usage: f32, - /// Total RAM in the system (in bytes). - pub total_ram: f32, - /// Free RAM available (in bytes). - pub free_ram: f32, - /// Swap memory used (in bytes). - pub used_swap: f32, - /// List of disk information. - pub disks: Vec, -} - -/// Represents information about a single disk. -#[derive(Serialize)] -pub struct DiskInfoResponse { - /// The type of filesystem (e.g., ext4, NTFS). - pub filesystem: String, - /// The mount point of the disk. - pub mount_point: String, - /// Total space on the disk (in bytes). - pub total_space: i32, - /// Available space on the disk (in bytes). - pub available_space: i32, - /// Used space on the disk (in bytes). 
- pub used_space: i32, -} diff --git a/src/webserver/services/mod.rs b/src/webserver/services/mod.rs deleted file mode 100644 index eea1f02..0000000 --- a/src/webserver/services/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod systeminfo; \ No newline at end of file diff --git a/teus.toml b/teus.toml index ccc4be8..a5dc958 100644 --- a/teus.toml +++ b/teus.toml @@ -2,7 +2,7 @@ host = "0.0.0.0" port = 26783 secret = "8ec5cf90-a90a-4037-b8bd-157883f53fd3" -environment = "Production" # dev | prod +environment = "prod" # dev | prod [database] path = "/var/lib/teus/sysinfo.db" diff --git a/teus/bookmarks/handlers.rs b/teus/bookmarks/handlers.rs new file mode 100644 index 0000000..91e27a6 --- /dev/null +++ b/teus/bookmarks/handlers.rs @@ -0,0 +1,152 @@ +use crate::bookmarks::schema::{NewService, Service, ServicePatchPayload, ServicePayload}; +use crate::config::types::Config; +use crate::monitor::storage::Storage; +use crate::webserver::auth::middleware::Claims; +use actix_web::{delete, get, patch, post, web, HttpMessage, HttpRequest, HttpResponse, Responder}; + +#[allow(dead_code)] +/// Helper function to extract claims from request +fn extract_claims_from_request(req: &HttpRequest) -> Result { + req.extensions().get::().cloned().ok_or_else(|| { + HttpResponse::Unauthorized().json(serde_json::json!({ + "error": "No authentication claims found" + })) + }) +} + +#[get("/bookmarks")] +/// Get all services for the authenticated user +pub async fn get_user_services( + req: HttpRequest, + config: actix_web::web::Data, +) -> impl Responder { + // Clone the claims to own them + let claims = match req.extensions().get::().cloned() { + Some(claims) => claims, + None => { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "error": "No authentication claims found" + })); + } + }; + + let user_id = claims.id; + let storage = Storage::new(&config.database.path).unwrap(); + let mut conn = storage.diesel_conn.lock().unwrap(); + let services = + 
Service::get_services_by_user_id(&mut conn, user_id).expect("Error getting services"); + + HttpResponse::Ok().json(serde_json::json!(services)) +} + +#[post("/bookmarks")] +/// Add a new service for the authenticated user +pub async fn add_service( + req: HttpRequest, + service_data: web::Json, + config: actix_web::web::Data, +) -> impl Responder { + // if !service_data.values() {} + let claims = extract_claims_from_request(&req).expect("Cannot extract claims from request"); + let new_service = NewService { + name: service_data.name.clone(), + link: service_data.link.clone(), + icon: service_data.icon.clone(), + user_id: claims.id, + }; + + let storage = Storage::new(&config.database.path).unwrap(); + let mut conn = storage.diesel_conn.lock().unwrap(); + + let service_added = match Service::add_service(&mut conn, new_service) { + Ok(service) => service, + Err(_) => { + return HttpResponse::InternalServerError().json(serde_json::json!({ + "message": "Error creating a new Service", + })) + } + }; + + HttpResponse::Created().json(service_added) +} + +#[delete("/bookmarks/{id}")] +pub async fn delete_service_by_id( + id: web::Path, + req: HttpRequest, + config: actix_web::web::Data, +) -> impl Responder { + let claims = extract_claims_from_request(&req).expect("Cannot extract claims from request"); + let user_id = claims.id; + let bookmark_id = id.clone(); + + let storage = Storage::new(&config.database.path).unwrap(); + let mut conn = storage.diesel_conn.lock().unwrap(); + + match Service::_get_service_by_id(&mut conn, bookmark_id) { + Ok(service) => { + if service.user_id != user_id { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "message": "You are not authorized to delete this service" + })); + } + + match Service::delete_service(&mut conn, bookmark_id, user_id) { + Ok(rows_affected) => { + if rows_affected > 0 { + HttpResponse::NoContent().finish() + } else { + HttpResponse::InternalServerError().json(serde_json::json!({ + "message": "Unexpected 
error during deletion" + })) + } + } + Err(_) => HttpResponse::InternalServerError().json(serde_json::json!({ + "message": "Error deleting bookmark" + })), + } + } + Err(_) => { + // Service doesn't exist + HttpResponse::NotFound().json(serde_json::json!({ + "message": "Service not found" + })) + } + } +} + +#[patch("/bookmarks/{id}")] +pub async fn update_service_by_id( + id: web::Path, + service_data: web::Json, + req: HttpRequest, + config: actix_web::web::Data, +) -> impl Responder { + let claims = extract_claims_from_request(&req).expect("Cannot extract claims from request"); + let user_id = claims.id; + let bookmark_id = id.clone(); + + let storage = Storage::new(&config.database.path).unwrap(); + let mut conn = storage.diesel_conn.lock().unwrap(); + + match Service::_get_service_by_id(&mut conn, bookmark_id) { + Ok(service) => { + if service.user_id != user_id { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "message": "You are not authorized to update this service" + })); + } + + match Service::patch_service(&mut conn, bookmark_id, user_id, service_data.into_inner()) + { + Ok(service) => HttpResponse::Ok().json(service), + Err(_) => HttpResponse::InternalServerError().json(serde_json::json!({ + "message": "Error updating bookmark" + })), + } + } + Err(_) => HttpResponse::NotFound().json(serde_json::json!({ + "message": "Service not found" + })), + } +} diff --git a/teus/bookmarks/mod.rs b/teus/bookmarks/mod.rs new file mode 100644 index 0000000..fc95d25 --- /dev/null +++ b/teus/bookmarks/mod.rs @@ -0,0 +1,3 @@ +pub mod handlers; +pub mod mutation; +pub mod schema; diff --git a/teus/bookmarks/mutation.rs b/teus/bookmarks/mutation.rs new file mode 100644 index 0000000..164e52b --- /dev/null +++ b/teus/bookmarks/mutation.rs @@ -0,0 +1,95 @@ +use super::schema::{NewService, Service, ServicePatchPayload}; +use diesel::prelude::*; +use diesel::result::Error; +use diesel::{RunQueryDsl, SqliteConnection}; + +impl Service { + /// Add a new service 
to the database + pub fn add_service( + conn: &mut SqliteConnection, + new_service: NewService, + ) -> Result { + use crate::schema::services; + + diesel::insert_into(services::table) + .values(&new_service) + .returning(Service::as_returning()) + .get_result(conn) + } + + /// Get all services for a specific user + pub fn get_services_by_user_id( + conn: &mut SqliteConnection, + user_id_claim: i32, + ) -> Result, Error> { + use crate::schema::services::dsl::*; + + services + .filter(user_id.eq(user_id_claim)) + .select(Service::as_select()) + .load(conn) + } + + /// Get a specific service by ID + pub fn _get_service_by_id( + conn: &mut SqliteConnection, + service_id: i32, + ) -> Result { + use crate::schema::services::dsl::*; + + services + .filter(id.eq(service_id)) + .select(Service::as_select()) + .first(conn) + } + + /// Update a service + pub fn update_service( + conn: &mut SqliteConnection, + service_id: i32, + user_query_id: i32, + updated_service: NewService, + ) -> Result { + use crate::schema::services::dsl::*; + + diesel::update(services.filter(id.eq(service_id).and(user_id.eq(user_query_id)))) + .set(( + name.eq(&updated_service.name), + link.eq(&updated_service.link), + icon.eq(&updated_service.icon), + user_id.eq(&updated_service.user_id), + )) + .returning(Service::as_returning()) + .get_result(conn) + } + + /// Update a service with partial data (PATCH operation) + pub fn patch_service( + conn: &mut SqliteConnection, + service_id: i32, + user_query_id: i32, + patch_data: ServicePatchPayload, + ) -> Result { + let current_service = Self::_get_service_by_id(conn, service_id)?; + let updated_service = NewService { + name: patch_data.name.unwrap_or(current_service.name), + link: patch_data.link.unwrap_or(current_service.link), + icon: patch_data.icon.or(current_service.icon), + user_id: user_query_id, + }; + + Self::update_service(conn, service_id, user_query_id, updated_service) + } + + /// Delete a service + pub fn delete_service( + conn: &mut 
SqliteConnection, + service_id: i32, + user_query_id: i32, + ) -> Result { + use crate::schema::services::dsl::*; + + diesel::delete(services.filter(id.eq(service_id).and(user_id.eq(user_query_id)))) + .execute(conn) + } +} diff --git a/teus/bookmarks/schema.rs b/teus/bookmarks/schema.rs new file mode 100644 index 0000000..ace354b --- /dev/null +++ b/teus/bookmarks/schema.rs @@ -0,0 +1,49 @@ +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; + +#[allow(dead_code)] +pub type Bookmarks = Vec; + +// For querying existing services from the database +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Queryable, Selectable)] +#[diesel(table_name = crate::schema::services)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +#[serde(rename_all = "camelCase")] +pub struct Service { + pub id: Option, + pub name: String, + pub link: String, + pub icon: Option, + pub user_id: i32, +} + +// For inserting new services into the database +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Insertable)] +#[diesel(table_name = crate::schema::services)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +#[serde(rename_all = "camelCase")] +pub struct NewService { + pub name: String, + pub link: String, + pub icon: Option, + pub user_id: i32, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NewServiceSchema { + pub name: String, + pub link: String, + pub icon: Option, +} + +// For backwards compatibility if you still need BookmarkService +pub type BookmarkService = Service; +pub type ServicePayload = NewServiceSchema; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ServicePatchPayload { + pub name: Option, + pub link: Option, + pub icon: Option, +} diff --git a/src/config/handlers.rs b/teus/config/handlers.rs similarity index 83% rename from src/config/handlers.rs rename to teus/config/handlers.rs index fe85108..29e99d3 100644 --- a/src/config/handlers.rs +++ 
b/teus/config/handlers.rs @@ -5,13 +5,11 @@ use crate::{ }, monitor::storage::Storage, }; -use actix_web::{get, web, Error, HttpResponse, Responder}; use actix_web::error::ErrorInternalServerError; +use actix_web::{get, web, Error, HttpResponse}; #[get("/teus-config")] -pub async fn get_teus_config( - config: web::Data, -) -> Result { +pub async fn get_teus_config(config: web::Data) -> Result { let storage = Storage::new(&config.database.path).map_err(|e| { eprintln!("Failed to initialize storage: {:?}", e); // TODO: Use log::error! ErrorInternalServerError("Failed to initialize storage") @@ -33,11 +31,12 @@ pub async fn get_teus_config( } #[get("/teus-config/first-visit")] -pub async fn is_first_visit( - config: web::Data, -) -> Result { +pub async fn is_first_visit(config: web::Data) -> Result { let storage = Storage::new(&config.database.path).map_err(|e| { - eprintln!("Failed to initialize storage for first-visit check: {:?}", e); // TODO: Use log::error! + eprintln!( + "Failed to initialize storage for first-visit check: {:?}", + e + ); // TODO: Use log::error! 
ErrorInternalServerError("Failed to initialize storage") })?; let mut conn = storage.diesel_conn.lock().map_err(|_| { diff --git a/src/config/mod.rs b/teus/config/mod.rs similarity index 98% rename from src/config/mod.rs rename to teus/config/mod.rs index b764e77..9054531 100644 --- a/src/config/mod.rs +++ b/teus/config/mod.rs @@ -4,4 +4,3 @@ pub mod parser; pub mod query; pub mod schema; pub mod types; - diff --git a/src/config/mutation.rs b/teus/config/mutation.rs similarity index 100% rename from src/config/mutation.rs rename to teus/config/mutation.rs diff --git a/teus/config/parser.rs b/teus/config/parser.rs new file mode 100644 index 0000000..a299892 --- /dev/null +++ b/teus/config/parser.rs @@ -0,0 +1,113 @@ +use crate::config::types::Config; +use std::error::Error; +use std::{fs, path::Path}; + +#[allow(dead_code)] +type GeneralError = Box; +#[allow(dead_code)] +type ConfigResult = Result; + +pub fn load_config>(path: P) -> ConfigResult { + let content = fs::read_to_string(path)?; + let config: Config = toml::from_str(&content)?; + + Ok(config) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::NamedTempFile; + + fn create_test_config_content() -> String { + r#" +[server] +host = "127.0.0.1" +port = 8080 +secret = "test_secret_key" +environment = "test" + +[database] +path = "./test.db" + +[monitor] +interval_secs = 60 +"# + .to_string() + } + + #[test] + fn test_load_valid_config() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let config_content = create_test_config_content(); + fs::write(temp_file.path(), config_content).expect("Failed to write config"); + + let result = load_config(temp_file.path()); + assert!(result.is_ok()); + + let config = result.unwrap(); + assert_eq!(config.server.host, "127.0.0.1"); + assert_eq!(config.server.port, 8080); + assert_eq!(config.server.secret, "test_secret_key"); + assert_eq!(config.database.path, "./test.db"); + assert_eq!(config.monitor.interval_secs, 
60); + } + + #[test] + fn test_load_nonexistent_file() { + let result = load_config("nonexistent_file.toml"); + assert!(result.is_err()); + } + + #[test] + fn test_load_invalid_toml() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let invalid_content = "invalid toml content [[["; + fs::write(temp_file.path(), invalid_content).expect("Failed to write invalid config"); + + let result = load_config(temp_file.path()); + assert!(result.is_err()); + } + + #[test] + fn test_load_incomplete_config() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let incomplete_content = r#" +[server] +host = "127.0.0.1" +# Missing port, secret, environment +"#; + fs::write(temp_file.path(), incomplete_content).expect("Failed to write incomplete config"); + + let result = load_config(temp_file.path()); + assert!(result.is_err()); + } + + #[test] + fn test_load_config_with_different_environment() { + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let config_content = r#" +[server] +host = "0.0.0.0" +port = 3000 +secret = "prod_secret" +environment = "prod" + +[database] +path = "/var/lib/teus/teus.db" + +[monitor] +interval_secs = 30 +"#; + fs::write(temp_file.path(), config_content).expect("Failed to write config"); + + let result = load_config(temp_file.path()); + assert!(result.is_ok()); + + let config = result.unwrap(); + assert_eq!(config.server.host, "0.0.0.0"); + assert_eq!(config.server.port, 3000); + assert_eq!(config.monitor.interval_secs, 30); + } +} diff --git a/src/config/query.rs b/teus/config/query.rs similarity index 100% rename from src/config/query.rs rename to teus/config/query.rs diff --git a/src/config/schema.rs b/teus/config/schema.rs similarity index 100% rename from src/config/schema.rs rename to teus/config/schema.rs diff --git a/teus/config/types.rs b/teus/config/types.rs new file mode 100644 index 0000000..ac39f7d --- /dev/null +++ b/teus/config/types.rs @@ -0,0 +1,474 
@@ +use serde::{Deserialize, Deserializer, Serialize}; +use std::str::FromStr; + +/// Represents the deployment environment for the Teus application. +/// +/// This enum is used to configure environment-specific behavior throughout +/// the application, such as logging levels, database connections, and +/// security settings. +/// +/// # Examples +/// +/// ```rust +/// use teus::config::types::Environment; +/// use std::str::FromStr; +/// +/// let env = Environment::from_str("dev").unwrap(); +/// assert_eq!(env.as_str(), "dev"); +/// ``` +/// +/// # Serialization +/// +/// This enum can be serialized and deserialized with serde, making it +/// suitable for use in configuration files (TOML, JSON, etc.). +#[derive(Debug, Clone, Serialize)] +pub enum Environment { + /// Development environment - typically used for local development + /// with debug logging and relaxed security settings. + Development, + /// Test environment - used for automated testing and QA. + Test, + /// Production environment - live deployment with optimized + /// performance and strict security settings. + Production, +} + +impl<'de> Deserialize<'de> for Environment { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Environment::from_str(&s).map_err(serde::de::Error::custom) + } +} + +impl Environment { + #[allow(dead_code)] + pub fn as_str(&self) -> &'static str { + match self { + Environment::Development => "dev", + Environment::Test => "test", + Environment::Production => "prod", + } + } +} + +impl FromStr for Environment { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "dev" => Ok(Environment::Development), + "test" => Ok(Environment::Test), + "prod" => Ok(Environment::Production), + _ => Err(format!("Unknown environment: {}", s)), + } + } +} + +/// Main configuration structure for the Teus application. 
+/// +/// This structure holds all the configuration settings required to run +/// the Teus monitoring and management system. It is typically loaded +/// from a TOML configuration file at application startup. +/// +/// # Structure +/// +/// The configuration is organized into three main sections: +/// - `server`: Web server and API configuration +/// - `database`: Database connection settings +/// - `monitor`: System monitoring parameters +/// +/// # Examples +/// +/// Loading configuration from a TOML file: +/// +/// ```rust +/// use teus::config::types::Config; +/// +/// let toml_content = r#" +/// [server] +/// host = "0.0.0.0" +/// port = 8080 +/// secret = "your-secret-key" +/// environment = "prod" +/// +/// [database] +/// path = "./teus.db" +/// +/// [monitor] +/// interval_secs = 30 +/// "#; +/// +/// let config: Config = toml::from_str(toml_content).unwrap(); +/// ``` +#[derive(Debug, Deserialize, Clone)] +pub struct Config { + /// Web server configuration settings + pub server: ServerConfig, + /// Database configuration settings + pub database: DatabaseConfig, + /// System monitoring configuration settings + pub monitor: MonitorConfig, +} + +/// Configuration for the Teus web server and API. +/// +/// This structure contains all the settings needed to configure the +/// HTTP server that serves the Teus web interface and API endpoints. +/// +/// # Security Considerations +/// +/// The `secret` field is used for JWT token signing and session management. +/// It should be a cryptographically secure random string and must be kept +/// confidential in production environments. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::config::types::{ServerConfig, Environment}; +/// +/// let server_config = ServerConfig { +/// host: "127.0.0.1".to_string(), +/// port: 3000, +/// secret: "secure-random-secret-key".to_string(), +/// environment: Environment::Development, +/// }; +/// ``` +#[allow(dead_code)] +#[derive(Debug, Deserialize, Clone)] +pub struct ServerConfig { + /// The IP address or hostname the server should bind to. + /// + /// Common values: + /// - "127.0.0.1" or "localhost" for local-only access + /// - "0.0.0.0" to bind to all available interfaces + pub host: String, + + /// The TCP port number the server should listen on. + /// + /// Must be a valid port number (1-65535). Common values: + /// - 8080 for development + /// - 80 for HTTP in production + /// - 443 for HTTPS in production + pub port: u16, + + /// Secret key used for JWT token signing and other cryptographic operations. + /// + /// This should be a long, random string. In production, this should be + /// loaded from environment variables or a secure configuration management + /// system rather than stored in plain text. + pub secret: String, + + /// The deployment environment this server is running in. + /// + /// Used to configure environment-specific behavior such as logging + /// levels and security settings. Currently not fully implemented. + pub environment: Environment, +} + +/// Configuration for the SQLite database used by Teus. +/// +/// Teus uses SQLite as its embedded database for storing system monitoring +/// data, user accounts, and application state. This configuration specifies +/// where the database file should be located. 
+/// +/// # File Path Considerations +/// +/// The path can be: +/// - Relative to the application's working directory +/// - An absolute path +/// - ":memory:" for an in-memory database (testing only) +/// +/// # Examples +/// +/// ```rust +/// use teus::config::types::DatabaseConfig; +/// +/// // Relative path +/// let db_config = DatabaseConfig { +/// path: "./data/teus.db".to_string(), +/// }; +/// +/// // Absolute path +/// let db_config = DatabaseConfig { +/// path: "/var/lib/teus/teus.db".to_string(), +/// }; +/// ``` +#[derive(Debug, Deserialize, Clone)] +pub struct DatabaseConfig { + /// Path to the SQLite database file. + /// + /// The directory containing this file must exist and be writable + /// by the Teus process. If the file doesn't exist, it will be + /// created automatically on first startup. + pub path: String, +} + +/// Configuration for the system monitoring component. +/// +/// This structure controls how frequently Teus collects and stores +/// system metrics such as CPU usage, memory consumption, and disk space. +/// +/// # Performance Considerations +/// +/// Lower intervals provide more granular data but increase: +/// - CPU overhead from frequent metric collection +/// - Database storage requirements +/// - Memory usage +/// +/// Higher intervals reduce overhead but may miss short-term spikes +/// in resource usage. +/// +/// # Recommended Values +/// +/// - Development/Testing: 5-10 seconds +/// - Production monitoring: 30-60 seconds +/// - Long-term trending: 300+ seconds +/// +/// # Examples +/// +/// ```rust +/// use teus::config::types::MonitorConfig; +/// +/// // High-frequency monitoring +/// let monitor_config = MonitorConfig { +/// interval_secs: 10, +/// }; +/// +/// // Standard production monitoring +/// let monitor_config = MonitorConfig { +/// interval_secs: 60, +/// }; +/// ``` +#[derive(Debug, Deserialize, Clone)] +pub struct MonitorConfig { + /// Interval between system metric collection cycles, in seconds. 
+ /// + /// This value determines how often Teus will: + /// - Collect CPU, memory, and disk usage statistics + /// - Store the collected data in the database + /// - Update real-time monitoring displays + /// + /// Must be greater than 0. Values less than 5 seconds are not + /// recommended for production use due to performance overhead. + pub interval_secs: u64, +} + +/// Response structure for the first-visit check API endpoint. +/// +/// This structure is returned by the API to indicate whether this is +/// the first time the Teus application is being accessed. It's used +/// by the frontend to determine whether to show initial setup screens +/// or proceed to the normal interface. +/// +/// # API Usage +/// +/// This structure is typically returned as JSON from the `/api/first-visit` +/// endpoint and is used to drive the initial user experience flow. +/// +/// # Examples +/// +/// JSON representation: +/// ```json +/// { +/// "first_visit": true +/// } +/// ``` +/// +/// Rust usage: +/// ```rust +/// use teus::config::types::IsFirstVisitResponse; +/// +/// let response = IsFirstVisitResponse { +/// first_visit: false, +/// }; +/// +/// let json = serde_json::to_string(&response).unwrap(); +/// ``` +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct IsFirstVisitResponse { + /// Indicates whether this is the first visit to the application. 
+ /// + /// - `true`: This is the first time the application is being accessed, + /// and initial setup may be required + /// - `false`: The application has been accessed before and is already + /// configured + pub first_visit: bool, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_environment_as_str() { + assert_eq!(Environment::Development.as_str(), "dev"); + assert_eq!(Environment::Test.as_str(), "test"); + assert_eq!(Environment::Production.as_str(), "prod"); + } + + #[test] + fn test_environment_from_str() { + assert!(matches!( + Environment::from_str("dev"), + Ok(Environment::Development) + )); + assert!(matches!( + Environment::from_str("test"), + Ok(Environment::Test) + )); + assert!(matches!( + Environment::from_str("prod"), + Ok(Environment::Production) + )); + + // Test error case + let result = Environment::from_str("invalid"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), "Unknown environment: invalid"); + } + + #[test] + fn test_environment_case_sensitivity() { + // Should fail for different cases + assert!(Environment::from_str("DEV").is_err()); + assert!(Environment::from_str("Test").is_err()); + assert!(Environment::from_str("PROD").is_err()); + } + + #[test] + fn test_environment_debug_format() { + assert_eq!(format!("{:?}", Environment::Development), "Development"); + assert_eq!(format!("{:?}", Environment::Test), "Test"); + assert_eq!(format!("{:?}", Environment::Production), "Production"); + } + + #[test] + fn test_config_deserialization() { + let toml_str = r#" + [server] + host = "localhost" + port = 8080 + secret = "secret_key" + environment = "dev" + + [database] + path = "./test.db" + + [monitor] + interval_secs = 60 + "#; + + let config: Result = toml::from_str(toml_str); + assert!(config.is_ok()); + + let config = config.unwrap(); + assert_eq!(config.server.host, "localhost"); + assert_eq!(config.server.port, 8080); + assert_eq!(config.server.secret, "secret_key"); + assert!(matches!( + 
config.server.environment, + Environment::Development + )); + assert_eq!(config.database.path, "./test.db"); + assert_eq!(config.monitor.interval_secs, 60); + } + + #[test] + fn test_server_config_clone() { + let server_config = ServerConfig { + host: "127.0.0.1".to_string(), + port: 3000, + secret: "test_secret".to_string(), + environment: Environment::Test, + }; + + let cloned = server_config.clone(); + assert_eq!(server_config.host, cloned.host); + assert_eq!(server_config.port, cloned.port); + assert_eq!(server_config.secret, cloned.secret); + } + + #[test] + fn test_is_first_visit_response_serialization() { + let response = IsFirstVisitResponse { first_visit: true }; + let serialized = serde_json::to_string(&response).unwrap(); + assert!(serialized.contains("\"first_visit\":true")); + + let response = IsFirstVisitResponse { first_visit: false }; + let serialized = serde_json::to_string(&response).unwrap(); + assert!(serialized.contains("\"first_visit\":false")); + } + + #[test] + fn test_is_first_visit_response_deserialization() { + let json_str = r#"{"first_visit": true}"#; + let response: IsFirstVisitResponse = serde_json::from_str(json_str).unwrap(); + assert!(response.first_visit); + + let json_str = r#"{"first_visit": false}"#; + let response: IsFirstVisitResponse = serde_json::from_str(json_str).unwrap(); + assert!(!response.first_visit); + } + + #[test] + fn test_config_invalid_environment() { + let toml_str = r#" + [server] + host = "localhost" + port = 8080 + secret = "secret_key" + environment = "invalid_env" + + [database] + path = "./test.db" + + [monitor] + interval_secs = 60 + "#; + + let config: Result = toml::from_str(toml_str); + assert!(config.is_err()); + } + + #[test] + fn test_monitor_config_edge_cases() { + // Test with very small interval + let toml_str = r#" + [server] + host = "localhost" + port = 8080 + secret = "secret_key" + environment = "test" + + [database] + path = "./test.db" + + [monitor] + interval_secs = 1 + "#; + + let 
config: Config = toml::from_str(toml_str).unwrap(); + assert_eq!(config.monitor.interval_secs, 1); + + // Test with large interval + let toml_str = r#" + [server] + host = "localhost" + port = 8080 + secret = "secret_key" + environment = "test" + + [database] + path = "./test.db" + + [monitor] + interval_secs = 3600 + "#; + + let config: Config = toml::from_str(toml_str).unwrap(); + assert_eq!(config.monitor.interval_secs, 3600); + } +} diff --git a/teus/lib.rs b/teus/lib.rs new file mode 100644 index 0000000..14fdc1a --- /dev/null +++ b/teus/lib.rs @@ -0,0 +1,6 @@ +pub mod bookmarks; +pub mod config; +pub mod monitor; +pub mod schema; +pub mod utils; +pub mod webserver; diff --git a/src/main.rs b/teus/main.rs similarity index 97% rename from src/main.rs rename to teus/main.rs index 02380df..7275b0a 100644 --- a/src/main.rs +++ b/teus/main.rs @@ -1,8 +1,4 @@ -mod config; -mod monitor; -mod schema; -mod utils; -mod webserver; +use teus::{config, monitor, webserver}; use monitor::sys::SysInfo; use std::{ @@ -10,8 +6,8 @@ use std::{ path::Path, process, sync::{ - Arc, atomic::{AtomicBool, Ordering}, + Arc, }, thread, }; diff --git a/src/monitor/mod.rs b/teus/monitor/mod.rs similarity index 81% rename from src/monitor/mod.rs rename to teus/monitor/mod.rs index e03e330..cc1208a 100644 --- a/src/monitor/mod.rs +++ b/teus/monitor/mod.rs @@ -1,5 +1,5 @@ -pub mod sys; -pub mod storage; -pub mod schema; pub mod mutation; -pub mod query; \ No newline at end of file +pub mod query; +pub mod schema; +pub mod storage; +pub mod sys; diff --git a/src/monitor/mutation.rs b/teus/monitor/mutation.rs similarity index 100% rename from src/monitor/mutation.rs rename to teus/monitor/mutation.rs diff --git a/src/monitor/query.rs b/teus/monitor/query.rs similarity index 100% rename from src/monitor/query.rs rename to teus/monitor/query.rs diff --git a/teus/monitor/schema.rs b/teus/monitor/schema.rs new file mode 100644 index 0000000..257aa65 --- /dev/null +++ b/teus/monitor/schema.rs @@ 
-0,0 +1,510 @@ +//! Database schema structures for system monitoring data. +//! +//! This module defines the data structures used to store and retrieve +//! system monitoring information in the SQLite database. It includes +//! both insertable structures for writing new data and queryable +//! structures for reading existing data. + +use crate::schema::{diskinfo, sysinfo}; +use diesel::prelude::*; +use serde::{Deserialize, Serialize}; + +/// Structure for inserting system information records into the database. +/// +/// This structure represents a snapshot of system resource usage at a +/// specific point in time. It's designed to be inserted into the `sysinfo` +/// table and serves as the primary record for system monitoring data. +/// +/// # Database Schema +/// +/// Maps to the `sysinfo` table with the following constraints: +/// - `timestamp` should be in RFC3339 format for consistency +/// - All usage values are stored as floating-point numbers for precision +/// - Memory values are typically stored in bytes or megabytes +/// +/// # Examples +/// +/// ```rust +/// use teus::monitor::schema::SchemaSysInfo; +/// use chrono::Utc; +/// +/// let sys_info = SchemaSysInfo { +/// timestamp: Utc::now().to_rfc3339(), +/// cpu_usage: 25.5, +/// ram_usage: 4096.0, +/// total_ram: 16384.0, +/// free_ram: 8192.0, +/// used_swap: 512.0, +/// }; +/// ``` +#[derive(Insertable, Debug, Serialize, Deserialize)] +#[diesel(table_name = sysinfo)] +pub struct SchemaSysInfo { + /// Timestamp when this system information was collected. + /// + /// Should be in RFC3339 format (e.g., "2024-01-01T12:00:00Z") + /// for consistent parsing and sorting. + pub timestamp: String, + + /// CPU usage percentage at the time of collection. + /// + /// Range: 0.0 to 100.0, where 100.0 represents full CPU utilization. + pub cpu_usage: f32, + + /// Amount of RAM currently in use, in megabytes. 
+ /// + /// This represents the memory actively being used by processes, + /// excluding cached and buffered memory. + pub ram_usage: f32, + + /// Total amount of RAM available in the system, in megabytes. + /// + /// This is the physical memory capacity and should remain + /// relatively constant unless hardware changes occur. + pub total_ram: f32, + + /// Amount of RAM currently free and available, in megabytes. + /// + /// This represents memory that is immediately available for + /// new processes without requiring swapping or cache eviction. + pub free_ram: f32, + + /// Amount of swap space currently in use, in megabytes. + /// + /// High swap usage may indicate memory pressure and can + /// significantly impact system performance. + pub used_swap: f32, +} + +/// Structure for inserting disk information records into the database. +/// +/// This structure represents disk usage information for a specific filesystem +/// at the time of system monitoring. Multiple disk records can be associated +/// with a single system information record through the `sysinfo_id` foreign key. +/// +/// # Database Relationships +/// +/// - `sysinfo_id`: Foreign key referencing the `sysinfo` table +/// - Each `SchemaSysInfo` record can have multiple associated `SchemaDiskInfo` records +/// +/// # Storage Units +/// +/// All size values are stored in megabytes for consistency and to avoid +/// integer overflow issues with very large storage devices. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::monitor::schema::SchemaDiskInfo; +/// +/// let disk_info = SchemaDiskInfo { +/// sysinfo_id: 1, +/// filesystem: "ext4".to_string(), +/// size: 1000000, // 1TB in MB +/// used: 750000, // 750GB in MB +/// available: 250000, // 250GB in MB +/// used_percentage: 75, +/// mounted_path: "/".to_string(), +/// }; +/// ``` +#[derive(Insertable, Debug, Serialize, Deserialize)] +#[diesel(table_name = diskinfo)] +pub struct SchemaDiskInfo { + /// Foreign key reference to the associated system information record. + /// + /// This links the disk information to a specific monitoring snapshot, + /// allowing for historical tracking of disk usage over time. + pub sysinfo_id: i32, + + /// Type of filesystem (e.g., "ext4", "ntfs", "xfs", "btrfs"). + /// + /// This information helps identify the storage technology and + /// can be useful for performance analysis and troubleshooting. + pub filesystem: String, + + /// Total size of the filesystem in megabytes. + /// + /// This represents the total capacity of the storage device + /// or partition, including space used by the filesystem metadata. + pub size: i32, + + /// Amount of space currently used in megabytes. + /// + /// This includes all files, directories, and filesystem overhead, + /// but may not account for reserved space depending on the filesystem. + pub used: i32, + + /// Amount of space available for new data in megabytes. + /// + /// This is the space that can be immediately used for new files + /// and may be less than (total - used) due to filesystem reservations. + pub available: i32, + + /// Percentage of disk space currently in use. + /// + /// Range: 0 to 100, calculated as (used / total) * 100. + /// Values above 90% typically indicate the need for cleanup or expansion. + pub used_percentage: i32, + + /// Mount point or drive letter where the filesystem is accessible. 
+ /// + /// Examples: "/", "/home", "/var", "C:", "D:" + /// This helps identify which part of the system's storage hierarchy + /// this disk information represents. + pub mounted_path: String, +} + +/// Structure for querying system information records from the database. +/// +/// This structure is used when retrieving system monitoring data from the +/// database. It includes the database-generated ID field and can be used +/// for displaying historical monitoring data, generating reports, and +/// API responses. +/// +/// # Usage Patterns +/// +/// - Retrieving recent system performance data for dashboards +/// - Historical analysis and trend reporting +/// - API endpoints that return monitoring data to clients +/// - Data export and backup operations +/// +/// # Examples +/// +/// ```rust +/// use teus::monitor::schema::SysInfo; +/// use diesel::prelude::*; +/// +/// // Query recent system information (pseudo-code) +/// // let recent_data: Vec = sysinfo::table +/// // .order(sysinfo::timestamp.desc()) +/// // .limit(10) +/// // .load(&mut connection)?; +/// ``` +#[derive(Queryable, Selectable, Identifiable, Debug, Serialize, Deserialize)] +#[diesel(table_name = sysinfo)] +pub struct SysInfo { + /// Database-generated unique identifier for this record. + /// + /// This is the primary key and is automatically assigned when + /// the record is inserted into the database. Used for referencing + /// this specific monitoring snapshot. + #[diesel(column_name = id)] + pub id: Option, + + /// Timestamp when this system information was collected. + /// + /// Stored in RFC3339 format for consistent parsing and timezone handling. + pub timestamp: String, + + /// CPU usage percentage at the time of collection. + /// + /// Range: 0.0 to 100.0, representing the overall CPU utilization + /// across all cores and threads. + pub cpu_usage: f32, + + /// Amount of RAM currently in use, in megabytes. + /// + /// Active memory usage excluding cached and buffered memory. 
+ pub ram_usage: f32, + + /// Total amount of RAM available in the system, in megabytes. + /// + /// Physical memory capacity of the system. + pub total_ram: f32, + + /// Amount of RAM currently free and available, in megabytes. + /// + /// Memory immediately available for allocation to new processes. + pub free_ram: f32, + + /// Amount of swap space currently in use, in megabytes. + /// + /// High values may indicate memory pressure and performance issues. + pub used_swap: f32, +} + +/// Structure for querying disk information records from the database. +/// +/// This structure represents stored disk usage information that can be +/// retrieved for historical analysis, reporting, and API responses. +/// It includes the database-generated ID and maintains the relationship +/// to its parent system information record. +/// +/// # Relationships +/// +/// Each `DiskInfo` record is linked to a `SysInfo` record through +/// the `sysinfo_id` foreign key, allowing for comprehensive system +/// monitoring data retrieval. +/// +/// # Common Query Patterns +/// +/// - Retrieving disk usage trends over time +/// - Finding disks approaching capacity limits +/// - Generating storage utilization reports +/// - Monitoring filesystem-specific usage patterns +/// +/// # Examples +/// +/// ```rust +/// use teus::monitor::schema::DiskInfo; +/// use diesel::prelude::*; +/// +/// // Query disk info for high usage (pseudo-code) +/// // let high_usage_disks: Vec = diskinfo::table +/// // .filter(diskinfo::used_percentage.gt(90)) +/// // .load(&mut connection)?; +/// ``` +#[derive(Queryable, Selectable, Identifiable, Debug, Serialize, Deserialize)] +#[diesel(table_name = diskinfo)] +pub struct DiskInfo { + /// Database-generated unique identifier for this disk record. + /// + /// Primary key used for referencing this specific disk monitoring entry. + #[diesel(column_name = id)] + pub id: Option, + + /// Foreign key reference to the associated system information record. 
+ /// + /// Links this disk information to a specific monitoring snapshot, + /// enabling time-series analysis of disk usage. + pub sysinfo_id: i32, + + /// Type of filesystem (e.g., "ext4", "ntfs", "xfs", "btrfs"). + /// + /// Identifies the storage technology and formatting of this disk. + pub filesystem: String, + + /// Total size of the filesystem in megabytes. + /// + /// Total capacity including filesystem overhead and reserved space. + pub size: i32, + + /// Amount of space currently used in megabytes. + /// + /// Space occupied by files, directories, and filesystem metadata. + pub used: i32, + + /// Amount of space available for new data in megabytes. + /// + /// Immediately usable space, may be less than (size - used) + /// due to filesystem reservations and overhead. + pub available: i32, + + /// Percentage of disk space currently in use. + /// + /// Range: 0 to 100, useful for quick assessment of storage pressure. + /// Values above 90% typically require attention. + pub used_percentage: i32, + + /// Mount point or drive letter where the filesystem is accessible. + /// + /// The path in the system's directory hierarchy where this + /// storage device can be accessed (e.g., "/", "/home", "C:"). 
+ pub mounted_path: String, +} + +impl Default for SchemaSysInfo { + fn default() -> Self { + Self { + timestamp: "".to_string(), + cpu_usage: 0.0, + ram_usage: 0.0, + total_ram: 0.0, + free_ram: 0.0, + used_swap: 0.0, + // user_id: 0, + } + } +} + +impl Default for SchemaDiskInfo { + fn default() -> Self { + Self { + sysinfo_id: 0, + filesystem: "".to_string(), + size: 0, + used: 0, + available: 0, + used_percentage: 0, + mounted_path: "".to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::Utc; + + #[test] + fn test_schema_sys_info_default() { + let sys_info = SchemaSysInfo::default(); + assert_eq!(sys_info.timestamp, ""); + assert_eq!(sys_info.cpu_usage, 0.0); + assert_eq!(sys_info.ram_usage, 0.0); + assert_eq!(sys_info.total_ram, 0.0); + assert_eq!(sys_info.free_ram, 0.0); + assert_eq!(sys_info.used_swap, 0.0); + } + + #[test] + fn test_schema_disk_info_default() { + let disk_info = SchemaDiskInfo::default(); + assert_eq!(disk_info.sysinfo_id, 0); + assert_eq!(disk_info.filesystem, ""); + assert_eq!(disk_info.size, 0); + assert_eq!(disk_info.used, 0); + assert_eq!(disk_info.available, 0); + assert_eq!(disk_info.used_percentage, 0); + assert_eq!(disk_info.mounted_path, ""); + } + + #[test] + fn test_schema_sys_info_creation() { + let sys_info = SchemaSysInfo { + timestamp: Utc::now().to_rfc3339(), + cpu_usage: 25.5, + ram_usage: 1024.0, + total_ram: 8192.0, + free_ram: 4096.0, + used_swap: 512.0, + }; + + assert!(!sys_info.timestamp.is_empty()); + assert_eq!(sys_info.cpu_usage, 25.5); + assert_eq!(sys_info.ram_usage, 1024.0); + assert_eq!(sys_info.total_ram, 8192.0); + assert_eq!(sys_info.free_ram, 4096.0); + assert_eq!(sys_info.used_swap, 512.0); + } + + #[test] + fn test_schema_disk_info_creation() { + let disk_info = SchemaDiskInfo { + sysinfo_id: 1, + filesystem: "ext4".to_string(), + size: 1000, + used: 500, + available: 500, + used_percentage: 50, + mounted_path: "/".to_string(), + }; + + assert_eq!(disk_info.sysinfo_id, 1); + 
assert_eq!(disk_info.filesystem, "ext4"); + assert_eq!(disk_info.size, 1000); + assert_eq!(disk_info.used, 500); + assert_eq!(disk_info.available, 500); + assert_eq!(disk_info.used_percentage, 50); + assert_eq!(disk_info.mounted_path, "/"); + } + + #[test] + fn test_sys_info_serialization() { + let sys_info = SysInfo { + id: Some(1), + timestamp: Utc::now().to_rfc3339(), + cpu_usage: 25.5, + ram_usage: 1024.0, + total_ram: 8192.0, + free_ram: 4096.0, + used_swap: 512.0, + }; + + let serialized = serde_json::to_string(&sys_info).unwrap(); + assert!(serialized.contains("\"id\":1")); + assert!(serialized.contains("\"cpu_usage\":25.5")); + assert!(serialized.contains("\"ram_usage\":1024")); + } + + #[test] + fn test_disk_info_serialization() { + let disk_info = DiskInfo { + id: Some(1), + sysinfo_id: 1, + filesystem: "ext4".to_string(), + size: 1000, + used: 500, + available: 500, + used_percentage: 50, + mounted_path: "/".to_string(), + }; + + let serialized = serde_json::to_string(&disk_info).unwrap(); + assert!(serialized.contains("\"id\":1")); + assert!(serialized.contains("\"sysinfo_id\":1")); + assert!(serialized.contains("\"filesystem\":\"ext4\"")); + assert!(serialized.contains("\"mounted_path\":\"/\"")); + } + + #[test] + fn test_sys_info_debug_format() { + let sys_info = SysInfo { + id: Some(1), + timestamp: "2024-01-01T00:00:00Z".to_string(), + cpu_usage: 25.5, + ram_usage: 1024.0, + total_ram: 8192.0, + free_ram: 4096.0, + used_swap: 512.0, + }; + + let debug_str = format!("{:?}", sys_info); + assert!(debug_str.contains("SysInfo")); + assert!(debug_str.contains("25.5")); + assert!(debug_str.contains("1024")); + } + + #[test] + fn test_edge_values() { + // Test with extreme values + let sys_info = SchemaSysInfo { + timestamp: "2024-01-01T00:00:00Z".to_string(), + cpu_usage: 100.0, + ram_usage: 0.0, + total_ram: f32::MAX, + free_ram: f32::MAX, + used_swap: 0.0, + }; + + assert_eq!(sys_info.cpu_usage, 100.0); + assert_eq!(sys_info.ram_usage, 0.0); + 
assert_eq!(sys_info.total_ram, f32::MAX); + assert_eq!(sys_info.free_ram, f32::MAX); + assert_eq!(sys_info.used_swap, 0.0); + + // Test disk info with edge values + let disk_info = SchemaDiskInfo { + sysinfo_id: i32::MAX, + filesystem: "test".to_string(), + size: i32::MAX, + used: 0, + available: i32::MAX, + used_percentage: 100, + mounted_path: "/test".to_string(), + }; + + assert_eq!(disk_info.sysinfo_id, i32::MAX); + assert_eq!(disk_info.size, i32::MAX); + assert_eq!(disk_info.used, 0); + assert_eq!(disk_info.available, i32::MAX); + assert_eq!(disk_info.used_percentage, 100); + } + + #[test] + fn test_deserialization() { + let json_str = r#"{ + "timestamp": "2024-01-01T00:00:00Z", + "cpu_usage": 25.5, + "ram_usage": 1024.0, + "total_ram": 8192.0, + "free_ram": 4096.0, + "used_swap": 512.0 + }"#; + + let sys_info: SchemaSysInfo = serde_json::from_str(json_str).unwrap(); + assert_eq!(sys_info.timestamp, "2024-01-01T00:00:00Z"); + assert_eq!(sys_info.cpu_usage, 25.5); + assert_eq!(sys_info.ram_usage, 1024.0); + } +} diff --git a/teus/monitor/storage.rs b/teus/monitor/storage.rs new file mode 100644 index 0000000..8c913c0 --- /dev/null +++ b/teus/monitor/storage.rs @@ -0,0 +1,205 @@ +use diesel::connection::SimpleConnection; // Added +use diesel::{Connection as ConnectionDiesel, SqliteConnection}; +use std::error::Error; +use std::path::Path; +use std::sync::{Arc, Mutex}; // Added for Box + +// Removed: use rusqlite::{Connection, Result}; +// Removed: use std::time::Duration; + +#[derive(Clone)] +pub struct Storage { + // pub conn: Arc, // @Info: old Arc reference to don't break the code + pub diesel_conn: Arc>, // @Info: use to test diesel for now +} + +mod storage_utils { + use std::{fs, io, path::Path}; + + pub fn ensure_directory_exists(path: &str) -> io::Result<()> { + let dir_path = Path::new(path); + if !dir_path.exists() { + fs::create_dir_all(dir_path)?; + // Consider using log crate for messages instead of println! 
+ // println!("Directory '{}' created.", path); + } + Ok(()) + } +} + +// TODO: Migrate Connection -> SqliteConnection // This TODO can be removed after this refactor +impl Storage { + pub fn new(db_path: &str) -> Result> { + // Changed return type + if let Some(parent) = Path::new(db_path).parent() { + if let Some(parent_str) = parent.to_str() { + storage_utils::ensure_directory_exists(parent_str)?; // Changed from expect + } + } + + // Removed rusqlite connection logic + + let mut conn_new = SqliteConnection::establish(&db_path)?; // Changed from unwrap_or_else + + // Apply PRAGMAs to Diesel connection + // Note: busy_timeout is set in milliseconds for SQLite PRAGMA + conn_new.batch_execute( + "PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA busy_timeout = 5000;", + )?; + + Ok(Self { + diesel_conn: Arc::new(Mutex::new(conn_new)), + }) + } + + // Removed _init_db method +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn test_storage_new_success() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let db_path = temp_dir.path().join("test.db"); + let db_path_str = db_path.to_str().unwrap(); + + let storage = Storage::new(db_path_str); + assert!(storage.is_ok()); + + let storage = storage.unwrap(); + + // Test that we can acquire the mutex lock + let conn_guard = storage.diesel_conn.lock(); + assert!(conn_guard.is_ok()); + } + + #[test] + fn test_storage_creates_parent_directory() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let nested_path = temp_dir.path().join("nested").join("path").join("test.db"); + let db_path_str = nested_path.to_str().unwrap(); + + let storage = Storage::new(db_path_str); + assert!(storage.is_ok()); + + // Verify the nested directories were created + assert!(temp_dir.path().join("nested").join("path").exists()); + } + + #[test] + fn test_storage_with_memory_database() { + // SQLite in-memory database + let storage = 
Storage::new(":memory:"); + assert!(storage.is_ok()); + + let storage = storage.unwrap(); + let conn_guard = storage.diesel_conn.lock(); + assert!(conn_guard.is_ok()); + } + + #[test] + fn test_storage_invalid_path() { + // Try to create database in a location that doesn't exist and can't be created + // This might not fail on all systems, but it's worth testing + let invalid_path = "/invalid/path/that/should/not/exist/test.db"; + let storage = Storage::new(invalid_path); + + // On most systems this should fail due to permission issues + // But SQLite might create the path in some cases, so we just ensure it returns a Result + match storage { + Ok(_) => { + // If it succeeds, that's fine too - SQLite is quite permissive + } + Err(_) => { + // This is the expected case for most invalid paths + } + } + } + + #[test] + fn test_storage_clone() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let db_path = temp_dir.path().join("test.db"); + let db_path_str = db_path.to_str().unwrap(); + + let storage = Storage::new(db_path_str).expect("Failed to create storage"); + let cloned_storage = storage.clone(); + + // Both should be able to access the same connection + let conn1 = storage.diesel_conn.lock(); + assert!(conn1.is_ok()); + drop(conn1); // Release lock before trying with clone + + let conn2 = cloned_storage.diesel_conn.lock(); + assert!(conn2.is_ok()); + } + + #[test] + fn test_storage_concurrent_access() { + use std::thread; + use std::time::Duration; + + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let db_path = temp_dir.path().join("test.db"); + let db_path_str = db_path.to_str().unwrap(); + + let storage = Storage::new(db_path_str).expect("Failed to create storage"); + let storage_clone = storage.clone(); + + let handle = thread::spawn(move || { + let conn = storage_clone.diesel_conn.lock(); + assert!(conn.is_ok()); + thread::sleep(Duration::from_millis(10)); + }); + + // Give the thread a moment to start + 
thread::sleep(Duration::from_millis(5)); + + // This should be able to access after the thread releases the lock + handle.join().expect("Thread panicked"); + + let conn = storage.diesel_conn.lock(); + assert!(conn.is_ok()); + } + + #[test] + fn test_storage_utils_ensure_directory_exists() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let test_path = temp_dir.path().join("test_subdir"); + let test_path_str = test_path.to_str().unwrap(); + + // Directory doesn't exist initially + assert!(!test_path.exists()); + + // Call the utility function + let result = storage_utils::ensure_directory_exists(test_path_str); + assert!(result.is_ok()); + + // Directory should now exist + assert!(test_path.exists()); + assert!(test_path.is_dir()); + + // Calling again on existing directory should also work + let result = storage_utils::ensure_directory_exists(test_path_str); + assert!(result.is_ok()); + } + + #[test] + fn test_storage_utils_invalid_path() { + // Try to create a directory in an invalid location + let result = storage_utils::ensure_directory_exists("/root/invalid/path"); + + // This should fail on most systems due to permission issues + match result { + Ok(_) => { + // In some test environments this might succeed + } + Err(_) => { + // This is the expected case for most systems + } + } + } +} diff --git a/teus/monitor/sys.rs b/teus/monitor/sys.rs new file mode 100644 index 0000000..594ed69 --- /dev/null +++ b/teus/monitor/sys.rs @@ -0,0 +1,409 @@ +use super::mutation; +use super::schema::{SchemaDiskInfo, SchemaSysInfo}; // Import the Diesel insertable structs +use crate::{config::types::Config, monitor::storage::Storage}; +use chrono::Utc; +use diesel::SqliteConnection; // Import SqliteConnection +use std::{thread, time::Duration}; // Import Mutex +use sysinfo::{Disks, MemoryRefreshKind, System}; + +#[allow(dead_code)] +#[derive(Clone, Debug)] +pub struct DiskInfo { + pub available: usize, + // Add disk-related fields here + pub filesystem: 
String, + pub mounted_path: String, + pub size: usize, + pub used: usize, + pub used_percentage: usize, +} + +#[allow(dead_code)] +#[derive(Clone, Debug)] +pub struct SysInfo { + #[allow(dead_code)] + pub id: i64, + pub timestamp: String, + pub cpu_usage: f64, + pub ram_usage: f64, + pub total_ram: f64, + pub free_ram: f64, + pub used_swap: f64, + pub disks: Vec, +} + +#[allow(dead_code)] +impl SysInfo { + pub fn new( + cpu_usage: f64, + ram_usage: f64, + total_ram: f64, + free_ram: f64, + used_swap: f64, + disks: Vec, + ) -> Self { + Self { + id: 0, + timestamp: "".to_string(), + cpu_usage, + ram_usage, + total_ram, + free_ram, + used_swap, + disks, + } + } + + // Default constructor + pub fn default() -> Self { + Self { + id: 0, + timestamp: Utc::now().to_rfc3339(), + cpu_usage: 0.0, + ram_usage: 0.0, + total_ram: 0.0, + free_ram: 0.0, + used_swap: 0.0, + disks: vec![DiskInfo { + filesystem: String::new(), + size: 0, + used: 0, + available: 0, + used_percentage: 0, + mounted_path: String::new(), + }], + } + } + + pub fn run_monitor(mut self, config: &Config) { + let storage = match Storage::new(&config.database.path) { + Ok(storage) => storage, + Err(e) => { + eprintln!("Failed to create storage: {}", e); + return; + } + }; + + // Get a mutable connection from the Arc> + let mut conn_guard = match storage.diesel_conn.lock() { + Ok(guard) => guard, + Err(poisoned) => { + eprintln!("Failed to acquire lock on DB connection: {}", poisoned); + // Handle the poisoned mutex appropriately, maybe panic or return + return; + } + }; + // Dereference the guard to get the &mut SqliteConnection + let conn: &mut SqliteConnection = &mut *conn_guard; + + let mut sys = System::new_all(); + let disks_sysinfo = Disks::new_with_refreshed_list(); // Renamed to avoid conflict + + sys.refresh_all(); + sys.refresh_memory_specifics(MemoryRefreshKind::nothing().with_ram()); + + self.total_ram = sys.total_memory() as f64; + self.free_ram = sys.free_memory() as f64; + self.used_swap = 
sys.used_swap() as f64; + self.ram_usage = sys.used_memory() as f64; // Use used_memory for ram_usage + + thread::sleep(Duration::from_millis(250)); + sys.refresh_cpu_all(); + + let cpu_count = sys.cpus().len(); + let total_cpu_usage: f64 = sys.cpus().iter().map(|cpu| cpu.cpu_usage() as f64).sum(); + self.cpu_usage = if cpu_count > 0 { + total_cpu_usage / cpu_count as f64 + } else { + 0.0 + }; + + // --- Prepare data for Diesel insertion --- + self.timestamp = Utc::now().to_rfc3339(); // Ensure timestamp is current + + // Create the SchemaSysInfo struct for insertion + let new_sys_info_to_insert = SchemaSysInfo { + timestamp: self.timestamp, + cpu_usage: self.cpu_usage as f32, // Cast f64 to f32 + ram_usage: self.ram_usage as f32, // Cast f64 to f32 + total_ram: self.total_ram as f32, // Cast f64 to f32 + free_ram: self.free_ram as f32, // Cast f64 to f32 + used_swap: self.used_swap as f32, // Cast f64 to f32 + }; + + // Insert system info using the SchemaSysInfo struct + let sysinfo_id = match mutation::insert_sysinfo(conn, &new_sys_info_to_insert) { + Ok(id) => id, + Err(e) => { + eprintln!("Failed to insert system info: {}", e); + // Drop the lock before returning + drop(conn_guard); + return; + } + }; + + // Prepare disk info data for batch insertion + let mut disk_infos_to_insert: Vec = Vec::new(); + for disk in disks_sysinfo.list() { + let space_used = disk.total_space() - disk.available_space(); + // Calculate usage percentage correctly + let usage_percentage = if disk.total_space() > 0 { + (space_used as f64 / disk.total_space() as f64 * 100.0) as i32 + } else { + 0 + }; + + let fs_name = disk.name().to_string_lossy().to_string(); + let mount_point = disk.mount_point().to_string_lossy().to_string(); + + disk_infos_to_insert.push(SchemaDiskInfo { + sysinfo_id, // Use the ID from the inserted sysinfo + filesystem: fs_name, + size: (disk.total_space() / 1024 / 1024) as i32, // Convert bytes to MB (adjust if needed) and cast usize to i32 + used: (space_used / 
1024 / 1024) as i32, // Convert bytes to MB and cast usize to i32 + available: (disk.available_space() / 1024 / 1024) as i32, // Convert bytes to MB and cast usize to i32 + used_percentage: usage_percentage, // Use calculated percentage + mounted_path: mount_point, + }); + } + + // Insert disk info using the SchemaDiskInfo structs + if !disk_infos_to_insert.is_empty() { + if let Err(e) = mutation::insert_multiple_diskinfo(conn, &disk_infos_to_insert) { + eprintln!("Failed to insert disk info batch: {}", e); + } + } + // Lock is automatically dropped here when conn_guard goes out of scope + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::types::{Config, DatabaseConfig, Environment, MonitorConfig, ServerConfig}; + + #[allow(dead_code)] + fn create_test_config() -> Config { + Config { + server: ServerConfig { + host: "127.0.0.1".to_string(), + port: 8080, + secret: "test_secret".to_string(), + environment: Environment::Test, + }, + database: DatabaseConfig { + path: ":memory:".to_string(), + }, + monitor: MonitorConfig { interval_secs: 60 }, + } + } + + #[test] + fn test_disk_info_creation() { + let disk_info = DiskInfo { + available: 1000, + filesystem: "ext4".to_string(), + mounted_path: "/".to_string(), + size: 2000, + used: 1000, + used_percentage: 50, + }; + + assert_eq!(disk_info.available, 1000); + assert_eq!(disk_info.filesystem, "ext4"); + assert_eq!(disk_info.mounted_path, "/"); + assert_eq!(disk_info.size, 2000); + assert_eq!(disk_info.used, 1000); + assert_eq!(disk_info.used_percentage, 50); + } + + #[test] + fn test_disk_info_clone() { + let disk_info = DiskInfo { + available: 500, + filesystem: "ntfs".to_string(), + mounted_path: "C:\\".to_string(), + size: 1000, + used: 500, + used_percentage: 50, + }; + + let cloned = disk_info.clone(); + assert_eq!(disk_info.available, cloned.available); + assert_eq!(disk_info.filesystem, cloned.filesystem); + assert_eq!(disk_info.mounted_path, cloned.mounted_path); + assert_eq!(disk_info.size, 
cloned.size); + assert_eq!(disk_info.used, cloned.used); + assert_eq!(disk_info.used_percentage, cloned.used_percentage); + } + + #[test] + fn test_sysinfo_new() { + let disks = vec![DiskInfo { + available: 1000, + filesystem: "ext4".to_string(), + mounted_path: "/".to_string(), + size: 2000, + used: 1000, + used_percentage: 50, + }]; + + let sysinfo = SysInfo::new(25.5, 8000.0, 16000.0, 8000.0, 2000.0, disks.clone()); + + assert_eq!(sysinfo.id, 0); + assert_eq!(sysinfo.timestamp, ""); + assert_eq!(sysinfo.cpu_usage, 25.5); + assert_eq!(sysinfo.ram_usage, 8000.0); + assert_eq!(sysinfo.total_ram, 16000.0); + assert_eq!(sysinfo.free_ram, 8000.0); + assert_eq!(sysinfo.used_swap, 2000.0); + assert_eq!(sysinfo.disks.len(), 1); + assert_eq!(sysinfo.disks[0].filesystem, "ext4"); + } + + #[test] + fn test_sysinfo_default() { + let sysinfo = SysInfo::default(); + + assert_eq!(sysinfo.id, 0); + assert!(!sysinfo.timestamp.is_empty()); // Should have a timestamp + assert_eq!(sysinfo.cpu_usage, 0.0); + assert_eq!(sysinfo.ram_usage, 0.0); + assert_eq!(sysinfo.total_ram, 0.0); + assert_eq!(sysinfo.free_ram, 0.0); + assert_eq!(sysinfo.used_swap, 0.0); + assert_eq!(sysinfo.disks.len(), 1); + assert_eq!(sysinfo.disks[0].filesystem, ""); + assert_eq!(sysinfo.disks[0].mounted_path, ""); + assert_eq!(sysinfo.disks[0].size, 0); + assert_eq!(sysinfo.disks[0].used, 0); + assert_eq!(sysinfo.disks[0].available, 0); + assert_eq!(sysinfo.disks[0].used_percentage, 0); + } + + #[test] + fn test_sysinfo_clone() { + let disks = vec![DiskInfo { + available: 2000, + filesystem: "btrfs".to_string(), + mounted_path: "/home".to_string(), + size: 4000, + used: 2000, + used_percentage: 50, + }]; + + let sysinfo = SysInfo::new(15.7, 4000.0, 8000.0, 4000.0, 1000.0, disks); + + let cloned = sysinfo.clone(); + assert_eq!(sysinfo.id, cloned.id); + assert_eq!(sysinfo.timestamp, cloned.timestamp); + assert_eq!(sysinfo.cpu_usage, cloned.cpu_usage); + assert_eq!(sysinfo.ram_usage, cloned.ram_usage); + 
assert_eq!(sysinfo.total_ram, cloned.total_ram); + assert_eq!(sysinfo.free_ram, cloned.free_ram); + assert_eq!(sysinfo.used_swap, cloned.used_swap); + assert_eq!(sysinfo.disks.len(), cloned.disks.len()); + assert_eq!(sysinfo.disks[0].filesystem, cloned.disks[0].filesystem); + } + + #[test] + fn test_sysinfo_debug_format() { + let sysinfo = SysInfo::default(); + let debug_str = format!("{:?}", sysinfo); + assert!(debug_str.contains("SysInfo")); + assert!(debug_str.contains("cpu_usage")); + assert!(debug_str.contains("ram_usage")); + } + + #[test] + fn test_disk_info_debug_format() { + let disk_info = DiskInfo { + available: 1000, + filesystem: "ext4".to_string(), + mounted_path: "/".to_string(), + size: 2000, + used: 1000, + used_percentage: 50, + }; + + let debug_str = format!("{:?}", disk_info); + assert!(debug_str.contains("DiskInfo")); + assert!(debug_str.contains("ext4")); + assert!(debug_str.contains("1000")); + } + + #[test] + fn test_sysinfo_edge_values() { + let disks = vec![]; + + // Test with extreme values + let sysinfo = SysInfo::new( + 100.0, // Max CPU usage + 0.0, // No RAM usage + u64::MAX as f64, // Maximum possible RAM + u64::MAX as f64, // Maximum free RAM + 0.0, // No swap usage + disks, + ); + + assert_eq!(sysinfo.cpu_usage, 100.0); + assert_eq!(sysinfo.ram_usage, 0.0); + assert_eq!(sysinfo.total_ram, u64::MAX as f64); + assert_eq!(sysinfo.free_ram, u64::MAX as f64); + assert_eq!(sysinfo.used_swap, 0.0); + assert_eq!(sysinfo.disks.len(), 0); + } + + #[test] + fn test_disk_info_percentage_calculation() { + // Test 100% usage + let full_disk = DiskInfo { + available: 0, + filesystem: "ext4".to_string(), + mounted_path: "/".to_string(), + size: 1000, + used: 1000, + used_percentage: 100, + }; + assert_eq!(full_disk.used_percentage, 100); + + // Test 0% usage + let empty_disk = DiskInfo { + available: 1000, + filesystem: "ext4".to_string(), + mounted_path: "/".to_string(), + size: 1000, + used: 0, + used_percentage: 0, + }; + 
#[allow(dead_code)]
pub trait SysUtils {
    /// Converts a raw byte count into gibibytes (binary GB, 1024^3 bytes).
    ///
    /// Provided as a default method so any implementor gets the conversion
    /// for free without storing state.
    fn to_gb(&self, bytes: u64) -> f64 {
        // 2^30 bytes per GiB; a single power-of-two division is exact in f64,
        // so this matches dividing by 1024.0 three times bit-for-bit.
        const BYTES_PER_GIB: f64 = (1u64 << 30) as f64;
        bytes as f64 / BYTES_PER_GIB
    }
}
// Test fractional GB (512 MB = 0.5 GB) + let half_gb_bytes = 512 * 1024 * 1024; + assert_eq!(test_struct.to_gb(half_gb_bytes), 0.5); + + // Test precision with a known value + let bytes = 1536 * 1024 * 1024; // 1.5 GB + let result = test_struct.to_gb(bytes); + assert!((result - 1.5).abs() < f64::EPSILON); + } +} diff --git a/src/webserver/api.rs b/teus/webserver/api.rs similarity index 77% rename from src/webserver/api.rs rename to teus/webserver/api.rs index 35b852a..cb5cd86 100644 --- a/src/webserver/api.rs +++ b/teus/webserver/api.rs @@ -1,14 +1,20 @@ +use crate::bookmarks::handlers as bookmark_handlers; use crate::config::handlers::get_teus_config; use crate::monitor::query; -use crate::webserver::auth::handlers::{login, signup, JwtConfig}; +use crate::webserver::auth::handlers::{check, login, signup, JwtConfig}; use crate::webserver::auth::middleware::AuthMiddlewareFactory; +use crate::webserver::docker::handlers::{ + get_docker_container, get_docker_containers, get_docker_version, get_docker_volume, + get_docker_volumes, +}; use crate::webserver::models::sysmodels::{DiskInfoResponse, SysInfoResponse}; use crate::webserver::services::systeminfo; use crate::{config::types::Config, monitor::storage::Storage}; use actix_cors::Cors; -use actix_web::{get, http, middleware, web, App, Error, HttpResponse, HttpServer, Responder}; use actix_web::error::ErrorInternalServerError; +use actix_web::{get, http, middleware, web, App, Error, HttpResponse, HttpServer}; +// TODO: move this api into another file `syshandler` or something #[get("/sysinfo")] async fn sysinfo_handler(storage: web::Data) -> Result { let mut conn = storage.diesel_conn.lock().map_err(|_| { @@ -68,7 +74,7 @@ pub async fn start_webserver(config: &Config, storage: Storage) -> std::io::Resu HttpServer::new(move || { let cors = Cors::default() .allow_any_origin() - .allowed_methods(vec!["GET", "POST"]) + .allowed_methods(vec!["GET", "POST", "DELETE", "PATCH"]) 
.allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT]) .allowed_header(http::header::CONTENT_TYPE) .max_age(3600); @@ -90,8 +96,18 @@ pub async fn start_webserver(config: &Config, storage: Storage) -> std::io::Resu .service( web::scope("/api/v1/teus") .wrap(AuthMiddlewareFactory::new(jwt_secret.to_string())) + .service(check) // check for auth .service(sysinfo_handler) - .service(systeminfo::get_sysinfo), + .service(systeminfo::get_sysinfo) + .service(get_docker_version) + .service(get_docker_containers) + .service(get_docker_container) + .service(get_docker_volume) + .service(get_docker_volumes) + .service(bookmark_handlers::get_user_services) + .service(bookmark_handlers::add_service) + .service(bookmark_handlers::delete_service_by_id) + .service(bookmark_handlers::update_service_by_id), ) }) .bind(&url)? diff --git a/teus/webserver/auth/handlers.rs b/teus/webserver/auth/handlers.rs new file mode 100644 index 0000000..f8bb6c3 --- /dev/null +++ b/teus/webserver/auth/handlers.rs @@ -0,0 +1,397 @@ +use crate::{ + config::{schema::TeusConfig, types::Config}, + monitor::storage::Storage, + webserver::auth::{middleware::Claims, schema::User}, +}; +use actix_web::{post, web, HttpResponse, Responder}; +use argon2::{ + password_hash::{PasswordHash, PasswordVerifier}, + Argon2, +}; +use chrono::{Duration, Utc}; +use jsonwebtoken::{encode, EncodingKey, Header}; +use serde::{Deserialize, Serialize}; + +/// Request structure for user authentication login endpoint. +/// +/// This structure represents the data required for a user to authenticate +/// with the Teus system. It contains the credentials that will be validated +/// against the stored user data in the database. 
+/// +/// # Security Considerations +/// +/// - The password field should be transmitted over HTTPS in production +/// - Passwords are never logged or stored in plain text +/// - Consider implementing rate limiting to prevent brute force attacks +/// +/// # Examples +/// +/// JSON request body: +/// ```json +/// { +/// "username": "admin", +/// "password": "secure_password" +/// } +/// ``` +/// +/// # API Endpoint +/// +/// Used with `POST /auth/login` +#[derive(Deserialize)] +pub struct LoginRequest { + /// The user's login identifier. + /// + /// This should be a unique username that exists in the system. + /// Case-sensitive matching is performed against stored usernames. + username: String, + + /// The user's password in plain text. + /// + /// This will be verified against the stored password hash using + /// Argon2 password hashing. The plain text password is never + /// stored or logged by the system. + password: String, +} + +/// Request structure for user registration endpoint. +/// +/// This structure represents the data required to create a new user +/// account in the Teus system. The provided password will be securely +/// hashed using Argon2 before storage. +/// +/// # Validation Requirements +/// +/// - Username must be unique in the system +/// - Password should meet minimum security requirements (implemented at application level) +/// - Both fields are required and cannot be empty +/// +/// # Security Features +/// +/// - Passwords are hashed with Argon2 and a unique salt +/// - Plain text passwords are never stored +/// - Username uniqueness is enforced at the database level +/// +/// # Examples +/// +/// JSON request body: +/// ```json +/// { +/// "username": "newuser", +/// "password": "secure_password123" +/// } +/// ``` +/// +/// # API Endpoint +/// +/// Used with `POST /auth/signup` +#[derive(Deserialize)] +pub struct SignupRequest { + /// The desired username for the new account. + /// + /// Must be unique across all users in the system. 
If a user + /// with this username already exists, the registration will fail + /// with a 409 Conflict status. + username: String, + + /// The password for the new account in plain text. + /// + /// This will be securely hashed using Argon2 with a unique salt + /// before being stored in the database. The plain text password + /// is never persisted. + password: String, +} + +/// Response structure for successful authentication operations. +/// +/// This structure contains the JWT tokens issued after successful login +/// or token refresh operations. It provides both access and refresh tokens +/// following OAuth 2.0-style token patterns. +/// +/// # Token Types +/// +/// - **Access Token**: Short-lived token for API authentication (default: configurable hours) +/// - **Refresh Token**: Long-lived token for obtaining new access tokens (default: 7 days) +/// +/// # Security Notes +/// +/// - Access tokens should be stored securely on the client (e.g., memory, secure storage) +/// - Refresh tokens should be stored even more securely and used only for token renewal +/// - Both tokens are JWTs signed with the server's secret key +/// +/// # Examples +/// +/// JSON response: +/// ```json +/// { +/// "access": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", +/// "refresh": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", +/// "expires_in": 3600 +/// } +/// ``` +#[derive(Serialize)] +pub struct TokenResponse { + /// JWT access token for API authentication. + /// + /// This token should be included in the Authorization header + /// of subsequent API requests as "Bearer {token}". It has a + /// shorter expiration time for security. + access: String, + + /// JWT refresh token for obtaining new access tokens. + /// + /// This token can be used to obtain a new access token when + /// the current one expires, without requiring the user to + /// log in again. It has a longer expiration time. + refresh: String, + + /// Time until the access token expires, in seconds. 
+ /// + /// Clients should use this value to determine when to refresh + /// the access token using the refresh token. This is typically + /// calculated as expiration_hours * 3600. + expires_in: i64, +} + +/// Configuration structure for JWT token generation and validation. +/// +/// This structure holds the configuration parameters needed for +/// JSON Web Token operations in the authentication system. It's +/// typically initialized once at application startup. +/// +/// # Security Considerations +/// +/// - The secret should be cryptographically secure and sufficiently long +/// - The secret should be different for each environment (dev, test, prod) +/// - Consider rotating secrets periodically in production environments +/// - Never log or expose the secret in error messages +/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::auth::handlers::JwtConfig; +/// +/// let jwt_config = JwtConfig { +/// secret: "your-256-bit-secret-key".to_string(), +/// expiration_hours: 1, // 1 hour for access tokens +/// }; +/// ``` +pub struct JwtConfig { + /// Secret key used for signing and verifying JWT tokens. + /// + /// This should be a cryptographically secure random string, + /// at least 256 bits (32 characters) long. The same secret + /// must be used for both token generation and validation. + pub secret: String, + + /// Number of hours until access tokens expire. + /// + /// Shorter expiration times improve security by reducing the + /// window of opportunity if a token is compromised, but may + /// require more frequent token refreshes. Typical values + /// range from 1-24 hours. + pub expiration_hours: i64, +} + +/// Generic response structure for simple API messages. +/// +/// This structure is used for API endpoints that need to return +/// a simple text message, such as error responses or confirmation +/// messages. It provides a consistent format for client applications. 
+/// +/// # Usage Patterns +/// +/// - Error messages (e.g., "Invalid credentials", "User not found") +/// - Success confirmations (e.g., "Operation completed") +/// - Validation errors (e.g., "Username already exists") +/// +/// # Examples +/// +/// JSON response: +/// ```json +/// { +/// "message": "Invalid credentials" +/// } +/// ``` +/// +/// # TODO +/// +/// Consider moving this to a separate common response module +/// to be shared across different handlers and avoid duplication. +#[derive(Serialize)] +struct GenericResponse { + /// The message content to be returned to the client. + /// + /// Should be user-friendly and provide clear information + /// about the result of the operation or the nature of any error. + message: String, +} + +/// Response structure for successful user registration. +/// +/// This structure contains the essential information about a newly +/// created user account, returned after successful signup operations. +/// It excludes sensitive information like passwords and salts. +/// +/// # Security Notes +/// +/// - Only non-sensitive user information is included +/// - Password hashes and salts are never included in responses +/// - The user ID can be used for subsequent API operations +/// +/// # Examples +/// +/// JSON response: +/// ```json +/// { +/// "id": 1, +/// "username": "newuser" +/// } +/// ``` +/// +/// # API Endpoint +/// +/// Returned by `POST /auth/signup` on successful user creation +#[derive(Serialize)] +struct NewUserResponse { + /// The database-generated unique identifier for the new user. + /// + /// This ID is used internally by the system to reference + /// the user in database relations and API operations. + id: i32, + + /// The username of the newly created account. + /// + /// Confirms the username that was successfully registered, + /// useful for client-side confirmation and user feedback. 
+ username: String, +} + +// Verify the password against the hash +fn is_same_password(password_hash: &str, clear_pass: &str, _salt: &str) -> bool { + // Try to parse the password hash + let parsed_hash = match PasswordHash::new(password_hash) { + Ok(hash) => hash, + Err(_) => return false, + }; + + // Verify the password against the hash + Argon2::default() + .verify_password(clear_pass.as_bytes(), &parsed_hash) + .is_ok() +} + +// Handler di login +#[post("/login")] +pub async fn login( + login_data: web::Json, + jwt_config: web::Data, + config: actix_web::web::Data, +) -> impl Responder { + let storage = Storage::new(&config.database.path).unwrap(); + let mut conn = storage.diesel_conn.lock().unwrap(); + let user = User::find_by_username(&mut *conn, &login_data.username).unwrap(); + let user_id: i32; + + match user { + Some(user) => { + println!("User found: {:?}", user); + let is_password_correct = + is_same_password(&user.password, &login_data.password, &user.salt); + + if !is_password_correct { + let response = GenericResponse { + message: "Invalid Credentials".to_string(), + }; + return HttpResponse::Unauthorized().json(response); + } + + user_id = user.id.unwrap(); + } + None => { + println!("User not found"); + let response = GenericResponse { + message: "Invalid Credentials".to_string(), + }; + return HttpResponse::Unauthorized().json(response); + } + } + + let access_expiration = Utc::now() + .checked_add_signed(Duration::hours(jwt_config.expiration_hours)) + .expect("Valid timestamp") + .timestamp() as usize; + + let refresh_expiration = Utc::now() + .checked_add_signed(Duration::hours(24 * 7)) // 7 days + .expect("Valid timestamp") + .timestamp() as usize; + + let access_claims = Claims { + sub: login_data.username.clone(), + exp: access_expiration, + iat: Utc::now().timestamp() as usize, + id: user_id, + }; + + let refresh_claims = Claims { + sub: login_data.username.clone(), + exp: refresh_expiration, + iat: Utc::now().timestamp() as usize, + id: 
user_id, + }; + + let access_token = encode( + &Header::default(), + &access_claims, + &EncodingKey::from_secret(jwt_config.secret.as_bytes()), + ) + .unwrap(); + + let refresh_token = encode( + &Header::default(), + &refresh_claims, + &EncodingKey::from_secret(jwt_config.secret.as_bytes()), + ) + .unwrap(); + + let response = TokenResponse { + access: access_token, + refresh: refresh_token, + expires_in: jwt_config.expiration_hours * 3600, + }; + + HttpResponse::Ok().json(response) +} + +#[post("/signup")] +pub async fn signup( + signup_data: web::Json, + config: actix_web::web::Data, +) -> impl Responder { + let storage = Storage::new(&config.database.path).unwrap(); + let mut conn = storage.diesel_conn.lock().unwrap(); + + let existing_user = User::find_by_username(&mut *conn, &signup_data.username).unwrap(); + if existing_user.is_some() { + let response = GenericResponse { + message: "Username already exists".to_string(), + }; + return HttpResponse::Conflict().json(response); + } + let user = User::create(&mut *conn, &signup_data.username, &signup_data.password).unwrap(); + TeusConfig::set_first_visit(&mut *conn, false).unwrap(); + + // Create a response without the sensitive data + let user_response = NewUserResponse { + id: user.id.unwrap(), + username: user.username, + }; + + HttpResponse::Created().json(user_response) +} + +// TODO: Move this `health` somewhere else +#[post("/check")] +pub async fn check() -> impl Responder { + HttpResponse::Ok() +} diff --git a/teus/webserver/auth/middleware.rs b/teus/webserver/auth/middleware.rs new file mode 100644 index 0000000..e0e46f0 --- /dev/null +++ b/teus/webserver/auth/middleware.rs @@ -0,0 +1,235 @@ +//! Authentication middleware for JWT token validation. +//! +//! This module provides middleware components for validating JWT tokens +//! in HTTP requests. It automatically extracts and validates Bearer tokens +//! from the Authorization header, making user claims available to handlers. 
+ +use actix_web::{ + dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform}, + error::ErrorUnauthorized, + Error, HttpMessage, +}; +use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation}; +use serde::{Deserialize, Serialize}; +use std::{ + future::{ready, Future, Ready}, + pin::Pin, + rc::Rc, +}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +struct NotAuth { + message: String, +} + +/// JWT claims structure containing user authentication information. +/// +/// This structure represents the payload of a JSON Web Token (JWT) used +/// for user authentication in the Teus system. It follows JWT standard +/// claims with additional application-specific fields. +/// +/// # Standard JWT Claims +/// +/// - `sub` (Subject): Identifies the user the token was issued for +/// - `exp` (Expiration): Unix timestamp when the token expires +/// - `iat` (Issued At): Unix timestamp when the token was created +/// +/// # Custom Claims +/// +/// - `id`: Numeric user ID for database operations +/// +/// # Security Considerations +/// +/// - Tokens should be validated for expiration before use +/// - The signing key must be kept secure and consistent +/// - Claims should not contain sensitive information +/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::auth::middleware::Claims; +/// use chrono::Utc; +/// +/// let claims = Claims { +/// sub: "admin".to_string(), +/// exp: (Utc::now().timestamp() + 3600) as usize, // 1 hour from now +/// iat: Utc::now().timestamp() as usize, +/// id: 1, +/// }; +/// ``` +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct Claims { + /// Subject - the username of the authenticated user. + /// + /// This field identifies which user the token belongs to and + /// corresponds to the username stored in the database. + pub sub: String, + + /// Expiration time as a Unix timestamp. + /// + /// Tokens should be rejected if the current time is after + /// this timestamp. This prevents indefinite token reuse. 
/// Factory that builds authentication middleware instances wired with the
/// JWT validation secret.
///
/// Register once at application startup with `App::wrap(...)`; actix-web
/// then asks this factory for a middleware instance per service scope.
pub struct AuthMiddlewareFactory {
    /// Secret used to validate incoming JWTs; must match the signing secret.
    jwt_secret: String,
}

impl AuthMiddlewareFactory {
    /// Builds a factory around the given JWT validation secret.
    pub fn new(jwt_secret: String) -> Self {
        AuthMiddlewareFactory { jwt_secret }
    }
}
+/// +/// This middleware automatically extracts Bearer tokens from the Authorization +/// header, validates them using the configured JWT secret, and makes the user +/// claims available to downstream handlers through request extensions. +/// +/// # Request Processing +/// +/// 1. Extracts the Authorization header from the request +/// 2. Validates the Bearer token format +/// 3. Decodes and validates the JWT using the secret key +/// 4. Injects the claims into request extensions for handler access +/// 5. Returns 401 Unauthorized for invalid or missing tokens +/// +/// # Token Format +/// +/// The middleware expects tokens in the standard Bearer format: +/// ``` +/// Authorization: Bearer +/// ``` +/// +/// # Error Handling +/// +/// Returns `401 Unauthorized` for: +/// - Missing Authorization header +/// - Invalid header format (not starting with "Bearer ") +/// - Invalid or expired JWT tokens +/// - Tokens signed with a different secret +pub struct AuthMiddleware { + /// The wrapped service to call after successful authentication. + service: Rc, + + /// The JWT secret key for token validation. 
+ jwt_secret: String, +} + +impl Transform for AuthMiddlewareFactory +where + S: Service, Error = Error> + 'static, + S::Future: 'static, + B: 'static, +{ + type Response = ServiceResponse; + type Error = Error; + type Transform = AuthMiddleware; + type InitError = (); + type Future = Ready>; + + fn new_transform(&self, service: S) -> Self::Future { + ready(Ok(AuthMiddleware { + service: Rc::new(service), + jwt_secret: self.jwt_secret.clone(), + })) + } +} + +impl Service for AuthMiddleware +where + S: Service, Error = Error> + 'static, + S::Future: 'static, + B: 'static, +{ + type Response = ServiceResponse; + type Error = Error; + type Future = Pin>>>; + + forward_ready!(service); + + fn call(&self, req: ServiceRequest) -> Self::Future { + let service = self.service.clone(); + let jwt_secret = self.jwt_secret.clone(); + + Box::pin(async move { + let auth_header = req.headers().get("Authorization"); + let token = match auth_header { + Some(header) => { + let header_str = header + .to_str() + .map_err(|_| ErrorUnauthorized("Invalid Authorization header format"))?; + + if !header_str.starts_with("Bearer ") { + return Err(ErrorUnauthorized("Invalid Authorization header format")); + } + + header_str[7..].trim() + } + None => return Err(ErrorUnauthorized("Authorization header missing")), + }; + + let token_data = decode::( + token, + &DecodingKey::from_secret(jwt_secret.as_bytes()), + &Validation::new(Algorithm::HS256), + ) + .map_err(|_| ErrorUnauthorized("Invalid token"))?; + + let claims = token_data.claims; + req.extensions_mut().insert(claims.clone()); + service.call(req).await + }) + } +} diff --git a/src/webserver/auth/mod.rs b/teus/webserver/auth/mod.rs similarity index 100% rename from src/webserver/auth/mod.rs rename to teus/webserver/auth/mod.rs index acefefc..f3ef93f 100644 --- a/src/webserver/auth/mod.rs +++ b/teus/webserver/auth/mod.rs @@ -1,5 +1,5 @@ -pub mod middleware; pub mod handlers; -pub mod schema; -pub mod query; +pub mod middleware; pub mod 
mutation; +pub mod query; +pub mod schema; diff --git a/src/webserver/auth/mutation.rs b/teus/webserver/auth/mutation.rs similarity index 98% rename from src/webserver/auth/mutation.rs rename to teus/webserver/auth/mutation.rs index 07b9c5d..6fe84c3 100644 --- a/src/webserver/auth/mutation.rs +++ b/teus/webserver/auth/mutation.rs @@ -1,6 +1,6 @@ use super::schema::User; -use argon2::password_hash::SaltString; use argon2::password_hash::rand_core::OsRng; +use argon2::password_hash::SaltString; use argon2::{Argon2, PasswordHasher}; use diesel::result::Error; use diesel::{RunQueryDsl, SqliteConnection}; @@ -15,10 +15,10 @@ impl User { // Get a random salt for the password let salt = SaltString::generate(&mut OsRng); - + // Create an Argon2 instance with default parameters let argon2 = Argon2::default(); - + // Generate the password hash as a string let password_hash = argon2 .hash_password(password.as_bytes(), &salt) diff --git a/src/webserver/auth/query.rs b/teus/webserver/auth/query.rs similarity index 93% rename from src/webserver/auth/query.rs rename to teus/webserver/auth/query.rs index d0b5e28..a6ab817 100644 --- a/src/webserver/auth/query.rs +++ b/teus/webserver/auth/query.rs @@ -1,4 +1,4 @@ -use super::schema::{User}; +use super::schema::User; use diesel::{prelude::*, result::Error}; impl User { diff --git a/teus/webserver/auth/schema.rs b/teus/webserver/auth/schema.rs new file mode 100644 index 0000000..a4676ef --- /dev/null +++ b/teus/webserver/auth/schema.rs @@ -0,0 +1,127 @@ +//! User authentication schema structures. +//! +//! This module defines the database schema structure for user accounts +//! in the Teus system. It handles user authentication data including +//! secure password storage with Argon2 hashing and salt generation. + +use diesel::prelude::*; +use serde::Serialize; + +/// Database schema structure for user accounts. +/// +/// This structure represents user authentication data stored in the SQLite +/// database. 
It includes all necessary fields for secure user management +/// including password hashing and salt storage. +/// +/// # Security Features +/// +/// - Passwords are stored as Argon2 hashes, never in plain text +/// - Each password has a unique salt to prevent rainbow table attacks +/// - Username uniqueness is enforced at the database level +/// - User IDs are auto-generated and used for session management +/// +/// # Database Operations +/// +/// This structure supports both insertion (user creation) and querying +/// (user lookup) operations through Diesel ORM. The `Insertable` trait +/// allows new user creation, while `Queryable` enables data retrieval. +/// +/// # JSON Serialization +/// +/// The structure can be serialized to JSON for API responses, but note +/// that password hashes and salts should typically be excluded from +/// client-facing responses for security reasons. +/// +/// # Examples +/// +/// Creating a new user (handled by implementation methods): +/// ```rust +/// use teus::webserver::auth::schema::User; +/// // User creation is handled by User::create() method with proper hashing +/// ``` +/// +/// # Related Modules +/// +/// - `mutation.rs`: Contains `User::create()` for user registration +/// - `query.rs`: Contains `User::find_by_username()` for authentication +/// - `handlers.rs`: Uses this structure in login/signup endpoints +#[derive(Insertable, Queryable, Selectable, Serialize, Debug)] +#[diesel(table_name = crate::schema::user)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct User { + /// Database-generated unique identifier for the user. + /// + /// This is the primary key and is automatically assigned when + /// a new user is created. Used for session management and + /// referencing the user in JWT tokens and other operations. 
+ /// + /// # Database Behavior + /// + /// - `None` for new users before insertion + /// - `Some(id)` for existing users retrieved from database + pub id: Option, + + /// Unique username for the user account. + /// + /// This serves as the primary login identifier and must be unique + /// across all users in the system. Username matching is case-sensitive. + /// + /// # Constraints + /// + /// - Must be unique (enforced by database) + /// - Cannot be empty or null + /// - Used for login authentication + /// + /// # Security Notes + /// + /// - Safe to include in API responses and logs + /// - Used as the JWT token subject (`sub` claim) + pub username: String, + + /// Argon2 password hash for secure authentication. + /// + /// This field stores the hashed representation of the user's password + /// using the Argon2 algorithm with a unique salt. The original password + /// is never stored in the database. + /// + /// # Security Features + /// + /// - Uses Argon2id variant for maximum security + /// - Includes salt, iteration count, and memory parameters + /// - Resistant to rainbow table and brute force attacks + /// - Format follows PHC string format for compatibility + /// + /// # Important Notes + /// + /// - **NEVER** include this field in API responses + /// - **NEVER** log this field value + /// - Only used for password verification during login + /// - Updated only when user changes their password + pub password: String, + + /// Cryptographic salt used for password hashing. + /// + /// This field stores the unique salt that was used when hashing + /// the user's password. Each user has a different salt to prevent + /// rainbow table attacks and ensure password hash uniqueness. 
+ /// + /// # Security Properties + /// + /// - Cryptographically random and unique per user + /// - Generated using secure random number generation + /// - Combined with password before hashing + /// - Stored separately but used during verification + /// + /// # Implementation Details + /// + /// - Generated using `SaltString::generate()` from the `argon2` crate + /// - Base64-encoded string format for database storage + /// - Required for Argon2 password verification process + /// + /// # Security Warning + /// + /// Like the password hash, this field should **NEVER** be included + /// in API responses or logged, as it could potentially aid in + /// password cracking attempts. + pub salt: String, +} diff --git a/teus/webserver/docker/handlers.rs b/teus/webserver/docker/handlers.rs new file mode 100644 index 0000000..0e802e8 --- /dev/null +++ b/teus/webserver/docker/handlers.rs @@ -0,0 +1,84 @@ +use std::fmt::Debug; + +use actix_web::{get, web, HttpResponse, Responder}; +use docker::docker::DockerClient; +use serde::{Deserialize, Serialize}; +use serde_qs::to_string; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GenericDockerResponse { + message: String, +} + +#[derive(Debug, Deserialize, Serialize)] +struct ContainersQuery { + all: Option, +} + +#[get("/docker/version")] +async fn get_docker_version() -> impl Responder { + let mut docker_client = DockerClient::new(None); + match docker_client.get_version() { + Ok(version) => HttpResponse::Ok().json(version), + Err(err) => HttpResponse::InternalServerError().json(GenericDockerResponse { + message: format!("Error getting docker version: {:?}", err), + }), + } +} + +#[get("/docker/containers")] +async fn get_docker_containers(query: web::Query) -> impl Responder { + println!("Query: {:?}", query); + + let query_params: ContainersQuery = query.into_inner(); + let query_string = to_string(&query_params).unwrap(); + + println!("Query: {:?}", query_string); + let mut docker_client = 
DockerClient::new(None); + + match docker_client.get_containers(Some(query_string)) { + Ok(containers) => HttpResponse::Ok().json(containers), + Err(err) => HttpResponse::InternalServerError().json(GenericDockerResponse { + message: format!("Error getting docker containers: {:?}", err), + }), + } +} + +#[get("/docker/container/{id}")] +async fn get_docker_container(id: web::Path<String>) -> impl Responder { + let mut docker_client = DockerClient::new(None); + let container_id_clone = id.clone(); + + match docker_client.get_container_details(container_id_clone) { + Ok(container) => HttpResponse::Ok().json(container), + Err(err) => HttpResponse::InternalServerError().json(GenericDockerResponse { + message: format!("Error getting docker container {}: {:?}", id, err), + }), + } +} + +#[get("/docker/volumes")] +async fn get_docker_volumes() -> impl Responder { + let mut docker_client = DockerClient::new(None); + match docker_client.get_volumes() { + Ok(volumes) => HttpResponse::Ok().json(volumes), + Err(err) => HttpResponse::InternalServerError().json(GenericDockerResponse { + message: format!("Error getting docker volumes: {:?}", err), + }), + } +} + +// FIXME: The id is not being passed correctly +// I think the problem is in the enum DockerApi +#[get("/docker/volumes/{id}")] +async fn get_docker_volume(id: web::Path<String>) -> impl Responder { + let mut docker_client = DockerClient::new(None); + + let cloned_id = id.clone(); + match docker_client.get_volume_details(cloned_id) { + Ok(volume) => HttpResponse::Ok().json(volume), + Err(err) => HttpResponse::InternalServerError().json(GenericDockerResponse { + message: format!("Error getting docker volume {} details: {:?}", id, err), + }), + } +} diff --git a/teus/webserver/docker/mod.rs b/teus/webserver/docker/mod.rs new file mode 100644 index 0000000..c3d4495 --- /dev/null +++ b/teus/webserver/docker/mod.rs @@ -0,0 +1 @@ +pub mod handlers; diff --git a/src/webserver/mod.rs b/teus/webserver/mod.rs similarity index 61% rename from 
src/webserver/mod.rs rename to teus/webserver/mod.rs index 3cb00a8..5e491be 100644 --- a/src/webserver/mod.rs +++ b/teus/webserver/mod.rs @@ -1,4 +1,5 @@ pub mod api; -pub mod services; +pub mod auth; +pub mod docker; pub mod models; -pub mod auth; \ No newline at end of file +pub mod services; diff --git a/src/webserver/models/mod.rs b/teus/webserver/models/mod.rs similarity index 100% rename from src/webserver/models/mod.rs rename to teus/webserver/models/mod.rs diff --git a/teus/webserver/models/sysmodels.rs b/teus/webserver/models/sysmodels.rs new file mode 100644 index 0000000..fb55dbd --- /dev/null +++ b/teus/webserver/models/sysmodels.rs @@ -0,0 +1,725 @@ +//! System monitoring API response models. +//! +//! This module defines the response structures used by the Teus system monitoring +//! API endpoints. These structures provide a standardized format for returning +//! system information, network details, and resource utilization data to clients. + +use serde::Serialize; + +/// Network interface IP address information for API responses. +/// +/// This structure represents IP address configuration for a specific network +/// interface on the monitored system. It includes both the address and subnet +/// information necessary for network analysis and monitoring. +/// +/// # Usage +/// +/// Typically used as part of system information responses to provide detailed +/// network configuration data. Multiple `IpInfo` instances may be included +/// for systems with multiple network interfaces. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::models::sysmodels::IpInfo; +/// +/// let ethernet_info = IpInfo { +/// interface: "eth0".to_string(), +/// addr: "192.168.1.100".to_string(), +/// prefix: 24, +/// }; +/// +/// let loopback_info = IpInfo { +/// interface: "lo".to_string(), +/// addr: "127.0.0.1".to_string(), +/// prefix: 8, +/// }; +/// ``` +/// +/// # JSON Representation +/// +/// ```json +/// { +/// "interface": "eth0", +/// "addr": "192.168.1.100", +/// "prefix": 24 +/// } +/// ``` +#[derive(serde::Serialize, Debug)] +pub struct IpInfo { + /// The name of the network interface (e.g., "eth0", "wlan0", "lo"). + /// + /// This identifies which physical or virtual network adapter + /// the IP address is assigned to. Common interface names include: + /// - "eth0", "eth1": Ethernet interfaces + /// - "wlan0", "wlan1": Wireless interfaces + /// - "lo": Loopback interface + /// - "docker0": Docker bridge interface + pub interface: String, + + /// The IP address in string format. + /// + /// Can be either IPv4 (e.g., "192.168.1.100") or IPv6 format. + /// The address represents the current network configuration + /// for this interface. + pub addr: String, + + /// The subnet prefix length (CIDR notation). + /// + /// Indicates the number of bits used for the network portion + /// of the address. Common values: + /// - 8: Class A networks (255.0.0.0) + /// - 16: Class B networks (255.255.0.0) + /// - 24: Class C networks (255.255.255.0) + /// - 32: Host-specific routes + pub prefix: u8, +} + +/// Network interface MAC address information for API responses. +/// +/// This structure represents the Media Access Control (MAC) address +/// configuration for a specific network interface. MAC addresses are +/// hardware identifiers that are unique to each network interface. +/// +/// # Usage +/// +/// Used in system information responses to provide network hardware +/// identification data. 
Essential for network troubleshooting, +/// asset management, and security monitoring. +/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::models::sysmodels::MACInfo; +/// +/// let mac_info = MACInfo { +/// interface: "eth0".to_string(), +/// mac: "aa:bb:cc:dd:ee:ff".to_string(), +/// }; +/// ``` +/// +/// # JSON Representation +/// +/// ```json +/// { +/// "interface": "eth0", +/// "mac": "aa:bb:cc:dd:ee:ff" +/// } +/// ``` +#[derive(serde::Serialize, Debug)] +pub struct MACInfo { + /// The name of the network interface. + /// + /// Corresponds to the same interface names used in `IpInfo`, + /// allowing clients to correlate IP and MAC address information + /// for each network adapter. + pub interface: String, + + /// The MAC address in colon-separated hexadecimal format. + /// + /// Standard format is six groups of two hexadecimal digits + /// separated by colons (e.g., "aa:bb:cc:dd:ee:ff"). + /// This is the unique hardware identifier for the network interface. + pub mac: String, +} + +/// Comprehensive system information response structure. +/// +/// This structure provides a complete overview of the monitored system's +/// basic configuration and network setup. It's used by API endpoints +/// that need to return general system identification and network data. +/// +/// # Use Cases +/// +/// - System identification and inventory management +/// - Network configuration reporting +/// - Initial system assessment and setup verification +/// - Dashboard overview displays +/// +/// # Network Information +/// +/// The structure includes both summary (primary IPv4) and detailed +/// network information (all interfaces with IP and MAC addresses). +/// This allows clients to choose the appropriate level of detail +/// for their use case. 
+/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::models::sysmodels::GenericSysInfoResponse; +/// +/// let sys_info = GenericSysInfoResponse { +/// hostname: "server-01".to_string(), +/// os: "Ubuntu 22.04.3 LTS".to_string(), +/// uptime: "5 days, 14:32:10".to_string(), +/// kernel_version: "5.15.0-91-generic".to_string(), +/// ipv4: "192.168.1.100".to_string(), +/// networks: vec![/* network interfaces */], +/// mac_addresses: vec![/* MAC addresses */], +/// }; +/// ``` +/// +/// # JSON Response Format +/// +/// ```json +/// { +/// "hostname": "server-01", +/// "os": "Ubuntu 22.04.3 LTS", +/// "uptime": "5 days, 14:32:10", +/// "kernel_version": "5.15.0-91-generic", +/// "ipv4": "192.168.1.100", +/// "networks": [...], +/// "mac_addresses": [...] +/// } +/// ``` +#[derive(serde::Serialize, Debug)] +pub struct GenericSysInfoResponse { + /// The system's configured hostname. + /// + /// This is the name by which the system identifies itself + /// on the network. Used for system identification and + /// network management purposes. + pub hostname: String, + + /// The operating system name and version. + /// + /// Provides detailed OS information including distribution + /// name and version number. Examples: + /// - "Ubuntu 22.04.3 LTS" + /// - "CentOS Linux 8.4.2105" + /// - "Windows Server 2019" + pub os: String, + + /// Human-readable system uptime information. + /// + /// Indicates how long the system has been running since + /// the last boot. Format may vary but typically includes + /// days, hours, minutes, and seconds. + pub uptime: String, + + /// The kernel version string. + /// + /// Provides the version of the operating system kernel. + /// Important for compatibility checking and security + /// vulnerability assessment. + pub kernel_version: String, + + /// The primary IPv4 address of the system. + /// + /// This is typically the main network address used for + /// external communication. 
Useful for quick identification + /// and network connectivity verification. + pub ipv4: String, + + /// Detailed information about all network interfaces. + /// + /// Contains IP address configuration for each network + /// interface on the system, including physical and + /// virtual interfaces. + pub networks: Vec<IpInfo>, + + /// MAC address information for all network interfaces. + /// + /// Provides hardware identifiers for network interfaces, + /// useful for asset tracking and network security. + pub mac_addresses: Vec<MACInfo>, +} + +impl GenericSysInfoResponse {} +impl Default for GenericSysInfoResponse { + fn default() -> Self { + Self { + hostname: "No Info".to_string(), + os: "No Info".to_string(), + uptime: "No Info".to_string(), + kernel_version: "No Info".to_string(), + ipv4: "No Info".to_string(), + networks: vec![], + mac_addresses: vec![], + } + } +} + +/// Real-time system performance and resource utilization response. +/// +/// This structure represents a complete snapshot of system resource +/// usage at a specific point in time. It's the primary response format +/// for monitoring API endpoints that provide current system performance data. +/// +/// # Monitoring Data +/// +/// Includes comprehensive resource utilization metrics: +/// - CPU usage percentage +/// - Memory usage and availability +/// - Swap space utilization +/// - Storage usage across all mounted filesystems +/// +/// # Time Series Data +/// +/// The timestamp field enables this structure to be used for time-series +/// monitoring, allowing clients to track performance trends over time. 
+/// +/// # API Endpoints +/// +/// Typically returned by: +/// - `/api/system/current` - Current system status +/// - `/api/monitoring/snapshot` - Performance snapshot +/// - Real-time monitoring WebSocket endpoints +/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::models::sysmodels::{SysInfoResponse, DiskInfoResponse}; +/// use chrono::Utc; +/// +/// let response = SysInfoResponse { +/// timestamp: Utc::now().to_rfc3339(), +/// cpu_usage: 35.2, +/// ram_usage: 8192.0, +/// total_ram: 16384.0, +/// free_ram: 8192.0, +/// used_swap: 0.0, +/// disks: vec![/* disk information */], +/// }; +/// ``` +/// +/// # JSON Response Format +/// +/// ```json +/// { +/// "timestamp": "2024-01-15T10:30:00Z", +/// "cpu_usage": 35.2, +/// "ram_usage": 8192.0, +/// "total_ram": 16384.0, +/// "free_ram": 8192.0, +/// "used_swap": 0.0, +/// "disks": [...] +/// } +/// ``` +#[derive(Serialize)] +pub struct SysInfoResponse { + /// ISO 8601 timestamp when this data was collected. + /// + /// Typically in RFC3339 format (e.g., "2024-01-15T10:30:00Z"). + /// Essential for time-series analysis and determining data freshness. + pub timestamp: String, + + /// Current CPU usage as a percentage (0.0 to 100.0). + /// + /// Represents the overall CPU utilization across all cores + /// and threads. Values approaching 100% indicate high + /// computational load that may affect system responsiveness. + pub cpu_usage: f32, + + /// Current RAM usage in megabytes. + /// + /// This represents the amount of physical memory currently + /// allocated to running processes, excluding cached and + /// buffered memory that can be quickly reclaimed. + pub ram_usage: f32, + + /// Total available RAM in the system, in megabytes. + /// + /// This is the total physical memory capacity and should + /// remain constant unless hardware changes occur. Used + /// to calculate usage percentages and available capacity. 
+ pub total_ram: f32, + + /// Amount of RAM currently free and immediately available, in megabytes. + /// + /// This represents memory that can be immediately allocated + /// to new processes without requiring swap operations or + /// cache eviction. Critical for assessing memory pressure. + pub free_ram: f32, + + /// Current swap space usage in megabytes. + /// + /// High swap usage may indicate memory pressure and can + /// significantly impact system performance. Values should + /// typically remain low in well-configured systems. + pub used_swap: f32, + + /// Storage utilization information for all mounted filesystems. + /// + /// Provides detailed disk usage data for each storage device + /// or partition mounted on the system. Critical for storage + /// capacity planning and preventing disk space exhaustion. + pub disks: Vec<DiskInfoResponse>, +} + +/// Storage device utilization information for API responses. +/// +/// This structure represents the storage usage details for a single +/// filesystem or storage device. It provides comprehensive information +/// about capacity, utilization, and availability for storage monitoring. 
+/// +/// # Storage Metrics +/// +/// Includes all essential storage metrics: +/// - Total capacity and current usage +/// - Available space for new data +/// - Filesystem type and mount location +/// +/// # Monitoring Applications +/// +/// Used for: +/// - Storage capacity planning and alerts +/// - Disk space utilization tracking +/// - Filesystem performance monitoring +/// - Storage infrastructure management +/// +/// # Examples +/// +/// ```rust +/// use teus::webserver::models::sysmodels::DiskInfoResponse; +/// +/// let root_disk = DiskInfoResponse { +/// filesystem: "ext4".to_string(), +/// mount_point: "/".to_string(), +/// total_space: 1000000, // 1TB in MB +/// available_space: 750000, // 750GB available +/// used_space: 250000, // 250GB used +/// }; +/// +/// let data_disk = DiskInfoResponse { +/// filesystem: "xfs".to_string(), +/// mount_point: "/data".to_string(), +/// total_space: 2000000, // 2TB in MB +/// available_space: 1800000, // 1.8TB available +/// used_space: 200000, // 200GB used +/// }; +/// ``` +/// +/// # JSON Response Format +/// +/// ```json +/// { +/// "filesystem": "ext4", +/// "mount_point": "/", +/// "total_space": 1000000, +/// "available_space": 750000, +/// "used_space": 250000 +/// } +/// ``` +#[derive(Serialize)] +pub struct DiskInfoResponse { + /// The filesystem type (e.g., "ext4", "xfs", "ntfs", "btrfs"). + /// + /// Identifies the filesystem format and technology used + /// on this storage device. Important for understanding + /// performance characteristics and supported features. + pub filesystem: String, + + /// The mount point or drive path where the filesystem is accessible. + /// + /// Examples: + /// - "/" - Root filesystem on Unix-like systems + /// - "/home" - User data partition + /// - "/var" - Variable data partition + /// - "C:" - Windows system drive + /// - "/mnt/data" - Mounted data drive + pub mount_point: String, + + /// Total storage capacity in megabytes. 
+ /// + /// This represents the complete size of the filesystem, + /// including space used by the filesystem metadata and + /// any reserved blocks. Used for capacity planning calculations. + pub total_space: i32, + + /// Currently available space for new data, in megabytes. + /// + /// This is the space that can be immediately used for new + /// files and directories. May be less than (total - used) + /// due to filesystem reservations and overhead. + pub available_space: i32, + + /// Currently used storage space in megabytes. + /// + /// Represents the space occupied by files, directories, + /// and filesystem metadata. Used to calculate utilization + /// percentages and remaining capacity. + pub used_space: i32, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ip_info_creation() { + let ip_info = IpInfo { + interface: "eth0".to_string(), + addr: "192.168.1.10".to_string(), + prefix: 24, + }; + + assert_eq!(ip_info.interface, "eth0"); + assert_eq!(ip_info.addr, "192.168.1.10"); + assert_eq!(ip_info.prefix, 24); + } + + #[test] + fn test_ip_info_serialization() { + let ip_info = IpInfo { + interface: "wlan0".to_string(), + addr: "10.0.0.5".to_string(), + prefix: 16, + }; + + let serialized = serde_json::to_string(&ip_info).unwrap(); + assert!(serialized.contains("\"interface\":\"wlan0\"")); + assert!(serialized.contains("\"addr\":\"10.0.0.5\"")); + assert!(serialized.contains("\"prefix\":16")); + } + + #[test] + fn test_mac_info_creation() { + let mac_info = MACInfo { + interface: "eth0".to_string(), + mac: "aa:bb:cc:dd:ee:ff".to_string(), + }; + + assert_eq!(mac_info.interface, "eth0"); + assert_eq!(mac_info.mac, "aa:bb:cc:dd:ee:ff"); + } + + #[test] + fn test_mac_info_serialization() { + let mac_info = MACInfo { + interface: "wlan0".to_string(), + mac: "11:22:33:44:55:66".to_string(), + }; + + let serialized = serde_json::to_string(&mac_info).unwrap(); + assert!(serialized.contains("\"interface\":\"wlan0\"")); + 
assert!(serialized.contains("\"mac\":\"11:22:33:44:55:66\"")); + } + + #[test] + fn test_generic_sys_info_default() { + let sys_info = GenericSysInfoResponse::default(); + + assert_eq!(sys_info.hostname, "No Info"); + assert_eq!(sys_info.os, "No Info"); + assert_eq!(sys_info.uptime, "No Info"); + assert_eq!(sys_info.kernel_version, "No Info"); + assert_eq!(sys_info.ipv4, "No Info"); + assert!(sys_info.networks.is_empty()); + assert!(sys_info.mac_addresses.is_empty()); + } + + #[test] + fn test_generic_sys_info_creation() { + let networks = vec![ + IpInfo { + interface: "eth0".to_string(), + addr: "192.168.1.10".to_string(), + prefix: 24, + }, + IpInfo { + interface: "lo".to_string(), + addr: "127.0.0.1".to_string(), + prefix: 8, + }, + ]; + + let mac_addresses = vec![MACInfo { + interface: "eth0".to_string(), + mac: "aa:bb:cc:dd:ee:ff".to_string(), + }]; + + let sys_info = GenericSysInfoResponse { + hostname: "test-machine".to_string(), + os: "Linux".to_string(), + uptime: "5 days".to_string(), + kernel_version: "5.4.0".to_string(), + ipv4: "192.168.1.10".to_string(), + networks, + mac_addresses, + }; + + assert_eq!(sys_info.hostname, "test-machine"); + assert_eq!(sys_info.os, "Linux"); + assert_eq!(sys_info.uptime, "5 days"); + assert_eq!(sys_info.kernel_version, "5.4.0"); + assert_eq!(sys_info.ipv4, "192.168.1.10"); + assert_eq!(sys_info.networks.len(), 2); + assert_eq!(sys_info.mac_addresses.len(), 1); + } + + #[test] + fn test_generic_sys_info_serialization() { + let sys_info = GenericSysInfoResponse { + hostname: "server1".to_string(), + os: "Ubuntu 20.04".to_string(), + uptime: "10 hours".to_string(), + kernel_version: "5.4.0-42".to_string(), + ipv4: "10.0.0.1".to_string(), + networks: vec![], + mac_addresses: vec![], + }; + + let serialized = serde_json::to_string(&sys_info).unwrap(); + assert!(serialized.contains("\"hostname\":\"server1\"")); + assert!(serialized.contains("\"os\":\"Ubuntu 20.04\"")); + assert!(serialized.contains("\"uptime\":\"10 
hours\"")); + assert!(serialized.contains("\"kernel_version\":\"5.4.0-42\"")); + assert!(serialized.contains("\"ipv4\":\"10.0.0.1\"")); + } + + #[test] + fn test_disk_info_response_creation() { + let disk_info = DiskInfoResponse { + filesystem: "ext4".to_string(), + mount_point: "/".to_string(), + total_space: 1000000, + available_space: 500000, + used_space: 500000, + }; + + assert_eq!(disk_info.filesystem, "ext4"); + assert_eq!(disk_info.mount_point, "/"); + assert_eq!(disk_info.total_space, 1000000); + assert_eq!(disk_info.available_space, 500000); + assert_eq!(disk_info.used_space, 500000); + } + + #[test] + fn test_disk_info_response_serialization() { + let disk_info = DiskInfoResponse { + filesystem: "ntfs".to_string(), + mount_point: "C:\\".to_string(), + total_space: 2000000, + available_space: 1000000, + used_space: 1000000, + }; + + let serialized = serde_json::to_string(&disk_info).unwrap(); + assert!(serialized.contains("\"filesystem\":\"ntfs\"")); + assert!(serialized.contains("\"mount_point\":\"C:\\\\\"")); + assert!(serialized.contains("\"total_space\":2000000")); + assert!(serialized.contains("\"available_space\":1000000")); + assert!(serialized.contains("\"used_space\":1000000")); + } + + #[test] + fn test_sys_info_response_creation() { + let disks = vec![ + DiskInfoResponse { + filesystem: "ext4".to_string(), + mount_point: "/".to_string(), + total_space: 1000000, + available_space: 600000, + used_space: 400000, + }, + DiskInfoResponse { + filesystem: "ext4".to_string(), + mount_point: "/home".to_string(), + total_space: 2000000, + available_space: 1500000, + used_space: 500000, + }, + ]; + + let sys_info = SysInfoResponse { + timestamp: "2024-01-01T00:00:00Z".to_string(), + cpu_usage: 25.5, + ram_usage: 4096.0, + total_ram: 8192.0, + free_ram: 4096.0, + used_swap: 512.0, + disks, + }; + + assert_eq!(sys_info.timestamp, "2024-01-01T00:00:00Z"); + assert_eq!(sys_info.cpu_usage, 25.5); + assert_eq!(sys_info.ram_usage, 4096.0); + 
assert_eq!(sys_info.total_ram, 8192.0); + assert_eq!(sys_info.free_ram, 4096.0); + assert_eq!(sys_info.used_swap, 512.0); + assert_eq!(sys_info.disks.len(), 2); + assert_eq!(sys_info.disks[0].mount_point, "/"); + assert_eq!(sys_info.disks[1].mount_point, "/home"); + } + + #[test] + fn test_sys_info_response_serialization() { + let sys_info = SysInfoResponse { + timestamp: "2024-01-01T12:00:00Z".to_string(), + cpu_usage: 50.0, + ram_usage: 2048.0, + total_ram: 4096.0, + free_ram: 2048.0, + used_swap: 0.0, + disks: vec![], + }; + + let serialized = serde_json::to_string(&sys_info).unwrap(); + assert!(serialized.contains("\"timestamp\":\"2024-01-01T12:00:00Z\"")); + assert!(serialized.contains("\"cpu_usage\":50")); + assert!(serialized.contains("\"ram_usage\":2048")); + assert!(serialized.contains("\"total_ram\":4096")); + assert!(serialized.contains("\"free_ram\":2048")); + assert!(serialized.contains("\"used_swap\":0")); + assert!(serialized.contains("\"disks\":[]")); + } + + #[test] + fn test_debug_formatting() { + let ip_info = IpInfo { + interface: "eth0".to_string(), + addr: "192.168.1.1".to_string(), + prefix: 24, + }; + + let debug_str = format!("{:?}", ip_info); + assert!(debug_str.contains("IpInfo")); + assert!(debug_str.contains("eth0")); + assert!(debug_str.contains("192.168.1.1")); + assert!(debug_str.contains("24")); + + let mac_info = MACInfo { + interface: "eth0".to_string(), + mac: "00:11:22:33:44:55".to_string(), + }; + + let debug_str = format!("{:?}", mac_info); + assert!(debug_str.contains("MACInfo")); + assert!(debug_str.contains("eth0")); + assert!(debug_str.contains("00:11:22:33:44:55")); + } + + #[test] + fn test_edge_cases() { + // Test with empty strings + let ip_info = IpInfo { + interface: "".to_string(), + addr: "".to_string(), + prefix: 0, + }; + assert_eq!(ip_info.interface, ""); + assert_eq!(ip_info.addr, ""); + assert_eq!(ip_info.prefix, 0); + + // Test with maximum prefix + let ip_info_max = IpInfo { + interface: "test".to_string(), 
+ addr: "255.255.255.255".to_string(), + prefix: 32, + }; + assert_eq!(ip_info_max.prefix, 32); + + // Test disk with zero values + let disk_info = DiskInfoResponse { + filesystem: "tmpfs".to_string(), + mount_point: "/tmp".to_string(), + total_space: 0, + available_space: 0, + used_space: 0, + }; + assert_eq!(disk_info.total_space, 0); + assert_eq!(disk_info.available_space, 0); + assert_eq!(disk_info.used_space, 0); + } +} diff --git a/teus/webserver/services/mod.rs b/teus/webserver/services/mod.rs new file mode 100644 index 0000000..2d24348 --- /dev/null +++ b/teus/webserver/services/mod.rs @@ -0,0 +1 @@ +pub mod systeminfo; diff --git a/src/webserver/services/systeminfo.rs b/teus/webserver/services/systeminfo.rs similarity index 98% rename from src/webserver/services/systeminfo.rs rename to teus/webserver/services/systeminfo.rs index 0c053c6..b407cbd 100644 --- a/src/webserver/services/systeminfo.rs +++ b/teus/webserver/services/systeminfo.rs @@ -1,5 +1,5 @@ use crate::webserver::models::sysmodels::{GenericSysInfoResponse, IpInfo, MACInfo}; -use actix_web::{HttpResponse, Responder, get}; +use actix_web::{get, HttpResponse, Responder}; use sysinfo::{Networks, System}; fn collect_network_info() -> Vec {