diff --git a/.dockerignore b/.dockerignore index 386c86a71..891c644f7 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,5 +2,17 @@ node_modules /dist logs c2d_storage -.env.local -.env \ No newline at end of file +databases +.env +.env.* +.git +.github +docs +src/test +*.md +*.log +.nyc_output +coverage +docker-compose.yml +elasticsearch-compose.yml +typesense-compose.yml diff --git a/.env.example b/.env.example index 0f4c09503..e632ab81e 100644 --- a/.env.example +++ b/.env.example @@ -65,6 +65,11 @@ export P2P_BOOTSTRAP_NODES= export P2P_FILTER_ANNOUNCED_ADDRESSES= ## compute +# Each environment defines its own resources (CPU, RAM, disk, GPUs) with full configuration. +# CPU, RAM, and disk are per-env exclusive: inUse tracked only within the environment where the job runs. +# A global check ensures the aggregate usage across all environments does not exceed physical capacity. +# GPUs are shared-exclusive: if a job on envA uses gpu0, it shows as in-use on envB too. +# CPU cores are automatically partitioned across environments based on each env's cpu.total. +# CPU and RAM defaults are auto-detected from the system when not configured. +# export DOCKER_COMPUTE_ENVIRONMENTS='[{"socketPath":"/var/run/docker.sock","environments":[{"id":"envA","storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":16,"max":16,"min":1,"type":"ram"},{"id":"disk","total":500,"max":500,"min":10,"type":"disk"},{"id":"gpu0","total":1,"max":1,"min":0,"type":"gpu","init":{"deviceRequests":{"Driver":"nvidia","DeviceIDs":["0"],"Capabilities":[["gpu"]]}}}],"fees":{"1":[{"feeToken":"0x123","prices":[{"id":"cpu","price":1},{"id":"ram","price":0.1},{"id":"disk","price":0.01},{"id":"gpu0","price":5}]}]}}]}]' export DOCKER_COMPUTE_ENVIRONMENTS= - - diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b451e87a..513b366b9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -291,7 +291,7 @@ jobs: DB_PASSWORD: 'changeme' MAX_REQ_PER_MINUTE: 320 MAX_CONNECTIONS_PER_MINUTE: 320 - DOCKER_COMPUTE_ENVIRONMENTS: '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration": 60,"fees":{"8996":[{"prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration": 10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + DOCKER_COMPUTE_ENVIRONMENTS: '[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"8996":[{"prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' DOCKER_REGISTRY_AUTHS: ${{ env.DOCKER_REGISTRY_AUTHS }} - name: Check Ocean Node is running run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index a3b0312b3..c3f7fd382 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,66 @@ All notable changes to this project will be documented in this file. Dates are d Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog). 
+#### [v3.0.0](https://github.com/oceanprotocol/ocean-node/compare/v2.1.1...v3.0.0) + +- Bump basic-ftp from 5.2.1 to 5.3.0 [`#1332`](https://github.com/oceanprotocol/ocean-node/pull/1332) +- fix(#1327): use container finishedAt to compute algo stop time on crash [`#1331`](https://github.com/oceanprotocol/ocean-node/pull/1331) +- Bump axios from 1.13.6 to 1.15.0 [`#1323`](https://github.com/oceanprotocol/ocean-node/pull/1323) +- Bump follow-redirects from 1.15.11 to 1.16.0 [`#1326`](https://github.com/oceanprotocol/ocean-node/pull/1326) +- fix cpu pinning for benchmark env [`#1328`](https://github.com/oceanprotocol/ocean-node/pull/1328) +- Bump protobufjs from 7.5.4 to 7.5.5 [`#1333`](https://github.com/oceanprotocol/ocean-node/pull/1333) +- Bugs/fix_persistent_storage [`#1329`](https://github.com/oceanprotocol/ocean-node/pull/1329) +- switch envs, resources are shared on multiple envs and are exclusive [`#1303`](https://github.com/oceanprotocol/ocean-node/pull/1303) +- persistentStorage [`#1318`](https://github.com/oceanprotocol/ocean-node/pull/1318) +- fix: update dockerfile node image [`#1325`](https://github.com/oceanprotocol/ocean-node/pull/1325) +- Bump @tootallnate/once and sqlite3 [`#1322`](https://github.com/oceanprotocol/ocean-node/pull/1322) +- Bump basic-ftp from 5.2.0 to 5.2.1 [`#1321`](https://github.com/oceanprotocol/ocean-node/pull/1321) +- feat(#1317): update quickstart script & networking docs [`#1320`](https://github.com/oceanprotocol/ocean-node/pull/1320) +- chore: add node address in policy server request [`#1319`](https://github.com/oceanprotocol/ocean-node/pull/1319) +- Bump defu from 6.1.4 to 6.1.6 [`#1316`](https://github.com/oceanprotocol/ocean-node/pull/1316) +- Bump lodash from 4.17.23 to 4.18.1 [`#1313`](https://github.com/oceanprotocol/ocean-node/pull/1313) +- add relay listen [`#1315`](https://github.com/oceanprotocol/ocean-node/pull/1315) +- feat: improve c2d docker image security [`#1302`](https://github.com/oceanprotocol/ocean-node/pull/1302) +- fix: add Dockerfile user to docker group dynamically [`#1314`](https://github.com/oceanprotocol/ocean-node/pull/1314) +- make network access configurable [`#1310`](https://github.com/oceanprotocol/ocean-node/pull/1310) +- feat: improve dockerfile [`#1295`](https://github.com/oceanprotocol/ocean-node/pull/1295) +- fix(#1285): use all RPCs defined in `fallbackRPCs` [`#1311`](https://github.com/oceanprotocol/ocean-node/pull/1311) +- feat: update get jobs handler to allow query for running jobs only [`#1299`](https://github.com/oceanprotocol/ocean-node/pull/1299) +- Bump path-to-regexp [`#1306`](https://github.com/oceanprotocol/ocean-node/pull/1306) +- add image scan for vulnerabilities after pull/build [`#1292`](https://github.com/oceanprotocol/ocean-node/pull/1292) +- C2D: build custom images updates [`#1297`](https://github.com/oceanprotocol/ocean-node/pull/1297) +- enforce policyserver policy if exists [`#1304`](https://github.com/oceanprotocol/ocean-node/pull/1304) +- make sure we have enough time to claim [`#1298`](https://github.com/oceanprotocol/ocean-node/pull/1298) +- secure docker [`#1291`](https://github.com/oceanprotocol/ocean-node/pull/1291) +- Bump picomatch [`#1296`](https://github.com/oceanprotocol/ocean-node/pull/1296) +- feat: add constraints check for job resource allocation [`#1270`](https://github.com/oceanprotocol/ocean-node/pull/1270) +- remove engine level resources [`46cce61`](https://github.com/oceanprotocol/ocean-node/commit/46cce6104c377bdfb975baab72a1f5f399dec31a) +- set cpu pinning for 
envs, release cpu once the job is done, handle the case when the node restarts [`bf1a460`](https://github.com/oceanprotocol/ocean-node/commit/bf1a46004cd8a98681941e1a70e7280c0549c024) +- fixed tests envs [`c3c274f`](https://github.com/oceanprotocol/ocean-node/commit/c3c274f4a5e04ebf5261fded091e4e978139af6a) + +#### [v2.1.1](https://github.com/oceanprotocol/ocean-node/compare/v2.1.0...v2.1.1) + +> 25 March 2026 + +- fix: claim payments for jobs without jobIdHash [`#1293`](https://github.com/oceanprotocol/ocean-node/pull/1293) +- Release 2.1.1 [`702a059`](https://github.com/oceanprotocol/ocean-node/commit/702a059a84a6fe64f0bd941f24eb85289909780e) + +#### [v2.1.0](https://github.com/oceanprotocol/ocean-node/compare/v2.0.2...v2.1.0) + +> 24 March 2026 + +- Feature/validate_output_structure [`#1284`](https://github.com/oceanprotocol/ocean-node/pull/1284) +- add jobIdHash and cancelTx [`#1286`](https://github.com/oceanprotocol/ocean-node/pull/1286) +- fix processor nonce [`#1287`](https://github.com/oceanprotocol/ocean-node/pull/1287) +- Release 2.1.0 [`3910376`](https://github.com/oceanprotocol/ocean-node/commit/39103760f65507893eaebc58546d721ae1e61f43) + #### [v2.0.2](https://github.com/oceanprotocol/ocean-node/compare/v2.0.1...v2.0.2) +> 23 March 2026 + - document publish flow & isolated markets [`#1273`](https://github.com/oceanprotocol/ocean-node/pull/1273) - make sure we bill all cases [`#1277`](https://github.com/oceanprotocol/ocean-node/pull/1277) +- Release 2.0.2 [`8d43849`](https://github.com/oceanprotocol/ocean-node/commit/8d43849d20e14b0c8d66aba4aa6a4f877b15d187) #### [v2.0.1](https://github.com/oceanprotocol/ocean-node/compare/v2.0.0...v2.0.1) diff --git a/Dockerfile b/Dockerfile index 6ba093edb..1567fa7e6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,44 +1,53 @@ -FROM ubuntu:22.04 AS base -RUN apt-get update && apt-get -y install bash curl git wget libatomic1 python3 build-essential -COPY .nvmrc /usr/src/app/ -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -ENV NVM_DIR=/usr/local/nvm -RUN mkdir $NVM_DIR -ENV NODE_VERSION=v22.15.0 -# Install nvm with node and npm -RUN curl https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash \ - && source $NVM_DIR/nvm.sh \ - && nvm install $NODE_VERSION \ - && nvm alias default $NODE_VERSION \ - && nvm use default -ENV NODE_PATH=$NVM_DIR/$NODE_VERSION/lib/node_modules -ENV PATH=$NVM_DIR/versions/node/$NODE_VERSION/bin:$PATH -ENV IPFS_GATEWAY='https://ipfs.io/' -ENV ARWEAVE_GATEWAY='https://arweave.net/' - -FROM base AS builder -COPY package*.json /usr/src/app/ -COPY scripts/ /usr/src/app/scripts/ -WORKDIR /usr/src/app/ +FROM node:22.22.2-trixie@sha256:17ccc50fade521c62e2acefd0c975bf5eb2a09632b8717fa7f8b1c2b4e967a07 AS builder +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3 \ + build-essential \ + libatomic1 \ + git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /usr/src/app +COPY package*.json ./ +COPY scripts/ ./scripts/ RUN npm ci +COPY . . 
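+# Build the TypeScript sources, then prune devDependencies so the runner stage copies production-only node_modules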
+RUN npm run build && npm prune --omit=dev + + +FROM node:22.22.2-trixie-slim@sha256:76043ed3132293c26b960ede4358d3c8ba424ee64662cd2d56318b76fcc51c4c AS runner +RUN apt-get update && apt-get install -y --no-install-recommends \ + dumb-init \ + gosu \ + libatomic1 \ + && rm -rf /var/lib/apt/lists/* + +ENV NODE_ENV=production \ + IPFS_GATEWAY='https://ipfs.io/' \ + ARWEAVE_GATEWAY='https://arweave.net/' \ + P2P_ipV4BindTcpPort=9000 \ + P2P_ipV4BindWsPort=9001 \ + P2P_ipV6BindTcpPort=9002 \ + P2P_ipV6BindWsPort=9003 \ + P2P_ipV4BindWssPort=9005 \ + HTTP_API_PORT=8000 + +EXPOSE 9000 9001 9002 9003 9005 8000 + +# Docker group membership is handled at runtime in docker-entrypoint.sh by +# inspecting the GID of /var/run/docker.sock, so it works across hosts. + +WORKDIR /usr/src/app + +COPY --chown=node:node --from=builder /usr/src/app/dist ./dist +COPY --chown=node:node --from=builder /usr/src/app/node_modules ./node_modules +COPY --chown=node:node --from=builder /usr/src/app/schemas ./schemas +COPY --chown=node:node --from=builder /usr/src/app/package.json ./ +COPY --chown=node:node --from=builder /usr/src/app/config.json ./ + +RUN mkdir -p databases c2d_storage logs +COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh +RUN chmod +x /usr/local/bin/docker-entrypoint.sh -FROM base AS runner -COPY . /usr/src/app -WORKDIR /usr/src/app/ -COPY --from=builder /usr/src/app/node_modules/ /usr/src/app/node_modules/ -RUN npm run build -ENV P2P_ipV4BindTcpPort=9000 -EXPOSE 9000 -ENV P2P_ipV4BindWsPort=9001 -EXPOSE 9001 -ENV P2P_ipV6BindTcpPort=9002 -EXPOSE 9002 -ENV P2P_ipV6BindWsPort=9003 -EXPOSE 9003 -ENV P2P_ipV4BindWssPort=9005 -EXPOSE 9005 -ENV HTTP_API_PORT=8000 -EXPOSE 8000 -ENV NODE_ENV='production' -CMD ["npm","run","start"] +ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] +CMD ["node", "--max-old-space-size=28784", "--trace-warnings", "--experimental-specifier-resolution=node", "dist/index.js"] diff --git a/README.md b/README.md index f6b404ac0..c3d1d33ba 100644 --- a/README.md +++ b/README.md @@ -147,7 +147,8 @@ Your node is now running. 
To start additional nodes, repeat these steps in a new - [API Endpoints](docs/API.md) - [Environmental Variables](docs/env.md) - [Database Guide](docs/database.md) -- [Storage Types](docs/Storage.md) +- [Asset Storage Types](docs/Storage.md) +- [Persistent storage for c2d jobs](docs/persistentStorage.md) - [Testing Guide](docs/testing.md) - [Network Configuration](docs/networking.md) - [Logging & accessing logs](docs/networking.md) diff --git a/config.json b/config.json index 2fa5f640b..42d364183 100644 --- a/config.json +++ b/config.json @@ -93,57 +93,57 @@ "claimDurationTimeout": 3600, "validateUnsignedDDO": true, "jwtSecret": "ocean-node-secret", + "enableBenchmark": false, "dockerComputeEnvironments": [ { "socketPath": "/var/run/docker.sock", - "resources": [ + "environments": [ { - "id": "disk", - "total": 1 - } - ], - "storageExpiry": 604800, - "maxJobDuration": 3600, - "minJobDuration": 60, - "access": { - "addresses": [], - "accessLists": [] - }, - "fees": { - "8996": [ - { - "prices": [ + "storageExpiry": 604800, + "maxJobDuration": 3600, + "minJobDuration": 60, + "resources": [ + { + "id": "disk", + "total": 1 + } + ], + "access": { + "addresses": [], + "accessLists": [] + }, + "fees": { + "8996": [ { - "id": "cpu", - "price": 1 + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] } ] - } - ] - }, - "free": { - "maxJobDuration": 3600, - "minJobDuration": 60, - "maxJobs": 3, - "access": { - "addresses": [], - "accessLists": [] - }, - "resources": [ - { - "id": "cpu", - "max": 1 - }, - { - "id": "ram", - "max": 1 }, - { - "id": "disk", - "max": 1 + "free": { + "maxJobDuration": 3600, + "maxJobs": 3, + "resources": [ + { + "id": "cpu", + "max": 1 + }, + { + "id": "ram", + "max": 1 + }, + { + "id": "disk", + "max": 1 + } + ] } - ] - } + } + ] } ] } diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100644 index 000000000..41d2b4473 --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,19 @@ +#!/bin/sh +set -e + +# Fix ownership of directories that may be mounted as volumes (owned by root). +# Runs as root, then drops to 'node' user via gosu. +chown -R node:node /usr/src/app/databases /usr/src/app/c2d_storage /usr/src/app/logs 2>/dev/null || true + +# Add node user to the docker group matching the host's /var/run/docker.sock GID, +# so compute jobs can access the socket regardless of the host's docker GID. +if [ -S /var/run/docker.sock ]; then + SOCK_GID=$(stat -c '%g' /var/run/docker.sock) + if ! getent group "$SOCK_GID" > /dev/null 2>&1; then + groupadd -g "$SOCK_GID" dockerhost 2>/dev/null || true + fi + DOCKER_GROUP=$(getent group "$SOCK_GID" | cut -d: -f1) + usermod -aG "$DOCKER_GROUP" node +fi + +exec gosu node dumb-init -- "$@" diff --git a/docs/API.md b/docs/API.md index b141a61d9..4abcc55d4 100644 --- a/docs/API.md +++ b/docs/API.md @@ -1586,3 +1586,180 @@ returns job result #### Response File content + +--- + +## Persistent Storage + +### `HTTP` POST /api/services/persistentStorage/buckets + +#### Description + +Create a new persistent storage bucket. Bucket ownership is set to the request `consumerAddress`. 
+ +#### Request Headers + +| name | type | required | description | +| --------------- | ------ | -------- | ----------- | +| Authorization | string | | auth token (optional; depends on node auth configuration) | + +#### Request Body + +```json +{ + "consumerAddress": "0x...", + "signature": "0x...", + "nonce": "123", + "accessLists": [] +} +``` + +#### Response (200) + +```json +{ + "bucketId": "uuid", + "owner": "0x...", + "accessList": [] +} +``` + +--- + +### `HTTP` GET /api/services/persistentStorage/buckets + +#### Description + +List buckets for a given `owner`. Results are filtered by bucket access lists for the calling consumer. + +#### Query Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | ----------- | +| consumerAddress | string | v | consumer address | +| signature | string | v | signed message (consumerAddress + nonce + command) | +| nonce | string | v | request nonce | +| chainId | number | v | chain id (used by auth/signature checks) | +| owner | string | v | bucket owner to filter by | + +#### Response (200) + +```json +[ + { + "bucketId": "uuid", + "owner": "0x...", + "createdAt": 1710000000, + "accessLists": [] + } +] +``` + +--- + +### `HTTP` GET /api/services/persistentStorage/buckets/:bucketId/files + +#### Description + +List files in a bucket. + +#### Query Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | ----------- | +| consumerAddress | string | v | consumer address | +| signature | string | v | signed message (consumerAddress + nonce + command) | +| nonce | string | v | request nonce | + +#### Response (200) + +```json +[ + { + "bucketId": "uuid", + "name": "hello.txt", + "size": 123, + "lastModified": 1710000000 + } +] +``` + +--- + +### `HTTP` GET /api/services/persistentStorage/buckets/:bucketId/files/:fileName/object + +#### Description + +Return the `fileObject` for a specific file in a bucket (useful for passing references to other subsystems like compute). + +#### Query Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | ----------- | +| consumerAddress | string | v | consumer address | +| signature | string | v | signed message (consumerAddress + nonce + command) | +| nonce | string | v | request nonce | + +#### Response (200) + +```json +{ + "type": "nodePersistentStorage", + "bucketId": "uuid", + "fileName": "hello.txt" +} +``` + +--- + +### `HTTP` POST /api/services/persistentStorage/buckets/:bucketId/files/:fileName + +#### Description + +Upload a file to a bucket. The request body is treated as raw bytes. + +#### Query Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | ----------- | +| consumerAddress | string | v | consumer address | +| signature | string | v | signed message (consumerAddress + nonce + command) | +| nonce | string | v | request nonce | + +#### Request Body + +Raw bytes (any content-type). + +#### Response (200) + +```json +{ + "bucketId": "uuid", + "name": "hello.txt", + "size": 123, + "lastModified": 1710000000 +} +``` + +--- + +### `HTTP` DELETE /api/services/persistentStorage/buckets/:bucketId/files/:fileName + +#### Description + +Delete a file from a bucket. 
+ +#### Query Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | ----------- | +| consumerAddress | string | v | consumer address | +| signature | string | v | signed message (consumerAddress + nonce + command) | +| nonce | string | v | request nonce | +| chainId | number | v | chain id (used by auth/signature checks) | + +#### Response (200) + +```json +{ "success": true } +``` diff --git a/docs/compute-pricing.md b/docs/compute-pricing.md index e2912bd46..c6598e760 100644 --- a/docs/compute-pricing.md +++ b/docs/compute-pricing.md @@ -5,8 +5,11 @@ This guide explains how to configure your node’s Docker compute environments a ## Overview - **Configuration**: Define compute environments via the `DOCKER_COMPUTE_ENVIRONMENTS` environment variable (JSON) or via `config.json` under `dockerComputeEnvironments`. +- **Environment**: A group of resources, payment settings, and access lists. - **Resources**: Each environment declares resources (e.g. `cpu`, `ram`, `disk`, and optionally GPUs). You must declare a `disk` resource. - **Pricing**: For each chain and fee token, you set a `price` per resource. Cost is computed as **price × amount × duration (in minutes, rounded up)**. +- **Example**: A job using 2 `cpu` at price 1 for 90 seconds is billed for 2 minutes: 1 × 2 × 2 = 4 units of the fee token. +- **Free**: Environments that do not require payment for resources, but are usually very limited in the resources available and in job duration. +- **Image building**: **Free jobs cannot build images** (Dockerfiles are not allowed). For **paid jobs**, **image build time counts toward billable duration** and also consumes the job’s `maxJobDuration`. ## Pricing Units diff --git a/docs/env.md b/docs/env.md index beeae0180..b0cf6e9af 100644 --- a/docs/env.md +++ b/docs/env.md @@ -1,473 +1,145 @@ -# Environmental Variables - -Environmental variables are also tracked in `ENVIRONMENT_VARIABLES` within `src/utils/constants.ts`. Descriptions and example values are provided below: - -## Core - -- `PRIVATE_KEY` (Required): The private key for the node, required for node operations. Example: `"0x1d751ded5a32226054cd2e71261039b65afb9ee1c746d055dd699b1150a5befc"` -- `CONFIG_PATH`: Absolute path to JSON config file -- `RPCS`: JSON object defining RPC endpoints for various networks. Example: `"{ \"11155420\":{ \"rpc\":\"https://sepolia.optimism.io\", \"fallbackRPCs\": [\"https://public.stackup.sh/api/v1/node/optimism-sepolia\"], \"chainId\": 11155420, \"network\": \"optimism-sepolia\", \"chunkSize\": 1000 }}"` -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `IPFS_GATEWAY`: The gateway URL for IPFS, used for downloading files from IPFS. Example: `"https://ipfs.io/"` -- `ARWEAVE_GATEWAY`: The gateway URL for Arweave, used for downloading files from Arweave. Example: `"https://arweave.net/"` -- `LOAD_INITIAL_DDOS`: If set, the node will load initial DDOs from JSON files at startup. This is useful for testing or bootstrapping the network with predefined data. Example: `false` -- `FEE_TOKENS`: Mapping of chain IDs to token addresses for setting fees in the network. Example: `"{ \"1\": \"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\", ...}"` -- `FEE_AMOUNT`: Specifies the fee amount and unit (e.g., MB for megabytes). Example: `"{ \"amount\": 1, \"unit\": \"MB\" }"` -- `ADDRESS_FILE`: File location where Ocean contract addresses are saved.
Example: `"ADDRESS_FILE=${HOME}/.ocean/ocean-contracts/artifacts/address.json"` -- `NODE_ENV`: Typically used to specify the environment (e.g., development, production) the node is running in. Example: `'development'` -- `AUTHORIZED_DECRYPTERS`: A JSON array of addresses that are authorized to decrypt data. Example: `"['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']"` -- `AUTHORIZED_DECRYPTERS_LIST`: AccessList contract addresses (per chain). If present, only accounts present on the given access lists can decrypt data. Example: `"{ \"8996\": [\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"] }"` -- `OPERATOR_SERVICE_URL`: Configures C2D cluster URLs for the node. Example: `"[\"http://example.c2d.cluster1.com\",\"http://example.cd2.cluster2.com\"]"` -- `INTERFACES`: Network interfaces the node supports, e.g., HTTP and P2P. By default, if not specified, both are supported. Example: `"[\"HTTP\",\"P2P\"]"` -- `ALLOWED_VALIDATORS`: Array of addresses for allowed validators to verify asset signatures before indexing. Example: `"[\"0x123\",\"0x456\"]"` -- `ALLOWED_VALIDATORS_LIST`: Array of access list addresses (per chain) for allowed validators to verify asset signatures before indexing. Example: `"{ \"8996\": [\"0x123\",\"0x456\"]"` -- `INDEXER_INTERVAL`: Sets the interval in milliseconds for the indexer to crawl. The default is 30 seconds if not set. Example: `10000` -- `INDEXER_NETWORKS`: Specifies the networks the Indexer will crawl. If not set, the Indexer will index all networks defined in the RPCS environment variable. If set to an empty string, indexing will be disabled. Example: `[1, 137]` -- `ALLOWED_ADMINS`: Sets the public address of accounts which have access to admin endpoints e.g. shutting down the node. Example: `"[\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"]"` -- `ALLOWED_ADMINS_LIST`: Array of access list addresses (per chain) for accounts that have access to admin endpoints. Example: `"{ \"8996\": [\"0x123\",\"0x456\"]"` -- `RATE_DENY_LIST`: Blocked list of IPs and peer IDs. Example: `"{ \"peers\": [\"16Uiu2HAkuYfgjXoGcSSLSpRPD6XtUgV71t5RqmTmcqdbmrWY9MJo\"], \"ips\": [\"127.0.0.1\"] }"` -- `MAX_REQ_PER_MINUTE`: Number of requests per minute allowed by the same client (IP or Peer id). Example: `30` -- `MAX_CONNECTIONS_PER_MINUTE`: Max number of requests allowed per minute (all clients). Example: `120` -- `MAX_CHECKSUM_LENGTH`: Define the maximum length for a file if checksum is required (Mb). Example: `10` -- `IS_BOOTSTRAP`: Is this node to be used as bootstrap node or not. Default is `false`. -- `AUTHORIZED_PUBLISHERS`: Authorized list of publishers. If present, Node will only index assets published by the accounts in the list. Example: `"[\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"]"` -- `AUTHORIZED_PUBLISHERS_LIST`: AccessList contract addresses (per chain). If present, Node will only index assets published by the accounts present on the given access lists. Example: `"{ \"8996\": [\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"] }"` -- `VALIDATE_UNSIGNED_DDO`: If set to `false`, the node will not validate unsigned DDOs and will request a signed message with the publisher address, nonce and signature. Default is `true`. Example: `false` -- `JWT_SECRET`: Secret used to sign JWT tokens. Default is `ocean-node-secret`. 
Example: `"my-secret-jwt-token"` -- `NODE_OWNER_INFO`: Optional JSON object returned by the root endpoint as `ownerInfo`. Example: `"{\"imprint\":{\"legalName\":\"Example Ocean Services GmbH\"},\"termsAndConditions\":{\"url\":\"https://example.com/terms\"},\"anyCustomSection\":{\"foo\":\"bar\"}}"` - -## Database - -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` - -## Database - -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` - -## Database - -- `DB_URL`: URL for connecting to the database. 
Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` - -## Database - -- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` -- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` -- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` -- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` -- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` -- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` -- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` -- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` -- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` -- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` -- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` - -## Payments - -- `ESCROW_CLAIM_TIMEOUT`: Amount of time reserved to claim a escrow payment, in seconds. Defaults to `3600`. Example: `3600` - -## Logs - -- `LOG_LEVEL`: Define the default log level. Example: `debug` -- `LOG_CONSOLE`: Write logs to the console. Default is `false`, but becomes `true` if neither `LOG_FILES` or `LOG_DB` are set. -- `LOG_FILES`: Write logs to files. 
Default is `false` -- `LOG_DB`: Write logs to noSQL database. Default is `false` -- `UNSAFE_URLS`: Array or regular expression URLs to be excluded from access.Example: ["^.*(169.254.169.254).*","^.*(127.0.0.1).*"] - -## HTTP - -- `HTTP_API_PORT`: Port number for the HTTP API. Example: `8000` -- `HTTP_CERT_PATH`: Absolute path to the TLS certificate file. If provided along with `HTTP_KEY_PATH`, the node will start an HTTPS server. Example: `"/etc/letsencrypt/live/example.com/fullchain.pem"` -- `HTTP_KEY_PATH`: Absolute path to the TLS private key file. If provided along with `HTTP_CERT_PATH`, the node will start an HTTPS server. Example: `"/etc/letsencrypt/live/example.com/privkey.pem"` - -## P2P - -- `P2P_ENABLE_IPV4`: Enable IPv4 connectivity. Defaults: `True` -- `P2P_ENABLE_IPV6`: Enable IPv6 connectivity. Defaults: `True` -- `P2P_ipV4BindAddress`: Bind address for IPV4. Defaults to `0.0.0.0`. Example: `"0.0.0.0"` -- `P2P_ipV4BindTcpPort`: Port used on IPv4 TCP connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` -- `P2P_ipV4BindWsPort`: Port used on IPv4 WS connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` -- `P2P_ipV6BindAddress`: Bind address for IPV6. Defaults to `::1`. Example: `"::1"` -- `P2P_ipV6BindTcpPort`: Port used on IPv6 TCP connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` -- `P2P_ipV6BindWsPort`: Port used on IPv6 WS connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` -- `P2P_ANNOUNCE_ADDRESSES`: List of addresses to announce to the network. Example: `"[\"/ip4/1.2.3.4/tcp/8000\"]"` - - To enable SNI (Server Name Indication) with autoTLS, include `/tls/ws` or `/tls/wss` addresses: - - `"["/ip4//tcp/9001/tls/ws"]"` - TLS WebSocket - - `"["/ip4//tcp/9005/tls/wss"]"` - TLS WebSocket Secure - -- `P2P_ANNOUNCE_PRIVATE`: Announce private IPs. Default: `True` -- `P2P_pubsubPeerDiscoveryInterval`: Interval (in ms) for discovery using pubsub. Defaults to `10000` (three seconds). Example: `10000` -- `P2P_dhtMaxInboundStreams`: Maximum number of DHT inbound streams. Defaults to `500`. Example: `500` -- `P2P_dhtMaxOutboundStreams`: Maximum number of DHT outbound streams. Defaults to `500`. Example: `500` -- `P2P_DHT_FILTER`: Filter address in DHT. 0 = (Default) No filter 1. Filter private ddresses. 2. Filter public addresses -- `P2P_mDNSInterval`: Interval (in ms) for discovery using mDNS. Defaults to `20000` (20 seconds). Example: `20000` -- `P2P_connectionsMaxParallelDials`: Maximum number of parallel dials. Defaults to `150`. Example: `150` -- `P2P_connectionsDialTimeout`: Timeout for dial commands. Defaults to `10000` (10 seconds). Example: `10000` -- `P2P_ENABLE_UPNP`: Enable UPNP gateway discovery. Default: `True` -- `P2P_ENABLE_AUTONAT`: Enable AutoNAT discovery. Default: `True` -- `P2P_ENABLE_CIRCUIT_RELAY_SERVER`: Enable Circuit Relay Server. It will help the network but increase your bandwidth usage. Should be disabled for edge nodes. Default: `True` -- `P2P_CIRCUIT_RELAYS`: Numbers of relay servers. Default: `0` -- `P2P_BOOTSTRAP_NODES` : List of bootstrap nodes. Defults to OPF nodes. Example: ["/dns4/node3.oceanprotocol.com/tcp/9000/p2p/"] -- `P2P_BOOTSTRAP_TIMEOUT` : How long to wait before discovering bootstrap nodes. In ms. 
Default: 2000 ms -- `P2P_BOOTSTRAP_TAGNAME` : Tag a bootstrap peer with this name before "discovering" it. Default: 'bootstrap' -- `P2P_BOOTSTRAP_TAGVALUE` : The bootstrap peer tag will have this value (default: 50) -- `P2P_BOOTSTRAP_TTL` : Cause the bootstrap peer tag to be removed after this number of ms. Default: 120000 ms -- `P2P_FILTER_ANNOUNCED_ADDRESSES`: CIDR filters to filter announced addresses. Default: ["172.15.0.0/24"] (docker ip range). Example: ["192.168.0.1/27"] -- `P2P_MIN_CONNECTIONS`: The minimum number of connections below which libp2p will start to dial peers from the peer book. Setting this to 0 disables this behaviour. Default: 1 -- `P2P_MAX_CONNECTIONS`: The maximum number of connections libp2p is willing to have before it starts pruning connections to reduce resource usage. Default: 300 -- `P2P_AUTODIALPEERRETRYTHRESHOLD`: When we've failed to dial a peer, do not autodial them again within this number of ms. Default: 1000 \* 120 -- `P2P_AUTODIALCONCURRENCY`: When dialling peers from the peer book to keep the number of open connections, add dials for this many peers to the dial queue at once. Default: 5 -- `P2P_MAXPEERADDRSTODIAL`: Maximum number of addresses allowed for a given peer before giving up. Default: 5 -- `P2P_AUTODIALINTERVAL`: Auto dial interval (miliseconds). Amount of time between close and open of new peer connection. Default: 5000 -- `P2P_ENABLE_NETWORK_STATS`: Enables 'getP2pNetworkStats' http endpoint. Since this contains private informations (like your ip addresses), this is disabled by default - -## Policy Server - -- `POLICY_SERVER_URL`: URI definition of PolicyServer, if any. See [the policy server documentation for more details](docs/PolicyServer.md). - -## Additional Nodes (Test Environments) - -- `NODE1_PRIVATE_KEY`: Used on test environments, specifically CI, represents the private key for node 1. Example: `"0xfd5c1ccea015b6d663618850824154a3b3fb2882c46cefb05b9a93fea8c3d215"` -- `NODE2_PRIVATE_KEY`: Used on test environments, specifically CI, represents the private key for node 2. Example: `"0x1263dc73bef43a9da06149c7e598f52025bf4027f1d6c13896b71e81bb9233fb"` - -## Cron Jobs - -- `CRON_DELETE_DB_LOGS`: Delete old logs from database Cron expression. Example: `0 0 * * *` (runs every day at midnight) -- `CRON_CLEANUP_C2D_STORAGE`: Clear c2d expired resources/storage and delete old jobs. Example: `*/5 * * * *` (runs every 5 minutes) - -## Compute - -The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable is used to configure Docker-based compute environments in Ocean Node. This guide will walk you through the options available for defining `DOCKER_COMPUTE_ENVIRONMENTS` and how to set it up correctly. For configuring compute environments and setting prices for each resource (including pricing units and examples), see [Compute pricing](compute-pricing.md). - -Example Configuration -The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of objects, where each object represents a Docker compute environment configuration. Below is an example configuration: - -`Disk` and `Ram` resources are always expressed in GB. 
+# Ocean Node Networking -```json -[ - { - "socketPath": "/var/run/docker.sock", - "imageRetentionDays": 7, - "imageCleanupInterval": 86400, - "resources": [ - { - "id": "disk", - "total": 10 - } - ], - "storageExpiry": 604800, - "maxJobDuration": 3600, - "minJobDuration": 60, - "access": { - "addresses": ["0x123", "0x456"], - "accessLists": [] - }, - "fees": { - "1": [ - { - "feeToken": "0x123", - "prices": [ - { - "id": "cpu", - "price": 1 - } - ] - } - ] - }, - "free": { - "maxJobDuration": 60, - "minJobDuration": 10, - "maxJobs": 3, - "access": { - "addresses": [], - "accessLists": ["0x789"] - }, - "resources": [ - { - "id": "cpu", - "max": 1 - }, - { - "id": "ram", - "max": 1 - }, - { - "id": "disk", - "max": 1 - } - ] - } - } -] -``` +For other nodes (and browsers) to reach your node, it must be reachable at a stable, publicly routable address. Work through the options below in order — stop at the first one that applies to your setup. -#### Configuration Options - -- **socketPath**: Path to the Docker socket (e.g., docker.sock). -- **imageRetentionDays** - how long docker images are kept, in days. Default: 7 -- **imageCleanupInterval** - how often to run cleanup for docker images, in seconds. Min: 3600 (1hour), Default: 86400 (24 hours) -- **paymentClaimInterval** - how often to run payment claiming, in seconds. Default: 3600 (1 hour) -- **storageExpiry**: Amount of seconds for storage expiry.(Mandatory) -- **maxJobDuration**: Maximum duration in seconds for a job.(Mandatory) -- **minJobDuration**: Minimum duration in seconds for a job.(Mandatory) -- **access**: Access control configuration for paid compute jobs. If both `addresses` and `accessLists` are empty, all addresses are allowed. - - **addresses**: Array of Ethereum addresses allowed to run compute jobs. If empty and no access lists are configured, all addresses are allowed. - - **accessLists**: Array of AccessList contract addresses. Users holding NFTs from these contracts can run compute jobs. Checked across all supported networks. -- **fees**: Fee structure for the compute environment. - - **feeToken**: Token address for the fee. - - **prices**: Array of resource pricing information. - - **id**: Resource type (e.g., `cpu`, `ram`, `disk`). - - **price**: Price per unit of the resource. -- **resources**: Array of resources available in the compute environment. - - **id**: Resource type (e.g., `cpu`, `ram`, `disk`). - - **total**: Total number of the resource available. - - **min**: Minimum number of the resource needed for a job. - - **max**: Maximum number of the resource for a job. -- **free**: Optional configuration for free jobs. - - **storageExpiry**: Amount of seconds for storage expiry for free jobs. - - **maxJobDuration**: Maximum duration in seconds for a free job. - - **minJobDuration**: Minimum duration in seconds for a free job. - - **maxJobs**: Maximum number of simultaneous free jobs. - - **access**: Access control configuration for free compute jobs. Works the same as the main `access` field. - - **addresses**: Array of Ethereum addresses allowed to run free compute jobs. - - **accessLists**: Array of AccessList contract addresses for free compute access control. - - **resources**: Array of resources available for free jobs. - - **id**: Resource type (e.g., `cpu`, `ram`, `disk`). - - **total**: Total number of the resource available. - - **min**: Minimum number of the resource needed for a job. - - **max**: Maximum number of the resource for a job. 
- -### Docker Registry Authentication - -- `DOCKER_REGISTRY_AUTHS`: JSON object mapping Docker registry URLs to authentication credentials. Used for accessing private Docker/OCI registries when validating and pulling Docker images. Each registry entry must provide either `username`+`password` or `auth`. Example: +## Option 1: Static Public IP -```json -{ - "https://registry-1.docker.io": { - "username": "myuser", - "password": "mypassword" - }, - "https://ghcr.io": { - "username": "myuser", - "password": "ghp_..." - }, - "https://registry.gitlab.com": { - "auth": "glpat-..." - } -} -``` +If your machine has a static public IP directly assigned to it (common in VPS/cloud environments), set `P2P_ANNOUNCE_ADDRESSES` to announce that address. The quickstart script does this automatically when you provide your IP or domain name. -**Configuration Options:** +Example for a node with public IP `1.2.3.4`, using ports 9000 (TCP) and 9001 (WebSocket/TLS): -- **Registry URL** (key): The full registry URL including protocol (e.g., `https://registry-1.docker.io`, `https://ghcr.io`, `https://registry.gitlab.com`) -- **username** (optional): Username for registry authentication. Required if using password-based auth. -- **password** (optional): Password or personal access token for registry authentication. Required if using username-based auth. -- **auth** (optional): Authentication token (alternative to username+password). Required if not using username+password. +```bash +P2P_ANNOUNCE_ADDRESSES='[ + "/ip4/1.2.3.4/tcp/9000", + "/ip4/1.2.3.4/tcp/9001/ws", + "/ip4/1.2.3.4/tcp/9001/tls/ws" +]' +``` -**Notes:** +The `/tls/ws` entry enables [AutoTLS](#tls-and-sni-server-name-indication) for node-to-browser communication. AutoTLS provisions a certificate and serves TLS at the transport layer on the WebSocket port, making it browser-compatible — no DNS setup required on your part. -- For Docker Hub (`registry-1.docker.io`), you can use your Docker Hub username and password, or a personal access token (PAT) as the password. -- For GitHub Container Registry (GHCR), use your GitHub username with a personal access token (PAT) as the password, or use a token directly. -- For GitLab Container Registry, use a personal access token (PAT) or deploy token. -- The registry URL must match exactly (including protocol) with the registry used in the Docker image reference. -- If no credentials are configured for a registry, the node will attempt unauthenticated access (works for public images only). +## Option 2: Dynamic DNS (no static IP) ---- +If your public IP changes (residential ISP, dynamic VPS), use a Dynamic DNS (DDNS) service to get a stable hostname that always resolves to your current IP. -## Private Docker Registries with Per-Job Authentication +Popular free DDNS providers: [DuckDNS](https://www.duckdns.org/), [No-IP](https://www.noip.com/), [Dynu](https://www.dynu.com/). -In addition to node-level registry authentication via `DOCKER_REGISTRY_AUTHS`, you can provide encrypted Docker registry authentication credentials on a per-job basis. This allows different users to use different private registries or credentials for their compute jobs. +Once you have a hostname (e.g. 
`mynode.duckdns.org`), set up the DDNS client on your machine to keep it updated, then use the hostname in your announce addresses: -### Overview +```bash +P2P_ANNOUNCE_ADDRESSES='[ + "/dns4/mynode.duckdns.org/tcp/9000", + "/dns4/mynode.duckdns.org/tcp/9001/ws", + "/dns4/mynode.duckdns.org/tcp/9001/tls/ws" +]' +``` -The `encryptedDockerRegistryAuth` parameter allows you to securely provide Docker registry credentials that are: +## Option 3: Port Forwarding -- Encrypted using ECIES (Elliptic Curve Integrated Encryption Scheme) with the node's public key -- Validated to ensure proper format (either `auth` string OR `username`+`password`) -- Used only for the specific compute job, overriding node-level configuration if provided +If you are behind a NAT router (home network), you need to forward the P2P ports from your router to the machine running the node. -### Encryption Format +1. Find the local IP of your machine (e.g. `192.168.1.50`). +2. Log in to your router admin panel and add port forwarding rules: + - External TCP port `9000` → `192.168.1.50:9000` + - External TCP port `9001` → `192.168.1.50:9001` +3. Find your public IP (e.g. via `curl ifconfig.me`) or set up a DDNS hostname (see Option 2). +4. Set `P2P_ANNOUNCE_ADDRESSES` to your public IP or DDNS hostname as shown above. -The `encryptedDockerRegistryAuth` must be: +If your router supports UPnP, the node can attempt to configure port forwarding automatically. Enable it with: -1. A JSON object matching the Docker registry auth schema (see below) -2. Encrypted using ECIES with the node's public key -3. Hex-encoded as a string +```bash +P2P_ENABLE_UPNP=true +``` -**Auth Schema Format:** +UPnP is not reliable on all routers and should not be relied on as the sole method. -The decrypted JSON must follow this structure: +## Option 4: Circuit Relay (fallback) -```json -{ - "username": "myuser", - "password": "mypassword" -} -``` +If none of the above options are available (strict NAT, no port forwarding, no public IP), use a circuit relay. A relay node proxies traffic between peers, allowing your node to participate in the network without being directly reachable. -OR +Enable the circuit relay client: -```json -{ - "auth": "base64-encoded-username:password" -} +```bash +P2P_ENABLE_CIRCUIT_RELAY_CLIENT=true +P2P_CIRCUIT_RELAYS=1 ``` -OR (all fields present) +Note: circuit relay increases latency and bandwidth usage on the relay node. It should be a last resort — a node running only via relay is a burden on the network and will have degraded performance. -```json -{ - "username": "myuser", - "password": "mypassword", - "auth": "base64-encoded-username:password" -} -``` +Do not enable `P2P_ENABLE_CIRCUIT_RELAY_SERVER` on edge nodes; that setting is for well-connected nodes that want to help others. -**Validation Rules:** +--- -- Either `auth` string must be provided (non-empty), OR -- Both `username` AND `password` must be provided (both non-empty) -- Empty strings are not accepted +## TLS and SNI (Server Name Indication) -### Usage Examples +AutoTLS provisions TLS certificates for your node automatically, enabling P2P node-to-browser communication. It is always active internally — no DNS or certificate setup required on your part. For it to work, you must include a `/tls/ws` entry in `P2P_ANNOUNCE_ADDRESSES`, which the quickstart script does automatically. -#### 1. 
Paid Compute Start (`POST /api/services/compute`) +AutoTLS serves TLS at the transport layer on the WebSocket port, making it standard browser-compatible WSS — no separate port is needed. -```json -{ - "command": "startCompute", - "consumerAddress": "0x...", - "signature": "...", - "nonce": "123", - "environment": "0x...", - "algorithm": { - "meta": { - "container": { - "image": "registry.example.com/myorg/myimage:latest" - } - } - }, - "datasets": [], - "payment": { ... }, - "encryptedDockerRegistryAuth": "0xdeadbeef..." // ECIES encrypted hex string -} -``` +Example `.env` / docker-compose entry: -#### 2. Free Compute Start (`POST /api/services/freeCompute`) - -```json -{ - "command": "freeStartCompute", - "consumerAddress": "0x...", - "signature": "...", - "nonce": "123", - "environment": "0x...", - "algorithm": { - "meta": { - "container": { - "image": "ghcr.io/myorg/myimage:latest" - } - } - }, - "datasets": [], - "encryptedDockerRegistryAuth": "0xdeadbeef..." // ECIES encrypted hex string -} +```bash +P2P_ANNOUNCE_ADDRESSES='[ + "/ip4//tcp/9000", + "/ip4//tcp/9001/ws", + "/ip4//tcp/9001/tls/ws" +]' ``` -#### 3. Initialize Compute - -The `initialize` command accepts `encryptedDockerRegistryAuth` as part of the command payload, as it validates the image +Or in `config.json`: ```json { - "command": "initialize", - "datasets": [...], - "algorithm": { - "meta": { - "container": { - "image": "registry.gitlab.com/myorg/myimage:latest" - } - } - }, - "environment": "0x...", - "payment": { ... }, - "consumerAddress": "0x...", - "maxJobDuration": 3600, - "encryptedDockerRegistryAuth": "0xdeadbeef..." // ECIES encrypted hex string + "p2pConfig": { + "announceAddresses": [ + "/ip4//tcp/9000", + "/ip4//tcp/9001/ws", + "/ip4//tcp/9001/tls/ws" + ] + } } ``` -### Encryption Process - -To create `encryptedDockerRegistryAuth`, you need to: +When a TLS certificate is provisioned successfully, you will see logs like: -1. **Prepare the auth JSON object:** +``` +----- A TLS certificate was provisioned ----- +----- TLS addresses: ----- +/ip4//tcp/9001/sni/... +/ip4//tcp/9001/sni/... +----- End of TLS addresses ----- +``` - ```json - { - "username": "myuser", - "password": "mypassword" - } - ``` +## Verifying Connectivity -2. **Get the node's public key** (available via the node's API or P2P interface) +### Check how your node sees itself -3. **Encrypt the JSON string** using ECIES with the node's public key +```bash +curl http://localhost:8000/getP2pPeer?peerId= +``` -4. **Hex-encode the encrypted result** +Look at the `addresses` array in the response. Are any of those IPs/hostnames reachable from outside your network? -### Behavior +```json +{ + "addresses": [ + { "multiaddr": "/ip4/1.2.3.4/tcp/9000", "isCertified": false }, + { "multiaddr": "/ip4/1.2.3.4/tcp/9001/ws", "isCertified": false }, + { "multiaddr": "/ip4/1.2.3.4/tcp/9001/tls/ws", "isCertified": false } + ] +} +``` -- **Priority**: If `encryptedDockerRegistryAuth` is provided, it takes precedence over node-level `DOCKER_REGISTRY_AUTHS` configuration for that specific job -- **Validation**: The encrypted auth is decrypted and validated before the job starts. 
Invalid formats will result in an error -- **Scope**: The credentials are used for: - - Validating the Docker image exists (during initialize) - - Pulling the Docker image (during job execution) -- **Security**: Credentials are encrypted and only decrypted by the node using its private key +### Check how your node is seen by the network -### Error Handling +Ask a known public node to report back what it knows about you: -If `encryptedDockerRegistryAuth` is invalid, you'll receive an error: +```bash +curl https://cp1.oncompute.ai/getP2pPeer?peerId= +``` -- **Decryption failure**: `Invalid encryptedDockerRegistryAuth: failed to parse JSON - [error message]` -- **Schema validation failure**: `Invalid encryptedDockerRegistryAuth: Either 'auth' must be provided, or both 'username' and 'password' must be provided` +If the response is empty or missing your public address, the node is not reachable from the outside. -### Notes +## All P2P Environment Variables -- The `encryptedDockerRegistryAuth` parameter is optional. If not provided, the node will use `DOCKER_REGISTRY_AUTHS` configuration or attempt unauthenticated access -- The registry URL in the Docker image reference must match the registry you're authenticating to -- For Docker Hub, use `registry-1.docker.io` as the registry URL -- Credentials are stored encrypted in the job record and decrypted only when needed for image operations +See [env.md](env.md#p2p) for the full list of P2P configuration options. diff --git a/docs/networking.md b/docs/networking.md index 542cfcd5c..b6621a57a 100644 --- a/docs/networking.md +++ b/docs/networking.md @@ -1,13 +1,12 @@ # Ocean Node Networking -## Networking in cloud environments or DMZ -In order for your node to join the network, the others nodes needs to be able to connect to it. -All options can be controlled using [environment -variables](env.md#p2p) +For other nodes (and browsers) to reach your node, it must be reachable at a stable, publicly routable address. Work through the options below in order — stop at the first one that applies to your setup. -To quickly start your node, you can keep all of the default values,but most likely it will hurt performance. If you want a customised approach, here are the full steps: +If your machine has a static public IP directly assigned to it (common in VPS/cloud environments), set `P2P_ANNOUNCE_ADDRESSES` to announce that address. The quickstart script does this automatically when you provide your IP or domain name. - decide what IP version to use (IPV4 or/and IPv6). You should use both if available. - decide if you want to filter private ips (if you run multiple nodes in a LAN or cloud environment, leave them on) - if you already have an external ip configured on your machine, you are good to go. @@ -57,51 +56,142 @@ When TLS certificates are provisioned, you should see logs like: In order to check connectivity, you can do the following: ### On your node, check and observe how your node sees itself: +Example for a node with public IP `1.2.3.4`, using ports 9000 (TCP) and 9001 (WebSocket/TLS): ```bash -curl http://localhost:8000/getP2pPeer?peerId=16Uiu2HAkwWe6BFQXZWg6zE9X7ExynvXEe9BRTR5Wn3udNs7JpUDx +P2P_ANNOUNCE_ADDRESSES='[ + "/ip4/1.2.3.4/tcp/9000", + "/ip4/1.2.3.4/tcp/9001/ws", + "/ip4/1.2.3.4/tcp/9001/tls/ws" +]' +``` + +The `/tls/ws` entry enables [AutoTLS](#tls-and-sni-server-name-indication) for node-to-browser communication.
AutoTLS provisions a certificate and serves TLS at the transport layer on the WebSocket port, making it browser-compatible — no DNS setup required on your part. + +## Option 2: Dynamic DNS (no static IP) + +If your public IP changes (residential ISP, dynamic VPS), use a Dynamic DNS (DDNS) service to get a stable hostname that always resolves to your current IP. + +Popular free DDNS providers: [DuckDNS](https://www.duckdns.org/), [No-IP](https://www.noip.com/), [Dynu](https://www.dynu.com/). + +Once you have a hostname (e.g. `mynode.duckdns.org`), set up the DDNS client on your machine to keep it updated, then use the hostname in your announce addresses: + +```bash +P2P_ANNOUNCE_ADDRESSES='[ + "/dns4/mynode.duckdns.org/tcp/9000", + "/dns4/mynode.duckdns.org/tcp/9001/ws", + "/dns4/mynode.duckdns.org/tcp/9001/tls/ws" +]' ``` -and observe the addresses section: +## Option 3: Port Forwarding + +If you are behind a NAT router (home network), you need to forward the P2P ports from your router to the machine running the node. + +1. Find the local IP of your machine (e.g. `192.168.1.50`). +2. Log in to your router admin panel and add port forwarding rules: + - External TCP port `9000` → `192.168.1.50:9000` + - External TCP port `9001` → `192.168.1.50:9001` +3. Find your public IP (e.g. via `curl ifconfig.me`) or set up a DDNS hostname (see Option 2). +4. Set `P2P_ANNOUNCE_ADDRESSES` to your public IP or DDNS hostname as shown above. + +If your router supports UPnP, the node can attempt to configure port forwarding automatically. Enable it with: + +```bash +P2P_ENABLE_UPNP=true +``` + +UPnP is not reliable on all routers and should not be relied on as the sole method. + +## Option 4: Circuit Relay (fallback) + +If none of the above options are available (strict NAT, no port forwarding, no public IP), use a circuit relay. A relay node proxies traffic between peers, allowing your node to participate in the network without being directly reachable. + +Enable the circuit relay client: + +```bash +P2P_ENABLE_CIRCUIT_RELAY_CLIENT=true +P2P_CIRCUIT_RELAYS=1 +``` + +Note: circuit relay increases latency and bandwidth usage on the relay node. It should be a last resort — a node running only via relay is a burden on the network and will have degraded performance. + +Do not enable `P2P_ENABLE_CIRCUIT_RELAY_SERVER` on edge nodes; that setting is for well-connected nodes that want to help others. + +--- + +## TLS and SNI (Server Name Indication) + +AutoTLS provisions TLS certificates for your node automatically, enabling P2P node-to-browser communication. It is always active internally — no DNS or certificate setup required on your part. For it to work, you must include a `/tls/ws` entry in `P2P_ANNOUNCE_ADDRESSES`, which the quickstart script does automatically. + +AutoTLS serves TLS at the transport layer on the WebSocket port, making it standard browser-compatible WSS — no separate port is needed. + +Example `.env` / docker-compose entry: + +```bash +P2P_ANNOUNCE_ADDRESSES='[ + "/ip4//tcp/9000", + "/ip4//tcp/9001/ws", + "/ip4//tcp/9001/tls/ws" +]' +``` + +Or in `config.json`: + +```json +{ + "p2pConfig": { + "announceAddresses": [ + "/ip4//tcp/9000", + "/ip4//tcp/9001/ws", + "/ip4//tcp/9001/tls/ws" + ] + } +} +``` + +When a TLS certificate is provisioned successfully, you will see logs like: + +``` +----- A TLS certificate was provisioned ----- +----- TLS addresses: ----- +/ip4//tcp/9001/sni/... +/ip4//tcp/9001/sni/... 
+----- End of TLS addresses ----- +``` + +## Verifying Connectivity + +### Check how your node sees itself + +```bash +curl http://localhost:8000/getP2pPeer?peerId=<your-peer-id> +``` + +Look at the `addresses` array in the response. Are any of those IPs/hostnames reachable from outside your network? ```json { "addresses": [ - { "multiaddr": "/ip4/127.0.0.1/tcp/34227", "isCertified": false }, - { "multiaddr": "/ip4/127.0.0.1/tcp/36913/ws", "isCertified": false }, - { "multiaddr": "/ip4/172.15.0.1/tcp/34227", "isCertified": false }, - { "multiaddr": "/ip4/172.15.0.1/tcp/36913/ws", "isCertified": false }, - { "multiaddr": "/ip4/172.26.53.25/tcp/34227", "isCertified": false }, - { "multiaddr": "/ip4/172.26.53.25/tcp/36913/ws", "isCertified": false }, - { "multiaddr": "/ip6/::1/tcp/41157", "isCertified": false } - ], - "protocols": [ - "/floodsub/1.0.0", - "/ipfs/id/1.0.0", - "/ipfs/id/push/1.0.0", - "/ipfs/ping/1.0.0", - "/libp2p/autonat/1.0.0", - "/libp2p/circuit/relay/0.2.0/hop", - "/libp2p/circuit/relay/0.2.0/stop", - "/libp2p/dcutr", - "/meshsub/1.0.0", - "/meshsub/1.1.0", - "/ocean/nodes/1.0.0", - "/ocean/nodes/1.0.0/kad/1.0.0", - "/ocean/nodes/1.0.0/lan/kad/1.0.0" - ], - "metadata": {}, - "tags": {}, - "id": "16Uiu2HAkwWe6BFQXZWg6zE9X7ExynvXEe9BRTR5Wn3udNs7JpUDx", - "publicKey": "08021221021efd24150c233d689ade0f9f467aa6a5a2969a5f52d70c85caac8681925093e3" + { "multiaddr": "/ip4/1.2.3.4/tcp/9000", "isCertified": false }, + { "multiaddr": "/ip4/1.2.3.4/tcp/9001/ws", "isCertified": false }, + { "multiaddr": "/ip4/1.2.3.4/tcp/9001/tls/ws", "isCertified": false } + ] } ``` -Are any of those IPs reachable from other nodes? ### Check how your node is seen by the network -### To observe how your node is seen by others, start your node, wait a bit and then ask another node to give you details about you: +Ask a known public node to report back what it knows about you: ```bash - curl http://node2.oceanprotocol.com:8000/getP2pPeer?peerId=16Uiu2HAk -wWe6BFQXZWg6zE9X7ExynvXEe9BRTR5Wn3udNs7JpUDx +curl https://cp1.oncompute.ai/getP2pPeer?peerId=<your-peer-id> ``` + +If the response is empty or missing your public address, the node is not reachable from the outside. + +## All P2P Environment Variables + +See [env.md](env.md#p2p) for the full list of P2P configuration options. diff --git a/docs/persistentStorage.md b/docs/persistentStorage.md new file mode 100644 index 000000000..0b78c0f63 --- /dev/null +++ b/docs/persistentStorage.md @@ -0,0 +1,193 @@ +# Persistent Storage + +This document describes Ocean Node **Persistent Storage** at a high level: what it is, how it is structured, how access control works, and how to use it via **P2P commands** and **HTTP endpoints**. + +--- + +## What it is + +Persistent Storage is a simple bucket + file store intended for **long-lived artifacts** that Ocean Node needs to keep across requests and across restarts, and to reference later (e.g. as file objects for compute). + +Key primitives: + +- **Bucket**: a logical container for files. +- **File**: binary content stored inside a bucket. +- **Bucket registry**: a local SQLite table that stores bucket metadata (owner, access lists, createdAt). + +--- + +## Architecture (high level) + +### Components + +- **Handlers (protocol layer)**: `src/components/core/handler/persistentStorage.ts` + - Implements protocol commands such as create bucket, list files, upload, delete, and get buckets. + - Validates auth (token or signature) and applies high-level authorization checks.
+ +- **Persistent storage backends (storage layer)**: `src/components/persistentStorage/*` + - `PersistentStorageFactory`: shared functionality (SQLite bucket registry, access list checks). + - `PersistentStorageLocalFS`: local filesystem backend. + - `PersistentStorageS3`: stub for future S3-compatible backend. + +- **HTTP routes (HTTP interface)**: `src/components/httpRoutes/persistentStorage.ts` + - Exposes REST-ish endpoints under `/api/services/persistentStorage/...` that call the same handlers. + +### Data storage + +Persistent Storage uses two stores: + +1. **Bucket registry (SQLite)** + +- File: `databases/persistentStorage.sqlite` +- Table: `persistent_storage_buckets` +- Columns: + - `bucketId` (primary key) + - `owner` (address, stored as a string) + - `accessListJson` (JSON-encoded access list array) + - `createdAt` (unix timestamp) + +2. **Backend data** + +- `localfs`: writes file bytes to the configured folder under `buckets/<bucketId>/`. +- `s3`: not implemented yet. + +--- + +## Ownership and access control + +### Ownership + +Every bucket has a single **owner** address, stored in the bucket registry. + +- When a bucket is created, the node sets: + - `owner = consumerAddress` (normalized via `ethers.getAddress`) + +### Bucket access list + +Each bucket stores an **AccessList[]** (per-chain lists of access list contract addresses): + +```ts +export interface AccessList { + [chainId: string]: string[] +} +``` + +This access list is used to decide whether a given `consumerAddress` is allowed to interact with a bucket. + +### Where checks happen + +Access checks happen at two levels: + +1. **Backend enforcement** (required) + +- Backend operations `listFiles`, `uploadFile`, `deleteFile`, and `getFileObject` all require `consumerAddress`. +- The base class helper `assertConsumerAllowedForBucket(consumerAddress, bucketId)` loads the bucket ACL and throws `PersistentStorageAccessDeniedError` if the consumer is not allowed. + +2. **Handler enforcement** (command-specific) + +- `createBucket`: additionally checks the node-level allow list `config.persistentStorage.accessLists` (who can create buckets at all). +- `getBuckets`: queries registry rows filtered by `owner` and then: + - if `consumerAddress === owner`: returns all buckets for that owner + - else: filters buckets by the bucket ACL + +### Error behavior + +- Backends throw `PersistentStorageAccessDeniedError` when forbidden. +- Handlers translate that into **HTTP 403** / `status.httpStatus = 403`. + +--- + +## Features + +### Supported today + +- **Create bucket** + - Creates a bucket id (UUID), persists it in SQLite with `owner` and `accessListJson`, and creates a local directory (localfs). + +- **List buckets (by owner)** + - Returns buckets from the registry filtered by `owner` (mandatory arg). + - Applies ACL filtering for non-owners. + +- **Upload file** + - Writes a stream to the backend. + - Enforces bucket ACL. + +- **List files** + - Returns file metadata (`name`, `size`, `lastModified`) for a bucket. + - Enforces bucket ACL. + +- **Delete file** + - Deletes the named file from the bucket. + - Enforces bucket ACL. + +- **getFileObject** + - Returns the fileObject descriptor for c2d use. + - Enforces bucket ACL. + +### Not implemented yet + +- **S3 backend** + - `PersistentStorageS3` exists as a placeholder and currently throws "not implemented". + +--- + +## Configuration + +Persistent storage is controlled by `persistentStorage` in node config.
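+
+A minimal sketch of what this section could look like with the `localfs` backend (the values below are illustrative, not defaults; the chain id `8996` and the access list address are placeholders):
+
+```json
+{
+  "persistentStorage": {
+    "enabled": true,
+    "type": "localfs",
+    "accessLists": [{ "8996": ["0x0000000000000000000000000000000000000001"] }],
+    "options": { "folder": "/var/lib/ocean-node/persistent-storage" }
+  }
+}
+```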
+ +Key fields: + +- `enabled`: boolean +- `type`: `"localfs"` or `"s3"` +- `accessLists`: AccessList[] — node-level allow list for who can create buckets +- `options`: + - localfs: `{ "folder": "/path/to/storage" }` + - s3: `{ endpoint, objectKey, accessKeyId, secretAccessKey, ... }` (future) + +--- + +## Usage + +The typical flow is (a concrete HTTP sketch appears at the end of this document): + +- create a bucket (or use an existing bucket) +- list files +- upload a file if needed +- call `getFileObject` to obtain the file object needed as a c2d reference +- start a c2d job, using the fileObject for its datasets + +### P2P commands + +All persistent storage operations are implemented as protocol commands in the handler: + +- `persistentStorageCreateBucket` +- `persistentStorageGetBuckets` +- `persistentStorageListFiles` +- `persistentStorageGetFileObject` +- `persistentStorageUploadFile` +- `persistentStorageDeleteFile` + +Each command requires authentication (token or signature) based on Ocean Node's auth configuration. + +### HTTP endpoints + +HTTP routes are available under `/api/services/persistentStorage/...` and call the same handlers. See `docs/API.md` for the full parameter lists and examples. + +At a glance: + +- `POST /api/services/persistentStorage/buckets` +- `GET /api/services/persistentStorage/buckets` +- `GET /api/services/persistentStorage/buckets/:bucketId/files` +- `GET /api/services/persistentStorage/buckets/:bucketId/files/:fileName/object` +- `POST /api/services/persistentStorage/buckets/:bucketId/files/:fileName` +- `DELETE /api/services/persistentStorage/buckets/:bucketId/files/:fileName` + +Upload uses the raw request body as bytes and forwards it to the handler as a stream. + +--- + +## Limitations and notes + +- The bucket registry is local to the node (SQLite file). If you run multiple nodes, each node's registry is independent unless you externalize/replicate it. +- `listBuckets(owner)` requires `owner` and only returns buckets that were created with that owner recorded. +- Filenames in `localfs` are constrained (no path separators) to avoid path traversal.
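+
+---
+
+## Example: end-to-end HTTP flow
+
+A sketch of the usage flow above via the HTTP endpoints, assuming token auth. `$NODE`, `$TOKEN`, `$BUCKET_ID`, the `Authorization` header, and `model.bin` are illustrative assumptions; the real auth parameters and response shapes are documented in `docs/API.md`.
+
+```bash
+# 1. Create a bucket; the response carries the new bucketId
+curl -X POST "$NODE/api/services/persistentStorage/buckets" \
+  -H "Authorization: $TOKEN"
+
+# 2. Upload a file (the raw request body is stored as the file bytes)
+curl -X POST "$NODE/api/services/persistentStorage/buckets/$BUCKET_ID/files/model.bin" \
+  -H "Authorization: $TOKEN" \
+  --data-binary @model.bin
+
+# 3. List the files in the bucket
+curl "$NODE/api/services/persistentStorage/buckets/$BUCKET_ID/files" \
+  -H "Authorization: $TOKEN"
+
+# 4. Fetch the fileObject used to reference this file in a c2d job
+curl "$NODE/api/services/persistentStorage/buckets/$BUCKET_ID/files/model.bin/object" \
+  -H "Authorization: $TOKEN"
+```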
diff --git a/package-lock.json b/package-lock.json index 487678832..15ffe37e1 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "ocean-node", - "version": "2.1.1", + "version": "3.0.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "ocean-node", - "version": "2.1.1", + "version": "3.0.1", "hasInstallScript": true, "license": "Apache-2.0", "dependencies": { @@ -37,9 +37,9 @@ "@multiformats/multiaddr": "^12.2.3", "@oceanprotocol/contracts": "^2.6.0", "@oceanprotocol/ddo-js": "^0.2.0", - "axios": "^1.13.5", + "axios": "^1.15.0", "base58-js": "^2.0.0", - "basic-ftp": "^5.2.0", + "basic-ftp": "^5.3.0", "cors": "^2.8.5", "datastore-level": "^12.0.2", "delay": "^5.0.0", @@ -55,10 +55,10 @@ "it-pipe": "^3.0.1", "jsonwebtoken": "^9.0.2", "libp2p": "^3.1.2", - "lodash": "^4.17.23", + "lodash": "^4.18.1", "lzma-purejs-requirejs": "^1.0.0", "node-cron": "^3.0.3", - "sqlite3": "^5.1.7", + "sqlite3": "^6.0.1", "stream-concat": "^1.0.0", "tar": "^7.5.11", "uint8arrays": "^4.0.6", @@ -2798,12 +2798,15 @@ "@ethersproject/strings": "^5.8.0" } }, - "node_modules/@gar/promisify": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", - "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", + "node_modules/@gar/promise-retry": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@gar/promise-retry/-/promise-retry-1.0.3.tgz", + "integrity": "sha512-GmzA9ckNokPypTg10pgpeHNQe7ph+iIKKmhKu3Ob9ANkswreCx7R3cKmY781K8QK3AqVL3xVh9A42JvIAbkkSA==", "license": "MIT", - "optional": true + "optional": true, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } }, "node_modules/@grpc/grpc-js": { "version": "1.14.3", @@ -5225,30 +5228,54 @@ "lodash": "^4.15.0" } }, - "node_modules/@npmcli/fs": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-1.1.1.tgz", - "integrity": "sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", + "node_modules/@npmcli/agent": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-4.0.0.tgz", + "integrity": "sha512-kAQTcEN9E8ERLVg5AsGwLNoFb+oEG6engbqAU2P43gD4JEIkNGMHdVQ096FsOAAYpZPB0RSt0zgInKIAS1l5QA==", "license": "ISC", "optional": true, "dependencies": { - "@gar/promisify": "^1.0.1", - "semver": "^7.3.5" + "agent-base": "^7.1.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.1", + "lru-cache": "^11.2.1", + "socks-proxy-agent": "^8.0.3" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" } }, - "node_modules/@npmcli/move-file": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-1.1.2.tgz", - "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", - "deprecated": "This functionality has been moved to @npmcli/fs", - "license": "MIT", + "node_modules/@npmcli/agent/node_modules/lru-cache": { + "version": "11.3.5", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.3.5.tgz", + "integrity": "sha512-NxVFwLAnrd9i7KUBxC4DrUhmgjzOs+1Qm50D3oF1/oL+r1NpZ4gA7xvG0/zJ8evR7zIKn4vLf7qTNduWFtCrRw==", + "license": "BlueOak-1.0.0", + "optional": true, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@npmcli/fs": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-5.0.0.tgz", + "integrity": 
"sha512-7OsC1gNORBEawOa5+j2pXN9vsicaIOH5cPXxoR6fJOmH6/EXpJB2CajXOu1fPRFun2m1lktEFX11+P89hqO/og==", + "license": "ISC", "optional": true, "dependencies": { - "mkdirp": "^1.0.4", - "rimraf": "^3.0.2" + "semver": "^7.3.5" }, "engines": { - "node": ">=10" + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/@npmcli/redact": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/redact/-/redact-4.0.0.tgz", + "integrity": "sha512-gOBg5YHMfZy+TfHArfVogwgfBeQnKbbGo3pSUyK/gSI0AVu+pEiDVcKlQb0D8Mg1LNRZILZ6XG8I5dJ4KuAd9Q==", + "license": "ISC", + "optional": true, + "engines": { + "node": "^20.17.0 || >=22.9.0" } }, "node_modules/@oceanprotocol/contracts": { @@ -6932,16 +6959,6 @@ "tslib": "^2.8.0" } }, - "node_modules/@tootallnate/once": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", - "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", - "license": "MIT", - "optional": true, - "engines": { - "node": ">= 6" - } - }, "node_modules/@tootallnate/quickjs-emscripten": { "version": "0.23.0", "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", @@ -7852,11 +7869,14 @@ } }, "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-4.0.0.tgz", + "integrity": "sha512-a1wflyaL0tHtJSmLSOVybYhy22vRih4eduhhrkcjgrWGnRfrZtovJ2FRjxuTtkkj47O/baf0R86QU5OuYpz8fA==", "license": "ISC", - "optional": true + "optional": true, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } }, "node_modules/abort-controller": { "version": "3.0.0", @@ -7999,30 +8019,17 @@ "version": "7.1.4", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", - "dev": true, + "devOptional": true, "license": "MIT", "engines": { "node": ">= 14" } }, - "node_modules/agentkeepalive": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", - "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", - "license": "MIT", - "optional": true, - "dependencies": { - "humanize-ms": "^1.2.1" - }, - "engines": { - "node": ">= 8.0.0" - } - }, "node_modules/aggregate-error": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "devOptional": true, + "dev": true, "license": "MIT", "dependencies": { "clean-stack": "^2.0.0", @@ -8134,13 +8141,6 @@ "node": ">=8" } }, - "node_modules/aproba": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.1.0.tgz", - "integrity": "sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew==", - "license": "ISC", - "optional": true - }, "node_modules/archy": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", @@ -8148,36 +8148,6 @@ "dev": true, "license": "MIT" }, - "node_modules/are-we-there-yet": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", - "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", - "deprecated": "This package is no longer supported.", - "license": "ISC", - "optional": true, - "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, - "node_modules/are-we-there-yet/node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "optional": true, - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -8485,21 +8455,30 @@ } }, "node_modules/axios": { - "version": "1.13.5", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", - "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.15.0.tgz", + "integrity": "sha512-wWyJDlAatxk30ZJer+GeCWS209sA42X+N5jU2jy6oHTp7ufw8uzUTVFBX9+wTfAlhiJXGS0Bq7X6efruWjuK9Q==", "license": "MIT", "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", - "proxy-from-env": "^1.1.0" + "proxy-from-env": "^2.1.0" + } + }, + "node_modules/axios/node_modules/proxy-from-env": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-2.1.0.tgz", + "integrity": "sha512-cJ+oHTW1VAEa8cJslgmUZrc+sjRKgAKl3Zyse6+PV38hZe/V6Z14TbCuXcan9F9ghlz4QrFr2c92TNF82UkYHA==", + "license": "MIT", + "engines": { + "node": ">=10" } }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "devOptional": true, + "dev": true, "license": "MIT" }, "node_modules/base-x": { @@ -8551,9 +8530,9 @@ } }, "node_modules/basic-ftp": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.2.0.tgz", - "integrity": "sha512-VoMINM2rqJwJgfdHq6RiUudKt2BV+FY5ZFezP/ypmwayk68+NzzAQy4XXLlqsGD4MCzq3DrmNFD/uUmBJuGoXw==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.3.0.tgz", + "integrity": "sha512-5K9eNNn7ywHPsYnFwjKgYH8Hf8B5emh7JKcPaVjjrMJFQQwGpwowEnZNEtHs7DfR7hCZsmaK3VA4HUK0YarT+w==", "license": "MIT", "engines": { "node": ">=10.0.0" @@ -8948,137 +8927,122 @@ } }, "node_modules/cacache": { - "version": "15.3.0", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-15.3.0.tgz", - "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", + "version": "20.0.4", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-20.0.4.tgz", + "integrity": "sha512-M3Lab8NPYlZU2exsL3bMVvMrMqgwCnMWfdZbK28bn3pK6APT/Te/I8hjRPNu1uwORY9a1eEQoifXbKPQMfMTOA==", "license": "ISC", "optional": true, "dependencies": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.0.1", - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "glob": "^7.1.4", - "infer-owner": "^1.0.4", - "lru-cache": 
"^6.0.0", - "minipass": "^3.1.1", - "minipass-collect": "^1.0.2", + "@npmcli/fs": "^5.0.0", + "fs-minipass": "^3.0.0", + "glob": "^13.0.0", + "lru-cache": "^11.1.0", + "minipass": "^7.0.3", + "minipass-collect": "^2.0.1", "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.2", - "mkdirp": "^1.0.3", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.0.2", - "unique-filename": "^1.1.1" + "minipass-pipeline": "^1.2.4", + "p-map": "^7.0.2", + "ssri": "^13.0.0" }, "engines": { - "node": ">= 10" + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/cacache/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "license": "MIT", + "optional": true, + "engines": { + "node": "18 || 20 || >=22" } }, "node_modules/cacache/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", + "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", "license": "MIT", "optional": true, "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" } }, "node_modules/cacache/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "license": "ISC", + "version": "13.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.6.tgz", + "integrity": "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==", + "license": "BlueOak-1.0.0", "optional": true, "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "minimatch": "^10.2.2", + "minipass": "^7.1.3", + "path-scurry": "^2.0.2" }, "engines": { - "node": "*" + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/cacache/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "license": "ISC", + "node_modules/cacache/node_modules/lru-cache": { + "version": "11.3.5", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.3.5.tgz", + "integrity": "sha512-NxVFwLAnrd9i7KUBxC4DrUhmgjzOs+1Qm50D3oF1/oL+r1NpZ4gA7xvG0/zJ8evR7zIKn4vLf7qTNduWFtCrRw==", + "license": "BlueOak-1.0.0", "optional": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, "engines": { - "node": "*" + "node": "20 || >=22" } }, - "node_modules/cacache/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "license": "ISC", + "node_modules/cacache/node_modules/minimatch": { + "version": "10.2.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz", + "integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==", + "license": "BlueOak-1.0.0", "optional": true, "dependencies": { - "yallist": "^4.0.0" + "brace-expansion": "^5.0.5" }, "engines": { - "node": ">=8" + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/cacache/node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz", + "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==", "license": "MIT", "optional": true, - "dependencies": { - "aggregate-error": "^3.0.0" - }, "engines": { - "node": ">=10" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/cacache/node_modules/tar": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", - "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", - "deprecated": "Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "license": "ISC", + "node_modules/cacache/node_modules/path-scurry": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz", + "integrity": "sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==", + "license": "BlueOak-1.0.0", "optional": true, "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" }, "engines": { - "node": ">=10" - } - }, - "node_modules/cacache/node_modules/tar/node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "license": "ISC", - "optional": true, - "engines": { - "node": ">=8" + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/caching-transform": { @@ -9289,15 +9253,6 @@ "url": "https://paulmillr.com/funding/" } }, - "node_modules/chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, "node_modules/ci-info": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.4.0.tgz", @@ -9358,7 +9313,7 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "devOptional": true, + "dev": true, "license": "MIT", "engines": { "node": ">=6" @@ -9532,16 +9487,6 @@ "node": ">=12.20" } }, - "node_modules/color-support": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", - "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", - "license": "ISC", - "optional": true, - "bin": { - "color-support": "bin.js" - } - }, "node_modules/color/node_modules/color-convert": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-3.1.3.tgz", @@ -9634,7 +9579,7 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "devOptional": true, + "dev": true, "license": "MIT" }, "node_modules/concurrently": { @@ -9698,13 +9643,6 @@ "node": "^14.18.0 || >=16.10.0" } }, - "node_modules/console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", - "license": "ISC", - "optional": true - }, "node_modules/content-disposition": { "version": "0.5.4", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", @@ -10200,13 +10138,6 @@ "node": ">=0.4.0" } }, - "node_modules/delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": 
"sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", - "license": "MIT", - "optional": true - }, "node_modules/depd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", @@ -10546,29 +10477,6 @@ "node": ">= 0.8" } }, - "node_modules/encoding": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", - "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "license": "MIT", - "optional": true, - "dependencies": { - "iconv-lite": "^0.6.2" - } - }, - "node_modules/encoding/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "license": "MIT", - "optional": true, - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/end-of-stream": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", @@ -12024,6 +11932,13 @@ "node": ">=6" } }, + "node_modules/exponential-backoff": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/exponential-backoff/-/exponential-backoff-3.1.3.tgz", + "integrity": "sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==", + "license": "Apache-2.0", + "optional": true + }, "node_modules/express": { "version": "4.22.1", "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz", @@ -12555,34 +12470,23 @@ "license": "MIT" }, "node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "license": "ISC", - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", "license": "ISC", + "optional": true, "dependencies": { - "yallist": "^4.0.0" + "minipass": "^7.0.3" }, "engines": { - "node": ">=8" + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "devOptional": true, + "dev": true, "license": "ISC" }, "node_modules/fsevents": { @@ -12652,75 +12556,25 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/gauge": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", - "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", - "deprecated": "This package is no longer supported.", - "license": "ISC", - "optional": true, - "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - 
"console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^3.0.7", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - }, + "node_modules/generator-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", + "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + "dev": true, + "license": "MIT", "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": ">= 0.4" } }, - "node_modules/gauge/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, "license": "MIT", - "optional": true - }, - "node_modules/gauge/node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "license": "ISC", - "optional": true - }, - "node_modules/gauge/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "optional": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/generator-function": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", - "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } + "engines": { + "node": ">=6.9.0" + } }, "node_modules/get-caller-file": { "version": "2.0.5", @@ -13144,13 +12998,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", - "license": "ISC", - "optional": true - }, "node_modules/hash-base": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.2.tgz", @@ -13347,7 +13194,7 @@ "version": "7.0.2", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", - "dev": true, + "devOptional": true, "license": "MIT", "dependencies": { "agent-base": "^7.1.0", @@ -13361,7 +13208,7 @@ "version": "7.0.6", 
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", - "dev": true, + "devOptional": true, "license": "MIT", "dependencies": { "agent-base": "^7.1.2", @@ -13400,16 +13247,6 @@ "uuid": "bin/uuid" } }, - "node_modules/humanize-ms": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", - "license": "MIT", - "optional": true, - "dependencies": { - "ms": "^2.0.0" - } - }, "node_modules/hyperdiff": { "version": "2.0.23", "resolved": "https://registry.npmjs.org/hyperdiff/-/hyperdiff-2.0.23.tgz", @@ -13423,6 +13260,12 @@ "node": ">= 8" } }, + "node_modules/hyperdiff/node_modules/lodash": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -13508,7 +13351,7 @@ "version": "0.1.4", "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "devOptional": true, + "dev": true, "license": "MIT", "engines": { "node": ">=0.8.19" @@ -13518,25 +13361,18 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "devOptional": true, + "dev": true, "license": "MIT", "engines": { "node": ">=8" } }, - "node_modules/infer-owner": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", - "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", - "license": "ISC", - "optional": true - }, "node_modules/inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "devOptional": true, + "dev": true, "license": "ISC", "dependencies": { "once": "^1.3.0", @@ -13948,13 +13784,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-lambda": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", - "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", - "license": "MIT", - "optional": true - }, "node_modules/is-loopback-addr": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/is-loopback-addr/-/is-loopback-addr-2.0.2.tgz", @@ -14279,7 +14108,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "devOptional": true, + "dev": true, "license": "ISC" }, "node_modules/issue-parser": { @@ -15153,9 +14982,9 @@ } }, "node_modules/lodash": { - "version": "4.17.23", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", - "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz", + "integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==", "license": "MIT" }, "node_modules/lodash.camelcase": { @@ -15373,101 +15202,37 @@ } }, "node_modules/make-fetch-happen": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", - "integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", + "version": "15.0.5", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-15.0.5.tgz", + "integrity": "sha512-uCbIa8jWWmQZt4dSnEStkVC6gdakiinAm4PiGsywIkguF0eWMdcjDz0ECYhUolFU3pFLOev9VNPCEygydXnddg==", "license": "ISC", "optional": true, "dependencies": { - "agentkeepalive": "^4.1.3", - "cacache": "^15.2.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^4.0.1", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^6.0.0", - "minipass": "^3.1.3", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^1.3.2", + "@gar/promise-retry": "^1.0.0", + "@npmcli/agent": "^4.0.0", + "@npmcli/redact": "^4.0.0", + "cacache": "^20.0.1", + "http-cache-semantics": "^4.1.1", + "minipass": "^7.0.2", + "minipass-fetch": "^5.0.0", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.2", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.0.0", - "ssri": "^8.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/make-fetch-happen/node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "license": "MIT", - "optional": true, - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/make-fetch-happen/node_modules/http-proxy-agent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", - "integrity": 
"sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", - "license": "MIT", - "optional": true, - "dependencies": { - "@tootallnate/once": "1", - "agent-base": "6", - "debug": "4" + "negotiator": "^1.0.0", + "proc-log": "^6.0.0", + "ssri": "^13.0.0" }, "engines": { - "node": ">= 6" + "node": "^20.17.0 || >=22.9.0" } }, - "node_modules/make-fetch-happen/node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "license": "MIT", - "optional": true, - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/make-fetch-happen/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "license": "ISC", - "optional": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/make-fetch-happen/node_modules/socks-proxy-agent": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz", - "integrity": "sha512-a6KW9G+6B3nWZ1yB8G7pJwL3ggLy1uTzKAgCb7ttblwqdz9fMGJUuTy3uFzEP48FAs9FLILlmzDlE2JJhVQaXQ==", + "node_modules/make-fetch-happen/node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", "license": "MIT", "optional": true, - "dependencies": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" - }, "engines": { - "node": ">= 10" + "node": ">= 0.6" } }, "node_modules/math-intrinsics": { @@ -15678,76 +15443,67 @@ } }, "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "license": "ISC", + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", + "integrity": "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==", + "license": "BlueOak-1.0.0", "engines": { "node": ">=16 || 14 >=14.17" } }, "node_modules/minipass-collect": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", - "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", - "license": "ISC", - "optional": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/minipass-collect/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz", + "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==", "license": "ISC", "optional": true, "dependencies": { - "yallist": "^4.0.0" + "minipass": "^7.0.3" }, 
"engines": { - "node": ">=8" + "node": ">=16 || 14 >=14.17" } }, "node_modules/minipass-fetch": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-1.4.1.tgz", - "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-5.0.2.tgz", + "integrity": "sha512-2d0q2a8eCi2IRg/IGubCNRJoYbA1+YPXAzQVRFmB45gdGZafyivnZ5YSEfo3JikbjGxOdntGFvBQGqaSMXlAFQ==", "license": "MIT", "optional": true, "dependencies": { - "minipass": "^3.1.0", - "minipass-sized": "^1.0.3", - "minizlib": "^2.0.0" + "minipass": "^7.0.3", + "minipass-sized": "^2.0.0", + "minizlib": "^3.0.1" }, "engines": { - "node": ">=8" + "node": "^20.17.0 || >=22.9.0" }, "optionalDependencies": { - "encoding": "^0.1.12" + "iconv-lite": "^0.7.2" } }, - "node_modules/minipass-fetch/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "license": "ISC", + "node_modules/minipass-fetch/node_modules/iconv-lite": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.2.tgz", + "integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==", + "license": "MIT", "optional": true, "dependencies": { - "yallist": "^4.0.0" + "safer-buffer": ">= 2.1.2 < 3.0.0" }, "engines": { - "node": ">=8" + "node": ">=0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/minipass-flush": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", - "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", - "license": "ISC", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.7.tgz", + "integrity": "sha512-TbqTz9cUwWyHS2Dy89P3ocAGUGxKjjLuR9z8w4WUTGAVgEj17/4nhgo2Du56i0Fm3Pm30g4iA8Lcqctc76jCzA==", + "license": "BlueOak-1.0.0", "optional": true, "dependencies": { "minipass": "^3.0.0" @@ -15796,66 +15552,28 @@ } }, "node_modules/minipass-sized": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", - "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", - "license": "ISC", - "optional": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minipass-sized/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-2.0.0.tgz", + "integrity": "sha512-zSsHhto5BcUVM2m1LurnXY6M//cGhVaegT71OfOXoprxT6o780GZd792ea6FfrQkuU4usHZIUczAQMRUE2plzA==", "license": "ISC", "optional": true, "dependencies": { - "yallist": "^4.0.0" + "minipass": "^7.1.2" }, "engines": { "node": ">=8" } }, "node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": 
"sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz", + "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==", "license": "MIT", "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "license": "MIT", - "bin": { - "mkdirp": "bin/cmd.js" + "minipass": "^7.1.2" }, "engines": { - "node": ">=10" + "node": ">= 18" } }, "node_modules/mkdirp-classic": { @@ -16273,28 +15991,28 @@ } }, "node_modules/node-gyp": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-8.4.1.tgz", - "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", + "version": "12.2.0", + "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-12.2.0.tgz", + "integrity": "sha512-q23WdzrQv48KozXlr0U1v9dwO/k59NHeSzn6loGcasyf0UnSrtzs8kRxM+mfwJSf0DkX0s43hcqgnSO4/VNthQ==", "license": "MIT", "optional": true, "dependencies": { "env-paths": "^2.2.0", - "glob": "^7.1.4", + "exponential-backoff": "^3.1.1", "graceful-fs": "^4.2.6", - "make-fetch-happen": "^9.1.0", - "nopt": "^5.0.0", - "npmlog": "^6.0.0", - "rimraf": "^3.0.2", + "make-fetch-happen": "^15.0.0", + "nopt": "^9.0.0", + "proc-log": "^6.0.0", "semver": "^7.3.5", - "tar": "^6.1.2", - "which": "^2.0.2" + "tar": "^7.5.4", + "tinyglobby": "^0.2.12", + "which": "^6.0.0" }, "bin": { "node-gyp": "bin/node-gyp.js" }, "engines": { - "node": ">= 10.12.0" + "node": "^20.17.0 || >=22.9.0" } }, "node_modules/node-gyp-build": { @@ -16308,79 +16026,30 @@ "node-gyp-build-test": "build-test.js" } }, - "node_modules/node-gyp/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "license": "MIT", - "optional": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/node-gyp/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "license": "ISC", + "node_modules/node-gyp/node_modules/isexe": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-4.0.0.tgz", + "integrity": "sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw==", + "license": "BlueOak-1.0.0", "optional": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": ">=20" } }, - "node_modules/node-gyp/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "node_modules/node-gyp/node_modules/which": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/which/-/which-6.0.1.tgz", + "integrity": "sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg==", "license": "ISC", "optional": true, "dependencies": { - "brace-expansion": "^1.1.7" + "isexe": "^4.0.0" }, - "engines": { - "node": "*" - } - }, - "node_modules/node-gyp/node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "license": "ISC", - "optional": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/node-gyp/node_modules/tar": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", - "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", - "deprecated": "Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "license": "ISC", - "optional": true, - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" + "bin": { + "node-which": "bin/which.js" }, "engines": { - "node": ">=10" + "node": "^20.17.0 || >=22.9.0" } }, "node_modules/node-preload": { @@ -16443,19 +16112,19 @@ } }, "node_modules/nopt": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-9.0.0.tgz", + "integrity": "sha512-Zhq3a+yFKrYwSBluL4H9XP3m3y5uvQkB/09CwDruCiRmR/UJYnn9W4R48ry0uGC70aeTPKLynBtscP9efFFcPw==", "license": "ISC", "optional": true, "dependencies": { - "abbrev": "1" + "abbrev": "^4.0.0" }, "bin": { "nopt": "bin/nopt.js" }, "engines": { - "node": ">=6" + "node": "^20.17.0 || >=22.9.0" } }, "node_modules/npm-run-path": { @@ -16487,23 +16156,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/npmlog": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", - "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", - "deprecated": "This package is no longer supported.", - "license": "ISC", - "optional": true, - "dependencies": { - "are-we-there-yet": "^3.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^4.0.3", - "set-blocking": "^2.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - } - }, "node_modules/null-prototype-object": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/null-prototype-object/-/null-prototype-object-1.2.5.tgz", @@ -17451,7 +17103,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "devOptional": true, + "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" @@ -17745,6 +17397,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/proc-log": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-6.1.0.tgz", + "integrity": "sha512-iG+GYldRf2BQ0UDUAd6JQ/RwzaQy6mXmsk/IzlYyal4A4SNFw54MeH4/tLkF4I5WoWG9SQwuqWzS99jaFQHBuQ==", + "license": "ISC", + "optional": true, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -17779,44 +17441,6 @@ "integrity": "sha512-MOzLIwhpt64KIVN64h1MwdKWiyKFNc/S6BoYKPIVUHFg0/eIEyBulhWCgn678v/4c0ri3FdGuzXymNCv02MUIw==", "license": "Apache-2.0 OR MIT" }, - "node_modules/promise-inflight": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", - "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==", - "license": "ISC", - "optional": true - }, - "node_modules/promise-retry": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", - "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", - "license": "MIT", - "optional": true, - 
"dependencies": { - "err-code": "^2.0.2", - "retry": "^0.12.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/promise-retry/node_modules/err-code": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", - "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", - "license": "MIT", - "optional": true - }, - "node_modules/promise-retry/node_modules/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", - "license": "MIT", - "optional": true, - "engines": { - "node": ">= 4" - } - }, "node_modules/prop-types": { "version": "15.8.1", "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", @@ -17936,6 +17560,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "dev": true, "license": "MIT" }, "node_modules/pump": { @@ -18679,7 +18304,7 @@ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", - "devOptional": true, + "dev": true, "license": "ISC", "dependencies": { "glob": "^7.1.3" @@ -18695,7 +18320,7 @@ "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "devOptional": true, + "dev": true, "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", @@ -18707,7 +18332,7 @@ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "devOptional": true, + "dev": true, "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", @@ -18728,7 +18353,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "devOptional": true, + "dev": true, "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" @@ -19077,7 +18702,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", - "devOptional": true, + "dev": true, "license": "ISC" }, "node_modules/set-function-length": { @@ -19385,7 +19010,7 @@ "version": "8.0.5", "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", - "dev": true, + "devOptional": true, "license": "MIT", "dependencies": { "agent-base": "^7.1.2", @@ -19465,22 +19090,25 @@ "license": "BSD-3-Clause" }, "node_modules/sqlite3": { - "version": "5.1.7", - "resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-5.1.7.tgz", - "integrity": "sha512-GGIyOiFaG+TUra3JIfkI/zGP8yZYLPQ0pl1bH+ODjiX57sPhrLU5sQJn1y9bDKZUFYkX1crlrPfSYt0BKKdkog==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-6.0.1.tgz", + "integrity": "sha512-X0czUUMG2tmSqJpEQa3tCuZSHKIx8PwM53vLZzKp/o6Rpy25fiVfjdbnZ988M8+O3ZWR1ih0K255VumCb3MAnQ==", "hasInstallScript": true, "license": "BSD-3-Clause", "dependencies": { "bindings": "^1.5.0", - "node-addon-api": "^7.0.0", - "prebuild-install": "^7.1.1", - "tar": "^6.1.11" + "node-addon-api": "^8.0.0", + "prebuild-install": "^7.1.3", + "tar": "^7.5.10" + }, + "engines": { + "node": ">=20.17.0" }, "optionalDependencies": { - "node-gyp": "8.x" + "node-gyp": "12.x" }, "peerDependencies": { - "node-gyp": "8.x" + "node-gyp": "12.x" }, "peerDependenciesMeta": { "node-gyp": { @@ -19488,37 +19116,13 @@ } } }, - "node_modules/sqlite3/node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "license": "ISC", - "engines": { - "node": ">=8" - } - }, "node_modules/sqlite3/node_modules/node-addon-api": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", - "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", - "license": "MIT" - }, - "node_modules/sqlite3/node_modules/tar": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", - "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", - "deprecated": "Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", - "license": "ISC", - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, + "version": "8.7.0", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-8.7.0.tgz", + "integrity": "sha512-9MdFxmkKaOYVTV+XVRG8ArDwwQ77XIgIPyKASB1k3JPq3M8fGQQQE3YpMOrKm6g//Ktx8ivZr8xo1Qmtqub+GA==", + "license": "MIT", "engines": { - "node": ">=10" + "node": "^18 || ^20 || >= 21" } }, "node_modules/ssh2": { @@ -19546,29 +19150,16 @@ "optional": true }, "node_modules/ssri": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", - "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", - "license": "ISC", - "optional": true, - "dependencies": { - "minipass": "^3.1.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/ssri/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "version": "13.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-13.0.1.tgz", + "integrity": "sha512-QUiRf1+u9wPTL/76GTYlKttDEBWV1ga9ZXW8BG6kfdeyyM8LGPix9gROyg9V2+P0xNyF3X2Go526xKFdMZrHSQ==", "license": "ISC", "optional": true, "dependencies": { - "yallist": "^4.0.0" + "minipass": "^7.0.3" }, "engines": { - "node": ">=8" + "node": "^20.17.0 || >=22.9.0" } }, "node_modules/stack-trace": { @@ -20098,18 +19689,6 @@ "node": ">=18" } }, - "node_modules/tar/node_modules/minizlib": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz", - "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==", - "license": "MIT", - "dependencies": { - "minipass": "^7.1.2" - }, - "engines": { - "node": ">= 18" - } - }, "node_modules/tar/node_modules/yallist": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", @@ -20228,7 +19807,7 @@ "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", - "dev": true, + "devOptional": true, "license": "MIT", "dependencies": { "fdir": "^6.5.0", @@ -20245,7 +19824,7 @@ "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, + "devOptional": true, "license": "MIT", "engines": { "node": ">=12.0.0" @@ -20263,7 +19842,7 @@ "version": "4.0.3", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, + "devOptional": true, "license": "MIT", "engines": { "node": ">=12" @@ -20723,26 +20302,6 @@ "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", "license": "MIT" }, - "node_modules/unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": 
"sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "license": "ISC", - "optional": true, - "dependencies": { - "unique-slug": "^2.0.0" - } - }, - "node_modules/unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "license": "ISC", - "optional": true, - "dependencies": { - "imurmurhash": "^0.1.4" - } - }, "node_modules/universal-user-agent": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-7.0.3.tgz", @@ -20951,7 +20510,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "devOptional": true, + "dev": true, "license": "ISC", "dependencies": { "isexe": "^2.0.0" @@ -21065,38 +20624,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/wide-align": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", - "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", - "license": "ISC", - "optional": true, - "dependencies": { - "string-width": "^1.0.2 || 2 || 3 || 4" - } - }, - "node_modules/wide-align/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT", - "optional": true - }, - "node_modules/wide-align/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "optional": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/wildcard-match": { "version": "5.1.4", "resolved": "https://registry.npmjs.org/wildcard-match/-/wildcard-match-5.1.4.tgz", diff --git a/package.json b/package.json index 7a20f414b..c2f01199b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ocean-node", - "version": "2.1.1", + "version": "3.0.1", "description": "Ocean Node is used to run all core services in the Ocean stack", "author": "Ocean Protocol Foundation", "license": "Apache-2.0", @@ -75,9 +75,9 @@ "@multiformats/multiaddr": "^12.2.3", "@oceanprotocol/contracts": "^2.6.0", "@oceanprotocol/ddo-js": "^0.2.0", - "axios": "^1.13.5", + "axios": "^1.15.0", "base58-js": "^2.0.0", - "basic-ftp": "^5.2.0", + "basic-ftp": "^5.3.0", "cors": "^2.8.5", "datastore-level": "^12.0.2", "delay": "^5.0.0", @@ -93,10 +93,10 @@ "it-pipe": "^3.0.1", "jsonwebtoken": "^9.0.2", "libp2p": "^3.1.2", - "lodash": "^4.17.23", + "lodash": "^4.18.1", "lzma-purejs-requirejs": "^1.0.0", "node-cron": "^3.0.3", - "sqlite3": "^5.1.7", + "sqlite3": "^6.0.1", "stream-concat": "^1.0.0", "tar": "^7.5.11", "uint8arrays": "^4.0.6", diff --git a/scripts/ocean-node-quickstart.sh b/scripts/ocean-node-quickstart.sh index 5a6da078d..67481d878 100755 --- a/scripts/ocean-node-quickstart.sh +++ b/scripts/ocean-node-quickstart.sh @@ -113,6 +113,12 @@ ensure_jq() { fi 
} +get_public_ip() { + if command -v curl >/dev/null 2>&1; then + DETECTED_PUBLIC_IP=$(curl -s ifconfig.me) + fi +} + echo "Checking prerequisites (jq) are installed.." ensure_jq @@ -170,8 +176,14 @@ if [ "$enable_upnp" == "y" ]; then P2P_ENABLE_UPNP='true' fi - -read -p "Provide the public IPv4 address or FQDN where this node will be accessible: " P2P_ANNOUNCE_ADDRESS +get_public_ip +if [ -n "$DETECTED_PUBLIC_IP" ]; then + echo -ne "Provide the public IPv4 address or FQDN where this node will be accessible (press Enter to accept detected address: "$DETECTED_PUBLIC_IP") ": + read P2P_ANNOUNCE_ADDRESS + P2P_ANNOUNCE_ADDRESS=${P2P_ANNOUNCE_ADDRESS:-$DETECTED_PUBLIC_IP} +else + read -p "Provide the public IPv4 address or FQDN where this node will be accessible: " P2P_ANNOUNCE_ADDRESS +fi if [ -n "$P2P_ANNOUNCE_ADDRESS" ]; then validate_ip_or_fqdn "$P2P_ANNOUNCE_ADDRESS" @@ -182,10 +194,10 @@ if [ -n "$P2P_ANNOUNCE_ADDRESS" ]; then if [[ "$P2P_ANNOUNCE_ADDRESS" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then # IPv4 - P2P_ANNOUNCE_ADDRESSES='["/ip4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindTcpPort'", "/ip4/'$P2P_ANNOUNCE_ADDRESS'/ws/tcp/'$P2P_ipV4BindWsPort'"]' + P2P_ANNOUNCE_ADDRESSES='["/ip4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindTcpPort'", "/ip4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindWsPort'/ws", "/ip4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindWsPort'/tls/ws"]' elif [[ "$P2P_ANNOUNCE_ADDRESS" =~ ^[a-zA-Z0-9.-]+$ ]]; then # FQDN - P2P_ANNOUNCE_ADDRESSES='["/dns4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindTcpPort'", "/dns4/'$P2P_ANNOUNCE_ADDRESS'/ws/tcp/'$P2P_ipV4BindWsPort'"]' + P2P_ANNOUNCE_ADDRESSES='["/dns4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindTcpPort'", "/dns4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindWsPort'/ws", "/dns4/'$P2P_ANNOUNCE_ADDRESS'/tcp/'$P2P_ipV4BindWsPort'/tls/ws"]' fi else P2P_ANNOUNCE_ADDRESSES='' @@ -227,47 +239,51 @@ if [ -z "$DOCKER_COMPUTE_ENVIRONMENTS" ]; then export DOCKER_COMPUTE_ENVIRONMENTS='[ { "socketPath": "/var/run/docker.sock", - "resources": [ + "environments": [ { - "id": "disk", - "total": 10 - } - ], - "storageExpiry": 604800, - "maxJobDuration": 36000, - "minJobDuration": 60, - "fees": { - "1": [ - { - "feeToken": "0x123", - "prices": [ + "storageExpiry": 604800, + "maxJobDuration": 36000, + "minJobDuration": 60, + "resources": [ + { + "id": "disk", + "total": 10 + } + ], + "fees": { + "1": [ { - "id": "cpu", - "price": 1 + "feeToken": "0x123", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] } ] - } - ] - }, - "free": { - "maxJobDuration": 360000, - "minJobDuration": 60, - "maxJobs": 3, - "resources": [ - { - "id": "cpu", - "max": 1 - }, - { - "id": "ram", - "max": 1 }, - { - "id": "disk", - "max": 1 + "free": { + "maxJobDuration": 360000, + "minJobDuration": 60, + "maxJobs": 3, + "resources": [ + { + "id": "cpu", + "max": 1 + }, + { + "id": "ram", + "max": 1 + }, + { + "id": "disk", + "max": 1 + } + ] } - ] - } + } + ] } ]' fi @@ -616,7 +632,7 @@ if command -v jq &> /dev/null; then if [ "$GPU_COUNT" -gt 0 ]; then echo "Detected $GPU_COUNT GPU type(s). Updating configuration..." - DOCKER_COMPUTE_ENVIRONMENTS=$(echo "$DOCKER_COMPUTE_ENVIRONMENTS" | jq --argjson gpus "$DETECTED_GPUS" '.[0].resources += $gpus') + DOCKER_COMPUTE_ENVIRONMENTS=$(echo "$DOCKER_COMPUTE_ENVIRONMENTS" | jq --argjson gpus "$DETECTED_GPUS" '.[0].environments[0].resources += $gpus') echo "GPUs added to Compute Environment resources." else echo "No GPUs detected." 
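A note on the announce-address change above: the previous quickstart emitted WebSocket multiaddrs with a non-standard `/ws/tcp/<port>` segment ordering, while the new strings use the canonical libp2p form `/tcp/<port>/ws` and additionally announce a `/tls/ws` variant for secure WebSockets. A minimal TypeScript sketch of the same branching (the helper name and regex are illustrative, not part of this PR):

```ts
// Hedged sketch mirroring the shell logic above; not part of the diff.
function buildAnnounceAddresses(
  host: string,
  tcpPort: number,
  wsPort: number
): string[] {
  // IPv4 literals map to /ip4; anything else is treated as a DNS name (/dns4)
  const isIPv4 = /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/.test(host)
  const proto = isIPv4 ? 'ip4' : 'dns4'
  return [
    `/${proto}/${host}/tcp/${tcpPort}`, // plain TCP transport
    `/${proto}/${host}/tcp/${wsPort}/ws`, // WebSocket: /tcp/<port>/ws ordering
    `/${proto}/${host}/tcp/${wsPort}/tls/ws` // secure WebSocket variant added here
  ]
}

// buildAnnounceAddresses('203.0.113.7', 9000, 9001) returns:
//   ['/ip4/203.0.113.7/tcp/9000',
//    '/ip4/203.0.113.7/tcp/9001/ws',
//    '/ip4/203.0.113.7/tcp/9001/tls/ws']
```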
@@ -684,10 +700,10 @@ services: # P2P_mDNSInterval: '' # P2P_connectionsMaxParallelDials: '' # P2P_connectionsDialTimeout: '' - P2P_ENABLE_UPNP: '$P2P_ENABLE_UPNP' + P2P_ENABLE_UPNP: '$P2P_ENABLE_UPNP' # P2P_ENABLE_AUTONAT: '' -# P2P_ENABLE_CIRCUIT_RELAY_SERVER: '' -# P2P_ENABLE_CIRCUIT_RELAY_CLIENT: '' + P2P_ENABLE_CIRCUIT_RELAY_SERVER: false + P2P_ENABLE_CIRCUIT_RELAY_CLIENT: false # P2P_BOOTSTRAP_NODES: '' # P2P_FILTER_ANNOUNCED_ADDRESSES: '' DOCKER_COMPUTE_ENVIRONMENTS: '$DOCKER_COMPUTE_ENVIRONMENTS' @@ -753,3 +769,7 @@ echo -e "\e[1;32mP2P IPv6 TCP Port: $P2P_ipV6BindTcpPort\e[0m" echo -e "\e[1;32mP2P IPv6 WebSocket Port: $P2P_ipV6BindWsPort\e[0m" echo "" echo -e "\e[1;32m4)\e[0m If using SSL/TLS with a custom domain name, make sure to listen on host port 443 for the HTTP API, or use a reverse proxy with TLS offloading" +echo "" +echo -e "If your node is not reachable by other peers (NAT, no public IP, port forwarding issues)," +echo -e "refer to the networking guide for help with Dynamic DNS, port forwarding, and circuit relay:" +echo -e "\e[1;34mhttps://github.com/oceanprotocol/ocean-node/blob/main/docs/networking.md\e[0m" \ No newline at end of file diff --git a/src/@types/AccessList.ts b/src/@types/AccessList.ts new file mode 100644 index 000000000..242b991d1 --- /dev/null +++ b/src/@types/AccessList.ts @@ -0,0 +1,6 @@ +/** + * Mapping of `chainId` -> list of smart contract addresses on that chain. + */ +export interface AccessList { + [chainId: string]: string[] +} diff --git a/src/@types/C2D/C2D.ts b/src/@types/C2D/C2D.ts index b154eb8ce..55f2eae35 100644 --- a/src/@types/C2D/C2D.ts +++ b/src/@types/C2D/C2D.ts @@ -1,5 +1,6 @@ import { MetadataAlgorithm, ConsumerParameter } from '@oceanprotocol/ddo-js' import type { BaseFileObject, StorageObject, EncryptMethod } from '../fileObject.js' +import type { AccessList } from '../AccessList.js' export enum C2DClusterType { // eslint-disable-next-line no-unused-vars OPF_K8 = 0, @@ -22,6 +23,12 @@ export interface C2DClusterInfo { export type ComputeResourceType = 'cpu' | 'ram' | 'disk' | any +export interface ResourceConstraint { + id: ComputeResourceType // the resource being constrained + min?: number // min units of this resource per unit of parent resource + max?: number // max units of this resource per unit of parent resource +} + export interface ComputeResourcesPricingInfo { id: ComputeResourceType price: number // price per unit per minute @@ -63,6 +70,7 @@ export interface ComputeResource { */ platform?: string init?: dockerHwInit + constraints?: ResourceConstraint[] // optional cross-resource constraints } export interface ComputeResourceRequest { id: string @@ -88,7 +96,7 @@ export interface RunningPlatform { export interface ComputeAccessList { addresses: string[] - accessLists: { [chainId: string]: string[] }[] | null + accessLists: AccessList[] | null } export interface ComputeEnvironmentFreeOptions { @@ -99,6 +107,7 @@ export interface ComputeEnvironmentFreeOptions { maxJobs?: number // maximum number of simultaneous free jobs resources?: ComputeResource[] access: ComputeAccessList + allowImageBuild?: boolean } export interface ComputeEnvironmentBaseConfig { description?: string // v1 @@ -132,6 +141,19 @@ export interface ComputeEnvironment extends ComputeEnvironmentBaseConfig { runMaxWaitTimeFree: number } +export interface C2DEnvironmentConfig { + id?: string + description?: string + storageExpiry?: number + minJobDuration?: number + maxJobDuration?: number + maxJobs?: number + fees?: ComputeEnvFeesStructure + access?: 
ComputeAccessList + free?: ComputeEnvironmentFreeOptions + resources?: ComputeResource[] +} + export interface C2DDockerConfig { socketPath: string protocol: string @@ -140,17 +162,13 @@ export interface C2DDockerConfig { caPath: string certPath: string keyPath: string - storageExpiry?: number - maxJobDuration?: number - minJobDuration?: number - maxJobs?: number - fees: ComputeEnvFeesStructure - resources?: ComputeResource[] // optional, owner can overwrite - free?: ComputeEnvironmentFreeOptions - access: ComputeAccessList imageRetentionDays?: number // Default: 7 days imageCleanupInterval?: number // Default: 86400 seconds (24 hours) paymentClaimInterval?: number // Default: 3600 seconds (1 hours) + scanImages?: boolean + scanImageDBUpdateInterval?: number // Default: 12 hours + environments: C2DEnvironmentConfig[] + enableNetwork?: boolean // whether network is enabled for algorithm containers } export type ComputeResultType = @@ -245,6 +263,7 @@ export interface DBComputeJobPayment { token: string lockTx: string claimTx: string + cancelTx: string cost: number } @@ -271,6 +290,9 @@ export interface DBComputeJob extends ComputeJob { algoDuration: number // duration of the job in seconds encryptedDockerRegistryAuth?: string output?: string // this is always an ECIES encrypted string, that decodes to ComputeOutput interface + jobIdHash: string + buildStartTimestamp?: string + buildStopTimestamp?: string } // make sure we keep them both in sync @@ -290,6 +312,8 @@ export enum C2DStatusNumber { // eslint-disable-next-line no-unused-vars BuildImageFailed = 13, // eslint-disable-next-line no-unused-vars + VulnerableImage = 14, + // eslint-disable-next-line no-unused-vars ConfiguringVolumes = 20, // eslint-disable-next-line no-unused-vars VolumeCreationFailed = 21, @@ -338,6 +362,8 @@ export enum C2DStatusText { // eslint-disable-next-line no-unused-vars BuildImageFailed = 'Building algorithm image failed', // eslint-disable-next-line no-unused-vars + VulnerableImage = 'Image has vulnerabilities', + // eslint-disable-next-line no-unused-vars ConfiguringVolumes = 'Configuring volumes', // eslint-disable-next-line no-unused-vars VolumeCreationFailed = 'Volume creation failed', diff --git a/src/@types/OceanNode.ts b/src/@types/OceanNode.ts index d007a7f64..6717de5c6 100644 --- a/src/@types/OceanNode.ts +++ b/src/@types/OceanNode.ts @@ -4,6 +4,8 @@ import { C2DClusterInfo, C2DDockerConfig } from './C2D/C2D' import { FeeStrategy } from './Fees' import { Schema } from '../components/database' import { KeyProviderType } from './KeyManager' +import type { PersistentStorageConfig } from './PersistentStorage.js' +import type { AccessList } from './AccessList' export interface OceanNodeDBConfig { url: string | null @@ -139,6 +141,8 @@ export interface OceanNodeConfig { jwtSecret?: string httpCertPath?: string httpKeyPath?: string + enableBenchmark?: boolean + persistentStorage?: PersistentStorageConfig } export interface P2PStatusResponse { @@ -191,6 +195,9 @@ export interface OceanNodeStatus { // detailed information c2dClusters?: any[] supportedSchemas?: Schema[] + persistentStorage?: { + accessLists?: AccessList[] + } } export interface FindDDOResponse { diff --git a/src/@types/PersistentStorage.ts b/src/@types/PersistentStorage.ts new file mode 100644 index 000000000..67b0448a2 --- /dev/null +++ b/src/@types/PersistentStorage.ts @@ -0,0 +1,41 @@ +import type { AccessList } from './AccessList' +import type { BaseFileObject } from './fileObject.js' +export type PersistentStorageType = 'localfs' | 's3' + 
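As a reading aid for the persistent-storage types introduced in this new file: the backend-specific option shapes (`PersistentStorageLocalFSOptions`, `PersistentStorageS3Options`) and the top-level `PersistentStorageConfig` are defined just below. A hedged example of a config value for an S3-compatible store; every concrete value here is a placeholder:

```ts
// Illustrative only: field names follow the interfaces defined below in this
// file; the endpoint, credentials, and access-list address are made up.
const exampleConfig: PersistentStorageConfig = {
  enabled: true,
  type: 's3',
  // chainId -> access-list contract addresses (see the AccessList type)
  accessLists: [{ '1': ['0x0000000000000000000000000000000000000001'] }],
  options: {
    endpoint: 'https://minio.example.com',
    region: 'us-east-1',
    objectKey: 'ocean-node-storage',
    accessKeyId: 'EXAMPLE_KEY_ID',
    secretAccessKey: 'EXAMPLE_SECRET',
    // per the doc comment below, MinIO-style services need path-style addressing
    forcePathStyle: true
  }
}
```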
+export interface PersistentStorageLocalFSOptions { + folder: string +} + +export interface PersistentStorageS3Options { + endpoint: string + region?: string + objectKey: string + accessKeyId: string + secretAccessKey: string + /** If true, use path-style addressing (e.g. endpoint/bucket/key). Required for some S3-compatible services (e.g. MinIO). Default false (virtual-host style, e.g. bucket.endpoint/key). */ + forcePathStyle?: boolean +} + +export interface PersistentStorageConfig { + enabled: boolean + type: PersistentStorageType + accessLists: AccessList[] + options: PersistentStorageLocalFSOptions | PersistentStorageS3Options +} + +/** + * Docker mount descriptor used by the Docker C2D engine. + * Mirrors Dockerode `HostConfig.Mounts[]` item shape. + */ +export interface DockerMountObject { + Type: 'bind' + Source: string + Target: string + ReadOnly: boolean +} + +export interface PersistentStorageObject extends BaseFileObject { + type: 'nodePersistentStorage' + bucketId: string + fileName: string +} diff --git a/src/@types/commands.ts b/src/@types/commands.ts index 395487b44..6ed0f76f4 100644 --- a/src/@types/commands.ts +++ b/src/@types/commands.ts @@ -1,3 +1,4 @@ +import { Readable } from 'stream' import { ValidateParams } from '../components/httpRoutes/validateCommands.js' import { P2PCommandResponse } from './OceanNode' import { DDO } from '@oceanprotocol/ddo-js' @@ -8,12 +9,13 @@ import type { DBComputeJobMetadata } from './C2D/C2D.js' import { FileObjectType, StorageObject, EncryptMethod } from './fileObject' - +import type { AccessList } from './AccessList.js' export interface Command { command: string // command name node?: string // if not present it means current node authorization?: string caller?: string | string[] // added by our node for rate limiting + stream?: Readable | null // commands may have an extra stream, after body. 
e.g. Encrypt file
 }

 export interface GetP2PPeerCommand extends Command {
@@ -312,4 +314,50 @@ export interface GetJobsCommand extends Command {
   environments?: string[]
   fromTimestamp?: string
   consumerAddrs?: string[]
+  runningJobs?: boolean
+}
+
+export interface PersistentStorageCreateBucketCommand extends Command {
+  consumerAddress: string
+  signature: string
+  nonce: string
+  accessLists: AccessList[]
+}
+
+export interface PersistentStorageGetBucketsCommand extends Command {
+  consumerAddress: string
+  signature: string
+  nonce: string
+  owner: string
+}
+
+export interface PersistentStorageListFilesCommand extends Command {
+  consumerAddress: string
+  signature: string
+  nonce: string
+  bucketId: string
+}
+
+export interface PersistentStorageUploadFileCommand extends Command {
+  consumerAddress: string
+  signature: string
+  nonce: string
+  bucketId: string
+  fileName: string
+}
+
+export interface PersistentStorageGetFileObjectCommand extends Command {
+  consumerAddress: string
+  signature: string
+  nonce: string
+  bucketId: string
+  fileName: string
+}
+
+export interface PersistentStorageDeleteFileCommand extends Command {
+  consumerAddress: string
+  signature: string
+  nonce: string
+  bucketId: string
+  fileName: string
 }
diff --git a/src/OceanNode.ts b/src/OceanNode.ts
index f32cf7d87..a8c9c0aac 100644
--- a/src/OceanNode.ts
+++ b/src/OceanNode.ts
@@ -13,6 +13,8 @@ import { Auth } from './components/Auth/index.js'
 import { KeyManager } from './components/KeyManager/index.js'
 import { BlockchainRegistry } from './components/BlockchainRegistry/index.js'
 import { Blockchain } from './utils/blockchain.js'
+import { createPersistentStorage } from './components/persistentStorage/createPersistentStorage.js'
+import { PersistentStorageFactory } from './components/persistentStorage/PersistentStorageFactory.js'

 export interface RequestLimiter {
   requester: string | string[] // IP address or peer ID
@@ -37,6 +39,7 @@ export class OceanNode {
   private remoteCaller: string | string[]
   private requestMap: Map<string, RequestLimiter>
   private auth: Auth
+  private persistentStorage: PersistentStorageFactory

   // eslint-disable-next-line no-useless-constructor
   private constructor(
@@ -73,6 +76,15 @@ export class OceanNode {
       this.config.claimDurationTimeout,
       this.blockchainRegistry
     )
+    if (this.config.persistentStorage?.enabled) {
+      OCEAN_NODE_LOGGER.info(
+        `Starting PersistentStorage with type ${this.config.persistentStorage.type}`
+      )
+      this.persistentStorage = createPersistentStorage(this)
+    } else {
+      OCEAN_NODE_LOGGER.info(`Starting without PersistentStorage`)
+      this.persistentStorage = null
+    }
   }
 }
@@ -181,6 +193,10 @@ export class OceanNode {
     return this.blockchainRegistry
   }

+  public getPersistentStorage(): PersistentStorageFactory | null {
+    return this.persistentStorage
+  }
+
   /**
    * Get a Blockchain instance for the given chainId.
    * Delegates to BlockchainRegistry.
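The persistent-storage commands added above all carry the same `consumerAddress`/`signature`/`nonce` triple. A hedged sketch of how a client might assemble one such request follows. Note that the exact message format the node verifies is not shown in this diff, so the `address + nonce` signing payload below is an assumption, as is the command name:

```ts
import { Wallet } from 'ethers'
import type { PersistentStorageCreateBucketCommand } from './@types/commands.js'

// Sketch only: the signed payload (address + nonce) and the command name
// are assumptions; the handler's verification scheme is not part of this diff.
async function buildCreateBucketCommand(
  wallet: Wallet
): Promise<PersistentStorageCreateBucketCommand> {
  const nonce = Date.now().toString()
  const signature = await wallet.signMessage(wallet.address + nonce)
  return {
    command: 'createBucket', // hypothetical command name
    consumerAddress: wallet.address,
    signature,
    nonce,
    accessLists: [{ '11155111': [] }] // chainId -> contract addresses
  }
}
```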
@@ -200,6 +216,10 @@ export class OceanNode { } } + public getConfig(): OceanNodeConfig { + return this.config + } + /** * v3: Direct protocol command handler - no P2P, just call handler directly * Returns {status, stream} without buffering diff --git a/src/components/Indexer/index.ts b/src/components/Indexer/index.ts index fd9e94476..17460bf83 100644 --- a/src/components/Indexer/index.ts +++ b/src/components/Indexer/index.ts @@ -38,6 +38,7 @@ import { create256Hash } from '../../utils/crypt.js' import { getDatabase, isReachableConnection } from '../../utils/database.js' import { sleep } from '../../utils/util.js' import { isReindexingNeeded } from './version.js' +import { getPackageVersion } from '../../utils/version.js' import { DB_EVENTS, ES_CONNECTION_EVENTS } from '../database/ElasticsearchConfigHelper.js' /** @@ -535,7 +536,7 @@ export class OceanIndexer { * Checks if reindexing is needed and triggers it for all chains */ public async checkAndTriggerReindexing(): Promise { - const currentVersion = process.env.npm_package_version + const currentVersion = getPackageVersion() const dbActive = this.getDatabase() if (!dbActive || !(await isReachableConnection(dbActive.getConfig().url))) { INDEXER_LOGGER.error(`Giving up reindexing. DB is not online!`) diff --git a/src/components/Indexer/processors/BaseProcessor.ts b/src/components/Indexer/processors/BaseProcessor.ts index bb2cb1ec9..32f0fcc57 100644 --- a/src/components/Indexer/processors/BaseProcessor.ts +++ b/src/components/Indexer/processors/BaseProcessor.ts @@ -394,7 +394,9 @@ export abstract class BaseEventProcessor { .getCoreHandlers() .getHandler(PROTOCOL_COMMANDS.NONCE) .handle(getNonceTask) - nonceP2p = await streamToString(response.stream as Readable) + nonceP2p = String( + parseInt(await streamToString(response.stream as Readable)) + 1 + ) } catch (error) { const message = `Node exception on getting nonce from local nodeId ${nodeId}. Status: ${error.message}` INDEXER_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, message) @@ -457,7 +459,9 @@ export abstract class BaseEventProcessor { } // Convert stream to Uint8Array - const remoteNonce = await streamToString(response.stream as Readable) + const remoteNonce = String( + parseInt(await streamToString(response.stream as Readable)) + 1 + ) INDEXER_LOGGER.debug( `decryptDDO: Fetched fresh nonce ${remoteNonce} from remote node ${decryptorURL} for decrypt attempt` ) diff --git a/src/components/P2P/handleProtocolCommands.ts b/src/components/P2P/handleProtocolCommands.ts index 0750dec33..f94f6a2f5 100644 --- a/src/components/P2P/handleProtocolCommands.ts +++ b/src/components/P2P/handleProtocolCommands.ts @@ -123,7 +123,39 @@ export async function handleProtocolCommands(stream: Stream, connection: Connect return } - P2P_LOGGER.logMessage('Performing P2P task: ' + JSON.stringify(task), true) + const taskRecord = task as unknown as Record + if (taskRecord.p2pStreamBody === true) { + delete taskRecord.p2pStreamBody + + // True streaming: expose an async Readable that reads LP frames lazily + // as the handler consumes it. Frames are terminated by an empty chunk. + taskRecord.stream = Readable.from( + (async function* () { + while (true) { + const frame = await lp.read({ signal: handshakeSignal() }) + const buf = Buffer.from( + (frame as unknown as { subarray: () => Uint8Array }).subarray() + ) + + if (buf.length === 0) { + break + } + + yield buf + } + })() + ) + } + + const logPayload = { ...taskRecord } + // Avoid JSON-stringifying the request stream itself. 
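// Editorial aside, not part of this diff: the framing consumed by the async
// generator above pairs with writeP2pRequestBodyLp() in src/components/P2P/index.ts
// further down. The sending side length-prefix-writes each body chunk and then a
// zero-length frame, which the generator here treats as end-of-body and breaks on.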
+ if (logPayload.stream) { + logPayload.stream = '[request stream]' + } + if (Buffer.isBuffer(logPayload.rawData)) { + logPayload.rawData = `[${logPayload.rawData.length} bytes]` + } + P2P_LOGGER.logMessage('Performing P2P task: ' + JSON.stringify(logPayload), true) // Get and execute handler const handler: BaseHandler = this.getCoreHandlers().getHandler(task.command) @@ -152,11 +184,15 @@ export async function handleProtocolCommands(stream: Stream, connection: Connect await stream.close() } catch (err) { P2P_LOGGER.logMessageWithEmoji( - 'handleProtocolCommands Error: ' + err.message, + 'handleProtocolCommands Error: ' + + (err instanceof Error ? err.message : String(err)), true, GENERIC_EMOJIS.EMOJI_CROSS_MARK, LOG_LEVELS_STR.LEVEL_ERROR ) - await sendErrorAndClose(500, err.message) + const httpStatus = + typeof (err as any)?.status === 'number' ? (err as any).status : 500 + const msg = err instanceof Error ? err.message : String(err) + await sendErrorAndClose(httpStatus, msg) } } diff --git a/src/components/P2P/index.ts b/src/components/P2P/index.ts index c8861148a..ca66a3a25 100644 --- a/src/components/P2P/index.ts +++ b/src/components/P2P/index.ts @@ -33,7 +33,7 @@ import { } from '@libp2p/kad-dht' import { EVENTS, cidFromRawString } from '../../utils/index.js' -import { Transform } from 'stream' +import { Transform, Readable } from 'stream' import { Database } from '../database' import { OceanNodeConfig, @@ -70,6 +70,35 @@ type DDOCache = { let index = 0 +/** Optional request payload sent as LP frames after the command JSON; ends with an empty LP frame. */ +export type P2PRequestBodyStream = AsyncIterable | Readable + +function toUint8ArrayChunk(chunk: unknown): Uint8Array { + if (chunk instanceof Uint8Array) return chunk + if (Buffer.isBuffer(chunk)) return new Uint8Array(chunk) + if (typeof chunk === 'string') return uint8ArrayFromString(chunk) + if ( + chunk && + typeof chunk === 'object' && + ArrayBuffer.isView(chunk as ArrayBufferView) + ) { + const v = chunk as ArrayBufferView + return new Uint8Array(v.buffer, v.byteOffset, v.byteLength) + } + throw new Error('Unsupported chunk type for P2P request body') +} + +async function writeP2pRequestBodyLp( + lp: LengthPrefixedStream, + body: P2PRequestBodyStream, + signal: AbortSignal +): Promise { + for await (const chunk of body as AsyncIterable) { + await lp.write(toUint8ArrayChunk(chunk), { signal }) + } + await lp.write(new Uint8Array(0), { signal }) +} + export class OceanP2P extends EventEmitter { _libp2p: Libp2p _topic: string @@ -331,20 +360,23 @@ export class OceanP2P extends EventEmitter { `/ip6/${config.p2pConfig.ipV6BindAddress}/tcp/${config.p2pConfig.ipV6BindWsPort}/ws` ) } + const listenAddrs = config.p2pConfig.enableCircuitRelayClient + ? 
[...bindInterfaces, '/p2p-circuit'] + : bindInterfaces let addresses = {} if ( config.p2pConfig.announceAddresses && config.p2pConfig.announceAddresses.length > 0 ) { addresses = { - listen: bindInterfaces, + listen: listenAddrs, announceFilter: (multiaddrs: any[]) => multiaddrs.filter((m) => this.shouldAnnounce(m)), appendAnnounce: config.p2pConfig.announceAddresses } } else { addresses = { - listen: bindInterfaces, + listen: listenAddrs, announceFilter: (multiaddrs: any[]) => multiaddrs.filter((m) => this.shouldAnnounce(m)) } @@ -395,7 +427,12 @@ export class OceanP2P extends EventEmitter { // eslint-disable-next-line no-constant-condition, no-self-compare if (config.p2pConfig.enableCircuitRelayServer) { P2P_LOGGER.info('Enabling Circuit Relay Server') - servicesConfig = { ...servicesConfig, ...{ circuitRelay: circuitRelayServer() } } + servicesConfig = { + ...servicesConfig, + ...{ + circuitRelay: circuitRelayServer({ reservations: { maxReservations: 2 } }) + } + } } // eslint-disable-next-line no-constant-condition, no-self-compare if (config.p2pConfig.upnp) { @@ -725,9 +762,19 @@ export class OceanP2P extends EventEmitter { async send( lp: LengthPrefixedStream, message: string, - options: { signal: AbortSignal } + options: { signal: AbortSignal }, + requestBody?: P2PRequestBodyStream ) { - await lp.write(uint8ArrayFromString(message), { signal: options.signal }) + let outbound = message + if (requestBody) { + const cmd = JSON.parse(message) as Record + cmd.p2pStreamBody = true + outbound = JSON.stringify(cmd) + } + await lp.write(uint8ArrayFromString(outbound), { signal: options.signal }) + if (requestBody) { + await writeP2pRequestBodyLp(lp, requestBody, options.signal) + } const statusBytes = await lp.read({ signal: options.signal }) return { status: JSON.parse(uint8ArrayToString(statusBytes.subarray())), @@ -747,7 +794,8 @@ export class OceanP2P extends EventEmitter { async sendTo( peerName: string, message: string, - multiAddrs?: string[] + multiAddrs?: string[], + requestBody?: P2PRequestBodyStream ): Promise<{ status: any; stream?: AsyncIterable }> { const options = { signal: AbortSignal.timeout(10_000), @@ -775,7 +823,7 @@ export class OceanP2P extends EventEmitter { const multiaddrs = multiAddrs?.length ? 
multiAddrs.map((addr) => multiaddr(addr)) - : await this.getPeerMultiaddrs(peerName) + : (await this.getPeerMultiaddrs(peerName)) || [] if (multiaddrs.length < 1) { const error = `Cannot find any address to dial for peer: ${peerId}` @@ -799,7 +847,7 @@ export class OceanP2P extends EventEmitter { let streamErr: Error | null = null try { - return await this.send(lpStream(stream), message, options) + return await this.send(lpStream(stream), message, options, requestBody) } catch (err) { try { stream.abort(err as Error) @@ -823,7 +871,7 @@ export class OceanP2P extends EventEmitter { stream = await connection.newStream(this._protocol, options) try { - return await this.send(lpStream(stream), message, options) + return await this.send(lpStream(stream), message, options, requestBody) } catch (retryErr) { try { stream.abort(retryErr as Error) @@ -964,6 +1012,7 @@ export class OceanP2P extends EventEmitter { // on timeout the query ends with an abort signal => CodeError: Query aborted // eslint-disable-next-line @typescript-eslint/no-explicit-any } as any) + for await (const value of f) { peersFound.push(value) } diff --git a/src/components/c2d/compute_engine_base.ts b/src/components/c2d/compute_engine_base.ts index 2423090a9..96708ee45 100644 --- a/src/components/c2d/compute_engine_base.ts +++ b/src/components/c2d/compute_engine_base.ts @@ -248,9 +248,65 @@ export abstract class C2DEngine { properResources.push({ id: device, amount: desired }) } + this.checkResourceConstraints(properResources, env, isFree) return properResources } + protected checkResourceConstraints( + resources: ComputeResourceRequest[], + env: ComputeEnvironment, + isFree: boolean + ): void { + const envResources = isFree ? (env.free?.resources ?? []) : (env.resources ?? []) + for (const envResource of envResources) { + if (!envResource.constraints || envResource.constraints.length === 0) continue + const parentAmount = this.getResourceRequest(resources, envResource.id) + if (!parentAmount || parentAmount <= 0) continue + + for (const constraint of envResource.constraints) { + let constrainedAmount = this.getResourceRequest(resources, constraint.id) ?? 0 + + if (constraint.min !== undefined) { + const requiredMin = parentAmount * constraint.min + if (constrainedAmount < requiredMin) { + const constrainedMaxMin = this.getMaxMinResource(constraint.id, env, isFree) + if (requiredMin > constrainedMaxMin.max) { + throw new Error( + `Cannot satisfy constraint: ${parentAmount} ${envResource.id} requires at least ${requiredMin} ${constraint.id}, but max is ${constrainedMaxMin.max}` + ) + } + this.setResourceAmount(resources, constraint.id, requiredMin) + constrainedAmount = requiredMin + } + } + + if (constraint.max !== undefined) { + const requiredMax = parentAmount * constraint.max + // re-read in case it was bumped above + constrainedAmount = this.getResourceRequest(resources, constraint.id) ?? 0 + if (constrainedAmount > requiredMax) { + throw new Error( + `Too much ${constraint.id} for ${parentAmount} ${envResource.id}. 
Max allowed: ${requiredMax}, requested: ${constrainedAmount}` + ) + } + } + } + } + } + + protected setResourceAmount( + resources: ComputeResourceRequest[], + id: ComputeResourceType, + amount: number + ): void { + for (const resource of resources) { + if (resource.id === id) { + resource.amount = amount + return + } + } + } + public async getUsedResources(env: ComputeEnvironment): Promise { const usedResources: { [x: string]: any } = {} const usedFreeResources: { [x: string]: any } = {} @@ -260,6 +316,9 @@ export abstract class C2DEngine { } catch (e) { CORE_LOGGER.error('Failed to get running jobs:' + e.message) } + + const envResourceMap = new Map((env.resources || []).map((r) => [r.id, r])) + let totalJobs = 0 let totalFreeJobs = 0 let queuedJobs = 0 @@ -268,19 +327,40 @@ export abstract class C2DEngine { let maxWaitTimeFree = 0 let maxRunningTime = 0 let maxRunningTimeFree = 0 + for (const job of jobs) { - if (job.environment === env.id) { - if (job.queueMaxWaitTime === 0) { - const timeElapsed = - new Date().getTime() / 1000 - Number.parseFloat(job?.algoStartTimestamp) + const isThisEnv = job.environment === env.id + const isRunning = job.queueMaxWaitTime === 0 + + if (isThisEnv) { + if (isRunning) { + const timeElapsed = job.buildStartTimestamp + ? new Date().getTime() / 1000 - Number.parseFloat(job?.buildStartTimestamp) + : new Date().getTime() / 1000 - Number.parseFloat(job?.algoStartTimestamp) totalJobs++ maxRunningTime += job.maxJobDuration - timeElapsed if (job.isFree) { totalFreeJobs++ maxRunningTimeFree += job.maxJobDuration - timeElapsed } + } else { + queuedJobs++ + maxWaitTime += job.maxJobDuration + if (job.isFree) { + queuedFreeJobs++ + maxWaitTimeFree += job.maxJobDuration + } + } + } - for (const resource of job.resources) { + if (isRunning) { + for (const resource of job.resources) { + const envRes = envResourceMap.get(resource.id) + if (envRes) { + // GPUs are shared-exclusive: inUse tracked globally across all envs + // Everything else (cpu, ram, disk) is per-env exclusive + const isSharedExclusive = envRes.type === 'gpu' + if (!isSharedExclusive && !isThisEnv) continue if (!(resource.id in usedResources)) usedResources[resource.id] = 0 usedResources[resource.id] += resource.amount if (job.isFree) { @@ -288,14 +368,6 @@ export abstract class C2DEngine { usedFreeResources[resource.id] += resource.amount } } - } else { - // queued job - queuedJobs++ - maxWaitTime += job.maxJobDuration - if (job.isFree) { - queuedFreeJobs++ - maxWaitTimeFree += job.maxJobDuration - } } } } @@ -313,12 +385,41 @@ export abstract class C2DEngine { } } + protected physicalLimits: Map = new Map() + + private checkGlobalResourceAvailability( + allEnvironments: ComputeEnvironment[], + resourceId: string, + amount: number + ) { + let globalUsed = 0 + let globalTotal = 0 + for (const e of allEnvironments) { + const res = this.getResource(e.resources, resourceId) + if (res) { + globalTotal += res.total || 0 + globalUsed += res.inUse || 0 + } + } + const physicalLimit = this.physicalLimits.get(resourceId) + if (physicalLimit !== undefined && globalTotal > physicalLimit) { + globalTotal = physicalLimit + } + const globalRemainder = globalTotal - globalUsed + if (globalRemainder < amount) { + throw new Error( + `Not enough available ${resourceId} globally (remaining: ${globalRemainder}, requested: ${amount})` + ) + } + } + // overridden by each engine if required // eslint-disable-next-line require-await public async checkIfResourcesAreAvailable( resourcesRequest: ComputeResourceRequest[], env: 
ComputeEnvironment, - isFree: boolean + isFree: boolean, + allEnvironments?: ComputeEnvironment[] ) { // Filter out resources with amount 0 as they're not actually being requested const activeResources = resourcesRequest.filter((r) => r.amount > 0) @@ -328,6 +429,13 @@ export abstract class C2DEngine { if (!envResource) throw new Error(`No such resource ${request.id}`) if (envResource.total - envResource.inUse < request.amount) throw new Error(`Not enough available ${request.id}`) + + // Global check for non-GPU resources (cpu, ram, disk are per-env exclusive) + // GPUs are shared-exclusive so their inUse already reflects global usage + if (allEnvironments && envResource.type !== 'gpu') { + this.checkGlobalResourceAvailability(allEnvironments, request.id, request.amount) + } + if (isFree) { if (!env.free) throw new Error(`No free resources`) envResource = this.getResource(env.free?.resources, request.id) diff --git a/src/components/c2d/compute_engine_docker.ts b/src/components/c2d/compute_engine_docker.ts index 850161eeb..4cc72a697 100755 --- a/src/components/c2d/compute_engine_docker.ts +++ b/src/components/c2d/compute_engine_docker.ts @@ -1,6 +1,7 @@ /* eslint-disable security/detect-non-literal-fs-filename */ -import { Readable } from 'stream' +import { Readable, PassThrough } from 'stream' import os from 'os' +import path from 'path' import { C2DStatusNumber, C2DStatusText, @@ -19,9 +20,17 @@ import type { RunningPlatform, ComputeEnvFeesStructure, ComputeResourceRequest, - ComputeEnvFees + ComputeEnvFees, + ComputeResource, + C2DEnvironmentConfig, + ComputeResourcesPricingInfo } from '../../@types/C2D/C2D.js' -import { getConfiguration } from '../../utils/config.js' +import { + BENCHMARK_MONITORING_ADDRESS, + getConfiguration, + SEPOLIA_CHAIN_ID, + USDC_TOKEN +} from '../../utils/config.js' import { C2DEngine } from './compute_engine_base.js' import { C2DDatabase } from '../database/C2DDatabase.js' import { Escrow } from '../core/utils/escrow.js' @@ -35,10 +44,12 @@ import { createWriteStream, existsSync, mkdirSync, + chmodSync, rmSync, writeFileSync, appendFileSync, statSync, + statfsSync, createReadStream } from 'fs' import { pipeline } from 'node:stream/promises' @@ -55,6 +66,11 @@ import { dockerRegistrysAuth, dockerRegistryAuth } from '../../@types/OceanNode. 
import { EncryptMethod } from '../../@types/fileObject.js' import { ZeroAddress } from 'ethers' +const C2D_CONTAINER_UID = 1000 +const C2D_CONTAINER_GID = 1000 + +const trivyImage = 'aquasec/trivy:0.69.3' // Use pinned versions for safety + export class C2DEngineDocker extends C2DEngine { private envs: ComputeEnvironment[] = [] @@ -65,10 +81,18 @@ export class C2DEngineDocker extends C2DEngine { private isInternalLoopRunning: boolean = false private imageCleanupTimer: NodeJS.Timeout | null = null private paymentClaimTimer: NodeJS.Timeout | null = null + private scanDBUpdateTimer: NodeJS.Timeout | null = null private static DEFAULT_DOCKER_REGISTRY = 'https://registry-1.docker.io' private retentionDays: number private cleanupInterval: number private paymentClaimInterval: number + private scanImages: boolean + private scanImageDBUpdateInterval: number + private trivyCachePath: string + private cpuAllocations: Map = new Map() + private envCpuCoresMap: Map = new Map() + private enableNetwork: boolean + public constructor( clusterConfig: C2DClusterInfo, db: C2DDatabase, @@ -87,8 +111,11 @@ export class C2DEngineDocker extends C2DEngine { } } this.retentionDays = clusterConfig.connection.imageRetentionDays || 7 - this.cleanupInterval = clusterConfig.connection.imageCleanupInterval || 86400 // 24 hours + this.cleanupInterval = clusterConfig.connection.imageCleanupInterval this.paymentClaimInterval = clusterConfig.connection.paymentClaimInterval || 3600 // 1 hour + this.scanImages = clusterConfig.connection.scanImages || false // default is not to scan images for now, until it's prod ready + this.scanImageDBUpdateInterval = clusterConfig.connection.scanImageDBUpdateInterval + this.enableNetwork = clusterConfig.connection.enableNetwork ?? false if ( clusterConfig.connection.protocol && clusterConfig.connection.host && @@ -104,22 +131,127 @@ export class C2DEngineDocker extends C2DEngine { CORE_LOGGER.error('Could not create Docker container: ' + e.message) } } - // TO DO C2D - create envs + // trivy cache is the same for all engines + this.trivyCachePath = path.join( + process.cwd(), + this.getC2DConfig().tempFolder, + 'trivy_cache' + ) try { - if (!existsSync(clusterConfig.tempFolder)) - mkdirSync(clusterConfig.tempFolder, { recursive: true }) + if (!existsSync(this.getStoragePath())) + mkdirSync(this.getStoragePath(), { recursive: true }) + if (!existsSync(this.trivyCachePath)) + mkdirSync(this.trivyCachePath, { recursive: true }) } catch (e) { CORE_LOGGER.error( 'Could not create Docker container temporary folders: ' + e.message ) } + // envs are build on start function } + private processFeesForEnvironment( + rawFees: ComputeEnvFeesStructure | undefined, + supportedChains: number[] + ): ComputeEnvFeesStructure | null { + if (!rawFees || Object.keys(rawFees).length === 0) return null + let fees: ComputeEnvFeesStructure = null + for (const feeChain of Object.keys(rawFees)) { + if (!supportedChains.includes(parseInt(feeChain))) continue + if (fees === null) fees = {} + if (!(feeChain in fees)) fees[feeChain] = [] + const tmpFees: ComputeEnvFees[] = [] + for (const feeEntry of rawFees[feeChain]) { + if (!feeEntry.prices || feeEntry.prices.length === 0) { + CORE_LOGGER.error( + `Unable to find prices for fee ${JSON.stringify(feeEntry)} on chain ${feeChain}` + ) + continue + } + if (!feeEntry.feeToken) { + const tokenAddress = getOceanTokenAddressForChain(parseInt(feeChain)) + if (tokenAddress) { + feeEntry.feeToken = tokenAddress + tmpFees.push(feeEntry) + } else { + CORE_LOGGER.error( + `Unable to 
find Ocean token address for chain ${feeChain} and no custom token provided` + ) + } + } else { + tmpFees.push(feeEntry) + } + } + fees[feeChain] = tmpFees + } + return fees + } + + public getStoragePath(): string { + return this.getC2DConfig().tempFolder + this.getC2DConfig().hash + } + + private createBenchmarkEnvironment(sysinfo: any, envConfig: any): void { + const ramGB = this.physicalLimits.get('ram') || 0 + const physicalDiskGB = this.physicalLimits.get('disk') || 0 + + const gpuMap = new Map() + for (const env of envConfig.environments) { + if (env.resources) { + for (const res of env.resources) { + if (res.id !== 'cpu' && res.id !== 'ram' && res.id !== 'disk') { + if (!gpuMap.has(res.id)) { + gpuMap.set(res.id, res) + } + } + } + } + } + const gpuResources: ComputeResource[] = Array.from(gpuMap.values()) + + const benchmarkPrices: ComputeResourcesPricingInfo[] = gpuResources.map((gpu) => ({ + id: gpu.id, + price: 1 + })) + + const sepoliaChainId = SEPOLIA_CHAIN_ID + const usdcToken = USDC_TOKEN + + const benchmarkFees: ComputeEnvFeesStructure = { + [sepoliaChainId]: [{ feeToken: usdcToken, prices: benchmarkPrices }] + } + + const benchmarkEnv: C2DEnvironmentConfig = { + description: 'Auto-generated benchmark environment', + storageExpiry: 604800, + maxJobDuration: 180, + minJobDuration: 60, + resources: [ + { id: 'cpu', total: sysinfo.NCPU, min: 1, max: sysinfo.NCPU }, + { id: 'ram', total: ramGB, min: 1, max: ramGB }, + { id: 'disk', total: physicalDiskGB, min: 0, max: physicalDiskGB }, + ...gpuResources + ], + access: { + addresses: [BENCHMARK_MONITORING_ADDRESS], + accessLists: null + }, + fees: benchmarkFees + } + + envConfig.environments.push(benchmarkEnv) + } + public override async start() { - // let's build the env. Swarm and k8 will build multiple envs, based on arhitecture const config = await getConfiguration() const envConfig = await this.getC2DConfig().connection + if (!envConfig?.environments?.length) { + CORE_LOGGER.warn( + `Skipping C2D engine ${this.getC2DConfig().hash}: no environments configured` + ) + return + } let sysinfo = null try { sysinfo = await this.docker.info() @@ -128,187 +260,257 @@ export class C2DEngineDocker extends C2DEngine { // since we cannot connect to docker, we cannot start the engine -> no envs return } - let fees: ComputeEnvFeesStructure = null + + this.physicalLimits.set('cpu', sysinfo.NCPU) + this.physicalLimits.set('ram', Math.floor(sysinfo.MemTotal / 1024 / 1024 / 1024)) + try { + const diskStats = statfsSync(this.getC2DConfig().tempFolder) + const diskGB = Math.floor((diskStats.bsize * diskStats.blocks) / 1024 / 1024 / 1024) + this.physicalLimits.set('disk', diskGB) + } catch (e) { + CORE_LOGGER.warn('Could not detect physical disk size: ' + e.message) + } + + // Determine supported chains const supportedChains: number[] = [] if (config.supportedNetworks) { for (const chain of Object.keys(config.supportedNetworks)) { supportedChains.push(parseInt(chain)) } } - if (envConfig.fees && Object.keys(envConfig.fees).length > 0) { - for (const feeChain of Object.keys(envConfig.fees)) { - // for (const feeConfig of envConfig.fees) { - if (supportedChains.includes(parseInt(feeChain))) { - if (fees === null) fees = {} - if (!(feeChain in fees)) fees[feeChain] = [] - const tmpFees: ComputeEnvFees[] = [] - for (let i = 0; i < envConfig.fees[feeChain].length; i++) { - if ( - envConfig.fees[feeChain][i].prices && - envConfig.fees[feeChain][i].prices.length > 0 - ) { - if (!envConfig.fees[feeChain][i].feeToken) { - const tokenAddress = 
getOceanTokenAddressForChain(parseInt(feeChain)) - if (tokenAddress) { - envConfig.fees[feeChain][i].feeToken = tokenAddress - tmpFees.push(envConfig.fees[feeChain][i]) - } else { - CORE_LOGGER.error( - `Unable to find Ocean token address for chain ${feeChain} and no custom token provided` - ) - } - } else { - tmpFees.push(envConfig.fees[feeChain][i]) - } - } else { - CORE_LOGGER.error( - `Unable to find prices for fee ${JSON.stringify( - envConfig.fees[feeChain][i] - )} on chain ${feeChain}` - ) - } - } - fees[feeChain] = tmpFees - } - } - /* for (const chain of Object.keys(config.supportedNetworks)) { - const chainId = parseInt(chain) - if (task.chainId && task.chainId !== chainId) continue - result[chainId] = await computeEngines.fetchEnvironments(chainId) - } */ + const platform: RunningPlatform = { + architecture: sysinfo.Architecture, + os: sysinfo.OSType } - this.envs.push({ - id: '', // this.getC2DConfig().hash + '-' + create256Hash(JSON.stringify(this.envs[i])), - runningJobs: 0, - consumerAddress: this.getKeyManager().getEthAddress(), - platform: { - architecture: sysinfo.Architecture, - os: sysinfo.OSType - }, - access: { - addresses: [], - accessLists: null - }, - fees, - queuedJobs: 0, - queuedFreeJobs: 0, - queMaxWaitTime: 0, - queMaxWaitTimeFree: 0, - runMaxWaitTime: 0, - runMaxWaitTimeFree: 0 - }) - if (`access` in envConfig) this.envs[0].access = envConfig.access - - if (`storageExpiry` in envConfig) this.envs[0].storageExpiry = envConfig.storageExpiry - if (`minJobDuration` in envConfig) - this.envs[0].minJobDuration = envConfig.minJobDuration - if (`maxJobDuration` in envConfig) - this.envs[0].maxJobDuration = envConfig.maxJobDuration - if (`maxJobs` in envConfig) this.envs[0].maxJobs = envConfig.maxJobs - // let's add resources - this.envs[0].resources = [] - const cpuResources = { - id: 'cpu', - type: 'cpu', - total: sysinfo.NCPU, - max: sysinfo.NCPU, - min: 1, - description: os.cpus()[0].model - } - const ramResources = { - id: 'ram', - type: 'ram', - total: Math.floor(sysinfo.MemTotal / 1024 / 1024 / 1024), - max: Math.floor(sysinfo.MemTotal / 1024 / 1024 / 1024), - min: 1 - } - - if (envConfig.resources) { - for (const res of envConfig.resources) { - // allow user to add other resources - if (res.id === 'cpu') { - if (res.total) cpuResources.total = res.total - if (res.max) cpuResources.max = res.max - if (res.min) cpuResources.min = res.min - } - if (res.id === 'ram') { - if (res.total) ramResources.total = res.total - if (res.max) ramResources.max = res.max - if (res.min) ramResources.min = res.min - } - - if (res.id !== 'cpu' && res.id !== 'ram') { - if (!res.max) res.max = res.total - if (!res.min) res.min = 0 - this.envs[0].resources.push(res) - } - } - } - this.envs[0].resources.push(cpuResources) - this.envs[0].resources.push(ramResources) - /* TODO - get namedresources & discreete one - if (sysinfo.GenericResources) { - for (const [key, value] of Object.entries(sysinfo.GenericResources)) { - for (const [type, val] of Object.entries(value)) { - // for (const resType in sysinfo.GenericResources) { - if (type === 'NamedResourceSpec') { - // if we have it, ignore it - const resourceId = val.Value - const resourceType = val.Kind - let found = false - for (const res of this.envs[0].resources) { - if (res.id === resourceId) { - found = true - break - } - } - if (!found) { - this.envs[0].resources.push({ - id: resourceId, - kind: resourceType, - total: 1, - max: 1, - min: 0 - }) - } + const consumerAddress = this.getKeyManager().getEthAddress() + + if 
(config.enableBenchmark) { + this.createBenchmarkEnvironment(sysinfo, envConfig) + } + + for (let envIdx = 0; envIdx < envConfig.environments.length; envIdx++) { + const envDef: C2DEnvironmentConfig = envConfig.environments[envIdx] + + const fees = this.processFeesForEnvironment(envDef.fees, supportedChains) + + const envResources: ComputeResource[] = [] + const cpuResources = { + id: 'cpu', + type: 'cpu', + total: sysinfo.NCPU, + max: sysinfo.NCPU, + min: 1, + description: os.cpus()[0].model + } + const ramResources = { + id: 'ram', + type: 'ram', + total: Math.floor(sysinfo.MemTotal / 1024 / 1024 / 1024), + max: Math.floor(sysinfo.MemTotal / 1024 / 1024 / 1024), + min: 1 + } + const physicalDiskGB = this.physicalLimits.get('disk') || 0 + const diskResources = { + id: 'disk', + type: 'disk', + total: physicalDiskGB, + max: physicalDiskGB, + min: 0 + } + + if (envDef.resources) { + for (const res of envDef.resources) { + // allow user to add other resources + if (res.id === 'cpu') { + if (res.total) cpuResources.total = res.total + if (res.max) cpuResources.max = res.max + if (res.min) cpuResources.min = res.min + } + if (res.id === 'ram') { + if (res.total) ramResources.total = res.total + if (res.max) ramResources.max = res.max + if (res.min) ramResources.min = res.min + } + if (res.id === 'disk') { + if (res.total) diskResources.total = res.total + if (res.max) diskResources.max = res.max + if (res.min !== undefined) diskResources.min = res.min + } + + if (res.id !== 'cpu' && res.id !== 'ram' && res.id !== 'disk') { + if (!res.max) res.max = res.total + if (!res.min) res.min = 0 + envResources.push(res) } } } + envResources.push(cpuResources) + envResources.push(ramResources) + envResources.push(diskResources) + + const env: ComputeEnvironment = { + id: '', + runningJobs: 0, + consumerAddress, + platform, + access: envDef.access || { addresses: [], accessLists: null }, + fees, + resources: envResources, + queuedJobs: 0, + queuedFreeJobs: 0, + queMaxWaitTime: 0, + queMaxWaitTimeFree: 0, + runMaxWaitTime: 0, + runMaxWaitTimeFree: 0 + } + + if (envDef.storageExpiry !== undefined) env.storageExpiry = envDef.storageExpiry + if (envDef.minJobDuration !== undefined) env.minJobDuration = envDef.minJobDuration + if (envDef.maxJobDuration !== undefined) env.maxJobDuration = envDef.maxJobDuration + if (envDef.maxJobs !== undefined) env.maxJobs = envDef.maxJobs + if (envDef.description !== undefined) env.description = envDef.description + + // Free tier config for this environment + if (envDef.free) { + env.free = { + access: envDef.free.access || { addresses: [], accessLists: null } + } + if (envDef.free.storageExpiry !== undefined) + env.free.storageExpiry = envDef.free.storageExpiry + if (envDef.free.minJobDuration !== undefined) + env.free.minJobDuration = envDef.free.minJobDuration + if (envDef.free.maxJobDuration !== undefined) + env.free.maxJobDuration = envDef.free.maxJobDuration + if (envDef.free.maxJobs !== undefined) env.free.maxJobs = envDef.free.maxJobs + if (envDef.free.resources) env.free.resources = envDef.free.resources + } + + const envIdSuffix = envDef.id || String(envIdx) + env.id = + this.getC2DConfig().hash + + '-' + + create256Hash(JSON.stringify(env.fees) + envIdSuffix) + + this.envs.push(env) + CORE_LOGGER.info( + `Engine ${this.getC2DConfig().hash}: created environment ${env.id} (index=${envIdx}, resources=${envResources.map((r) => r.id).join(',')})` + ) } - */ - // limits for free env - if ('free' in envConfig) { - this.envs[0].free = { - access: { - addresses: [], - 
accessLists: null + + const physicalCpuCount = this.physicalLimits.get('cpu') || 0 + let cpuOffset = 0 + for (const env of this.envs) { + const cpuRes = this.getResource(env.resources ?? [], 'cpu') + if (cpuRes && cpuRes.total > 0) { + const isBenchmarkEnv = env.access?.addresses?.includes( + BENCHMARK_MONITORING_ADDRESS + ) + if (isBenchmarkEnv) { + const total = physicalCpuCount > 0 ? physicalCpuCount : cpuRes.total + const cores = Array.from({ length: total }, (_, i) => i) + this.envCpuCoresMap.set(env.id, cores) + CORE_LOGGER.info( + `CPU affinity: benchmark environment ${env.id} cores 0-${cores[cores.length - 1]}` + ) + } else { + const cores = Array.from({ length: cpuRes.total }, (_, i) => cpuOffset + i) + this.envCpuCoresMap.set(env.id, cores) + CORE_LOGGER.info( + `CPU affinity: environment ${env.id} cores ${cores[0]}-${cores[cores.length - 1]}` + ) + cpuOffset += cpuRes.total } } - } - if (`access` in envConfig.free) this.envs[0].free.access = envConfig.free.access - if (`storageExpiry` in envConfig.free) - this.envs[0].free.storageExpiry = envConfig.free.storageExpiry - if (`minJobDuration` in envConfig.free) - this.envs[0].free.minJobDuration = envConfig.free.minJobDuration - if (`maxJobDuration` in envConfig.free) - this.envs[0].free.maxJobDuration = envConfig.free.maxJobDuration - if (`maxJobs` in envConfig.free) this.envs[0].free.maxJobs = envConfig.free.maxJobs - if ('resources' in envConfig.free) { - // TO DO - check if resource is also listed in this.envs[0].resources, if not, ignore it - this.envs[0].free.resources = envConfig.free.resources - } } - this.envs[0].id = - this.getC2DConfig().hash + '-' + create256Hash(JSON.stringify(this.envs[0].fees)) + + // Rebuild CPU allocations from running containers (handles node restart) + await this.rebuildCpuAllocations() // only now set the timer if (!this.cronTimer) { this.setNewTimer() } + this.startCrons() + } + + public startCrons() { + if (!this.docker) { + CORE_LOGGER.debug('Docker not available, skipping crons') + return + } + // Start image cleanup timer - this.startImageCleanupTimer() - // Start claim timer - this.startPaymentTimer() + if (this.cleanupInterval) { + if (this.imageCleanupTimer) { + return // Already running + } + // Run initial cleanup after a short delay + setTimeout(() => { + this.cleanupOldImages().catch((e) => { + CORE_LOGGER.error(`Initial image cleanup failed: ${e.message}`) + }) + }, 60000) // Wait 1 minute after start + + // Set up periodic cleanup + this.imageCleanupTimer = setInterval(() => { + this.cleanupOldImages().catch((e) => { + CORE_LOGGER.error(`Periodic image cleanup failed: ${e.message}`) + }) + }, this.cleanupInterval * 1000) + + CORE_LOGGER.info( + `Image cleanup timer started (interval: ${this.cleanupInterval / 60} minutes)` + ) + } + // start payments claim cron + if (this.paymentClaimInterval) { + if (this.paymentClaimTimer) { + return // Already running + } + + // Run initial payments claim after a short delay + setTimeout(() => { + this.claimPayments().catch((e) => { + CORE_LOGGER.error(`Initial payments claim failed: ${e.message}`) + }) + }, 60000) // Wait 1 minute after start + + // Set up periodic payments claim + this.paymentClaimTimer = setInterval(() => { + this.claimPayments().catch((e) => { + CORE_LOGGER.error(`Periodic payments claim failed: ${e.message}`) + }) + }, this.paymentClaimInterval * 1000) + + CORE_LOGGER.info( + `Payments claim timer started (interval: ${this.paymentClaimInterval / 60} minutes)` + ) + } + // scan DB updater cron + if (this.scanImageDBUpdateInterval) { + if 
(this.scanDBUpdateTimer) { + return // Already running + } + + // Run initial scan DB update after a short delay + setTimeout(() => { + this.scanDBUpdate().catch((e) => { + CORE_LOGGER.error(`Initial scan DB update failed: ${e.message}`) + }) + }, 30000) // Wait 30 seconds + + // Set up periodic scan DB update + this.scanDBUpdateTimer = setInterval(() => { + this.scanDBUpdate().catch((e) => { + CORE_LOGGER.error(`Periodic scan DB update failed: ${e.message}`) + }) + }, this.scanImageDBUpdateInterval * 1000) + + CORE_LOGGER.info( + `Scan DB update timer started (interval: ${this.scanImageDBUpdateInterval / 60} minutes)` + ) + } } public override stop(): Promise<void> { @@ -344,7 +546,6 @@ export class C2DEngineDocker extends C2DEngine { const currentTimestamp = BigInt(Math.floor(Date.now() / 1000)) const envs: string[] = [] const envsChains: string[] = [] - // Group jobs by operation type and chain for batch processing const jobsToClaim: Array<{ job: DBComputeJob @@ -399,11 +600,11 @@ export class C2DEngineDocker extends C2DEngine { } // Process each job to determine what operation is needed + let duration for (const job of jobs) { // Calculate algo duration - const algoDuration = - parseFloat(job.algoStopTimestamp) - parseFloat(job.algoStartTimestamp) - job.algoDuration = algoDuration + duration = parseFloat(job.algoStopTimestamp) - parseFloat(job.algoStartTimestamp) + duration += this.getValidBuildDurationSeconds(job) // Free jobs or jobs without payment info - mark as finished if (job.isFree || !job.payment) { @@ -413,7 +614,7 @@ export class C2DEngineDocker extends C2DEngine { // Find matching lock const lock = locks.find( - (lock) => BigInt(lock.jobId.toString()) === BigInt(create256Hash(job.jobId)) + (lock) => BigInt(lock.jobId.toString()) === BigInt(job.jobIdHash) ) if (!lock) { @@ -440,7 +641,7 @@ export class C2DEngineDocker extends C2DEngine { continue } - let minDuration = Math.abs(algoDuration) + let minDuration = Math.abs(duration) if (minDuration > job.maxJobDuration) { minDuration = job.maxJobDuration } @@ -509,8 +710,10 @@ export class C2DEngineDocker extends C2DEngine { if (txId) { // Update all jobs with the transaction ID for (const claim of claims) { - claim.job.payment!.claimTx = txId - claim.job.payment!.cost = claim.cost + if (claim.job.payment) { + claim.job.payment.claimTx = txId + claim.job.payment.cost = claim.cost + } claim.job.status = C2DStatusNumber.JobFinished claim.job.statusText = C2DStatusText.JobFinished await this.db.updateJob(claim.job) @@ -535,8 +738,10 @@ export class C2DEngineDocker extends C2DEngine { claim.proof ) if (txId) { - claim.job.payment!.claimTx = txId - claim.job.payment!.cost = claim.cost + if (claim.job.payment) { + claim.job.payment.claimTx = txId + claim.job.payment.cost = claim.cost + } claim.job.status = C2DStatusNumber.JobFinished claim.job.statusText = C2DStatusText.JobFinished await this.db.updateJob(claim.job) @@ -579,6 +784,7 @@ export class C2DEngineDocker extends C2DEngine { if (txId) { // Update all jobs for (const job of jobsToCancelBatch) { + if (job.payment) job.payment.cancelTx = txId job.status = C2DStatusNumber.JobFinished job.statusText = C2DStatusText.JobFinished await this.db.updateJob(job) @@ -601,6 +807,7 @@ export class C2DEngineDocker extends C2DEngine { job.owner ) if (txId) { + if (job.payment) job.payment.cancelTx = txId job.status = C2DStatusNumber.JobFinished job.statusText = C2DStatusText.JobFinished await this.db.updateJob(job) @@ -618,6 +825,10 @@ export class C2DEngineDocker extends C2DEngine { for (const job of jobsWithoutLock) { job.status 
= C2DStatusNumber.JobFinished job.statusText = C2DStatusText.JobFinished + if (job.payment) { + job.payment.cancelTx = 'nolock' + job.payment.claimTx = 'nolock' + } await this.db.updateJob(job) } } @@ -631,7 +842,7 @@ export class C2DEngineDocker extends C2DEngine { private async cleanUpUnknownLocks(chain: string, currentTimestamp: bigint) { try { - const nodeAddress = await this.getKeyManager().getEthAddress() + const nodeAddress = this.getKeyManager().getEthAddress() const jobIds: any[] = [] const tokens: string[] = [] const payer: string[] = [] @@ -642,6 +853,10 @@ export class C2DEngineDocker extends C2DEngine { '0x0000000000000000000000000000000000000000', nodeAddress ) + if (!balocks || balocks.length === 0) { + CORE_LOGGER.warn(`Could not find any locks for chain ${chain}, skipping cleanup`) + return + } for (const lock of balocks) { const lockExpiry = BigInt(lock.expiry.toString()) if (currentTimestamp > lockExpiry) { @@ -706,59 +921,6 @@ export class C2DEngineDocker extends C2DEngine { } } - private startImageCleanupTimer(): void { - if (this.imageCleanupTimer) { - return // Already running - } - - if (!this.docker) { - CORE_LOGGER.debug('Docker not available, skipping image cleanup timer') - return - } - - // Run initial cleanup after a short delay - setTimeout(() => { - this.cleanupOldImages().catch((e) => { - CORE_LOGGER.error(`Initial image cleanup failed: ${e.message}`) - }) - }, 60000) // Wait 1 minute after start - - // Set up periodic cleanup - this.imageCleanupTimer = setInterval(() => { - this.cleanupOldImages().catch((e) => { - CORE_LOGGER.error(`Periodic image cleanup failed: ${e.message}`) - }) - }, this.cleanupInterval * 1000) - - CORE_LOGGER.info( - `Image cleanup timer started (interval: ${this.cleanupInterval / 60} minutes)` - ) - } - - private startPaymentTimer(): void { - if (this.paymentClaimTimer) { - return // Already running - } - - // Run initial cleanup after a short delay - setTimeout(() => { - this.claimPayments().catch((e) => { - CORE_LOGGER.error(`Initial payments claim failed: ${e.message}`) - }) - }, 60000) // Wait 1 minute after start - - // Set up periodic cleanup - this.paymentClaimTimer = setInterval(() => { - this.claimPayments().catch((e) => { - CORE_LOGGER.error(`Periodic payments claim failed: ${e.message}`) - }) - }, this.paymentClaimInterval * 1000) - - CORE_LOGGER.info( - `Payments claim timer started (interval: ${this.paymentClaimInterval / 60} minutes)` - ) - } - // eslint-disable-next-line require-await public override async getComputeEnvironments( chainId?: number @@ -1082,12 +1244,20 @@ export class C2DEngineDocker extends C2DEngine { throw new Error(`additionalDockerFiles cannot be used with queued jobs`) } } + if ( + algorithm.meta.container && + algorithm.meta.container.dockerfile && + !env.free.allowImageBuild + ) { + throw new Error(`Building image is not allowed for free jobs`) + } const job: DBComputeJob = { clusterHash: this.getC2DConfig().hash, containerImage: image, owner, jobId, + jobIdHash: create256Hash(jobId), dateCreated: String(Date.now() / 1000), dateFinished: null, status: @@ -1121,7 +1291,9 @@ export class C2DEngineDocker extends C2DEngine { algoDuration: 0, queueMaxWaitTime: queueMaxWaitTime || 0, encryptedDockerRegistryAuth, // we store the encrypted docker registry auth in the job - output + output, + buildStartTimestamp: '0', + buildStopTimestamp: '0' } if (algorithm.meta.container && algorithm.meta.container.dockerfile) { @@ -1200,7 +1372,7 @@ export class C2DEngineDocker extends C2DEngine { let index = 0 try { 
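// The statSync probes below assume one fixed per-job layout under getStoragePath(): <root>/<jobId>/data/{logs,outputs}/... A minimal path-builder sketch (hypothetical; `jobDataPath` is an illustrative name, not part of this diff): const jobDataPath = (root: string, jobId: string, rel: string): string => `${root}/${jobId}/data/${rel}` // e.g. jobDataPath(this.getStoragePath(), jobId, 'logs/image.log') or jobDataPath(this.getStoragePath(), jobId, 'outputs/outputs.tar')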
const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/image.log' + this.getStoragePath() + '/' + jobId + '/data/logs/image.log' ) if (logStat) { res.push({ @@ -1214,7 +1386,7 @@ export class C2DEngineDocker extends C2DEngine { } catch (e) {} try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/configuration.log' + this.getStoragePath() + '/' + jobId + '/data/logs/configuration.log' ) if (logStat) { res.push({ @@ -1228,7 +1400,7 @@ export class C2DEngineDocker extends C2DEngine { } catch (e) {} try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/algorithm.log' + this.getStoragePath() + '/' + jobId + '/data/logs/algorithm.log' ) if (logStat) { res.push({ @@ -1245,7 +1417,7 @@ export class C2DEngineDocker extends C2DEngine { const jobDb = await this.db.getJob(jobId) if (jobDb.length < 1 || !jobDb[0].output) { const outputStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/outputs/outputs.tar' + this.getStoragePath() + '/' + jobId + '/data/outputs/outputs.tar' ) if (outputStat) { res.push({ @@ -1260,7 +1432,7 @@ export class C2DEngineDocker extends C2DEngine { } catch (e) {} try { const logStat = statSync( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/publish.log' + this.getStoragePath() + '/' + jobId + '/data/logs/publish.log' ) if (logStat) { res.push({ @@ -1322,7 +1494,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'algorithmLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/algorithm.log' + this.getStoragePath() + '/' + jobId + '/data/logs/algorithm.log' ), headers: { 'Content-Type': 'text/plain' @@ -1332,10 +1504,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'configurationLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + - '/' + - jobId + - '/data/logs/configuration.log' + this.getStoragePath() + '/' + jobId + '/data/logs/configuration.log' ), headers: { 'Content-Type': 'text/plain' @@ -1345,7 +1514,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'publishLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/publish.log' + this.getStoragePath() + '/' + jobId + '/data/logs/publish.log' ), headers: { 'Content-Type': 'text/plain' @@ -1355,7 +1524,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'imageLog') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/logs/image.log' + this.getStoragePath() + '/' + jobId + '/data/logs/image.log' ), headers: { 'Content-Type': 'text/plain' @@ -1365,7 +1534,7 @@ export class C2DEngineDocker extends C2DEngine { if (i.type === 'output') { return { stream: createReadStream( - this.getC2DConfig().tempFolder + '/' + jobId + '/data/outputs/outputs.tar', + this.getStoragePath() + '/' + jobId + '/data/outputs/outputs.tar', offset > 0 ? 
{ start: offset } : undefined ), headers: { @@ -1385,7 +1554,7 @@ export class C2DEngineDocker extends C2DEngine { if (!jobRes[0].isRunning) return null try { const job = jobRes[0] - const container = await this.docker.getContainer(job.jobId + '-algoritm') + const container = this.docker.getContainer(job.jobId + '-algoritm') const details = await container.inspect() if (details.State.Running === false) return null return await container.logs({ @@ -1555,12 +1724,11 @@ export class C2DEngineDocker extends C2DEngine { } // check if resources are available now try { - const env = await this.getComputeEnvironment( - job.payment && job.payment.chainId ? job.payment.chainId : null, - job.environment, - null - ) - await this.checkIfResourcesAreAvailable(job.resources, env, job.isFree) + const chainId = job.payment && job.payment.chainId ? job.payment.chainId : null + const allEnvs = await this.getComputeEnvironments(chainId) + const env = allEnvs.find((e) => e.id === job.environment) + if (!env) throw new Error(`Environment ${job.environment} not found`) + await this.checkIfResourcesAreAvailable(job.resources, env, job.isFree, allEnvs) } catch (err) { // resources are still not available return @@ -1580,10 +1748,45 @@ export class C2DEngineDocker extends C2DEngine { } if (job.status === C2DStatusNumber.ConfiguringVolumes) { + // we have the image (either pulled or built) + // if built, check if the build process took all the allocated time + // if yes, stop the job + const buildDuration = this.getValidBuildDurationSeconds(job) + if (buildDuration > 0 && buildDuration >= job.maxJobDuration) { + job.isStarted = false + job.status = C2DStatusNumber.PublishingResults + job.statusText = C2DStatusText.PublishingResults + job.algoStartTimestamp = '0' + job.algoStopTimestamp = '0' + job.isRunning = false + await this.db.updateJob(job) + return + } + // now that we have the image ready, check it for vulnerabilities + if (this.getC2DConfig().connection?.scanImages) { + const check = await this.checkImageVulnerability(job.containerImage) + const imageLogFile = + this.getStoragePath() + '/' + job.jobId + '/data/logs/image.log' + const logText = + `Image scanned for vulnerabilities\nVulnerable:${check.vulnerable}\nSummary:` + + JSON.stringify(check.summary, null, 2) + CORE_LOGGER.debug(logText) + appendFileSync(imageLogFile, logText) + if (check.vulnerable) { + job.status = C2DStatusNumber.VulnerableImage + job.statusText = C2DStatusText.VulnerableImage + job.isRunning = false + job.dateFinished = String(Date.now() / 1000) + await this.db.updateJob(job) + await this.cleanupJob(job) + return + } + } // create the volume & create container // TO DO C2D: Choose driver & size - // get env info - const envResource = this.envs[0].resources + // get environment-specific resources for Docker device/hardware configuration + const env = this.envs.find((e) => e.id === job.environment) + const envResource = env?.resources || [] const volume: VolumeCreateOptions = { Name: job.jobId + '-volume' } @@ -1610,6 +1813,8 @@ export class C2DEngineDocker extends C2DEngine { // create the container const mountVols: any = { '/data': {} } const hostConfig: HostConfig = { + // limit the number of PIDs the container can spawn, to avoid flooding + PidsLimit: 512, Mounts: [ { Type: 'volume', @@ -1619,6 +1824,9 @@ export class C2DEngineDocker extends C2DEngine { } ] } + if (!this.enableNetwork) { + hostConfig.NetworkMode = 'none' // no network inside the container + } // disk // if (diskSize && diskSize > 0) { // hostConfig.StorageOpt = { @@ -1636,6 
+1844,11 @@ export class C2DEngineDocker extends C2DEngine { if (cpus && cpus > 0) { hostConfig.CpuPeriod = 100000 // 100 milliseconds is usually the default hostConfig.CpuQuota = Math.floor(cpus * hostConfig.CpuPeriod) + // Pin the container to specific physical CPU cores + const cpusetStr = this.allocateCpus(job.jobId, cpus, job.environment) + if (cpusetStr) { + hostConfig.CpusetCpus = cpusetStr + } } const containerInfo: ContainerCreateOptions = { name: job.jobId + '-algoritm', @@ -1643,9 +1856,10 @@ export class C2DEngineDocker extends C2DEngine { AttachStdin: false, AttachStdout: true, AttachStderr: true, - Tty: true, + Tty: false, OpenStdin: false, StdinOnce: false, + User: `${C2D_CONTAINER_UID}:${C2D_CONTAINER_GID}`, Volumes: mountVols, HostConfig: hostConfig } @@ -1660,12 +1874,16 @@ export class C2DEngineDocker extends C2DEngine { containerInfo.HostConfig.Devices = advancedConfig.Devices if (advancedConfig.GroupAdd) containerInfo.HostConfig.GroupAdd = advancedConfig.GroupAdd - if (advancedConfig.SecurityOpt) - containerInfo.HostConfig.SecurityOpt = advancedConfig.SecurityOpt + containerInfo.HostConfig.SecurityOpt = [ + 'no-new-privileges', + ...(advancedConfig.SecurityOpt ?? []) + ] if (advancedConfig.Binds) containerInfo.HostConfig.Binds = advancedConfig.Binds + containerInfo.HostConfig.CapDrop = ['ALL'] + for (const cap of advancedConfig.CapDrop ?? []) { + containerInfo.HostConfig.CapDrop.push(cap) + } if (advancedConfig.CapAdd) containerInfo.HostConfig.CapAdd = advancedConfig.CapAdd - if (advancedConfig.CapDrop) - containerInfo.HostConfig.CapDrop = advancedConfig.CapDrop if (advancedConfig.IpcMode) containerInfo.HostConfig.IpcMode = advancedConfig.IpcMode if (advancedConfig.ShmSize) @@ -1684,6 +1902,64 @@ export class C2DEngineDocker extends C2DEngine { } containerInfo.Env = envVars } + // Persistent storage: bind-mount bucket files into the job container (localfs backend) + for (const i in job.assets) { + const asset = job.assets[i] + if (!asset.fileObject || asset.fileObject.type !== 'nodePersistentStorage') { + continue + } + const fo = asset.fileObject as { bucketId?: string; fileName?: string } + if (!fo.bucketId || !fo.fileName) { + CORE_LOGGER.error( + `Job ${job.jobId} asset ${i}: nodePersistentStorage requires bucketId and fileName` + ) + job.status = C2DStatusNumber.DataProvisioningFailed + job.statusText = C2DStatusText.DataProvisioningFailed + job.isRunning = false + job.dateFinished = String(Date.now() / 1000) + await this.db.updateJob(job) + await this.cleanupJob(job) + return + } + const ps = OceanNode.getInstance().getPersistentStorage() + if (!ps) { + CORE_LOGGER.error( + `Job ${job.jobId} asset ${i}: persistent storage is not configured on this node` + ) + job.status = C2DStatusNumber.DataProvisioningFailed + job.statusText = C2DStatusText.DataProvisioningFailed + job.isRunning = false + job.dateFinished = String(Date.now() / 1000) + await this.db.updateJob(job) + await this.cleanupJob(job) + return + } + try { + const bindMount = await ps.getDockerMountObject( + fo.bucketId, + fo.fileName, + job.owner + ) + CORE_LOGGER.debug( + `Mounting bucket ${fo.bucketId} to folder ${bindMount.Target}` + ) + hostConfig.Mounts.push(bindMount) + mountVols[bindMount.Target] = {} + } catch (e) { + const errMsg = e instanceof Error ? 
e.message : String(e) + CORE_LOGGER.error( + `Job ${job.jobId} asset ${i}: failed to resolve persistent storage bind: ${errMsg}` + ) + job.status = C2DStatusNumber.DataProvisioningFailed + job.statusText = C2DStatusText.DataProvisioningFailed + job.isRunning = false + job.dateFinished = String(Date.now() / 1000) + await this.db.updateJob(job) + await this.cleanupJob(job) + return + } + } + const container = await this.createDockerContainer(containerInfo, true) if (container) { job.status = C2DStatusNumber.Provisioning @@ -1719,7 +1995,7 @@ export class C2DEngineDocker extends C2DEngine { let container let details try { - container = await this.docker.getContainer(job.jobId + '-algoritm') + container = this.docker.getContainer(job.jobId + '-algoritm') details = await container.inspect() } catch (e) { console.error( @@ -1753,10 +2029,7 @@ export class C2DEngineDocker extends C2DEngine { job.algoStopTimestamp = String(Date.now() / 1000) try { const algoLogFile = - this.getC2DConfig().tempFolder + - '/' + - job.jobId + - '/data/logs/algorithm.log' + this.getStoragePath() + '/' + job.jobId + '/data/logs/algorithm.log' writeFileSync(algoLogFile, String(e.message)) } catch (e) { CORE_LOGGER.error('Failed to write algorithm log file: ' + e.message) @@ -1780,7 +2053,13 @@ export class C2DEngineDocker extends C2DEngine { } const timeNow = Date.now() / 1000 - const expiry = parseFloat(job.algoStartTimestamp) + job.maxJobDuration + let expiry + + const buildDuration = this.getValidBuildDurationSeconds(job) + if (buildDuration > 0) { + // if job has build time, reduce the remaining algorithm runtime budget + expiry = parseFloat(job.algoStartTimestamp) + job.maxJobDuration - buildDuration + } else expiry = parseFloat(job.algoStartTimestamp) + job.maxJobDuration CORE_LOGGER.debug( 'container running since timeNow: ' + timeNow + ' , Expiry: ' + expiry ) @@ -1807,7 +2086,13 @@ export class C2DEngineDocker extends C2DEngine { job.isStarted = false job.status = C2DStatusNumber.PublishingResults job.statusText = C2DStatusText.PublishingResults - job.algoStopTimestamp = String(Date.now() / 1000) + const containerFinishedAt = + new Date(details.State.FinishedAt).getTime() / 1000 + job.algoStopTimestamp = String( + containerFinishedAt > parseFloat(job.algoStartTimestamp) + ? 
containerFinishedAt + : Date.now() / 1000 + ) job.isRunning = false await this.db.updateJob(job) return @@ -1821,14 +2106,14 @@ export class C2DEngineDocker extends C2DEngine { job.statusText = C2DStatusText.JobSettle let container try { - container = await this.docker.getContainer(job.jobId + '-algoritm') + container = this.docker.getContainer(job.jobId + '-algoritm') } catch (e) { CORE_LOGGER.debug('Could not retrieve container: ' + e.message) job.isRunning = false job.dateFinished = String(Date.now() / 1000) try { const algoLogFile = - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/algorithm.log' + this.getStoragePath() + '/' + job.jobId + '/data/logs/algorithm.log' writeFileSync(algoLogFile, String(e.message)) } catch (e) { CORE_LOGGER.error('Failed to write algorithm log file: ' + e.message) @@ -1846,7 +2131,7 @@ export class C2DEngineDocker extends C2DEngine { job.terminationDetails.exitCode = null } const outputsArchivePath = - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/outputs/outputs.tar' + this.getStoragePath() + '/' + job.jobId + '/data/outputs/outputs.tar' try { if (container) { @@ -1912,6 +2197,102 @@ export class C2DEngineDocker extends C2DEngine { } // eslint-disable-next-line require-await + private parseCpusetString(cpuset: string): number[] { + const cores: number[] = [] + if (!cpuset) return cores + for (const part of cpuset.split(',')) { + if (part.includes('-')) { + const [start, end] = part.split('-').map(Number) + for (let i = start; i <= end; i++) { + cores.push(i) + } + } else { + cores.push(Number(part)) + } + } + return cores + } + + private allocateCpus(jobId: string, count: number, envId: string): string | null { + const envCores = this.envCpuCoresMap.get(envId) + if (!envCores || envCores.length === 0 || count <= 0) return null + const existing = this.cpuAllocations.get(jobId) + if (existing && existing.length > 0) { + const cpusetStr = existing.join(',') + CORE_LOGGER.info( + `CPU affinity: reusing existing cores [${cpusetStr}] for job ${jobId}` + ) + return cpusetStr + } + + const usedCores = new Set() + for (const cores of this.cpuAllocations.values()) { + for (const core of cores) { + usedCores.add(core) + } + } + + const freeCores: number[] = [] + for (const core of envCores) { + if (!usedCores.has(core)) { + freeCores.push(core) + if (freeCores.length === count) break + } + } + + if (freeCores.length < count) { + CORE_LOGGER.warn( + `CPU affinity: not enough free cores for job ${jobId} in env ${envId} (requested=${count}, available=${freeCores.length}/${envCores.length})` + ) + return null + } + + this.cpuAllocations.set(jobId, freeCores) + const cpusetStr = freeCores.join(',') + CORE_LOGGER.info(`CPU affinity: allocated cores [${cpusetStr}] to job ${jobId}`) + return cpusetStr + } + + private releaseCpus(jobId: string): void { + const cores = this.cpuAllocations.get(jobId) + if (cores) { + CORE_LOGGER.info( + `CPU affinity: released cores [${cores.join(',')}] from job ${jobId}` + ) + this.cpuAllocations.delete(jobId) + } + } + + /** + * On startup, inspects running Docker containers to rebuild the CPU allocation map. 
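+ * Allocations are not persisted on their own: each running container's + * HostConfig.CpusetCpus string is read back via inspect() and expanded by + * parseCpusetString into the in-memory cpuAllocations map. + * @example + * // illustrative round-trip (values assumed, not read from a real container): + * // parseCpusetString('0-3,8') // => [0, 1, 2, 3, 8]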
+ */ + private async rebuildCpuAllocations(): Promise<void> { + if (this.envCpuCoresMap.size === 0) return + try { + const jobs = await this.db.getRunningJobs(this.getC2DConfig().hash) + for (const job of jobs) { + try { + const container = this.docker.getContainer(job.jobId + '-algoritm') + const info = await container.inspect() + const cpuset = info.HostConfig?.CpusetCpus + if (cpuset) { + const cores = this.parseCpusetString(cpuset) + if (cores.length > 0) { + this.cpuAllocations.set(job.jobId, cores) + CORE_LOGGER.info( + `CPU affinity: recovered allocation [${cpuset}] for running job ${job.jobId}` + ) + } + } + } catch (e) { + // Container may not exist yet (e.g., job is in pull/build phase) + } + } + } catch (e) { + CORE_LOGGER.error(`CPU affinity: failed to rebuild allocations: ${e.message}`) + } + } + private async cleanupJob(job: DBComputeJob) { // cleaning up // - claim payment or release lock @@ -1920,13 +2301,14 @@ export class C2DEngineDocker extends C2DEngine { // - delete container this.jobImageSizes.delete(job.jobId) + this.releaseCpus(job.jobId) try { - const container = await this.docker.getContainer(job.jobId + '-algoritm') + const container = this.docker.getContainer(job.jobId + '-algoritm') if (container) { if (job.status !== C2DStatusNumber.AlgorithmFailed) { writeFileSync( - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/algorithm.log', + this.getStoragePath() + '/' + job.jobId + '/data/logs/algorithm.log', await container.logs({ stdout: true, stderr: true, @@ -1953,33 +2335,32 @@ export class C2DEngineDocker extends C2DEngine { } try { // remove folders - rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/inputs', { + rmSync(this.getStoragePath() + '/' + job.jobId + '/data/inputs', { recursive: true, force: true }) } catch (e) { console.error( - `Could not delete inputs from path ${this.getC2DConfig().tempFolder} for job ID ${ + `Could not delete inputs from path ${this.getStoragePath()} for job ID ${ job.jobId }! ` + e.message ) } try { - rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/transformations', { + rmSync(this.getStoragePath() + '/' + job.jobId + '/data/transformations', { recursive: true, force: true }) } catch (e) { console.error( - `Could not delete algorithms from path ${ - this.getC2DConfig().tempFolder - } for job ID ${job.jobId}! ` + e.message + `Could not delete algorithms from path ${this.getStoragePath()} for job ID ${job.jobId}! 
` + + e.message ) } } private deleteOutputFolder(job: DBComputeJob) { - rmSync(this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/outputs/', { + rmSync(this.getStoragePath() + '/' + job.jobId + '/data/outputs/', { recursive: true, force: true }) @@ -2114,8 +2495,7 @@ export class C2DEngineDocker extends C2DEngine { private async pullImage(originaljob: DBComputeJob) { const job = JSON.parse(JSON.stringify(originaljob)) as DBComputeJob - const imageLogFile = - this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/image.log' + const imageLogFile = this.getStoragePath() + '/' + job.jobId + '/data/logs/image.log' try { // Get registry auth for the image const { registry } = this.parseImage(job.containerImage) @@ -2218,6 +2598,9 @@ export class C2DEngineDocker extends C2DEngine { const job = JSON.parse(JSON.stringify(originaljob)) as DBComputeJob const imageLogFile = this.getC2DConfig().tempFolder + '/' + job.jobId + '/data/logs/image.log' + const controller = new AbortController() + const timeoutMs = job.maxJobDuration * 1000 + const timer = setTimeout(() => controller.abort(), timeoutMs) try { const pack = tarStream.pack() @@ -2230,53 +2613,132 @@ export class C2DEngineDocker extends C2DEngine { } } pack.finalize() + job.buildStartTimestamp = String(Date.now() / 1000) + await this.db.updateJob(job) - // Build the image using the tar stream as context - const buildStream = await this.docker.buildImage(pack, { - t: job.containerImage - }) + const cpuperiod = 100000 + const ramGb = this.getResourceRequest(job.resources, 'ram') + const ramBytes = + ramGb && ramGb > 0 ? ramGb * 1024 * 1024 * 1024 : 1024 * 1024 * 1024 - // Optional: listen to build output - buildStream.on('data', (data) => { + const cpus = this.getResourceRequest(job.resources, 'cpu') + const cpuquota = cpus && cpus > 0 ? 
Math.floor(cpus * cpuperiod) : 50000 + + const buildOptions: Dockerode.ImageBuildOptions = { + t: job.containerImage, + memory: ramBytes, + memswap: ramBytes, // same as memory => no swap + cpushares: 1024, // CPU Shares (default is 1024) + cpuquota, // 100000 = 1 CPU with cpuperiod=100000 + cpuperiod, + nocache: true, // prevent cache poison + abortSignal: controller.signal + } + // Build the image using the tar stream as context (Node IncomingMessage extends stream.Readable) + const buildStream = (await this.docker.buildImage(pack, buildOptions)) as Readable + + const onBuildData = (data: Buffer) => { try { const text = JSON.parse(data.toString('utf8')) - CORE_LOGGER.debug( - "Building image for jobId '" + job.jobId + "': " + text.stream.trim() - ) - appendFileSync(imageLogFile, String(text.stream)) + if (text && text.stream && typeof text.stream === 'string') { + CORE_LOGGER.debug( + "Building image for jobId '" + job.jobId + "': " + text.stream.trim() + ) + appendFileSync(imageLogFile, String(text.stream)) + } } catch (e) { // console.log('non json build data: ', data.toString('utf8')) } - }) + } + buildStream.on('data', onBuildData) await new Promise((resolve, reject) => { - buildStream.on('end', () => { - CORE_LOGGER.debug(`Image '${job.containerImage}' built successfully.`) - this.updateImageUsage(job.containerImage).catch((e) => { - CORE_LOGGER.debug(`Failed to track image usage: ${e.message}`) + let settled = false + const detachBuildLog = () => { + buildStream.removeListener('data', onBuildData) + } + const finish = (action: () => void) => { + if (settled) return + settled = true + action() + } + const onAbort = () => { + finish(() => { + detachBuildLog() + buildStream.destroy() + const err = new Error('Image build aborted') as NodeJS.ErrnoException + err.code = 'ABORT_ERR' + err.name = 'AbortError' + reject(err) }) - resolve() - }) + } + controller.signal.addEventListener('abort', onAbort, { once: true }) + const onSuccess = () => { + finish(async () => { + detachBuildLog() + controller.signal.removeEventListener('abort', onAbort) + + // Build stream completed, but does the image actually exist? + try { + await this.docker.getImage(job.containerImage).inspect() + } catch (e) { + return reject( + new Error( + `Cannot find image '${job.containerImage}' after building. Most likely it failed: ${ + (e as Error)?.message || String(e) + }` + ) + ) + } + + CORE_LOGGER.debug(`Image '${job.containerImage}' built successfully.`) + this.updateImageUsage(job.containerImage).catch((e) => { + CORE_LOGGER.debug(`Failed to track image usage: ${e.message}`) + }) + resolve() + }) + } + // Some HTTP responses emit `close` without a reliable `end`; handle both (settled ensures once). 
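+ // Timeout path assumed by this promise: the setTimeout above fires -> controller.abort() + // -> onAbort destroys the stream and rejects with code 'ABORT_ERR' -> the outer catch + // maps that to the "Image build timed out" message.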
+ buildStream.on('end', onSuccess) + buildStream.on('close', onSuccess) buildStream.on('error', (err) => { CORE_LOGGER.debug(`Error building image '${job.containerImage}':` + err.message) appendFileSync(imageLogFile, String(err.message)) - reject(err) + finish(() => { + detachBuildLog() + controller.signal.removeEventListener('abort', onAbort) + reject(err) + }) }) }) job.status = C2DStatusNumber.ConfiguringVolumes job.statusText = C2DStatusText.ConfiguringVolumes - this.db.updateJob(job) + job.buildStopTimestamp = String(Date.now() / 1000) + await this.db.updateJob(job) } catch (err) { - CORE_LOGGER.error( - `Unable to build docker image: ${job.containerImage}: ${err.message}` - ) - appendFileSync(imageLogFile, String(err.message)) + const aborted = + (err as NodeJS.ErrnoException)?.code === 'ABORT_ERR' || + (err as Error)?.name === 'AbortError' + if (aborted) { + // timeout-specific handling + const msg = `Image build timed out after ${timeoutMs / 1000}s` + CORE_LOGGER.error(`Unable to build docker image: ${job.containerImage}: ${msg}`) + appendFileSync(imageLogFile, msg) + } else { + CORE_LOGGER.error( + `Unable to build docker image: ${job.containerImage}: ${err.message}` + ) + appendFileSync(imageLogFile, String(err.message)) + } job.status = C2DStatusNumber.BuildImageFailed job.statusText = C2DStatusText.BuildImageFailed + job.buildStopTimestamp = String(Date.now() / 1000) job.isRunning = false job.dateFinished = String(Date.now() / 1000) await this.db.updateJob(job) await this.cleanupJob(job) + } finally { + clearTimeout(timer) } } @@ -2304,7 +2766,7 @@ export class C2DEngineDocker extends C2DEngine { status: C2DStatusNumber.RunningAlgorithm, statusText: C2DStatusText.RunningAlgorithm } - const jobFolderPath = this.getC2DConfig().tempFolder + '/' + job.jobId + const jobFolderPath = this.getStoragePath() + '/' + job.jobId const fullAlgoPath = jobFolderPath + '/data/transformations/algorithm' const configLogPath = jobFolderPath + '/data/logs/configuration.log' @@ -2314,10 +2776,7 @@ export class C2DEngineDocker extends C2DEngine { "Writing algocustom data to '/data/inputs/algoCustomData.json'\n" ) const customdataPath = - this.getC2DConfig().tempFolder + - '/' + - job.jobId + - '/data/inputs/algoCustomData.json' + this.getStoragePath() + '/' + job.jobId + '/data/inputs/algoCustomData.json' writeFileSync(customdataPath, JSON.stringify(job.algorithm.algocustomdata ?? 
{})) let storage = null @@ -2455,6 +2914,10 @@ export class C2DEngineDocker extends C2DEngine { if (asset.fileObject) { try { if (asset.fileObject.type) { + if (asset.fileObject.type === 'nodePersistentStorage') { + // local storage is handled later, when we start the container and create the binds + continue + } storage = Storage.getStorageClass(asset.fileObject, config) } else { CORE_LOGGER.info('asset file object seems to be encrypted, checking it...') @@ -2571,7 +3034,7 @@ export class C2DEngineDocker extends C2DEngine { if (existsSync(destination)) { // now, upload it to the container - const container = await this.docker.getContainer(job.jobId + '-algoritm') + const container = this.docker.getContainer(job.jobId + '-algoritm') try { // await container2.putArchive(destination, { @@ -2618,7 +3081,7 @@ export class C2DEngineDocker extends C2DEngine { private makeJobFolders(job: DBComputeJob): boolean { try { - const baseFolder = this.getC2DConfig().tempFolder + '/' + job.jobId + const baseFolder = this.getStoragePath() + '/' + job.jobId const dirs = [ baseFolder, baseFolder + '/data', @@ -2633,6 +3096,8 @@ export class C2DEngineDocker extends C2DEngine { if (!existsSync(dir)) { mkdirSync(dir, { recursive: true }) } + // update directory permissions to allow read/write from job containers + chmodSync(dir, 0o777) } return true } catch (e) { @@ -2657,7 +3122,7 @@ export class C2DEngineDocker extends C2DEngine { } // delete output folders - await this.deleteOutputFolder(job) + this.deleteOutputFolder(job) // delete the job await this.db.deleteJob(job.jobId) return true @@ -2666,6 +3131,227 @@ export class C2DEngineDocker extends C2DEngine { } return false } + + private getValidBuildDurationSeconds(job: DBComputeJob): number { + const startRaw = job.buildStartTimestamp + const stopRaw = job.buildStopTimestamp + if (!startRaw || !stopRaw) return 0 + const start = Number.parseFloat(startRaw) + const stop = Number.parseFloat(stopRaw) + if (!Number.isFinite(start) || !Number.isFinite(stop)) return 0 + if (start <= 0) return 0 + if (stop < start) return 0 + return stop - start + } + + private async checkscanDBImage(): Promise<boolean> { + // 1. Pull the image if it's missing locally + try { + await this.docker.getImage(trivyImage).inspect() + return true + } catch (error) { + if (error.statusCode === 404) { + CORE_LOGGER.info(`Trivy not found. Pulling ${trivyImage}...`) + const stream = await this.docker.pull(trivyImage) + + // We must wrap the pull stream in a promise to wait for completion + await new Promise((resolve, reject) => { + this.docker.modem.followProgress(stream, (err, res) => + err ? 
reject(err) : resolve(res) + ) + }) + + CORE_LOGGER.info('Pull complete.') + return true + } else { + CORE_LOGGER.error(`Unable to pull ${trivyImage}: ${error.message}`) + return false + } + } + } + + private async scanDBUpdate(): Promise<void> { + CORE_LOGGER.info('Starting Trivy database refresh cron') + const hasImage = await this.checkscanDBImage() + if (!hasImage) { + // we cannot update without image + return + } + const updater = await this.docker.createContainer({ + Image: trivyImage, + Cmd: ['image', '--download-db-only'], // Only refreshes the cache + HostConfig: { + Binds: [`${this.trivyCachePath}:/root/.cache/trivy`] + } + }) + + await updater.start() + await updater.wait() + await updater.remove() + CORE_LOGGER.info('Trivy database refreshed.') + } + + private async scanImage(imageName: string) { + if (!imageName || !imageName.trim()) return null + const hasImage = await this.checkscanDBImage() + if (!hasImage) { + // we cannot scan without the Trivy image + return null + } + CORE_LOGGER.debug(`Starting vulnerability check for ${imageName}`) + const container = await this.docker.createContainer({ + Image: trivyImage, + Cmd: [ + 'image', + '--format', + 'json', + '--quiet', + '--no-progress', + '--skip-db-update', + '--severity', + 'CRITICAL,HIGH', + imageName + ], + HostConfig: { + Binds: [ + '/var/run/docker.sock:/var/run/docker.sock', // To see local images + `${this.trivyCachePath}:/root/.cache/trivy` // THE CACHE BIND + ] + } + }) + + await container.start() + + // Wait for completion, then parse from *demuxed stdout* to avoid corrupt JSON + // due to Docker multiplexed log framing. + const logsStream = await container.logs({ + follow: true, + stdout: true, + stderr: true + }) + + const outStream = new PassThrough() + const errStream = new PassThrough() + outStream.resume() + errStream.resume() + + const rawChunks: Buffer[] = [] + outStream.on('data', (chunk) => { + rawChunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk)) + }) + + container.modem.demuxStream(logsStream, outStream, errStream) + + const logsDrained = new Promise<void>((resolve, reject) => { + const done = () => resolve() + logsStream.once('end', done) + logsStream.once('close', done) + logsStream.once('error', reject) + }) + + await container.wait() + // Wait for the docker log stream to finish producing data. + await logsDrained + + await container.remove() + CORE_LOGGER.debug(`Vulnerability check for ${imageName} finished`) + + try { + const rawData = Buffer.concat(rawChunks).toString('utf8') + // Trivy's `--format json` output is a JSON object (it includes `SchemaVersion`). + // Prefer extracting the JSON object only; do not attempt array parsing since + // Trivy help/usage output may include `[` tokens (e.g. "[flags]") that are not JSON. + const firstBrace = rawData.indexOf('{') + const lastBrace = rawData.lastIndexOf('}') + + if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) { + const jsonText = rawData.slice(firstBrace, lastBrace + 1).trim() + if (!jsonText.includes('"SchemaVersion"')) { + CORE_LOGGER.error( + 'Trivy output did not contain SchemaVersion in extracted JSON. Truncated output: ' + + rawData.slice(0, 500) + ) + return null + } + return JSON.parse(jsonText) + } + + CORE_LOGGER.error( + `Failed to locate JSON in Trivy output. 
Truncated output: ${rawData.slice( + 0, + 1000 + )}` + ) + return null + } catch (e) { + CORE_LOGGER.error('Failed to parse Trivy output: ' + e.message) + return null + } + } + + private async checkImageVulnerability(imageName: string) { + const report = await this.scanImage(imageName) + if (!report) { + // + return { vulnerable: false, summary: 'failed to scan' } + } + // Results is an array (one entry per OS package manager / language) + const allVulnerabilities = report.Results.flatMap((r: any) => r.Vulnerabilities || []) + + const severityRank = (sev: string) => { + switch (sev) { + case 'CRITICAL': + return 3 + case 'HIGH': + return 2 + default: + return 1 + } + } + + const summary = { + total: allVulnerabilities.length, + critical: allVulnerabilities.filter((v: any) => v.Severity === 'CRITICAL').length, + high: allVulnerabilities.filter((v: any) => v.Severity === 'HIGH').length, + list: (() => { + // Present the most important vulnerabilities first. + const sorted = [...allVulnerabilities].sort((a: any, b: any) => { + const diff = severityRank(b.Severity) - severityRank(a.Severity) + if (diff !== 0) return diff + return String(a.VulnerabilityID || '').localeCompare( + String(b.VulnerabilityID || '') + ) + }) + + const list: Array<{ + severity: string + id: string + package: string + title: string + }> = [] + + for (const v of sorted) { + list.push({ + severity: v.Severity, + id: v.VulnerabilityID, + package: v.PkgName, + title: v.Title || 'No description' + }) + } + + return list + })() + } + + if (summary.critical > 0) { + return { + vulnerable: true, + summary + } + } + + return { vulnerable: false, summary } + } } // this uses the docker engine, but exposes only one env, the free one diff --git a/src/components/c2d/compute_engines.ts b/src/components/c2d/compute_engines.ts index 26ad035f9..f2da957a0 100644 --- a/src/components/c2d/compute_engines.ts +++ b/src/components/c2d/compute_engines.ts @@ -1,10 +1,15 @@ -import { C2DClusterType, ComputeEnvironment } from '../../@types/C2D/C2D.js' +import { + C2DClusterInfo, + C2DClusterType, + ComputeEnvironment +} from '../../@types/C2D/C2D.js' import { C2DEngine } from './compute_engine_base.js' import { C2DEngineDocker } from './compute_engine_docker.js' import { OceanNodeConfig } from '../../@types/OceanNode.js' import { C2DDatabase } from '../database/C2DDatabase.js' import { Escrow } from '../core/utils/escrow.js' import { KeyManager } from '../KeyManager/index.js' +import { CORE_LOGGER } from '../../utils/logging/common.js' export class C2DEngines { public engines: C2DEngine[] @@ -14,24 +19,50 @@ export class C2DEngines { escrow: Escrow, keyManager: KeyManager ) { - // let's see what engines do we have and initialize them one by one - // for docker, we need to add the "free" - - // TO DO - check if we have multiple config.c2dClusters with the same host - // if yes, do not create multiple engines + const crons = { + imageCleanup: false, + scanDBUpdate: false + } if (config && config.c2dClusters) { this.engines = [] for (const cluster of config.c2dClusters) { if (cluster.type === C2DClusterType.DOCKER) { - this.engines.push( - new C2DEngineDocker( - cluster, - db, - escrow, - keyManager, - config.dockerRegistrysAuth + // do some checks + const limit = 6 + const claimDurationTimeout = escrow.getMinLockTime(0) + if (cluster.connection.paymentClaimInterval * limit > claimDurationTimeout) { + CORE_LOGGER.error( + `Cannot create engine ${cluster.connection.hash}.\r\nConfig.claimDurationTimeout is not high enough to claim at least ${limit} 
times. Either decrease environment.paymentClaimInterval (${cluster.connection.paymentClaimInterval}) or increase config.claimDurationTimeout (${claimDurationTimeout})` + ) + } else { + const cfg = JSON.parse(JSON.stringify(cluster)) as C2DClusterInfo + // make sure that crons are running only on one docker engine + if (crons.imageCleanup) { + // already running, set cron to null for this engine + cfg.connection.imageCleanupInterval = null + } else { + // not running yet, set the defaults + cfg.connection.imageCleanupInterval = + cfg.connection.imageCleanupInterval || 86400 // 24 hours + crons.imageCleanup = true + } + if (crons.scanDBUpdate) { + cfg.connection.scanImageDBUpdateInterval = null + } else { + if (cfg.connection.scanImages) { + // set the defaults + cfg.connection.scanImageDBUpdateInterval = + cfg.connection.scanImageDBUpdateInterval || 43200 // 12 hours + crons.scanDBUpdate = true + } else { + // image scanning disabled for this engine + cfg.connection.scanImageDBUpdateInterval = null + } + } + this.engines.push( + new C2DEngineDocker(cfg, db, escrow, keyManager, config.dockerRegistrysAuth) ) - ) + } } } } diff --git a/src/components/core/compute/initialize.ts b/src/components/core/compute/initialize.ts index 6f965ce13..d7a498abc 100644 --- a/src/components/core/compute/initialize.ts +++ b/src/components/core/compute/initialize.ts @@ -39,6 +39,11 @@ import { validateAlgoForDataset, validateOutput } from './utils.js' +import { + ensureConsumerAllowedForPersistentStorageLocalfsFileObject, + rejectPersistentStorageFileObjectOnAlgorithm +} from '../../persistentStorage/PersistentStorageFactory.js' + export class ComputeInitializeHandler extends CommandHandler { validate(command: ComputeInitializeCommand): ValidateParams { const validation = validateCommandParameters(command, [ @@ -211,7 +216,6 @@ export class ComputeInitializeHandler extends CommandHandler { ) } } - const isValidOutput = await validateOutput( node, task.output, @@ -220,7 +224,22 @@ export class ComputeInitializeHandler extends CommandHandler { if (isValidOutput.status.httpStatus !== 200) { return isValidOutput } - + const algoPersistentStorageBan = rejectPersistentStorageFileObjectOnAlgorithm( + task.algorithm.fileObject + ) + if (algoPersistentStorageBan) { + return algoPersistentStorageBan + } + for (const dataset of task.datasets) { + const psAccess = await ensureConsumerAllowedForPersistentStorageLocalfsFileObject( + node, + task.consumerAddress, + dataset.fileObject + ) + if (psAccess) { + return psAccess + } + } // check algo let index = 0 const policyServer = new PolicyServer() diff --git a/src/components/core/compute/startCompute.ts b/src/components/core/compute/startCompute.ts index 7cfc5d954..b16efd916 100644 --- a/src/components/core/compute/startCompute.ts +++ b/src/components/core/compute/startCompute.ts @@ -29,6 +29,7 @@ import { import { EncryptMethod } from '../../../@types/fileObject.js' import { ComputeAccessList, + ComputeEnvironment, ComputeResourceRequestWithPrice } from '../../../@types/C2D/C2D.js' // import { verifyProviderFees } from '../utils/feesHandler.js' @@ -43,6 +44,10 @@ import { getNonceAsNumber } from '../utils/nonceHandler.js' import { PolicyServer } from '../../policyServer/index.js' import { checkCredentials } from '../../../utils/credentials.js' import { checkAddressOnAccessList } from '../../../utils/accessList.js' +import { + ensureConsumerAllowedForPersistentStorageLocalfsFileObject, + rejectPersistentStorageFileObjectOnAlgorithm +} from 
'../../persistentStorage/PersistentStorageFactory.js' export class CommonComputeHandler extends CommandHandler { validate(command: PaidComputeStartCommand): ValidateParams { @@ -119,8 +124,10 @@ export class PaidComputeStartHandler extends CommonComputeHandler { } } + let allEnvs: ComputeEnvironment[] try { - env = await engine.getComputeEnvironment(null, task.environment) + allEnvs = await engine.getComputeEnvironments() + env = allEnvs.find((e) => e.id === task.environment) if (!env) { return { stream: null, @@ -150,7 +157,7 @@ export class PaidComputeStartHandler extends CommonComputeHandler { } } try { - await engine.checkIfResourcesAreAvailable(task.resources, env, false) + await engine.checkIfResourcesAreAvailable(task.resources, env, false, allEnvs) } catch (e) { if (task.queueMaxWaitTime > 0) { CORE_LOGGER.verbose( @@ -225,7 +232,23 @@ export class PaidComputeStartHandler extends CommonComputeHandler { } } const policyServer = new PolicyServer() - // check algo + const algoPersistentStorageBan = rejectPersistentStorageFileObjectOnAlgorithm( + task.algorithm.fileObject + ) + if (algoPersistentStorageBan) { + return algoPersistentStorageBan + } + for (const dataset of task.datasets) { + const psAccess = await ensureConsumerAllowedForPersistentStorageLocalfsFileObject( + node, + task.consumerAddress, + dataset.fileObject + ) + if (psAccess) { + return psAccess + } + } + // check algo and datasets (orders, credentials, etc.) for (const elem of [...[task.algorithm], ...task.datasets]) { const result: any = { validOrder: false } if ('documentId' in elem && elem.documentId) { @@ -603,6 +626,7 @@ export class PaidComputeStartHandler extends CommonComputeHandler { token: task.payment.token, lockTx: agreementId, claimTx: null, + cancelTx: null, cost: 0 }, jobId, @@ -748,6 +772,22 @@ export class FreeComputeStartHandler extends CommonComputeHandler { return isValidOutput } const policyServer = new PolicyServer() + const algoPersistentStorageBanFree = rejectPersistentStorageFileObjectOnAlgorithm( + task.algorithm.fileObject + ) + if (algoPersistentStorageBanFree) { + return algoPersistentStorageBanFree + } + for (const dataset of task.datasets) { + const psAccess = await ensureConsumerAllowedForPersistentStorageLocalfsFileObject( + thisNode, + task.consumerAddress, + dataset.fileObject + ) + if (psAccess) { + return psAccess + } + } for (const elem of [...[task.algorithm], ...task.datasets]) { if (!('documentId' in elem)) { continue @@ -886,7 +926,8 @@ export class FreeComputeStartHandler extends CommonComputeHandler { } } } - const env = await engine.getComputeEnvironment(null, task.environment) + const allFreeEnvs = await engine.getComputeEnvironments() + const env = allFreeEnvs.find((e) => e.id === task.environment) if (!env) { return { stream: null, @@ -932,7 +973,7 @@ export class FreeComputeStartHandler extends CommonComputeHandler { } } try { - await engine.checkIfResourcesAreAvailable(task.resources, env, true) + await engine.checkIfResourcesAreAvailable(task.resources, env, true, allFreeEnvs) } catch (e) { if (task.queueMaxWaitTime > 0) { CORE_LOGGER.verbose( @@ -1026,41 +1067,5 @@ async function validateAccess( if (access.addresses.includes(consumerAddress)) { return true } - - const config = await getConfiguration() - const { supportedNetworks } = config - for (const accessListMap of access.accessLists) { - if (!accessListMap) continue - for (const chain of Object.keys(accessListMap)) { - const { chainId } = supportedNetworks[chain] - try { - const blockchain = 
oceanNode.getBlockchain(chainId) - if (!blockchain) { - CORE_LOGGER.logMessage( - `Blockchain instance not available for chain ${chainId}, skipping access list check`, - true - ) - continue - } - const signer = await blockchain.getSigner() - for (const accessListAddress of accessListMap[chain]) { - const hasAccess = await checkAddressOnAccessList( - accessListAddress, - consumerAddress, - signer - ) - if (hasAccess) { - return true - } - } - } catch (error) { - CORE_LOGGER.logMessage( - `Failed to check access lists on chain ${chain}: ${error.message}`, - true - ) - } - } - } - - return false + return await checkAddressOnAccessList(consumerAddress, access.accessLists, oceanNode) } diff --git a/src/components/core/handler/coreHandlersRegistry.ts b/src/components/core/handler/coreHandlersRegistry.ts index de3464580..531f7f1a9 100644 --- a/src/components/core/handler/coreHandlersRegistry.ts +++ b/src/components/core/handler/coreHandlersRegistry.ts @@ -47,6 +47,14 @@ import { } from './p2p.js' import { CreateAuthTokenHandler, InvalidateAuthTokenHandler } from './authHandler.js' import { GetJobsHandler } from './getJobs.js' +import { + PersistentStorageCreateBucketHandler, + PersistentStorageDeleteFileHandler, + PersistentStorageGetBucketsHandler, + PersistentStorageGetFileObjectHandler, + PersistentStorageListFilesHandler, + PersistentStorageUploadFileHandler +} from './persistentStorage.js' export type HandlerRegistry = { handlerName: string // name of the handler @@ -167,6 +175,30 @@ export class CoreHandlersRegistry { this.registerCoreHandler(PROTOCOL_COMMANDS.PUSH_CONFIG, new PushConfigHandler(node)) this.registerCoreHandler(PROTOCOL_COMMANDS.GET_LOGS, new GetLogsHandler(node)) this.registerCoreHandler(PROTOCOL_COMMANDS.JOBS, new GetJobsHandler(node)) + this.registerCoreHandler( + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + new PersistentStorageCreateBucketHandler(node) + ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS, + new PersistentStorageGetBucketsHandler(node) + ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES, + new PersistentStorageListFilesHandler(node) + ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + new PersistentStorageUploadFileHandler(node) + ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT, + new PersistentStorageGetFileObjectHandler(node) + ) + this.registerCoreHandler( + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE, + new PersistentStorageDeleteFileHandler(node) + ) } public static getInstance( diff --git a/src/components/core/handler/ddoHandler.ts b/src/components/core/handler/ddoHandler.ts index 5df1162e3..7ef17b8f3 100644 --- a/src/components/core/handler/ddoHandler.ts +++ b/src/components/core/handler/ddoHandler.ts @@ -800,7 +800,7 @@ export class ValidateDDOHandler extends CommandHandler { task.publisherAddress, task.policyServer ) - if (!response) { + if (!response.success) { CORE_LOGGER.logMessage( `Error: Validation for ${task.publisherAddress} was denied`, true diff --git a/src/components/core/handler/downloadHandler.ts b/src/components/core/handler/downloadHandler.ts index 1b97d1792..cf587323a 100644 --- a/src/components/core/handler/downloadHandler.ts +++ b/src/components/core/handler/downloadHandler.ts @@ -542,6 +542,17 @@ export class DownloadHandler extends CommandHandler { CORE_LOGGER.info('Appended userData to file url: ' + decriptedFileObject.url) } + if (decriptedFileObject?.url && task.userData) { + const 
url = new URL(decriptedFileObject.url) + const userDataObj = + typeof task.userData === 'string' ? JSON.parse(task.userData) : task.userData + for (const [key, value] of Object.entries(userDataObj)) { + url.searchParams.append(key, String(value)) + } + decriptedFileObject.url = url.toString() + CORE_LOGGER.info('Appended userData to file url: ' + decriptedFileObject.url) + } + if (!validateFilesStructure(ddo, service, decryptedFileData)) { CORE_LOGGER.error( 'Unauthorized download operation. Decrypted "nftAddress" and "datatokenAddress" do not match the original DDO' diff --git a/src/components/core/handler/encryptHandler.ts b/src/components/core/handler/encryptHandler.ts index 520d92807..1b213ae85 100644 --- a/src/components/core/handler/encryptHandler.ts +++ b/src/components/core/handler/encryptHandler.ts @@ -71,7 +71,7 @@ export class EncryptHandler extends CommandHandler { task.consumerAddress, task.policyServer ) - if (!response) { + if (!response.success) { CORE_LOGGER.logMessage( `Error: Encrypt for ${task.consumerAddress} was denied`, true @@ -163,7 +163,7 @@ export class EncryptFileHandler extends CommandHandler { task.policyServer, task.files ) - if (!response) { + if (!response.success) { CORE_LOGGER.logMessage( `Error: EncryptFile for ${task.consumerAddress} was denied`, true diff --git a/src/components/core/handler/getJobs.ts b/src/components/core/handler/getJobs.ts index a72a25add..21cdf18ed 100644 --- a/src/components/core/handler/getJobs.ts +++ b/src/components/core/handler/getJobs.ts @@ -30,7 +30,9 @@ export class GetJobsHandler extends CommandHandler { const jobs = await c2d.getJobs( task.environments, task.fromTimestamp, - task.consumerAddrs + task.consumerAddrs, + undefined, + task.runningJobs ) const sanitizedJobs = jobs.map((job) => { if (job.algorithm) { diff --git a/src/components/core/handler/persistentStorage.ts b/src/components/core/handler/persistentStorage.ts new file mode 100644 index 000000000..5674c00e0 --- /dev/null +++ b/src/components/core/handler/persistentStorage.ts @@ -0,0 +1,384 @@ +import { Readable } from 'stream' +import type { + PersistentStorageCreateBucketCommand, + PersistentStorageDeleteFileCommand, + PersistentStorageGetBucketsCommand, + PersistentStorageGetFileObjectCommand, + PersistentStorageListFilesCommand, + PersistentStorageUploadFileCommand +} from '../../../@types/commands.js' +import { + PersistentStorageAccessDeniedError, + type PersistentStorageFactory +} from '../../persistentStorage/PersistentStorageFactory.js' +import type { P2PCommandResponse } from '../../../@types/OceanNode.js' +import { getAddress } from 'ethers' +import { checkAddressOnAccessList } from '../../../utils/accessList.js' + +import { CORE_LOGGER } from '../../../utils/logging/common.js' +import { + buildInvalidRequestMessage, + validateCommandParameters, + type ValidateParams +} from '../../httpRoutes/validateCommands.js' +import { CommandHandler } from './handler.js' + +function requirePersistentStorage(handler: CommandHandler): PersistentStorageFactory { + const node = handler.getOceanNode() as any + if (!node.getPersistentStorage) { + throw new Error('Persistent storage is not available on this node') + } + const storage = node.getPersistentStorage() + if (!storage) { + throw new Error('Persistent storage is not configured or disabled') + } + return storage +} + +export class PersistentStorageCreateBucketHandler extends CommandHandler { + validate(command: PersistentStorageCreateBucketCommand): ValidateParams { + const base = 
validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'accessLists' + ]) + if (!base.valid) return base + if (!Array.isArray(command.accessLists)) { + return buildInvalidRequestMessage( + 'Invalid parameter: "accessLists" must be an array of objects' + ) + } + return { valid: true } + } + + async handle(task: PersistentStorageCreateBucketCommand): Promise<P2PCommandResponse> { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) return validationResponse + + const isAuthRequestValid = await this.validateTokenOrSignature( + task.authorization, + task.consumerAddress, + task.nonce, + task.signature, + task.command + ) + if (isAuthRequestValid.status.httpStatus !== 200) return isAuthRequestValid + + try { + const storage = requirePersistentStorage(this) + const node = this.getOceanNode() + const config = node.getConfig() + // if we have access lists, check them. + if ( + config.persistentStorage?.accessLists && + config.persistentStorage?.accessLists.length > 0 + ) { + const isAllowedCreate = await checkAddressOnAccessList( + task.consumerAddress, + config.persistentStorage?.accessLists, + node + ) + if (!isAllowedCreate) { + return { + stream: null, + status: { + httpStatus: 403, + error: 'You are not allowed to create new buckets' + } + } + } + } + + let ownerNormalized: string + try { + ownerNormalized = getAddress(task.consumerAddress) + } catch { + return { + stream: null, + status: { httpStatus: 400, error: 'Invalid parameter: "consumerAddress"' } + } + } + + const result = await storage.createNewBucket(task.accessLists, ownerNormalized) + return { + stream: Readable.from(JSON.stringify(result)), + status: { httpStatus: 200, error: null } + } + } catch (e) { + const message = e instanceof Error ? 
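// On success the handler streams a CreateBucketResult back to the caller,
// e.g. (illustrative): { "bucketId": "<uuid>", "owner": "0x...", "accessList": [] }
// Failures are normalized below to a plain message for the P2P status envelope.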
e.message : String(e) + CORE_LOGGER.error(`PersistentStorageCreateBucketHandler error: ${message}`) + return { stream: null, status: { httpStatus: 500, error: message } } + } + } +} + +export class PersistentStorageGetBucketsHandler extends CommandHandler { + validate(command: PersistentStorageGetBucketsCommand): ValidateParams { + const base = validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'owner' + ]) + if (!base.valid) return base + if (!command.owner || typeof command.owner !== 'string') { + return buildInvalidRequestMessage( + 'Invalid parameter: "owner" must be a non-empty string' + ) + } + return { valid: true } + } + + async handle(task: PersistentStorageGetBucketsCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) return validationResponse + + const isAuthRequestValid = await this.validateTokenOrSignature( + task.authorization, + task.consumerAddress, + task.nonce, + task.signature, + task.command + ) + if (isAuthRequestValid.status.httpStatus !== 200) return isAuthRequestValid + + let ownerNormalized: string + // let consumerNormalized: string + try { + ownerNormalized = getAddress(task.owner) + // consumerNormalized = getAddress(task.consumerAddress) + } catch { + return { + stream: null, + status: { + httpStatus: 400, + error: 'Invalid parameter: "owner" or "consumerAddress"' + } + } + } + + try { + const storage = requirePersistentStorage(this) + // const node = this.getOceanNode() + const rows = await storage.listBuckets(ownerNormalized) + + return { + stream: Readable.from(JSON.stringify(rows)), + status: { httpStatus: 200, error: null } + } + } catch (e) { + const message = e instanceof Error ? e.message : String(e) + CORE_LOGGER.error(`PersistentStorageGetBucketsHandler error: ${message}`) + return { stream: null, status: { httpStatus: 500, error: message } } + } + } +} + +export class PersistentStorageListFilesHandler extends CommandHandler { + validate(command: PersistentStorageListFilesCommand): ValidateParams { + const base = validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'bucketId' + ]) + if (!base.valid) return base + if (!command.bucketId || typeof command.bucketId !== 'string') { + return buildInvalidRequestMessage('Invalid parameter: "bucketId" must be a string') + } + return { valid: true } + } + + async handle(task: PersistentStorageListFilesCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) return validationResponse + + const isAuthRequestValid = await this.validateTokenOrSignature( + task.authorization, + task.consumerAddress, + task.nonce, + task.signature, + task.command + ) + if (isAuthRequestValid.status.httpStatus !== 200) return isAuthRequestValid + + try { + const storage = requirePersistentStorage(this) + const result = await storage.listFiles(task.bucketId, task.consumerAddress) + return { + stream: Readable.from(JSON.stringify(result)), + status: { httpStatus: 200, error: null } + } + } catch (e) { + if (e instanceof PersistentStorageAccessDeniedError) { + return { + stream: null, + status: { httpStatus: 403, error: e.message } + } + } + const message = e instanceof Error ? 
e.message : String(e) + CORE_LOGGER.error(`PersistentStorageListFilesHandler error: ${message}`) + return { stream: null, status: { httpStatus: 500, error: message } } + } + } +} + +export class PersistentStorageGetFileObjectHandler extends CommandHandler { + validate(command: PersistentStorageGetFileObjectCommand): ValidateParams { + const base = validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'bucketId', + 'fileName' + ]) + if (!base.valid) return base + return { valid: true } + } + + async handle(task: PersistentStorageGetFileObjectCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) return validationResponse + + const isAuthRequestValid = await this.validateTokenOrSignature( + task.authorization, + task.consumerAddress, + task.nonce, + task.signature, + task.command + ) + if (isAuthRequestValid.status.httpStatus !== 200) return isAuthRequestValid + + try { + const storage = requirePersistentStorage(this) + const obj = await storage.getFileObject( + task.bucketId, + task.fileName, + task.consumerAddress + ) + return { + stream: Readable.from(JSON.stringify(obj)), + status: { httpStatus: 200, error: null } + } + } catch (e) { + if (e instanceof PersistentStorageAccessDeniedError) { + return { + stream: null, + status: { httpStatus: 403, error: e.message } + } + } + const message = e instanceof Error ? e.message : String(e) + if (message.toLowerCase().includes('file not found')) { + return { stream: null, status: { httpStatus: 404, error: message } } + } + CORE_LOGGER.error(`PersistentStorageGetFileObjectHandler error: ${message}`) + return { stream: null, status: { httpStatus: 500, error: message } } + } + } +} + +export class PersistentStorageUploadFileHandler extends CommandHandler { + validate(command: PersistentStorageUploadFileCommand): ValidateParams { + const base = validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'bucketId', + 'fileName' + ]) + if (!base.valid) return base + return { valid: true } + } + + async handle(task: PersistentStorageUploadFileCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) return validationResponse + + const isAuthRequestValid = await this.validateTokenOrSignature( + task.authorization, + task.consumerAddress, + task.nonce, + task.signature, + task.command + ) + if (isAuthRequestValid.status.httpStatus !== 200) return isAuthRequestValid + + try { + const storage = requirePersistentStorage(this) + if (!task.stream) { + return { + stream: null, + status: { httpStatus: 403, error: 'Upload stream error' } + } + } + const result = await storage.uploadFile( + task.bucketId, + task.fileName, + task.stream, + task.consumerAddress + ) + return { + stream: Readable.from(JSON.stringify(result)), + status: { httpStatus: 200, error: null } + } + } catch (e) { + if (e instanceof PersistentStorageAccessDeniedError) { + return { + stream: null, + status: { httpStatus: 403, error: e.message } + } + } + const message = e instanceof Error ? 
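// Error-mapping convention shared by these persistent-storage handlers:
//   PersistentStorageAccessDeniedError -> 403 (bucket ACL denial)
//   'file not found' messages          -> 404 (see the get-file-object handler above)
//   anything else                      -> 500 with the normalized message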
e.message : String(e) + CORE_LOGGER.error(`PersistentStorageUploadFileHandler error: ${message}`) + return { stream: null, status: { httpStatus: 500, error: message } } + } + } +} + +export class PersistentStorageDeleteFileHandler extends CommandHandler { + validate(command: PersistentStorageDeleteFileCommand): ValidateParams { + const base = validateCommandParameters(command, [ + 'consumerAddress', + 'signature', + 'nonce', + 'bucketId', + 'fileName' + ]) + if (!base.valid) return base + return { valid: true } + } + + async handle(task: PersistentStorageDeleteFileCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) return validationResponse + + const isAuthRequestValid = await this.validateTokenOrSignature( + task.authorization, + task.consumerAddress, + task.nonce, + task.signature, + task.command + ) + if (isAuthRequestValid.status.httpStatus !== 200) return isAuthRequestValid + + try { + const storage = requirePersistentStorage(this) + await storage.deleteFile(task.bucketId, task.fileName, task.consumerAddress) + return { + stream: Readable.from(JSON.stringify({ success: true })), + status: { httpStatus: 200, error: null } + } + } catch (e) { + if (e instanceof PersistentStorageAccessDeniedError) { + return { + stream: null, + status: { httpStatus: 403, error: e.message } + } + } + const message = e instanceof Error ? e.message : String(e) + CORE_LOGGER.error(`PersistentStorageDeleteFileHandler error: ${message}`) + return { stream: null, status: { httpStatus: 500, error: message } } + } + } +} diff --git a/src/components/core/utils/statusHandler.ts b/src/components/core/utils/statusHandler.ts index 2b7d73c9c..222d40ca0 100644 --- a/src/components/core/utils/statusHandler.ts +++ b/src/components/core/utils/statusHandler.ts @@ -7,13 +7,13 @@ import { StorageTypes, OceanNodeConfig } from '../../../@types/OceanNode.js' -import { getConfiguration } from '../../../utils/index.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { OceanNode } from '../../../OceanNode.js' import { typesenseSchemas } from '../../database/TypesenseSchemas.js' import { SupportedNetwork } from '../../../@types/blockchain.js' import { getAdminAddresses } from '../../../utils/auth.js' import HumanHasher from 'humanhash' +import { getPackageVersion } from '../../../utils/version.js' function getSupportedStorageTypes(config: OceanNodeConfig): StorageTypes { return { @@ -111,7 +111,7 @@ export async function status( ) return } - const config = await getConfiguration() + const config = oceanNode.getConfig() // no previous status? 
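// Using oceanNode.getConfig() here (instead of re-running getConfiguration())
// reuses the configuration the node was started with; presumably this avoids
// re-reading environment state on every status request. With persistent storage
// enabled, the returned status later gains a fragment like (illustrative):
//   "persistentStorage": { "accessLists": [ ... ] }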
if (!nodeStatus) { @@ -126,7 +126,7 @@ export async function status( publicKey: publicKeyHex, friendlyName: new HumanHasher().humanize(publicKeyHex), address: oceanNode.getKeyManager().getEthAddress(), - version: process.env.npm_package_version, + version: getPackageVersion(), http: config.hasHttp, p2p: config.hasP2P, provider: [], @@ -172,5 +172,11 @@ export async function status( } nodeStatus.supportedSchemas = typesenseSchemas.ddoSchemas } + + if (config.persistentStorage) { + nodeStatus.persistentStorage = {} + if (config.persistentStorage.accessLists) + nodeStatus.persistentStorage.accessLists = config.persistentStorage.accessLists + } return nodeStatus } diff --git a/src/components/database/C2DDatabase.ts b/src/components/database/C2DDatabase.ts index 87146e576..2fdaedbdc 100755 --- a/src/components/database/C2DDatabase.ts +++ b/src/components/database/C2DDatabase.ts @@ -84,9 +84,16 @@ export class C2DDatabase extends AbstractDatabase { environments?: string[], fromTimestamp?: string, consumerAddrs?: string[], - status?: C2DStatusNumber + status?: C2DStatusNumber, + runningJobs?: boolean ): Promise { - return await this.provider.getJobs(environments, fromTimestamp, consumerAddrs, status) + return await this.provider.getJobs( + environments, + fromTimestamp, + consumerAddrs, + status, + runningJobs + ) } async getJobsByStatus( diff --git a/src/components/database/sqliteCompute.ts b/src/components/database/sqliteCompute.ts index d3796b7ce..1c2462d5f 100644 --- a/src/components/database/sqliteCompute.ts +++ b/src/components/database/sqliteCompute.ts @@ -6,6 +6,7 @@ import { } from '../../@types/C2D/C2D.js' import sqlite3, { RunResult } from 'sqlite3' import { DATABASE_LOGGER } from '../../utils/logging/common.js' +import { create256Hash } from '../../utils/crypt.js' interface ComputeDatabaseProvider { newJob(job: DBComputeJob): Promise @@ -46,7 +47,10 @@ function getInternalStructure(job: DBComputeJob): any { payment: job.payment, algoDuration: job.algoDuration, queueMaxWaitTime: job.queueMaxWaitTime, - output: job.output + output: job.output, + jobIdHash: job.jobIdHash, + buildStartTimestamp: job.buildStartTimestamp, + buildStopTimestamp: job.buildStopTimestamp } return internalBlob } @@ -443,7 +447,8 @@ export class SQLiteCompute implements ComputeDatabaseProvider { environments?: string[], fromTimestamp?: string, consumerAddrs?: string[], - status?: C2DStatusNumber + status?: C2DStatusNumber, + runningJobs?: boolean ): Promise { let selectSQL = `SELECT * FROM ${this.schema.name}` @@ -456,9 +461,22 @@ export class SQLiteCompute implements ComputeDatabaseProvider { params.push(...environments) } - if (fromTimestamp) { - conditions.push(`dateFinished >= ?`) - params.push(fromTimestamp) + if (runningJobs) { + conditions.push(`status = ?`) + params.push(C2DStatusNumber.RunningAlgorithm.toString()) + if (fromTimestamp) { + conditions.push(`dateCreated >= ?`) + params.push(fromTimestamp) + } + } else { + if (fromTimestamp) { + conditions.push(`dateFinished >= ?`) + params.push(fromTimestamp) + } + if (status) { + conditions.push(`status = ?`) + params.push(status.toString()) + } } if (consumerAddrs && consumerAddrs.length > 0) { @@ -467,11 +485,6 @@ export class SQLiteCompute implements ComputeDatabaseProvider { params.push(...consumerAddrs) } - if (status) { - conditions.push(`status = ?`) - params.push(status.toString()) - } - if (conditions.length > 0) { selectSQL += ` WHERE ${conditions.join(' AND ')}` } @@ -529,6 +542,9 @@ export class SQLiteCompute implements ComputeDatabaseProvider { 
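// With runningJobs=true the query filters on live status instead of dateFinished;
// assuming this.schema.name is the jobs table, the generated SQL is roughly:
//   SELECT * FROM <schema.name> WHERE status = ? AND dateCreated >= ?
// The row mapping below also backfills jobIdHash for rows written before that
// field existed, by hashing the stored jobId.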
const maxJobDuration = row.expireTimestamp delete row.expireTimestamp const job: DBComputeJob = { ...row, ...body, maxJobDuration } + if (!job.jobIdHash && job.jobId) { + job.jobIdHash = create256Hash(job.jobId) + } return job }) resolve(all) diff --git a/src/components/httpRoutes/index.ts b/src/components/httpRoutes/index.ts index cf5530c5f..184608f80 100644 --- a/src/components/httpRoutes/index.ts +++ b/src/components/httpRoutes/index.ts @@ -15,6 +15,7 @@ import { addMapping, allRoutesMapping, findPathName } from './routeUtils.js' import { PolicyServerPassthroughRoute } from './policyServer.js' import { authRoutes } from './auth.js' import { adminConfigRoutes } from './adminConfig.js' +import { persistentStorageRoutes } from './persistentStorage.js' export * from './getOceanPeers.js' export * from './auth.js' @@ -62,6 +63,8 @@ httpRoutes.use(PolicyServerPassthroughRoute) httpRoutes.use(authRoutes) // admin config routes httpRoutes.use(adminConfigRoutes) +// persistent storage routes +httpRoutes.use(persistentStorageRoutes) export function getAllServiceEndpoints() { httpRoutes.stack.forEach(addMapping.bind(null, [])) diff --git a/src/components/httpRoutes/persistentStorage.ts b/src/components/httpRoutes/persistentStorage.ts new file mode 100644 index 000000000..c99ad3d76 --- /dev/null +++ b/src/components/httpRoutes/persistentStorage.ts @@ -0,0 +1,199 @@ +import express from 'express' +import { Readable } from 'stream' + +import { SERVICES_API_BASE_PATH, PROTOCOL_COMMANDS } from '../../utils/constants.js' +import { HTTP_LOGGER } from '../../utils/logging/common.js' +import { streamToObject, streamToString } from '../../utils/util.js' + +import { + PersistentStorageCreateBucketHandler, + PersistentStorageDeleteFileHandler, + PersistentStorageGetBucketsHandler, + PersistentStorageGetFileObjectHandler, + PersistentStorageListFilesHandler, + PersistentStorageUploadFileHandler +} from '../core/handler/persistentStorage.js' + +export const persistentStorageRoutes = express.Router() + +// Create bucket +persistentStorageRoutes.post( + `${SERVICES_API_BASE_PATH}/persistentStorage/buckets`, + express.json(), + async (req, res) => { + try { + const response = await new PersistentStorageCreateBucketHandler( + req.oceanNode + ).handle({ + ...req.body, + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + authorization: req.headers?.authorization, + caller: req.caller + }) + if (!response.stream) { + res.status(response.status.httpStatus).send(response.status.error) + return + } + const payload = await streamToObject(response.stream as Readable) + res.status(200).json(payload) + } catch (error) { + HTTP_LOGGER.error(`PersistentStorage create bucket error: ${error}`) + res.status(500).send('Internal Server Error') + } + } +) + +// List buckets for an owner (then filtered by ACL in handler) +persistentStorageRoutes.get( + `${SERVICES_API_BASE_PATH}/persistentStorage/buckets`, + async (req, res) => { + try { + const response = await new PersistentStorageGetBucketsHandler(req.oceanNode).handle( + { + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string, + nonce: req.query.nonce as string, + owner: req.query.owner as string, + authorization: req.headers?.authorization, + caller: req.caller + } as any + ) + if (!response.stream) { + res.status(response.status.httpStatus).send(response.status.error) + return + } + const payload = await streamToObject(response.stream as Readable) + 
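// Example request (query values hypothetical; the base path comes from
// SERVICES_API_BASE_PATH):
//   GET <base>/persistentStorage/buckets?owner=0x...&consumerAddress=0x...&nonce=1700000000000&signature=0x...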
res.status(200).json(payload) + } catch (error) { + HTTP_LOGGER.error(`PersistentStorage get buckets error: ${error}`) + res.status(500).send('Internal Server Error') + } + } +) + +// List files in bucket +persistentStorageRoutes.get( + `${SERVICES_API_BASE_PATH}/persistentStorage/buckets/:bucketId/files`, + async (req, res) => { + try { + const response = await new PersistentStorageListFilesHandler(req.oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string, + nonce: req.query.nonce as string, + bucketId: req.params.bucketId, + authorization: req.headers?.authorization, + caller: req.caller + } as any) + if (!response.stream) { + res.status(response.status.httpStatus).send(response.status.error) + return + } + const payload = await streamToObject(response.stream as Readable) + res.status(200).json(payload) + } catch (error) { + HTTP_LOGGER.error(`PersistentStorage list files error: ${error}`) + res.status(500).send('Internal Server Error') + } + } +) + +// Get file object for a file in a bucket +persistentStorageRoutes.get( + `${SERVICES_API_BASE_PATH}/persistentStorage/buckets/:bucketId/files/:fileName/object`, + async (req, res) => { + try { + const response = await new PersistentStorageGetFileObjectHandler( + req.oceanNode + ).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string, + nonce: req.query.nonce as string, + bucketId: req.params.bucketId, + fileName: req.params.fileName, + authorization: req.headers?.authorization, + caller: req.caller + } as any) + if (!response.stream) { + res.status(response.status.httpStatus).send(response.status.error) + return + } + const payload = await streamToObject(response.stream as Readable) + res.status(200).json(payload) + } catch (error) { + HTTP_LOGGER.error(`PersistentStorage get file object error: ${error}`) + res.status(500).send('Internal Server Error') + } + } +) + +// Upload file to bucket. Body is treated as raw bytes. +persistentStorageRoutes.post( + `${SERVICES_API_BASE_PATH}/persistentStorage/buckets/:bucketId/files/:fileName`, + async (req, res) => { + try { + const response = await new PersistentStorageUploadFileHandler(req.oceanNode).handle( + { + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string, + nonce: req.query.nonce as string, + bucketId: req.params.bucketId, + fileName: req.params.fileName, + // Stream request body directly (supports chunked uploads, avoids buffering). 
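// Because the Express request object is itself a Readable, the localfs backend can
// pipe the body straight to disk (stream pipeline into a write stream) instead of
// buffering it; e.g. a client could POST a multi-gigabyte body to (illustrative):
//   <base>/persistentStorage/buckets/<bucketId>/files/<fileName>?consumerAddress=0x...&nonce=...&signature=0x...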
+ stream: req, + authorization: req.headers?.authorization, + caller: req.caller + } as any + ) + if (!response.stream) { + res.status(response.status.httpStatus).send(response.status.error) + return + } + const payload = await streamToObject(response.stream as Readable) + res.status(200).json(payload) + } catch (error) { + HTTP_LOGGER.error(`PersistentStorage upload error: ${error}`) + res.status(500).send('Internal Server Error') + } + } +) + +// Delete file from bucket +persistentStorageRoutes.delete( + `${SERVICES_API_BASE_PATH}/persistentStorage/buckets/:bucketId/files/:fileName`, + async (req, res) => { + try { + const response = await new PersistentStorageDeleteFileHandler(req.oceanNode).handle( + { + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string, + nonce: req.query.nonce as string, + bucketId: req.params.bucketId, + fileName: req.params.fileName, + authorization: req.headers?.authorization, + caller: req.caller + } as any + ) + + if (response.status.httpStatus !== 200) { + res.status(response.status.httpStatus).send(response.status.error) + return + } + + if (!response.stream) { + res.status(200).json({ success: true }) + return + } + + const payload = JSON.parse(await streamToString(response.stream as Readable)) + res.status(200).json(payload) + } catch (error) { + HTTP_LOGGER.error(`PersistentStorage delete error: ${error}`) + res.status(500).send('Internal Server Error') + } + } +) diff --git a/src/components/httpRoutes/validateCommands.ts b/src/components/httpRoutes/validateCommands.ts index 49850f496..f8bb365cb 100644 --- a/src/components/httpRoutes/validateCommands.ts +++ b/src/components/httpRoutes/validateCommands.ts @@ -32,8 +32,23 @@ export function validateCommandParameters( return buildInvalidRequestMessage(`Invalid or unrecognized command: "${commandStr}"`) } - // deep copy - const logCommandData = structuredClone(commandData) + // deep copy for logging (must not throw for non-cloneable payloads like streams) + let logCommandData: any + try { + // For some commands, the task contains non-cloneable fields (e.g. Node streams). + // We redact those before cloning to avoid DataCloneError. + const sanitized = { ...(commandData ?? {}) } + if ('stream' in sanitized) { + sanitized.stream = '[STREAM]' + } + logCommandData = structuredClone(sanitized) + } catch { + // Last resort: shallow clone; avoid crashing validation because of logging. + logCommandData = { ...(commandData ?? 
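// structuredClone cannot serialize Node streams or sockets, e.g. (illustrative):
//   structuredClone({ stream: process.stdin }) // throws DataCloneError
// hence the 'stream' field is redacted before cloning in both branches.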
{}) } + if ('stream' in logCommandData) { + logCommandData.stream = '[STREAM]' + } + } if (commandStr === PROTOCOL_COMMANDS.ENCRYPT) { logCommandData.files = [] // hide files data (sensitive) + rawData (long buffer) from logging diff --git a/src/components/persistentStorage/PersistentStorageFactory.ts b/src/components/persistentStorage/PersistentStorageFactory.ts new file mode 100644 index 000000000..d0ee58d5c --- /dev/null +++ b/src/components/persistentStorage/PersistentStorageFactory.ts @@ -0,0 +1,376 @@ +import { P2PCommandResponse } from '../../@types/index.js' +import type { AccessList } from '../../@types/AccessList.js' +import type { + DockerMountObject, + PersistentStorageObject +} from '../../@types/PersistentStorage.js' + +import sqlite3, { RunResult } from 'sqlite3' +import path from 'path' +import fs from 'fs' +import { getAddress } from 'ethers' +import { OceanNode } from '../../OceanNode.js' +import { checkAddressOnAccessList } from '../../utils/accessList.js' + +export class PersistentStorageAccessDeniedError extends Error { + constructor(message = 'You are not allowed to access this bucket') { + super(message) + this.name = 'PersistentStorageAccessDeniedError' + } +} + +function normalizeWeb3Address(addr: string): string { + try { + return getAddress(addr) + } catch { + return (addr ?? '').toLowerCase() + } +} + +function parseBucketAccessListsJson(accessListJson: string): AccessList[] { + try { + const parsed = JSON.parse(accessListJson || '[]') + return Array.isArray(parsed) ? (parsed as AccessList[]) : [] + } catch { + return [] + } +} + +export type BucketRow = { + bucketId: string + owner: string + accessListJson: string + createdAt: number +} + +export interface PersistentStorageFileInfo { + bucketId: string + name: string + size: number + lastModified: number +} + +export type CreateBucketResult = { + bucketId: string + owner: string + accessList: AccessList[] +} + +/** Bucket metadata from registry (list APIs and internal filtering). */ +export type PersistentStorageBucketRecord = { + bucketId: string + owner: string + createdAt: number + accessLists: AccessList[] +} + +export abstract class PersistentStorageFactory { + private db: sqlite3.Database + private node: OceanNode + private dbReady = false + private dbReadyPromise: Promise + + constructor(node: OceanNode) { + this.node = node + const dbDir = path.dirname('databases/persistentStorage.sqlite') + if (!fs.existsSync(dbDir)) { + fs.mkdirSync(dbDir, { recursive: true }) + } + this.db = new sqlite3.Database('databases/persistentStorage.sqlite') + const createBucketsSQL = ` + CREATE TABLE IF NOT EXISTS persistent_storage_buckets ( + bucketId TEXT PRIMARY KEY, + owner TEXT NOT NULL, + accessListJson TEXT NOT NULL, + createdAt INTEGER NOT NULL + ); + ` + this.dbReadyPromise = new Promise((resolve, reject) => { + this.db.run(createBucketsSQL, (err) => { + if (err) { + reject(err) + return + } + this.dbReady = true + resolve() + }) + }) + } + + public isDbReady(): boolean { + return this.dbReady + } + + private async ensureDbReady(): Promise { + if (this.dbReady) { + return + } + await this.dbReadyPromise + } + + /** + * Validate a bucket id. Today localfs uses UUIDs, so enforce UUIDv4. + * This is a security boundary because bucketId participates in filesystem paths. 
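 * For example (hypothetical ids): '9b2d6c1e-8f3a-4c5d-9e7f-0a1b2c3d4e5f' passes,
 * while 'foo', '../../etc' or a UUIDv1 value is rejected before any path is built.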
+ */ + public validateBucket(bucketId: string): void { + // UUID v4: xxxxxxxx-xxxx-4xxx-[89ab]xxx-xxxxxxxxxxxx + const uuidV4 = + /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i + if (typeof bucketId !== 'string' || !uuidV4.test(bucketId)) { + throw new Error('Invalid bucketId') + } + } + + public abstract createNewBucket( + accessList: AccessList[], + owner: string + ): Promise + + public abstract listFiles( + bucketId: string, + consumerAddress: string + ): Promise + + public abstract uploadFile( + bucketId: string, + fileName: string, + content: NodeJS.ReadableStream, + consumerAddress: string + ): Promise + + public abstract deleteFile( + bucketId: string, + fileName: string, + consumerAddress: string + ): Promise + + /** + * Returns a file object that can be attached to compute jobs. + * The concrete shape depends on the backend implementation. + */ + public abstract getFileObject( + bucketId: string, + fileName: string, + consumerAddress: string + ): Promise + + /** + * Returns a Docker mount descriptor for a specific bucket file. + * This is used by the Docker C2D engine to mount the file into the job container. + */ + public abstract getDockerMountObject( + bucketId: string, + fileName: string, + consumerAddress?: string + ): Promise + + // common functions + async getBucketAccessList(bucketId: string): Promise { + try { + const row = await this.getBucket(bucketId) + if (!row) { + return [] + } + return parseBucketAccessListsJson(row.accessListJson) + } catch { + return [] + } + } + + async getBucket(bucketId: string): Promise { + try { + const row = await this.dbGetBucket(bucketId) + return row + } catch { + return null + } + } + + /** + * Lists buckets for a given owner from the SQLite registry (metadata only). + * `owner` must already be normalized (e.g. checksummed `getAddress`). + * Backends that need setup (e.g. localfs init) should override and call `super.listBuckets(owner)`. + */ + async listBuckets(owner: string): Promise { + const rows = await this.dbListBucketsByOwner(owner) + return rows.map((row) => ({ + bucketId: row.bucketId, + owner: row.owner, + createdAt: row.createdAt, + accessLists: parseBucketAccessListsJson(row.accessListJson) + })) + } + + /* + * NOTE: db* methods are intentionally gated on ensureDbReady() to avoid races + * with constructor-time schema creation. + */ + + dbUpsertBucket( + bucketId: string, + owner: string, + accessListJson: string, + createdAt: number + ): Promise { + const sql = ` + INSERT INTO persistent_storage_buckets (bucketId, owner, accessListJson, createdAt) + VALUES (?, ?, ?, ?) + ON CONFLICT(bucketId) DO UPDATE SET accessListJson=excluded.accessListJson; + ` + return this.ensureDbReady().then( + () => + new Promise((resolve, reject) => { + this.db.run(sql, [bucketId, owner, accessListJson, createdAt], (err) => { + if (err) reject(err) + else resolve() + }) + }) + ) + } + + dbGetBucket(bucketId: string): Promise { + const sql = `SELECT bucketId, owner, accessListJson, createdAt FROM persistent_storage_buckets WHERE bucketId = ?` + return this.ensureDbReady().then( + () => + new Promise((resolve, reject) => { + this.db.get(sql, [bucketId], (err, row: BucketRow | undefined) => { + if (err) reject(err) + else resolve(row ?? null) + }) + }) + ) + } + + dbListBucketsByOwner(owner: string): Promise { + const sql = `SELECT bucketId, owner, accessListJson, createdAt FROM persistent_storage_buckets WHERE owner = ? 
ORDER BY createdAt ASC` + return this.ensureDbReady().then( + () => + new Promise((resolve, reject) => { + this.db.all(sql, [owner], (err, rows: BucketRow[]) => { + if (err) reject(err) + else resolve(rows ?? []) + }) + }) + ) + } + + dbDeleteBucket(bucketId: string): Promise { + const sql = `DELETE FROM persistent_storage_buckets WHERE bucketId = ?` + return this.ensureDbReady().then( + () => + new Promise((resolve, reject) => { + this.db.run(sql, [bucketId], function (this: RunResult, err) { + if (err) reject(err) + else resolve(this.changes === 1) + }) + }) + ) + } + + isAllowed(consumerAddress: string, accessLists: AccessList[]): Promise { + return checkAddressOnAccessList(consumerAddress, accessLists, this.node) + } + + /** Throws {@link PersistentStorageAccessDeniedError} if the consumer is not on the bucket access list. */ + public async assertConsumerAllowedForBucket( + consumerAddress: string, + bucketId: string + ): Promise { + const bucket = await this.getBucket(bucketId) + if (!bucket) { + throw new PersistentStorageAccessDeniedError() + } + const accessLists = parseBucketAccessListsJson(bucket.accessListJson) + if (normalizeWeb3Address(consumerAddress) === normalizeWeb3Address(bucket.owner)) { + return + } + if (!(await this.isAllowed(consumerAddress, accessLists))) { + throw new PersistentStorageAccessDeniedError() + } + } +} + +/** + * Algorithms must not reference node persistent storage; only datasets may use + * `nodePersistentStorage` / `localfs` file objects. + */ +export function rejectPersistentStorageFileObjectOnAlgorithm( + fileObject: unknown +): P2PCommandResponse | null { + if (fileObject === null || fileObject === undefined || typeof fileObject !== 'object') { + return null + } + const fo = fileObject as { type?: string } + if (fo.type === 'nodePersistentStorage' || fo.type === 'localfs') { + return { + stream: null, + status: { + httpStatus: 400, + error: + 'Algorithms cannot use node persistent storage file objects; only datasets may reference persistent storage.' + } + } + } + return null +} + +/** + * When a compute dataset uses a node persistent-storage file (localfs backend), + * ensure the consumer is on the bucket ACL before proceeding. 
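 * A qualifying dataset entry looks like (illustrative values):
 *   { fileObject: { type: 'nodePersistentStorage', bucketId: '<uuid>', fileName: 'data.csv' } }
 * File objects of any other type pass through untouched (null is returned).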
+ */ +export async function ensureConsumerAllowedForPersistentStorageLocalfsFileObject( + node: OceanNode, + consumerAddress: string, + fileObject: unknown +): Promise { + if (fileObject === null || fileObject === undefined || typeof fileObject !== 'object') { + return null + } + const fo = fileObject as { type?: string; bucketId?: unknown } + if (fo.type !== 'nodePersistentStorage') { + return null + } + if (typeof fo.bucketId !== 'string' || fo.bucketId.length === 0) { + return { + stream: null, + status: { + httpStatus: 400, + error: 'Persistent storage file object is missing a valid bucketId' + } + } + } + const cfg = node.getConfig().persistentStorage + if (!cfg?.enabled || cfg.type !== 'localfs') { + return { + stream: null, + status: { + httpStatus: 400, + error: + 'This compute job references node persistent storage (localfs), which is not enabled or not configured as localfs on this node' + } + } + } + const storage = node.getPersistentStorage() + if (!storage) { + return { + stream: null, + status: { + httpStatus: 400, + error: + 'This compute job references node persistent storage but persistent storage is not available on this node' + } + } + } + try { + await storage.assertConsumerAllowedForBucket(consumerAddress, fo.bucketId) + } catch (e) { + if (e instanceof PersistentStorageAccessDeniedError) { + return { + stream: null, + status: { httpStatus: 403, error: e.message } + } + } + throw e + } + return null +} diff --git a/src/components/persistentStorage/PersistentStorageLocalFS.ts b/src/components/persistentStorage/PersistentStorageLocalFS.ts new file mode 100644 index 000000000..4c1dec0bc --- /dev/null +++ b/src/components/persistentStorage/PersistentStorageLocalFS.ts @@ -0,0 +1,221 @@ +import fs from 'fs' +import fsp from 'fs/promises' +import path from 'path' +import { pipeline } from 'stream/promises' +import { randomUUID } from 'crypto' + +import type { AccessList } from '../../@types/AccessList.js' +import type { + DockerMountObject, + PersistentStorageLocalFSOptions, + PersistentStorageObject +} from '../../@types/PersistentStorage.js' + +import { + CreateBucketResult, + PersistentStorageBucketRecord, + PersistentStorageFactory, + PersistentStorageFileInfo +} from './PersistentStorageFactory.js' +import { OceanNode } from '../../OceanNode.js' +import { CORE_LOGGER } from '../../utils/logging/common.js' + +export class PersistentStorageLocalFS extends PersistentStorageFactory { + /* eslint-disable security/detect-non-literal-fs-filename -- localfs backend operates on filesystem paths */ + private baseFolder: string + + constructor(node: OceanNode) { + super(node) + const options = node.getConfig().persistentStorage + .options as PersistentStorageLocalFSOptions + + this.baseFolder = options.folder + + // Ensure base folder exists and is a directory (sync to avoid startup races). + try { + fs.mkdirSync(this.baseFolder, { recursive: true }) + const st = fs.statSync(this.baseFolder) + if (!st.isDirectory()) { + throw new Error( + `Persistent storage folder is not a directory: ${this.baseFolder}` + ) + } + fs.mkdirSync(path.join(this.baseFolder, 'buckets'), { recursive: true }) + } catch (e: any) { + if (e?.code === 'EACCES') { + throw new Error( + `Persistent storage folder is not accessible (EACCES): ${this.baseFolder}. 
` + + `Configure 'persistentStorage.options.folder' to a writable path inside the container and mount it as a volume.` + ) + } + throw e + } + } + + private bucketPath(bucketId: string): string { + return path.join(this.baseFolder, 'buckets', bucketId) + } + + private async ensureBucketExists(bucketId: string): Promise { + this.validateBucket(bucketId) + const bucketsRoot = path.resolve(this.baseFolder, 'buckets') + const resolvedBucketPath = path.resolve(this.bucketPath(bucketId)) + if ( + resolvedBucketPath !== bucketsRoot && + !resolvedBucketPath.startsWith(bucketsRoot + path.sep) + ) { + throw new Error('Invalid bucketId') + } + const row = await this.dbGetBucket(bucketId) + if (!row) { + throw new Error(`Bucket not found: ${bucketId}`) + } + } + + private async ensureFileExists(bucketId: string, fileName: string): Promise { + if (!fileName || fileName.includes('/') || fileName.includes('\\')) { + throw new Error('Invalid fileName') + } + const targetPath = path.join(this.bucketPath(bucketId), fileName) + try { + const st = await fsp.stat(targetPath) + if (!st.isFile()) { + throw new Error(`File not found: ${fileName}`) + } + } catch { + throw new Error(`File not found: ${fileName}`) + } + } + + // eslint-disable-next-line require-await + async listBuckets(owner: string): Promise { + return super.listBuckets(owner) + } + + async createNewBucket( + accessList: AccessList[], + owner: string + ): Promise { + const bucketId = randomUUID() + const createdAt = Math.floor(Date.now() / 1000) + const path = this.bucketPath(bucketId) + CORE_LOGGER.debug(`Creating ${path} folder for new bucket`) + await fsp.mkdir(path) + await super.dbUpsertBucket( + bucketId, + owner, + JSON.stringify(accessList ?? []), + createdAt + ) + + return { bucketId, owner, accessList } + } + + async listFiles( + bucketId: string, + consumerAddress: string + ): Promise { + await this.ensureBucketExists(bucketId) + await this.assertConsumerAllowedForBucket(consumerAddress, bucketId) + + const dir = this.bucketPath(bucketId) + const entries = await fsp.readdir(dir, { withFileTypes: true }) + const out: PersistentStorageFileInfo[] = [] + + for (const ent of entries) { + if (!ent.isFile()) continue + const filePath = path.join(dir, ent.name) + const st = await fsp.stat(filePath) + out.push({ + bucketId, + name: ent.name, + size: st.size, + lastModified: Math.floor(st.mtimeMs) + }) + } + + return out + } + + async uploadFile( + bucketId: string, + fileName: string, + content: NodeJS.ReadableStream, + consumerAddress: string + ): Promise { + await this.ensureBucketExists(bucketId) + await this.assertConsumerAllowedForBucket(consumerAddress, bucketId) + + if (!fileName || fileName.includes('/') || fileName.includes('\\')) { + throw new Error('Invalid fileName') + } + + const targetDir = this.bucketPath(bucketId) + await fsp.mkdir(targetDir, { recursive: true }) + const targetPath = path.join(targetDir, fileName) + + await pipeline(content, fs.createWriteStream(targetPath)) + + const st = await fsp.stat(targetPath) + return { + bucketId, + name: fileName, + size: st.size, + lastModified: Math.floor(st.mtimeMs) + } + } + + async deleteFile( + bucketId: string, + fileName: string, + consumerAddress: string + ): Promise { + await this.ensureBucketExists(bucketId) + await this.assertConsumerAllowedForBucket(consumerAddress, bucketId) + await this.ensureFileExists(bucketId, fileName) + + const targetPath = path.join(this.bucketPath(bucketId), fileName) + await fsp.rm(targetPath) + } + + async getFileObject( + bucketId: string, + 
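// Returns a backend-specific marker object rather than a downloadable URL; the
// Docker engine later resolves it via getDockerMountObject below into a bind
// mount shaped like (illustrative):
//   { Type: 'bind', Source: '<folder>/buckets/<bucketId>/<fileName>',
//     Target: '/data/persistentStorage/<bucketId>/<fileName>', ReadOnly: true }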
fileName: string, + consumerAddress: string + ): Promise { + await this.ensureBucketExists(bucketId) + await this.assertConsumerAllowedForBucket(consumerAddress, bucketId) + await this.ensureFileExists(bucketId, fileName) + + // This is intentionally not a downloadable URL; compute backends can interpret this object. + const obj: PersistentStorageObject = { + type: 'nodePersistentStorage', + bucketId, + fileName + } + return obj + } + + async getDockerMountObject( + bucketId: string, + fileName: string, + consumerAddress?: string + ): Promise { + await this.ensureBucketExists(bucketId) + if (consumerAddress) { + await this.assertConsumerAllowedForBucket(consumerAddress, bucketId) + } + await this.ensureFileExists(bucketId, fileName) + + const source = path.join(this.bucketPath(bucketId), fileName) + const target = path.posix.join('/data', 'persistentStorage', bucketId, fileName) + + return { + Type: 'bind', + Source: source, + Target: target, + ReadOnly: true + } + } +} +/* eslint-enable security/detect-non-literal-fs-filename */ diff --git a/src/components/persistentStorage/PersistentStorageS3.ts b/src/components/persistentStorage/PersistentStorageS3.ts new file mode 100644 index 000000000..bd4cac5ee --- /dev/null +++ b/src/components/persistentStorage/PersistentStorageS3.ts @@ -0,0 +1,86 @@ +import { + CreateBucketResult, + PersistentStorageBucketRecord, + PersistentStorageFactory, + PersistentStorageFileInfo +} from './PersistentStorageFactory.js' + +import type { AccessList } from '../../@types/AccessList.js' +import type { + DockerMountObject, + PersistentStorageS3Options, + PersistentStorageObject +} from '../../@types/PersistentStorage.js' +import { OceanNode } from '../../OceanNode.js' + +export class PersistentStorageS3 extends PersistentStorageFactory { + private options: PersistentStorageS3Options + constructor(node: OceanNode) { + super(node) + this.options = node.getConfig().persistentStorage + .options as PersistentStorageS3Options + } + + // eslint-disable-next-line require-await + async init(): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } + + async listBuckets(owner: string): Promise { + await this.init() + return super.listBuckets(owner) + } + + // eslint-disable-next-line require-await + async createNewBucket( + accessList: AccessList[], + _owner: string + ): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } + + // eslint-disable-next-line require-await + async listFiles( + _bucketId: string, + _consumerAddress: string + ): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } + + // eslint-disable-next-line require-await + async uploadFile( + _bucketId: string, + _fileName: string, + _content: Buffer | NodeJS.ReadableStream, + _consumerAddress: string + ): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } + + // eslint-disable-next-line require-await + async deleteFile( + _bucketId: string, + _fileName: string, + _consumerAddress: string + ): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } + + // eslint-disable-next-line require-await + async getFileObject( + _bucketId: string, + _fileName: string, + _consumerAddress: string + ): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } + + // eslint-disable-next-line require-await + async getDockerMountObject( + _bucketId: string, + _fileName: string, + _consumerAddress?: string + ): Promise { + throw new Error('PersistentStorageS3 is not implemented yet') + } 
+} diff --git a/src/components/persistentStorage/createPersistentStorage.ts b/src/components/persistentStorage/createPersistentStorage.ts new file mode 100644 index 000000000..c8588cde4 --- /dev/null +++ b/src/components/persistentStorage/createPersistentStorage.ts @@ -0,0 +1,23 @@ +import { OceanNode } from '../../OceanNode.js' + +import type { PersistentStorageFactory } from './PersistentStorageFactory.js' +import { PersistentStorageLocalFS } from './PersistentStorageLocalFS.js' +import { PersistentStorageS3 } from './PersistentStorageS3.js' + +export function createPersistentStorage(node: OceanNode): PersistentStorageFactory { + const config = node.getConfig().persistentStorage + if (!config?.enabled) { + throw new Error('Persistent storage is disabled') + } + + switch (config.type) { + case 'localfs': + return new PersistentStorageLocalFS(node) + case 's3': + return new PersistentStorageS3(node) + default: + throw new Error( + `Unsupported persistent storage type: ${(config as { type?: string })?.type}` + ) + } +} diff --git a/src/components/persistentStorage/index.ts b/src/components/persistentStorage/index.ts new file mode 100644 index 000000000..00ad60b6a --- /dev/null +++ b/src/components/persistentStorage/index.ts @@ -0,0 +1,4 @@ +export * from './PersistentStorageFactory.js' +export { createPersistentStorage } from './createPersistentStorage.js' +export * from './PersistentStorageLocalFS.js' +export * from './PersistentStorageS3.js' diff --git a/src/components/policyServer/index.ts b/src/components/policyServer/index.ts index 8596b8dc6..7e2fbf320 100644 --- a/src/components/policyServer/index.ts +++ b/src/components/policyServer/index.ts @@ -2,29 +2,50 @@ import { DDO } from '@oceanprotocol/ddo-js' import { PolicyServerResult } from '../../@types/policyServer.js' import { isDefined } from '../../utils/util.js' import { BaseFileObject } from '../../@types/fileObject.js' +import { OceanNode } from '../../OceanNode.js' export class PolicyServer { serverUrl: string + private apikey: string public constructor() { this.serverUrl = process.env.POLICY_SERVER_URL + this.apikey = process.env.POLICY_SERVER_API_KEY + } + + private attachNodeAddress(command: Record): Record { + const node = OceanNode.getInstance() + const keyManager = node.getKeyManager() + const nodeAddress = keyManager.getEthWallet().address + return { + ...command, + nodeAddress + } } private async askServer(command: any): Promise { if (!this.serverUrl) return { success: true, message: '', httpStatus: 404 } let response + const commandWithNodeAddress = this.attachNodeAddress(command) + + const headers: Record = { + 'Content-Type': 'application/json' + } + if (this.apikey) { + headers['X-API-Key'] = this.apikey + } try { response = await fetch(this.serverUrl, { - headers: { - 'Content-Type': 'application/json' - }, + headers, method: 'POST', - body: JSON.stringify(command) + body: JSON.stringify(commandWithNodeAddress) }) } catch (e) { + const errorText = + e instanceof Error ? e.message : typeof e === 'string' ? 
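// Behavioral note: a network-level failure now surfaces as a denial instead of an
// implicit allow; the caller receives something like (message text varies):
//   { success: false, message: 'fetch failed', httpStatus: 400 }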
e : JSON.stringify(e) return { - success: true, - message: '', + success: false, + message: errorText || 'Policy server request failed', httpStatus: 400 } } @@ -130,7 +151,7 @@ export class PolicyServer { async checkStartCompute( documentId: string, - ddo: DDO | Record, + ddo: DDO, serviceId: string, consumerAddress: string, policyServer: any diff --git a/src/index.ts b/src/index.ts index 679821f04..414255be3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -28,12 +28,18 @@ const app: Express = express() process.on('uncaughtException', (err) => { OCEAN_NODE_LOGGER.error(`Uncaught exception: ${err.message}`) + if (err?.stack) { + OCEAN_NODE_LOGGER.error(`Uncaught exception stack: ${err.stack}`) + } process.exit(1) }) process.on('unhandledRejection', (err) => { OCEAN_NODE_LOGGER.error( `Unhandled rejection: ${err instanceof Error ? err.message : String(err)}` ) + if (err instanceof Error && err.stack) { + OCEAN_NODE_LOGGER.error(`Unhandled rejection stack: ${err.stack}`) + } process.exit(1) }) diff --git a/src/test/config.json b/src/test/config.json index 778a1c1be..31dd6b7cf 100644 --- a/src/test/config.json +++ b/src/test/config.json @@ -96,76 +96,97 @@ "dockerComputeEnvironments": [ { "socketPath": "/var/run/docker.sock", - "resources": [ + "environments": [ { - "id": "disk", - "total": 1000000000 - } - ], - "storageExpiry": 604800, - "maxJobDuration": 3600, - "minJobDuration": 60, - "fees": { - "11155111": [ - { - "feeToken": "0x1B083D8584dd3e6Ff37d04a6e7e82b5F622f3985", - "prices": [ + "storageExpiry": 604800, + "maxJobDuration": 3600, + "minJobDuration": 60, + "resources": [ + { + "id": "cpu", + "total": 4, + "max": 4, + "min": 1, + "type": "cpu" + }, + { + "id": "ram", + "total": 10, + "max": 10, + "min": 1, + "type": "ram" + }, + { + "id": "disk", + "total": 100, + "max": 100, + "min": 0, + "type": "disk" + } + ], + "fees": { + "11155111": [ { - "id": "cpu", - "price": 1 - } - ] - }, - { - "feeToken": "0xfff9976782d46cc05630d1f6ebab18b2324d6b14", - "prices": [ + "feeToken": "0x1B083D8584dd3e6Ff37d04a6e7e82b5F622f3985", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + }, { - "id": "cpu", - "price": 1 + "feeToken": "0xfff9976782d46cc05630d1f6ebab18b2324d6b14", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] } - ] - } - ], - "11155420": [ - { - "feeToken": "0xf26c6C93f9f1d725e149d95f8E7B2334a406aD10", - "prices": [ + ], + "11155420": [ { - "id": "cpu", - "price": 1 + "feeToken": "0xf26c6C93f9f1d725e149d95f8E7B2334a406aD10", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + }, + { + "feeToken": "0x4200000000000000000000000000000000000006", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] } ] }, - { - "feeToken": "0x4200000000000000000000000000000000000006", - "prices": [ + "free": { + "maxJobDuration": 3600, + "minJobDuration": 60, + "maxJobs": 3, + "resources": [ { "id": "cpu", - "price": 1 + "max": 1 + }, + { + "id": "ram", + "max": 1 + }, + { + "id": "disk", + "max": 1 } ] } - ] - }, - "free": { - "maxJobDuration": 3600, - "minJobDuration": 60, - "maxJobs": 3, - "resources": [ - { - "id": "cpu", - "max": 1 - }, - { - "id": "ram", - "max": 1000000000 - }, - { - "id": "disk", - "max": 1000000000 - } - ] - } + } + ] } ] } diff --git a/src/test/integration/accessLists.test.ts b/src/test/integration/accessLists.test.ts index fe901f65e..18e62fd7d 100644 --- a/src/test/integration/accessLists.test.ts +++ b/src/test/integration/accessLists.test.ts @@ -17,7 +17,7 @@ import { AccessListContract, OceanNodeConfig } from '../../@types/OceanNode.js' import { homedir } from 
'os' import { getConfiguration } from '../../utils/config.js' import { assert, expect } from 'chai' -import { checkAddressOnAccessList } from '../../utils/accessList.js' +import { checkAddressOnAccessListWithSigner } from '../../utils/accessList.js' import { KeyManager } from '../../components/KeyManager/index.js' describe('Should deploy some accessLists before all other tests.', () => { @@ -174,7 +174,11 @@ describe('Should deploy some accessLists before all other tests.', () => { for (let i = 0; i < wallets.length; i++) { const account = await wallets[i].getAddress() expect( - (await checkAddressOnAccessList(accessListAddress, account, owner)) === true, + (await checkAddressOnAccessListWithSigner( + accessListAddress, + account, + owner + )) === true, `Address ${account} has no balance on Access List ${accessListAddress}, so its not Authorized` ) } @@ -187,7 +191,11 @@ describe('Should deploy some accessLists before all other tests.', () => { for (let i = wallets.length; i < 4; i++) { const account = await (await provider.getSigner(i)).getAddress() expect( - (await checkAddressOnAccessList(accessListAddress, account, owner)) === false, + (await checkAddressOnAccessListWithSigner( + accessListAddress, + account, + owner + )) === false, `Address ${account} should not be part Access List ${accessListAddress}, therefore its not Authorized` ) } diff --git a/src/test/integration/algorithmsAccess.test.ts b/src/test/integration/algorithmsAccess.test.ts index 95fb54bd9..677f7343c 100644 --- a/src/test/integration/algorithmsAccess.test.ts +++ b/src/test/integration/algorithmsAccess.test.ts @@ -97,11 +97,11 @@ describe('Trusted algorithms Flow', () => { '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58', JSON.stringify(['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']), `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, - '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"fees":{"' + + '[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"' + DEVELOPMENT_CHAIN_ID + '":[{"feeToken":"' + paymentToken + - '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60, "minJobDuration":10, "maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' ] ) ) diff --git a/src/test/integration/compute.test.ts b/src/test/integration/compute.test.ts index 98a1b8aa4..fcf491e19 100644 --- a/src/test/integration/compute.test.ts +++ b/src/test/integration/compute.test.ts @@ -58,11 +58,12 @@ import { buildEnvOverrideConfig, getMockSupportedNetworks, setupEnvironment, - tearDownEnvironment + tearDownEnvironment, + sleep } from '../utils/utils.js' import { ProviderFees, ProviderComputeInitializeResults } from '../../@types/Fees.js' -import { homedir } from 'os' +import { homedir, tmpdir } from 'os' import { publishAlgoDDO, publishDatasetDDO } from '../data/ddo.js' import { DEVELOPMENT_CHAIN_ID, getOceanArtifactsAdresses } from '../../utils/address.js' import ERC721Factory from 
'@oceanprotocol/contracts/artifacts/contracts/ERC721Factory.sol/ERC721Factory.json' with { type: 'json' } @@ -81,8 +82,16 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import Dockerode from 'dockerode' import { C2DEngineDocker } from '../../components/c2d/compute_engine_docker.js' import { createHashForSignature, safeSign } from '../utils/signature.js' - -const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)) +import { create256Hash } from '../../utils/crypt.js' +import fsp from 'fs/promises' +import path from 'path' +import { existsSync } from 'fs' +import { + PersistentStorageCreateBucketHandler, + PersistentStorageUploadFileHandler +} from '../../components/core/handler/persistentStorage.js' +import { deployAndGetAccessListConfig } from '../utils/contracts.js' +import * as tar from 'tar' /** * Polls getComputeEnvironments until every environment's resources (and free.resources) @@ -182,11 +191,11 @@ describe('Compute', () => { '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58', JSON.stringify(['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']), `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, - '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"fees":{"' + + '[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"' + DEVELOPMENT_CHAIN_ID + '":[{"feeToken":"' + paymentToken + - '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' ] ) ) @@ -205,7 +214,7 @@ describe('Compute', () => { oceanNode.blockchainRegistry ) oceanNode.addIndexer(indexer) - oceanNode.addC2DEngines() + await oceanNode.addC2DEngines() provider = new JsonRpcProvider('http://127.0.0.1:8545') publisherAccount = (await provider.getSigner(0)) as Signer @@ -2167,7 +2176,7 @@ describe('Compute', () => { }) it('should wait for jobWithOutputURL status 70 and download output from URL', async function () { - this.timeout(130_000) // waitForAllJobsToFinish can take up to 120s + this.timeout(180_000) // waitForAllJobsToFinish can take up to 180s assert(jobWithOutputURL, 'jobWithOutputURL must be set by previous test') const statusTask: ComputeGetStatusCommand = { command: PROTOCOL_COMMANDS.COMPUTE_GET_STATUS, @@ -2213,9 +2222,386 @@ describe('Compute', () => { ) }) + describe('Compute with persistent storage (localfs)', function () { + this.timeout(DEFAULT_TEST_TIMEOUT * 4) + + let psRoot: string + let psDockerEngine: C2DEngineDocker | undefined + let psSuiteActive = false + + const jobReachedSuccessfulTerminalStatus = (status: number) => + status === C2DStatusNumber.JobFinished || status === C2DStatusNumber.JobSettle + + const waitForComputeJobFinished = async ( + node: OceanNode, + fullJobId: string, + timeoutMs: number + ) => { + const deadline = Date.now() + timeoutMs + while (Date.now() < deadline) { + const r = await new ComputeGetStatusHandler(node).handle({ + command: PROTOCOL_COMMANDS.COMPUTE_GET_STATUS, + 
consumerAddress: null, + agreementId: null, + jobId: fullJobId + }) + assert.equal(r.status.httpStatus, 200) + const jobs = await streamToObject(r.stream as Readable) + const j = jobs[0] + if (!j) { + await sleep(2000) + continue + } + if (jobReachedSuccessfulTerminalStatus(j.status)) { + return j + } + if (j.dateFinished && !jobReachedSuccessfulTerminalStatus(j.status)) { + assert.fail( + `Job ended with status ${j.status} (${j.statusText}) instead of JobFinished or JobSettle` + ) + } + await sleep(3000) + } + assert.fail( + `Job ${fullJobId} did not reach JobFinished or JobSettle within ${timeoutMs}ms` + ) + } + + before(async function () { + try { + const d = new Dockerode() + await d.info() + } catch { + this.skip() + } + + psRoot = await fsp.mkdtemp(path.join(tmpdir(), 'ocean-compute-ps-')) + const bucketAllowList = await deployAndGetAccessListConfig( + publisherAccount, + provider, + [ + publisherAccount, + consumerAccount, + (await provider.getSigner(2)) as Signer, + (await provider.getSigner(3)) as Signer + ] + ) + assert(bucketAllowList, 'access list deploy failed for persistent storage') + + const cfg = await getConfiguration(true) + cfg.persistentStorage = { + enabled: true, + type: 'localfs', + accessLists: [bucketAllowList], + options: { folder: psRoot } + } + + const enginesOld = oceanNode.getC2DEngines() + if (enginesOld) await enginesOld.stopAllEngines() + const km = oceanNode.getKeyManager() + const br = oceanNode.blockchainRegistry + oceanNode = OceanNode.getInstance(cfg, dbconn, null, null, indexer, km, br, true) + oceanNode.addIndexer(indexer) + await oceanNode.addC2DEngines() + + const c2dEngines = oceanNode.getC2DEngines() + const engines = (c2dEngines as any).engines as C2DEngineDocker[] + psDockerEngine = engines.find((e) => e instanceof C2DEngineDocker) + if (!psDockerEngine) { + this.skip() + } + + await waitForAllJobsToFinish(oceanNode) + psSuiteActive = true + }) + + after(async () => { + if (!psSuiteActive) return + try { + const enginesOld = oceanNode.getC2DEngines() + if (enginesOld) await enginesOld.stopAllEngines() + const cfg = await getConfiguration(true) + cfg.persistentStorage = { + enabled: false, + type: 'localfs', + accessLists: [], + options: { folder: '/tmp' } + } + const km = oceanNode.getKeyManager() + const br = oceanNode.blockchainRegistry + oceanNode = OceanNode.getInstance(cfg, dbconn, null, null, indexer, km, br, true) + oceanNode.addIndexer(indexer) + await oceanNode.addC2DEngines() + } catch (e) { + console.error('Compute persistent-storage suite teardown failed:', e) + } + }) + + it('happy path: bind-mounted persistent storage file is readable inside the container', async function () { + const consumerAddress = await consumerAccount.getAddress() + let nonce = Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumerAccount, messageHashBytes) + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + assert.equal(createRes.status.httpStatus, 200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + const fileName = 'ps-data.txt' + const secret = 'PS_COMPUTE_INTEGRATION_OK\n' + nonce = Date.now().toString() + messageHashBytes = 
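// Every persistent-storage command in this suite is authenticated the same way:
// fresh nonce, hash bound to (consumerAddress, nonce, command), wallet signature:
//   const nonce = Date.now().toString()
//   const hash = createHashForSignature(consumerAddress, nonce, PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE)
//   const signature = await safeSign(consumerAccount, hash)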
createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE + ) + signature = await safeSign(consumerAccount, messageHashBytes) + const uploadRes = await new PersistentStorageUploadFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress, + signature, + nonce, + bucketId, + fileName, + stream: Readable.from(Buffer.from(secret)) + } as any) + assert.equal(uploadRes.status.httpStatus, 200) + + const rawcode = [ + "const fs = require('fs');", + `const p = '/data/persistentStorage/${bucketId}/${fileName}';`, + "const out = '/data/outputs/ps-result.txt';", + "fs.mkdirSync('/data/outputs', { recursive: true });", + "const c = fs.readFileSync(p, 'utf8');", + "fs.writeFileSync(out, c, 'utf8');" + ].join('\n') + + const algoMeta = publishedAlgoDataset.ddo.metadata.algorithm + + const initResp = await new ComputeInitializeHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.COMPUTE_INITIALIZE, + consumerAddress, + datasets: [ + { + fileObject: { + type: 'nodePersistentStorage', + bucketId, + fileName + } as any + } + ], + algorithm: { + meta: { + ...algoMeta, + rawcode + } + }, + environment: firstEnv.id, + payment: { + chainId: DEVELOPMENT_CHAIN_ID, + token: paymentToken + }, + maxJobDuration: 60 + } as any) + assert.equal(initResp.status.httpStatus, 200, String(initResp.status.error)) + + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.FREE_COMPUTE_START + ) + signature = await safeSign(consumerAccount, messageHashBytes) + + const startTask: FreeComputeStartCommand = { + command: PROTOCOL_COMMANDS.FREE_COMPUTE_START, + consumerAddress, + signature, + nonce, + environment: firstEnv.id, + queueMaxWaitTime: 0, + datasets: [ + { + fileObject: { + type: 'nodePersistentStorage', + bucketId, + fileName + } as any + } + ], + algorithm: { + meta: { + ...algoMeta, + rawcode + } + }, + output: null + } + + const startRes = await new FreeComputeStartHandler(oceanNode).handle(startTask) + assert.equal(startRes.status.httpStatus, 200, String(startRes.status.error)) + const started = await streamToObject(startRes.stream as Readable) + const fullJobId = started[0].jobId as string + const innerJobId = fullJobId.slice(fullJobId.indexOf('-') + 1) + + await waitForComputeJobFinished(oceanNode, fullJobId, 180_000) + + const base = (psDockerEngine as any).getStoragePath() as string + const outputsTarPath = path.join(base, innerJobId, 'data/outputs/outputs.tar') + /* eslint-disable security/detect-non-literal-fs-filename -- job paths from C2D engine */ + assert( + existsSync(outputsTarPath), + `expected outputs archive at ${outputsTarPath} (algorithm should write into /data/outputs before tar)` + ) + const extractDir = await fsp.mkdtemp(path.join(tmpdir(), 'ocean-ps-tar-')) + try { + await tar.x( + { + file: outputsTarPath, + cwd: extractDir + }, + ['outputs/ps-result.txt'] + ) + const extractedFile = path.join(extractDir, 'outputs/ps-result.txt') + assert( + existsSync(extractedFile), + 'expected outputs/ps-result.txt inside outputs.tar' + ) + const written = await fsp.readFile(extractedFile, 'utf8') + assert.equal(written, secret) + } finally { + await fsp.rm(extractDir, { recursive: true, force: true }) + } + /* eslint-enable security/detect-non-literal-fs-filename */ + }) + + it('denies free compute start when consumer is not on the bucket access list', async function () { + const ownerAddress = await consumerAccount.getAddress() + let nonce = 
Date.now().toString() + let messageHashBytes = createHashForSignature( + ownerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumerAccount, messageHashBytes) + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress: ownerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + assert.equal(createRes.status.httpStatus, 200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + const fileName = 'private.txt' + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + ownerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE + ) + signature = await safeSign(consumerAccount, messageHashBytes) + const uploadRes = await new PersistentStorageUploadFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress: ownerAddress, + signature, + nonce, + bucketId, + fileName, + stream: Readable.from(Buffer.from('secret')) + } as any) + assert.equal(uploadRes.status.httpStatus, 200) + + const intruderAddress = await nonAllowedAccount.getAddress() + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + intruderAddress, + nonce, + PROTOCOL_COMMANDS.FREE_COMPUTE_START + ) + signature = await safeSign(nonAllowedAccount, messageHashBytes) + + const algoMeta = publishedAlgoDataset.ddo.metadata.algorithm + + const initResp = await new ComputeInitializeHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.COMPUTE_INITIALIZE, + consumerAddress: intruderAddress, + datasets: [ + { + fileObject: { + type: 'nodePersistentStorage', + bucketId, + fileName + } as any + } + ], + algorithm: { + meta: { + ...algoMeta, + rawcode: "console.log('noop');" + } + }, + environment: firstEnv.id, + payment: { + chainId: DEVELOPMENT_CHAIN_ID, + token: paymentToken + }, + maxJobDuration: 60 + } as any) + assert.equal(initResp.status.httpStatus, 403, String(initResp.status.error)) + + const startTask: FreeComputeStartCommand = { + command: PROTOCOL_COMMANDS.FREE_COMPUTE_START, + consumerAddress: intruderAddress, + signature, + nonce, + environment: firstEnv.id, + queueMaxWaitTime: 0, + datasets: [ + { + fileObject: { + type: 'nodePersistentStorage', + bucketId, + fileName + } as any + } + ], + algorithm: { + meta: { + ...algoMeta, + rawcode: "console.log('noop');" + } + }, + output: null + } + + const startRes = await new FreeComputeStartHandler(oceanNode).handle(startTask) + assert.equal(startRes.status.httpStatus, 403, String(startRes.status.error)) + assert.include( + (startRes.status.error || '').toLowerCase(), + 'allow', + 'expected access-denied style message' + ) + }) + }) + after(async () => { await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() + await indexer.stopAllChainIndexers() }) }) @@ -2338,7 +2724,7 @@ describe('Compute Access Restrictions', () => { '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58', JSON.stringify(['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']), `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, - '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"access":{"addresses":["' + + 
'[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"access":{"addresses":["' + allowedAddress + '"],"accessLists":[]},"fees":{"' + DEVELOPMENT_CHAIN_ID + @@ -2346,7 +2732,7 @@ describe('Compute Access Restrictions', () => { paymentToken + '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"access":{"addresses":["' + allowedAddress + - '"],"accessLists":[]},"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + '"],"accessLists":[]},"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' ] ) ) @@ -2368,7 +2754,7 @@ describe('Compute Access Restrictions', () => { oceanNode.blockchainRegistry ) oceanNode.addIndexer(indexer) - oceanNode.addC2DEngines() + await oceanNode.addC2DEngines() publishedComputeDataset = await publishAsset(computeAsset, publisherAccount) publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) @@ -2502,36 +2888,44 @@ describe('Compute Access Restrictions', () => { JSON.stringify([ { socketPath: '/var/run/docker.sock', - resources: [{ id: 'disk', total: 10 }], - storageExpiry: 604800, - maxJobDuration: 3600, - minJobDuration: 60, - access: { - addresses: [], - accessLists: [{ [DEVELOPMENT_CHAIN_ID]: [accessListAddress] }] - }, - fees: { - [DEVELOPMENT_CHAIN_ID]: [ - { - feeToken: paymentToken, - prices: [{ id: 'cpu', price: 1 }] + environments: [ + { + storageExpiry: 604800, + maxJobDuration: 3600, + minJobDuration: 60, + resources: [ + { id: 'cpu', total: 4, max: 4, min: 1, type: 'cpu' }, + { id: 'ram', total: 10, max: 10, min: 1, type: 'ram' }, + { id: 'disk', total: 10, max: 10, min: 0, type: 'disk' } + ], + access: { + addresses: [], + accessLists: [{ [DEVELOPMENT_CHAIN_ID]: [accessListAddress] }] + }, + fees: { + [DEVELOPMENT_CHAIN_ID]: [ + { + feeToken: paymentToken, + prices: [{ id: 'cpu', price: 1 }] + } + ] + }, + free: { + maxJobDuration: 60, + minJobDuration: 10, + maxJobs: 3, + access: { + addresses: [], + accessLists: [{ [DEVELOPMENT_CHAIN_ID]: [accessListAddress] }] + }, + resources: [ + { id: 'cpu', max: 1 }, + { id: 'ram', max: 1 }, + { id: 'disk', max: 1 } + ] } - ] - }, - free: { - maxJobDuration: 60, - minJobDuration: 10, - maxJobs: 3, - access: { - addresses: [], - accessLists: [{ [DEVELOPMENT_CHAIN_ID]: [accessListAddress] }] - }, - resources: [ - { id: 'cpu', max: 1 }, - { id: 'ram', max: 1 }, - { id: 'disk', max: 1 } - ] - } + } + ] } ]) ] @@ -2555,7 +2949,7 @@ describe('Compute Access Restrictions', () => { oceanNode.blockchainRegistry ) oceanNode.addIndexer(indexer) - oceanNode.addC2DEngines() + await oceanNode.addC2DEngines() publishedComputeDataset = await publishAsset(computeAsset, publisherAccount) publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) @@ -2659,11 +3053,11 @@ describe('Compute Access Restrictions', () => { JSON.stringify([DEVELOPMENT_CHAIN_ID]), '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58', `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, - '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"paymentClaimInterval":60,"fees":{"' + + 
'[{"socketPath":"/var/run/docker.sock","paymentClaimInterval":60,"environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"' + DEVELOPMENT_CHAIN_ID + '":[{"feeToken":"' + paymentToken + - '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' ] ) ) @@ -2685,7 +3079,7 @@ describe('Compute Access Restrictions', () => { oceanNode.blockchainRegistry ) oceanNode.addIndexer(indexer) - oceanNode.addC2DEngines() + await oceanNode.addC2DEngines() const provider = new JsonRpcProvider('http://127.0.0.1:8545') const publisherAccount = (await provider.getSigner(0)) as Signer @@ -2733,6 +3127,7 @@ describe('Compute Access Restrictions', () => { const testJob: DBComputeJob = { owner: await consumerAccount.getAddress(), jobId: testJobId, + jobIdHash: create256Hash(testJobId), dateCreated: now, status: C2DStatusNumber.PublishingResults, statusText: C2DStatusText.PublishingResults, @@ -2749,6 +3144,7 @@ describe('Compute Access Restrictions', () => { token: paymentToken, lockTx: '0x123', claimTx: '', + cancelTx: '', cost: 0 }, resources: [ @@ -2901,6 +3297,7 @@ describe('Compute Access Restrictions', () => { const testJob: DBComputeJob = { owner: await consumerAccount.getAddress(), jobId: testJobId, + jobIdHash: create256Hash(testJobId), dateCreated: now.toString(), status: C2DStatusNumber.JobSettle, statusText: C2DStatusText.JobSettle, @@ -2917,6 +3314,7 @@ describe('Compute Access Restrictions', () => { token: paymentToken, lockTx: lockTx || '0x123', claimTx: '', + cancelTx: '', cost: 0 }, resources: [ @@ -2968,6 +3366,7 @@ describe('Compute Access Restrictions', () => { const testJob: DBComputeJob = { owner: await consumerAccount.getAddress(), jobId: testJobId, + jobIdHash: create256Hash(testJobId), dateCreated: now.toString(), status: C2DStatusNumber.JobSettle, statusText: C2DStatusText.JobSettle, @@ -2984,6 +3383,7 @@ describe('Compute Access Restrictions', () => { token: paymentToken, lockTx: '0xexpired', claimTx: '', + cancelTx: '', cost: 0 }, resources: [ @@ -3032,6 +3432,7 @@ describe('Compute Access Restrictions', () => { const testJob: DBComputeJob = { owner: await consumerAccount.getAddress(), jobId: testJobId, + jobIdHash: create256Hash(testJobId), dateCreated: now, status: C2DStatusNumber.JobSettle, statusText: C2DStatusText.JobSettle, @@ -3095,6 +3496,7 @@ describe('Compute Access Restrictions', () => { const testJob: DBComputeJob = { owner: await consumerAccount.getAddress(), jobId: testJobId, + jobIdHash: create256Hash(testJobId), dateCreated: now.toString(), status: C2DStatusNumber.JobSettle, statusText: C2DStatusText.JobSettle, @@ -3158,19 +3560,5 @@ describe('Compute Access Restrictions', () => { ) } }) - - it('should start payment claim timer on engine start', function () { - // Verify timer methods exist - // Timer might be null if not started yet, or a NodeJS.Timeout if started - // We can't easily test the timer directly, but we can verify the method exists - assert( - typeof (dockerEngine as any).startPaymentTimer === 'function', - 'startPaymentTimer method should exist' 
- ) - assert( - typeof (dockerEngine as any).claimPayments === 'function', - 'claimPayments method should exist' - ) - }) }) }) diff --git a/src/test/integration/credentials.test.ts b/src/test/integration/credentials.test.ts index db9945ea6..6bd7d83ba 100644 --- a/src/test/integration/credentials.test.ts +++ b/src/test/integration/credentials.test.ts @@ -126,11 +126,11 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { JSON.stringify([await publisherAccount.getAddress()]), JSON.stringify([await publisherAccount.getAddress()]), `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, - '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"fees":{"' + + '[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"' + DEVELOPMENT_CHAIN_ID + '":[{"feeToken":"' + paymentToken + - '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' ] ) ) diff --git a/src/test/integration/getJobs.test.ts b/src/test/integration/getJobs.test.ts index 58aba7857..3311ec7f9 100644 --- a/src/test/integration/getJobs.test.ts +++ b/src/test/integration/getJobs.test.ts @@ -17,6 +17,7 @@ import { tearDownEnvironment } from '../utils/utils.js' import { streamToObject } from '../../utils/util.js' +import { create256Hash } from '../../utils/crypt.js' // Helper to create a minimal valid DBComputeJob function buildJob(overrides: Partial = {}): DBComputeJob { @@ -25,6 +26,9 @@ function buildJob(overrides: Partial = {}): DBComputeJob { owner: overrides.owner || '0xowner_test', did: overrides.did, jobId: overrides.jobId || `job-${Date.now()}-${Math.random().toString(36).slice(2)}`, + jobIdHash: create256Hash( + overrides.jobId || `job-${Date.now()}-${Math.random().toString(36).slice(2)}` + ), dateCreated: overrides.dateCreated || nowSec, dateFinished: overrides.dateFinished || (null as unknown as string), status: overrides.status ?? 
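// Caution: when overrides.jobId is absent, the jobId and jobIdHash fields
// above each evaluate their own `job-${Date.now()}-${Math.random()...}`
// fallback, so the stored hash is computed over a different generated id
// than the jobId actually kept on the record.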
C2DStatusNumber.JobStarted, @@ -56,7 +60,9 @@ function buildJob(overrides: Partial = {}): DBComputeJob { payment: overrides.payment, additionalViewers: overrides.additionalViewers || [], algoDuration: overrides.algoDuration || 0, - queueMaxWaitTime: overrides.queueMaxWaitTime || 0 + queueMaxWaitTime: overrides.queueMaxWaitTime || 0, + buildStartTimestamp: overrides.buildStartTimestamp || '0', + buildStopTimestamp: overrides.buildStopTimestamp || '0' } } diff --git a/src/test/integration/imageCleanup.test.ts b/src/test/integration/imageCleanup.test.ts index ab3b5304c..31f60d561 100644 --- a/src/test/integration/imageCleanup.test.ts +++ b/src/test/integration/imageCleanup.test.ts @@ -33,24 +33,32 @@ describe('Docker Image Cleanup Integration Tests', () => { JSON.stringify([ { socketPath: '/var/run/docker.sock', - resources: [{ id: 'disk', total: 10 }], - storageExpiry: 604800, - maxJobDuration: 3600, - minJobDuration: 60, - fees: { - '1': [ - { - feeToken: '0x123', - prices: [{ id: 'cpu', price: 1 }] - } - ] - }, - access: { - addresses: [], - accessLists: null - }, imageRetentionDays: 7, - imageCleanupInterval: 60 // 1 minute for testing + imageCleanupInterval: 60, // 1 minute for testing + environments: [ + { + storageExpiry: 604800, + maxJobDuration: 3600, + minJobDuration: 60, + resources: [ + { id: 'cpu', total: 4, max: 4, min: 1, type: 'cpu' }, + { id: 'ram', total: 10, max: 10, min: 1, type: 'ram' }, + { id: 'disk', total: 10, max: 10, min: 0, type: 'disk' } + ], + fees: { + '1': [ + { + feeToken: '0x123', + prices: [{ id: 'cpu', price: 1 }] + } + ] + }, + access: { + addresses: [], + accessLists: null + } + } + ] } ]) ] diff --git a/src/test/integration/persistentStorage.test.ts b/src/test/integration/persistentStorage.test.ts new file mode 100644 index 000000000..87cd4b8b3 --- /dev/null +++ b/src/test/integration/persistentStorage.test.ts @@ -0,0 +1,693 @@ +import { expect } from 'chai' +import fsp from 'fs/promises' +import os from 'os' +import path from 'path' +import { Readable } from 'stream' +import { getAddress, JsonRpcProvider, Signer } from 'ethers' + +import { Database } from '../../components/database/index.js' +import { + PersistentStorageCreateBucketHandler, + PersistentStorageDeleteFileHandler, + PersistentStorageGetBucketsHandler, + PersistentStorageGetFileObjectHandler, + PersistentStorageListFilesHandler, + PersistentStorageUploadFileHandler +} from '../../components/core/handler/persistentStorage.js' +import { StatusHandler } from '../../components/core/handler/statusHandler.js' +import { OceanNode } from '../../OceanNode.js' +import type { AccessList } from '../../@types/AccessList.js' +import { ENVIRONMENT_VARIABLES, PROTOCOL_COMMANDS } from '../../utils/constants.js' +import { getConfiguration } from '../../utils/config.js' +import { streamToObject, streamToString } from '../../utils/util.js' +import { + DEFAULT_TEST_TIMEOUT, + OverrideEnvConfig, + TEST_ENV_CONFIG_FILE, + buildEnvOverrideConfig, + setupEnvironment, + tearDownEnvironment, + sleep +} from '../utils/utils.js' +import { createHashForSignature, safeSign } from '../utils/signature.js' + +import { BlockchainRegistry } from '../../components/BlockchainRegistry/index.js' +import { Blockchain } from '../../utils/blockchain.js' +import { RPCS, SupportedNetwork } from '../../@types/blockchain.js' +import { DEVELOPMENT_CHAIN_ID } from '../../utils/address.js' +import { deployAndGetAccessListConfig } from '../utils/contracts.js' +import { OceanNodeConfig, OceanNodeStatus } from '../../@types/OceanNode.js' 
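// Integration coverage below: bucket lifecycle (create → upload → list →
// delete), file-object lookup, on-chain access-list enforcement for
// forbidden consumers, 404 (getFileObject) and 500 (deleteFile) on missing
// files, getBuckets listing, validate() failure when accessLists is
// missing, and the error path when persistent storage is disabled.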
+import { KeyManager } from '../../components/KeyManager/index.js' + +describe('Persistent storage handlers (integration)', function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + let previousConfiguration: OverrideEnvConfig[] + let config: OceanNodeConfig + let database: Database + let oceanNode: OceanNode + let consumer: Signer + let psRoot: string + + let provider: JsonRpcProvider + let blockchain: Blockchain + let owner: Signer + let wallets: Signer[] = [] + let forbiddenConsumer: Signer + let bucketAllowList: any + + before(async () => { + provider = new JsonRpcProvider('http://127.0.0.1:8545') + config = await getConfiguration() // Force reload the configuration + + wallets = [ + (await provider.getSigner(0)) as Signer, + (await provider.getSigner(1)) as Signer, + (await provider.getSigner(2)) as Signer, + (await provider.getSigner(3)) as Signer + ] + forbiddenConsumer = (await provider.getSigner(4)) as Signer + + const rpcs: RPCS = config.supportedNetworks + const chain: SupportedNetwork = rpcs[String(DEVELOPMENT_CHAIN_ID)] + const keyManager = new KeyManager(config) + const blockchains = new BlockchainRegistry(keyManager, config) + blockchain = blockchains.getBlockchain(chain.chainId) + + owner = await blockchain.getSigner() + + // ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS_LIST + const accessListPublishers = await deployAndGetAccessListConfig( + owner, + provider, + wallets + ) + bucketAllowList = accessListPublishers + previousConfiguration = await setupEnvironment( + TEST_ENV_CONFIG_FILE, + buildEnvOverrideConfig( + [ENVIRONMENT_VARIABLES.PRIVATE_KEY], + ['0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58'] + ) + ) + + config = await getConfiguration(true) + psRoot = await fsp.mkdtemp(path.join(os.tmpdir(), 'ocean-ps-it-')) + config.persistentStorage = { + enabled: true, + type: 'localfs', + accessLists: [bucketAllowList], + options: { folder: psRoot } + } + + database = await Database.init(config.dbConfig) + oceanNode = await OceanNode.getInstance( + config, + database, + undefined, + undefined, + undefined, + undefined, + undefined, + true + ) + + consumer = (await provider.getSigner(1)) as Signer + }) + + after(async () => { + await tearDownEnvironment(previousConfiguration) + // await fsp.rm(psRoot, { recursive: true, force: true }) + }) + + it('should expose persistent storage access lists on node status', async () => { + const statusCommand = { + command: PROTOCOL_COMMANDS.STATUS, + node: oceanNode.getKeyManager().getPeerId().toString() + } + const response = await new StatusHandler(oceanNode).handle(statusCommand) + expect(response.status.httpStatus).to.equal(200) + const body = await streamToString(response.stream as Readable) + const nodeStatus = JSON.parse(body) as OceanNodeStatus + expect(nodeStatus.persistentStorage).to.be.an('object') + expect(nodeStatus.persistentStorage?.accessLists).to.be.an('array').with.lengthOf(1) + }) + + it('create bucket → upload → list → delete (happy path)', async () => { + const consumerAddress = await consumer.getAddress() + let nonce = Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumer, messageHashBytes) + + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + + 
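// Every handler responds with a Readable stream; streamToObject (or
// streamToString for the status call above) parses the JSON body before
// the assertions run.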
expect(createRes.status.httpStatus).to.equal(200) + expect(createRes.stream).to.be.instanceOf(Readable) + const created = await streamToObject(createRes.stream as Readable) + expect(created.bucketId).to.be.a('string') + expect(getAddress(created.owner)).to.equal(getAddress(consumerAddress)) + const bucketId = created.bucketId as string + + const fileName = 'hello.txt' + const body = Buffer.from('persistent-storage-it') + + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE + ) + signature = await safeSign(consumer, messageHashBytes) + const uploadRes = await new PersistentStorageUploadFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress, + signature, + nonce, + bucketId, + fileName, + stream: Readable.from(body) + } as any) + expect(uploadRes.status.httpStatus).to.equal(200) + const uploaded = await streamToObject(uploadRes.stream as Readable) + expect(uploaded.name).to.equal(fileName) + expect(uploaded.size).to.equal(body.length) + await sleep(1000) + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES + ) + signature = await safeSign(consumer, messageHashBytes) + const listRes = await new PersistentStorageListFilesHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES, + consumerAddress, + signature, + nonce, + bucketId, + authorization: undefined + } as any) + + expect(listRes.status.httpStatus).to.equal(200) + const listed = await streamToObject(listRes.stream as Readable) + expect(listed).to.be.an('array') + expect(listed.some((f: { name: string }) => f.name === fileName)).to.equal(true) + await sleep(1000) + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE + ) + signature = await safeSign(consumer, messageHashBytes) + const delRes = await new PersistentStorageDeleteFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE, + consumerAddress, + signature, + nonce, + chainId: 8996, + bucketId, + fileName, + authorization: undefined + } as any) + + expect(delRes.status.httpStatus).to.equal(200) + await sleep(1000) + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES + ) + signature = await safeSign(consumer, messageHashBytes) + const listAfterDel = await new PersistentStorageListFilesHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES, + consumerAddress, + signature, + nonce, + bucketId, + authorization: undefined + } as any) + expect(listAfterDel.status.httpStatus).to.equal(200) + const listedAfter = await streamToObject(listAfterDel.stream as Readable) + expect(listedAfter.some((f: { name: string }) => f.name === fileName)).to.equal(false) + }) + + it('getFileObject returns a file object for an allowed consumer', async () => { + const consumerAddress = await consumer.getAddress() + + let nonce = Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumer, messageHashBytes) + + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: 
PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + expect(createRes.status.httpStatus).to.equal(200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + const fileName = 'obj.txt' + const body = Buffer.from('file-object') + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE + ) + signature = await safeSign(consumer, messageHashBytes) + const uploadRes = await new PersistentStorageUploadFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress, + signature, + nonce, + bucketId, + fileName, + stream: Readable.from(body) + } as any) + expect(uploadRes.status.httpStatus).to.equal(200) + + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT + ) + signature = await safeSign(consumer, messageHashBytes) + const objRes = await new PersistentStorageGetFileObjectHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT, + consumerAddress, + signature, + nonce, + bucketId, + fileName, + authorization: undefined + } as any) + expect(objRes.status.httpStatus).to.equal(200) + const obj = await streamToObject(objRes.stream as Readable) + expect(obj).to.be.an('object') + expect(obj.bucketId).to.equal(bucketId) + expect(obj.fileName).to.equal(fileName) + }) + + it('should not create bucket when consumer is not on allow list', async () => { + const forbiddenConsumerAddress = await forbiddenConsumer.getAddress() + const nonce = Date.now().toString() + const messageHashBytes = createHashForSignature( + forbiddenConsumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + const signature = await safeSign(forbiddenConsumer, messageHashBytes) + + const res = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress: forbiddenConsumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + + expect(res.status.httpStatus).to.equal(403) + expect(res.status.error).to.contain('not allowed') + }) + + it('should deny forbiddenConsumer for bucket operations when bucket has accessList', async () => { + // Create a bucket whose ACL allows only wallets[0..3] + const consumerAddress = await consumer.getAddress() + let nonce = Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumer, messageHashBytes) + + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [bucketAllowList], + authorization: undefined + } as any) + + expect(createRes.status.httpStatus).to.equal(200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + // Forbidden consumer tries to list files -> should fail + const forbiddenConsumerAddress = await forbiddenConsumer.getAddress() + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + forbiddenConsumerAddress, + nonce, + 
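// The forbidden consumer produces a perfectly valid signature here, so the
// 403s asserted below can only come from the bucket's access list, not
// from signature validation.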
PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES + ) + signature = await safeSign(forbiddenConsumer, messageHashBytes) + const listRes = await new PersistentStorageListFilesHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES, + consumerAddress: forbiddenConsumerAddress, + signature, + nonce, + bucketId, + authorization: undefined + } as any) + expect(listRes.status.httpStatus).to.equal(403) + expect(listRes.status.error).to.contain('not allowed') + + // Forbidden consumer tries to upload -> should fail + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + forbiddenConsumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE + ) + signature = await safeSign(forbiddenConsumer, messageHashBytes) + const uploadRes = await new PersistentStorageUploadFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress: forbiddenConsumerAddress, + signature, + nonce, + bucketId, + fileName: 'forbidden.txt', + stream: Readable.from(Buffer.from('nope')), + authorization: undefined + } as any) + expect(uploadRes.status.httpStatus).to.equal(403) + expect(uploadRes.status.error).to.contain('not allowed') + }) + + it('getFileObject should fail for forbiddenConsumer when bucket has accessList', async () => { + // Create a bucket whose ACL allows only wallets[0..3] + const consumerAddress = await consumer.getAddress() + let nonce = Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumer, messageHashBytes) + + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [bucketAllowList], + authorization: undefined + } as any) + expect(createRes.status.httpStatus).to.equal(200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + const fileName = 'forbidden-obj.txt' + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE + ) + signature = await safeSign(consumer, messageHashBytes) + const uploadRes = await new PersistentStorageUploadFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE, + consumerAddress, + signature, + nonce, + bucketId, + fileName, + stream: Readable.from(Buffer.from('secret')) + } as any) + expect(uploadRes.status.httpStatus).to.equal(200) + + const forbiddenConsumerAddress = await forbiddenConsumer.getAddress() + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + forbiddenConsumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT + ) + signature = await safeSign(forbiddenConsumer, messageHashBytes) + + const objRes = await new PersistentStorageGetFileObjectHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT, + consumerAddress: forbiddenConsumerAddress, + signature, + nonce, + bucketId, + fileName, + authorization: undefined + } as any) + + expect(objRes.status.httpStatus).to.equal(403) + expect(objRes.status.error).to.contain('not allowed') + }) + + it('getFileObject should fail when file does not exist', async () => { + const consumerAddress = await consumer.getAddress() + + let nonce = 
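// Note the asymmetry these two missing-file tests pin down: getFileObject
// answers 404, while deleteFile surfaces the same condition as a 500.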
Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumer, messageHashBytes) + + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + expect(createRes.status.httpStatus).to.equal(200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + const missingFileName = 'missing.txt' + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT + ) + signature = await safeSign(consumer, messageHashBytes) + + const objRes = await new PersistentStorageGetFileObjectHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT, + consumerAddress, + signature, + nonce, + bucketId, + fileName: missingFileName, + authorization: undefined + } as any) + expect(objRes.status.httpStatus).to.equal(404) + expect(objRes.status.error?.toLowerCase()).to.contain('file not found') + }) + + it('deleteFile should fail when file does not exist', async () => { + const consumerAddress = await consumer.getAddress() + + let nonce = Date.now().toString() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + let signature = await safeSign(consumer, messageHashBytes) + + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + expect(createRes.status.httpStatus).to.equal(200) + const created = await streamToObject(createRes.stream as Readable) + const bucketId = created.bucketId as string + + const missingFileName = 'missing-delete.txt' + nonce = Date.now().toString() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE + ) + signature = await safeSign(consumer, messageHashBytes) + + const delRes = await new PersistentStorageDeleteFileHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE, + consumerAddress, + signature, + nonce, + chainId: 8996, + bucketId, + fileName: missingFileName, + authorization: undefined + } as any) + expect(delRes.status.httpStatus).to.equal(500) + expect(delRes.status.error?.toLowerCase()).to.contain('file not found') + }) + + it('getBuckets returns buckets the consumer can access', async () => { + const consumerAddress = await consumer.getAddress() + await sleep(1000) + let nonce = Date.now() + let messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS + ) + let signature = await safeSign(consumer, messageHashBytes) + const beforeCreate = await new PersistentStorageGetBucketsHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS, + consumerAddress, + signature, + nonce, + chainId: 8996, + owner: consumerAddress, + authorization: undefined + } as any) + expect(beforeCreate.status.httpStatus).to.equal(200) + const beforeList = await streamToObject(beforeCreate.stream as Readable) + 
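// Baseline snapshot: afterList (captured below) must contain the new
// bucketId and be at least one entry longer. Note this test passes nonce
// as a raw Date.now() number rather than a string, leaning on the
// `as any` cast over the command payload.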
expect(beforeList).to.be.an('array') + await sleep(1000) + nonce = Date.now() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + signature = await safeSign(consumer, messageHashBytes) + const createRes = await new PersistentStorageCreateBucketHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + expect(createRes.status.httpStatus).to.equal(200) + const created = await streamToObject(createRes.stream as Readable) + const newBucketId = created.bucketId as string + await sleep(1000) + nonce = Date.now() + messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS + ) + signature = await safeSign(consumer, messageHashBytes) + const afterCreate = await new PersistentStorageGetBucketsHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS, + consumerAddress, + signature, + nonce, + chainId: 8996, + owner: consumerAddress, + authorization: undefined + } as any) + expect(afterCreate.status.httpStatus).to.equal(200) + const afterList = await streamToObject(afterCreate.stream as Readable) + expect(afterList).to.be.an('array') + const found = afterList.find((b: { bucketId: string }) => b.bucketId === newBucketId) + expect(found).to.be.an('object') + expect(found.createdAt).to.be.a('number') + expect(getAddress(found.owner)).to.equal(getAddress(consumerAddress)) + expect(found.accessLists).to.be.an('array') + expect(afterList.length).to.be.at.least(beforeList.length + 1) + }) + + it('create bucket validate fails when accessLists is missing', async () => { + const consumerAddress = await consumer.getAddress() + await sleep(1000) + const nonce = Date.now().toString() + const messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + const signature = await safeSign(consumer, messageHashBytes) + const validation = await new PersistentStorageCreateBucketHandler(oceanNode).validate( + { + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce + } as any + ) + + expect(validation.valid).to.equal(false) + expect(validation.reason).to.contain('accessLists') + }) + + it('returns error when persistent storage is disabled', async () => { + const disabledConfig = { + ...config, + persistentStorage: { + enabled: false, + type: 'localfs' as const, + accessLists: [] as AccessList[], + options: { folder: psRoot } + } + } + const nodeDisabled = await OceanNode.getInstance( + disabledConfig, + database, + undefined, + undefined, + undefined, + undefined, + undefined, + true + ) + + const consumerAddress = await consumer.getAddress() + await sleep(1000) + const nonce = Date.now().toString() + const messageHashBytes = createHashForSignature( + consumerAddress, + nonce, + PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET + ) + const signature = await safeSign(consumer, messageHashBytes) + + const res = await new PersistentStorageCreateBucketHandler(nodeDisabled).handle({ + command: PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET, + consumerAddress, + signature, + nonce, + accessLists: [], + authorization: undefined + } as any) + + expect(res.status.httpStatus).to.equal(500) + expect(res.status.error).to.match(/not configured|disabled/i) + }) +}) diff --git a/src/test/unit/buildImage.test.ts 
b/src/test/unit/buildImage.test.ts new file mode 100644 index 000000000..caafbc234 --- /dev/null +++ b/src/test/unit/buildImage.test.ts @@ -0,0 +1,157 @@ +import { expect } from 'chai' +import sinon from 'sinon' +import { mkdirSync } from 'fs' +import os from 'os' +import path from 'path' +import { Readable } from 'stream' +import { C2DStatusNumber } from '../../@types/C2D/C2D.js' +import type { DBComputeJob } from '../../@types/C2D/C2D.js' + +function ensureTestEnv() { + // Several runtime modules validate env on import; provide safe defaults for unit tests. + if (!process.env.PRIVATE_KEY) { + process.env.PRIVATE_KEY = `0x${'11'.repeat(32)}` + } +} + +async function makeEngine(opts: { tempFolder: string }) { + ensureTestEnv() + const { C2DEngineDocker } = + await import('../../components/c2d/compute_engine_docker.js') + const db = { + updateJob: sinon.stub().resolves(), + // buildImage() doesn't call getJobs*; keep minimal surface + getRunningJobs: sinon.stub().resolves([]), + getJobsByStatus: sinon.stub().resolves([]) + } as any + + const clusterConfig = { + type: 2, + hash: 'test-hash', + tempFolder: opts.tempFolder, + connection: { + // keep constructor happy + imageRetentionDays: 1, + imageCleanupInterval: 999999, + paymentClaimInterval: 999999 + } + } as any + + const engine = new C2DEngineDocker(clusterConfig, db, {} as any, {} as any, {} as any) + + // prevent side-effects during unit tests + ;(engine as any).cleanupJob = sinon.stub().resolves() + ;(engine as any).updateImageUsage = sinon.stub().resolves() + + return { engine, db } +} + +function makeJob(base: Partial = {}): DBComputeJob { + return { + jobId: 'job-123', + owner: '0x0', + environment: 'env-1', + dateCreated: String(Date.now() / 1000), + dateFinished: null as any, + clusterHash: 'test-hash', + isFree: false, + isRunning: true, + isStarted: false, + stopRequested: false, + status: C2DStatusNumber.BuildImage, + statusText: 'BuildImage', + resources: [ + { id: 'cpu', amount: 1 }, + { id: 'ram', amount: 1 }, + { id: 'disk', amount: 1 } + ], + maxJobDuration: 60, + queueMaxWaitTime: 0, + // timestamps + algoStartTimestamp: '0', + algoStopTimestamp: '0', + buildStartTimestamp: '0', + buildStopTimestamp: '0', + // algorithm/container + algorithm: { + did: 'did:op:algo', + serviceIndex: 0, + meta: { + container: { + image: 'dummy', + tag: 'latest', + entrypoint: 'node', + checksum: '0x0', + dockerfile: 'FROM alpine:3.18\nRUN echo hi\n' + } + } + } as any, + input: [] as any, + output: '' as any, + containerImage: 'ocean-node-test:job-123', + algoDuration: 0, + encryptedDockerRegistryAuth: undefined, + payment: null as any, + additionalViewers: [], + logs: null as any, + results: null as any, + jobIdHash: '1', + ...base + } as DBComputeJob +} + +describe('C2DEngineDocker.buildImage', () => { + afterEach(() => { + sinon.restore() + }) + + it('marks build as failed if image is missing after build completes', async () => { + const tempFolder = path.join(os.tmpdir(), 'ocean-node-buildimage-test') + const { engine, db } = await makeEngine({ tempFolder }) + + const job = makeJob() + mkdirSync(path.join(tempFolder, job.jobId, 'data', 'logs'), { recursive: true }) + + const buildStream = new Readable({ read() {} }) + ;(engine as any).docker = { + buildImage: sinon.stub().resolves(buildStream), + getImage: sinon.stub().returns({ + inspect: sinon.stub().rejects(new Error('no such image')) + }) + } + + const p = (engine as any).buildImage(job, null) + await new Promise((resolve) => setImmediate(resolve)) + buildStream.emit('end') + 
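// The stubbed docker.buildImage resolves to this bare Readable; emitting
// 'end' on the next tick signals build completion so `await p` can settle
// (assuming buildImage waits on the build stream ending before it
// inspects the image).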
await p + + expect(db.updateJob.called).to.equal(true) + const lastUpdate = db.updateJob.lastCall.args[0] as DBComputeJob + expect(lastUpdate.status).to.equal(C2DStatusNumber.BuildImageFailed) + }) + + it('only logs success when image exists', async () => { + const tempFolder = path.join(os.tmpdir(), 'ocean-node-buildimage-test-success') + const { engine, db } = await makeEngine({ tempFolder }) + + const job = makeJob({ containerImage: 'ocean-node-test:job-123-success' }) + mkdirSync(path.join(tempFolder, job.jobId, 'data', 'logs'), { recursive: true }) + + const buildStream = new Readable({ read() {} }) + ;(engine as any).docker = { + buildImage: sinon.stub().resolves(buildStream), + getImage: sinon.stub().returns({ + inspect: sinon.stub().resolves({}) + }) + } + + const p = (engine as any).buildImage(job, null) + await new Promise((resolve) => setImmediate(resolve)) + buildStream.emit('end') + await p + + const lastUpdate = db.updateJob.lastCall.args[0] as DBComputeJob + expect(lastUpdate.status).to.equal(C2DStatusNumber.ConfiguringVolumes) + expect(Number.parseFloat(lastUpdate.buildStopTimestamp)).to.be.greaterThan(0) + }) +}) diff --git a/src/test/unit/compute.test.ts b/src/test/unit/compute.test.ts index 3f000f294..4e5120e90 100644 --- a/src/test/unit/compute.test.ts +++ b/src/test/unit/compute.test.ts @@ -7,8 +7,9 @@ import { C2DStatusText, ComputeAlgorithm, ComputeAsset, - // ComputeEnvironment, - // ComputeJob, + ComputeEnvironment, + ComputeJob, + ComputeResourceRequest, DBComputeJob, RunningPlatform } from '../../@types/C2D/C2D.js' @@ -30,7 +31,82 @@ import { import { OceanNodeConfig } from '../../@types/OceanNode.js' import { ENVIRONMENT_VARIABLES } from '../../utils/constants.js' import { dockerImageManifest } from '../data/assets.js' +import { C2DEngine } from '../../components/c2d/index.js' import { checkManifestPlatform } from '../../components/c2d/compute_engine_docker.js' +import { ValidateParams } from '../../components/httpRoutes/validateCommands.js' +import { Readable } from 'stream' + +/* eslint-disable require-await */ +class TestC2DEngine extends C2DEngine { + constructor() { + super(null, null, null, null, null) + } + + async getComputeEnvironments(): Promise { + return [] + } + + async checkDockerImage(): Promise { + return { valid: true, reason: null as string, status: 200 } + } + + async startComputeJob(): Promise { + return [] + } + + async stopComputeJob(): Promise { + return [] + } + + async getComputeJobStatus(): Promise { + return [] + } + + async getComputeJobResult(): Promise<{ stream: Readable; headers: any }> { + return null + } + + async cleanupExpiredStorage(): Promise { + return true + } +} +/* eslint-enable require-await */ + +function makeEnv( + resources: any[], + opts: { + freeResources?: any[] + runningJobs?: number + runningfreeJobs?: number + maxJobs?: number + } = {} +): ComputeEnvironment { + return { + id: 'test-env', + resources, + free: opts.freeResources + ? { + resources: opts.freeResources, + access: { addresses: [], accessLists: null } + } + : undefined, + runningJobs: opts.runningJobs ?? 0, + runningfreeJobs: opts.runningfreeJobs ?? 0, + queuedJobs: 0, + queuedFreeJobs: 0, + queMaxWaitTime: 0, + queMaxWaitTimeFree: 0, + runMaxWaitTime: 0, + runMaxWaitTimeFree: 0, + consumerAddress: '0x0', + fees: {}, + access: { addresses: [], accessLists: null }, + platform: { architecture: 'x86_64', os: 'linux' }, + minJobDuration: 60, + maxJobDuration: 3600, + maxJobs: opts.maxJobs ?? 
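// permissive default: 10 concurrent jobs unless a test overrides
// opts.maxJobs (the constraint/availability suites below only vary
// resources and the optional free tier)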
10 + } +} describe('Compute Jobs Database', () => { let envOverrides: OverrideEnvConfig[] @@ -51,7 +127,7 @@ describe('Compute Jobs Database', () => { envOverrides = buildEnvOverrideConfig( [ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS], [ - '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"fees":{"1":[{"feeToken":"0x123","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + '[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"1":[{"feeToken":"0x123","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"minJobDuration":10,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' ] ) envOverrides = await setupEnvironment(TEST_ENV_CONFIG_FILE, envOverrides) @@ -63,6 +139,7 @@ describe('Compute Jobs Database', () => { const job: DBComputeJob = { owner: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260', jobId: null, + jobIdHash: null, dateCreated: null, dateFinished: null, status: C2DStatusNumber.JobStarted, @@ -90,6 +167,7 @@ describe('Compute Jobs Database', () => { token: '0x123', lockTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', claimTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', + cancelTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', chainId: 8996, cost: 0 }, @@ -134,6 +212,7 @@ describe('Compute Jobs Database', () => { const job: DBComputeJob = { owner: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947261', jobId: null, + jobIdHash: null, dateCreated: null, dateFinished: null, status: C2DStatusNumber.JobStarted, @@ -161,6 +240,7 @@ describe('Compute Jobs Database', () => { token: '0x123', lockTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', claimTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', + cancelTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', chainId: 8996, cost: 0 }, @@ -202,28 +282,6 @@ describe('Compute Jobs Database', () => { expect(convertStringToArray(str)).to.deep.equal(expectedArray) }) - // it('should convert DBComputeJob to ComputeJob and omit internal DB data', () => { - // const source: any = completeDBComputeJob - // const output: ComputeJob = omitDBComputeFieldsFromComputeJob(source as DBComputeJob) - - // expect(Object.prototype.hasOwnProperty.call(output, 'clusterHash')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'configlogURL')).to.be.equal( - // false - // ) - // expect(Object.prototype.hasOwnProperty.call(output, 'publishlogURL')).to.be.equal( - // false - // ) - // expect(Object.prototype.hasOwnProperty.call(output, 'algologURL')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'outputsURL')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'algorithm')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'assets')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'isRunning')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'isStarted')).to.be.equal(false) - // expect(Object.prototype.hasOwnProperty.call(output, 'containerImage')).to.be.equal( - // false - // ) 
- // }) - it('should check manifest platform against local platform env', () => { const arch = os.machine() // ex: arm const platform = os.platform() // ex: linux @@ -247,11 +305,187 @@ describe('Compute Jobs Database', () => { expect(checkManifestPlatform(null, env)).to.be.equal(true) }) - it('testing checkAndFillMissingResources', async function () { - // TO DO + describe('testing checkAndFillMissingResources', function () { + let engine: TestC2DEngine + + before(function () { + engine = new TestC2DEngine() + }) + + const baseResources = [ + { id: 'cpu', total: 8, min: 1, max: 8, inUse: 0 }, + { id: 'ram', total: 32, min: 1, max: 32, inUse: 0 }, + { id: 'disk', total: 500, min: 10, max: 500, inUse: 0 } + ] + + it('satisfies constraints exactly → passes without modification', async function () { + const resources = [ + ...baseResources.slice(0, 1).map((r) => ({ + ...r, + constraints: [{ id: 'ram', min: 1, max: 4 }] + })), + ...baseResources.slice(1) + ] + const env = makeEnv(resources) + // 4 cpu, 8 ram (= 4*2, in [4, 16]) → no change + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 4 }, + { id: 'ram', amount: 8 }, + { id: 'disk', amount: 50 } + ] + const result = await engine.checkAndFillMissingResources(req, env, false) + const ramEntry = result.find((r) => r.id === 'ram') + expect(ramEntry.amount).to.equal(8) + }) + + it('resource below constraint min → auto-bumped to required minimum', async function () { + const resources = [ + { ...baseResources[0], constraints: [{ id: 'ram', min: 2, max: 8 }] }, + ...baseResources.slice(1) + ] + const env = makeEnv(resources) + // 4 cpu, 4 ram → ram < 4*2=8 → should be bumped to 8 + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 4 }, + { id: 'ram', amount: 4 }, + { id: 'disk', amount: 50 } + ] + const result = await engine.checkAndFillMissingResources(req, env, false) + const ramEntry = result.find((r) => r.id === 'ram') + expect(ramEntry.amount).to.equal(8) + }) + + it('resource above constraint max → throws meaningful error', async function () { + const resources = [ + { ...baseResources[0], constraints: [{ id: 'ram', min: 1, max: 3 }] }, + ...baseResources.slice(1) + ] + const env = makeEnv(resources) + // 4 cpu, 20 ram → ram > 4*3=12 → throws + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 4 }, + { id: 'ram', amount: 20 }, + { id: 'disk', amount: 50 } + ] + try { + await engine.checkAndFillMissingResources(req, env, false) + assert.fail('Expected error was not thrown') + } catch (err: any) { + expect(err.message).to.include('Too much ram') + expect(err.message).to.include('4 cpu') + expect(err.message).to.include('Max allowed: 12') + } + }) + + it('constraint involving GPU with 0 GPU requested → no constraint applied', async function () { + const resources = [ + ...baseResources, + { + id: 'gpu', + total: 4, + min: 0, + max: 4, + inUse: 0, + constraints: [{ id: 'ram', min: 8, max: 32 }] + } + ] + const env = makeEnv(resources) + // 0 gpu → gpu constraints should not be applied → ram stays at 4 + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 2 }, + { id: 'ram', amount: 4 }, + { id: 'disk', amount: 50 }, + { id: 'gpu', amount: 0 } + ] + const result = await engine.checkAndFillMissingResources(req, env, false) + const ramEntry = result.find((r) => r.id === 'ram') + expect(ramEntry.amount).to.equal(4) + }) + + it('no constraints defined → existing behavior unchanged', async function () { + const env = makeEnv(baseResources) + // below min → bumped to min; above max → throws + const 
req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 0 }, + { id: 'ram', amount: 0 }, + { id: 'disk', amount: 0 } + ] + const result = await engine.checkAndFillMissingResources(req, env, false) + const cpuEntry = result.find((r) => r.id === 'cpu') + const diskEntry = result.find((r) => r.id === 'disk') + expect(cpuEntry.amount).to.equal(1) // bumped to min + expect(diskEntry.amount).to.equal(10) // bumped to min + }) }) - it('testing checkIfResourcesAreAvailable', async function () { - // TO DO + + describe('testing checkIfResourcesAreAvailable', function () { + let engine: TestC2DEngine + + before(function () { + engine = new TestC2DEngine() + }) + + it('resources within env limits → passes', async function () { + const env = makeEnv([ + { id: 'cpu', total: 8, min: 1, max: 8, inUse: 2 }, + { id: 'ram', total: 32, min: 1, max: 32, inUse: 4 }, + { id: 'disk', total: 500, min: 10, max: 500, inUse: 50 } + ]) + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 4 }, + { id: 'ram', amount: 8 }, + { id: 'disk', amount: 100 } + ] + // should not throw + await engine.checkIfResourcesAreAvailable(req, env, false) + }) + + it('resources exceed env availability → throws', async function () { + const env = makeEnv([ + { id: 'cpu', total: 4, min: 1, max: 4, inUse: 3 }, + { id: 'ram', total: 32, min: 1, max: 32, inUse: 0 }, + { id: 'disk', total: 500, min: 10, max: 500, inUse: 0 } + ]) + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 4 }, // only 1 available (4-3) + { id: 'ram', amount: 8 }, + { id: 'disk', amount: 100 } + ] + try { + await engine.checkIfResourcesAreAvailable(req, env, false) + assert.fail('Expected error was not thrown') + } catch (err: any) { + expect(err.message).to.include('Not enough available cpu') + } + }) + + it('free resource limit exceeded → throws', async function () { + const env = makeEnv( + [ + { id: 'cpu', total: 8, min: 1, max: 8, inUse: 0 }, + { id: 'ram', total: 32, min: 1, max: 32, inUse: 0 }, + { id: 'disk', total: 500, min: 10, max: 500, inUse: 0 } + ], + { + freeResources: [ + { id: 'cpu', total: 2, min: 1, max: 2, inUse: 2 }, // fully used + { id: 'ram', total: 4, min: 1, max: 4, inUse: 0 }, + { id: 'disk', total: 20, min: 10, max: 20, inUse: 0 } + ] + } + ) + const req: ComputeResourceRequest[] = [ + { id: 'cpu', amount: 1 }, + { id: 'ram', amount: 2 }, + { id: 'disk', amount: 10 } + ] + try { + await engine.checkIfResourcesAreAvailable(req, env, true) + assert.fail('Expected error was not thrown') + } catch (err: any) { + expect(err.message).to.include('cpu') + } + }) }) after(async () => { diff --git a/src/test/utils/contracts.ts b/src/test/utils/contracts.ts index ba1c4112b..4dc7bbfa9 100644 --- a/src/test/utils/contracts.ts +++ b/src/test/utils/contracts.ts @@ -55,7 +55,6 @@ export async function deployAccessListContract( if (!nameAccessList || !symbolAccessList) { throw new Error(`Access list symbol and name are required`) } - const contract = getContract(contractFactoryAddress, contractFactoryAbi, signer) try { @@ -125,7 +124,12 @@ export async function deployAndGetAccessListConfig( await wallets[2].getAddress(), await wallets[3].getAddress() ], - ['https://oceanprotocol.com/nft/'] + [ + 'https://oceanprotocol.com/nft/', + 'https://oceanprotocol.com/nft/', + 'https://oceanprotocol.com/nft/', + 'https://oceanprotocol.com/nft/' + ] ) if (!txAddress) { diff --git a/src/test/utils/utils.ts b/src/test/utils/utils.ts index 8376d6ae1..b3bf0917b 100644 --- a/src/test/utils/utils.ts +++ b/src/test/utils/utils.ts @@ -4,7 +4,7 @@ import 
diff --git a/src/test/utils/contracts.ts b/src/test/utils/contracts.ts
index ba1c4112b..4dc7bbfa9 100644
--- a/src/test/utils/contracts.ts
+++ b/src/test/utils/contracts.ts
@@ -55,7 +55,6 @@ export async function deployAccessListContract(
   if (!nameAccessList || !symbolAccessList) {
     throw new Error(`Access list symbol and name are required`)
   }
-
   const contract = getContract(contractFactoryAddress, contractFactoryAbi, signer)

   try {
@@ -125,7 +124,12 @@ export async function deployAndGetAccessListConfig(
       await wallets[2].getAddress(),
       await wallets[3].getAddress()
     ],
-    ['https://oceanprotocol.com/nft/']
+    [
+      'https://oceanprotocol.com/nft/',
+      'https://oceanprotocol.com/nft/',
+      'https://oceanprotocol.com/nft/',
+      'https://oceanprotocol.com/nft/'
+    ]
   )

   if (!txAddress) {
diff --git a/src/test/utils/utils.ts b/src/test/utils/utils.ts
index 8376d6ae1..b3bf0917b 100644
--- a/src/test/utils/utils.ts
+++ b/src/test/utils/utils.ts
@@ -4,7 +4,7 @@
 import { fileURLToPath } from 'url'
 import { DB_TYPES, ENVIRONMENT_VARIABLES, EnvVariable } from '../../utils/constants.js'
 import { CONFIG_LOGGER } from '../../utils/logging/common.js'
 import { RPCS } from '../../@types/blockchain.js'
-import { getConfiguration } from '../../utils/config.js'
+import { getConfiguration } from '../../utils/config/builder.js'

 export const DEFAULT_TEST_TIMEOUT = 20000 // 20 secs MAX

 // __dirname and __filename are not defined in ES module scope
@@ -164,3 +164,5 @@ export function isRunningContinousIntegrationEnv(): boolean {
 export const SELECTED_RUN_DATABASE =
   new Date().getTime() % 2 === 0 ? DB_TYPES.ELASTIC_SEARCH : DB_TYPES.TYPESENSE
 CONFIG_LOGGER.debug(`SELECTED_RUN_DATABASE: ${SELECTED_RUN_DATABASE}`)
+
+export const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
diff --git a/src/utils/accessList.ts b/src/utils/accessList.ts
index fbecbe462..21d7671f3 100644
--- a/src/utils/accessList.ts
+++ b/src/utils/accessList.ts
@@ -1,6 +1,8 @@
 import AccessListJson from '@oceanprotocol/contracts/artifacts/contracts/accesslists/AccessList.sol/AccessList.json' with { type: 'json' }
 import { ethers, Signer } from 'ethers'
 import { CORE_LOGGER } from './logging/common.js'
+import { AccessList } from '../@types/AccessList.js'
+import { OceanNode } from '../OceanNode.js'

 /**
  * @param accessList the access list contract address
@@ -9,7 +11,7 @@
  * @param signer signer for the contract part
  * @returns true if the account has balanceOf > 0 OR if the accessList is empty OR does not contain info for this chain, false otherwise
  */
-export async function checkAddressOnAccessList(
+export async function checkAddressOnAccessListWithSigner(
   accessListContractAddress: string,
   addressToCheck: string,
   signer: Signer
@@ -40,3 +42,49 @@
     return false
   }
 }
+
+export async function checkAddressOnAccessList(
+  consumerAddress: string,
+  access: AccessList[],
+  oceanNode: OceanNode
+): Promise<boolean> {
+  if (!access || access.length === 0) {
+    return false
+  }
+  const config = oceanNode.getConfig()
+  const { supportedNetworks } = config
+  for (const accessListMap of access) {
+    if (!accessListMap) continue
+    for (const chain of Object.keys(accessListMap)) {
+      const { chainId } = supportedNetworks[chain]
+      try {
+        const blockchain = oceanNode.getBlockchain(chainId)
+        if (!blockchain) {
+          CORE_LOGGER.logMessage(
+            `Blockchain instance not available for chain ${chainId}, skipping access list check`,
+            true
+          )
+          continue
+        }
+        const signer = await blockchain.getSigner()
+        for (const accessListAddress of accessListMap[chain]) {
+          const hasAccess = await checkAddressOnAccessListWithSigner(
+            accessListAddress,
+            consumerAddress,
+            signer
+          )
+          if (hasAccess) {
+            return true
+          }
+        }
+      } catch (error) {
+        CORE_LOGGER.logMessage(
+          `Failed to check access lists on chain ${chain}: ${error.message}`,
+          true
+        )
+      }
+    }
+  }
+
+  return false
+}
diff --git a/src/utils/address.ts b/src/utils/address.ts
index 9760ed3b5..624438f28 100644
--- a/src/utils/address.ts
+++ b/src/utils/address.ts
@@ -1,7 +1,7 @@
 import fs from 'fs'
 import addresses from '@oceanprotocol/contracts/addresses/address.json' with { type: 'json' }
 import { CORE_LOGGER } from './logging/common.js'
-import { isDefined } from './index.js'
+import { isDefined } from './util.js'

 /**
  * Get the artifacts address from the address.json file
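The renamed checkAddressOnAccessListWithSigner keeps the original per-contract check, while the new checkAddressOnAccessList walks every chain key of every AccessList map, resolves a signer through the node's Blockchain instance, and returns true on the first positive balance. A hedged usage sketch (the addresses and chain key are placeholders, and the call is assumed to run inside an async handler holding an initialized OceanNode):

const allowed = await checkAddressOnAccessList(
  '0x0000000000000000000000000000000000000001', // consumer to check
  [{ '11155111': ['0x0000000000000000000000000000000000000002'] }], // AccessList[]: chain key -> list contracts
  oceanNode
)
// true as soon as one list reports a balance; false if no list matches
// or every per-chain check fails (failures are logged and skipped)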
diff --git a/src/utils/blockchain.ts b/src/utils/blockchain.ts
index 2c80161c8..0bf9458f7 100644
--- a/src/utils/blockchain.ts
+++ b/src/utils/blockchain.ts
@@ -3,7 +3,6 @@ import {
   ethers,
   Signer,
   Contract,
-  JsonRpcApiProvider,
   JsonRpcProvider,
   FallbackProvider,
   isAddress,
@@ -15,18 +14,12 @@
 import { getConfiguration } from './config.js'
 import { CORE_LOGGER } from './logging/common.js'
 import { ConnectionStatus } from '../@types/blockchain.js'
 import { ValidateChainId } from '../@types/commands.js'
-// import { KNOWN_CONFIDENTIAL_EVMS } from '../utils/address.js'
-import { OceanNodeConfig } from '../@types/OceanNode.js'
 import { KeyManager } from '../components/KeyManager/index.js'

 export class Blockchain {
-  private config?: OceanNodeConfig // Optional for new constructor
-  private static signers: Map = new Map()
-  private static providers: Map = new Map()
   private keyManager: KeyManager
   private signer: Signer
   private provider: FallbackProvider
-  private providers: JsonRpcProvider[] = []
   private chainId: number
   private knownRPCs: string[] = []
@@ -65,24 +58,44 @@
   public async getProvider(force: boolean = false): Promise<FallbackProvider> {
     if (!this.provider) {
-      for (const rpc of this.knownRPCs) {
+      const configs: {
+        provider: JsonRpcProvider
+        priority: number
+        stallTimeout: number
+      }[] = []
+
+      const PRIMARY_RPC_TIMEOUT = 3000
+      const FALLBACK_RPC_TIMEOUT = 1500
+      for (let i = 0; i < this.knownRPCs.length; i++) {
+        const rpc = this.knownRPCs[i]
         const rpcProvider = new JsonRpcProvider(rpc)
-        // filter wrong chains or broken RPCs
         if (!force) {
           try {
             const { chainId } = await rpcProvider.getNetwork()
             if (chainId.toString() === this.chainId.toString()) {
-              this.providers.push(rpcProvider)
-              break
+              // primary RPC gets lowest priority = is first to be called
+              configs.push({
+                provider: rpcProvider,
+                priority: i + 1,
+                stallTimeout: i === 0 ? PRIMARY_RPC_TIMEOUT : FALLBACK_RPC_TIMEOUT
+              })
             }
           } catch (error) {
             CORE_LOGGER.error(`Error getting network for RPC ${rpc}: ${error}`)
           }
         } else {
-          this.providers.push(new JsonRpcProvider(rpc))
+          configs.push({
+            provider: rpcProvider,
+            priority: i + 1,
+            stallTimeout: i === 0 ? PRIMARY_RPC_TIMEOUT : FALLBACK_RPC_TIMEOUT
+          })
         }
       }
-      this.provider = new FallbackProvider(this.providers)
+      // quorum=1: accept the first response to avoid calls to all configured rpcs
+      this.provider =
+        configs.length > 0
+          ? new FallbackProvider(configs, undefined, { quorum: 1 })
+          : new FallbackProvider([])
     }
     return this.provider
   }
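Net effect of the rewrite: instead of keeping only the first healthy RPC, every configured RPC becomes a FallbackProvider entry, the primary gets the lowest priority value (ethers tries lower values first) plus a longer stall timeout, and quorum 1 lets a single answer settle each call. A standalone sketch of the same wiring (ethers v6; the RPC URLs are placeholders):

import { JsonRpcProvider, FallbackProvider } from 'ethers'

const rpcs = ['https://rpc.primary.example', 'https://rpc.backup.example'] // placeholders
const configs = rpcs.map((url, i) => ({
  provider: new JsonRpcProvider(url),
  priority: i + 1, // primary first
  stallTimeout: i === 0 ? 3000 : 1500 // give the primary longer before falling back
}))
// quorum 1: the first successful response wins, so the backup is only
// consulted when the primary stalls past its stallTimeout or errors
const provider = new FallbackProvider(configs, undefined, { quorum: 1 })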
diff --git a/src/utils/config/builder.ts b/src/utils/config/builder.ts
index 23a0f5218..37ef7e75f 100644
--- a/src/utils/config/builder.ts
+++ b/src/utils/config/builder.ts
@@ -9,14 +9,13 @@ import { C2DClusterType } from '../../@types/C2D/C2D.js'
 import fs from 'fs'
 import os from 'os'
 import path from 'path'
-// import { hexStringToByteArray, computeCodebaseHash } from '../index.js'
-import { computeCodebaseHash } from '../index.js'
+import crypto from 'crypto'
+import { computeCodebaseHash } from '../attestation.js'
 import {
   getOceanArtifactsAdresses,
   OCEAN_ARTIFACTS_ADDRESSES_PER_CHAIN
 } from '../address.js'
-import { create256Hash } from '../crypt.js'
 import { CONFIG_LOGGER } from '../logging/common.js'
 import { LOG_LEVELS_STR, GENERIC_EMOJIS } from '../logging/Logger.js'
 import { OceanNodeConfigSchema } from './schemas.js'
@@ -26,6 +25,11 @@ import lodash from 'lodash'

 let previousConfiguration: OceanNodeConfig = null

+function create256Hash(input: string): string {
+  const result = crypto.createHash('sha256').update(input).digest('hex')
+  return '0x' + result
+}
+
 function mapEnvToConfig(
   env: NodeJS.ProcessEnv,
   mapping: Record<string, string>
@@ -159,7 +163,7 @@ export function buildC2DClusters(
       connection: dockerC2d,
       hash,
       type: C2DClusterType.DOCKER,
-      tempFolder: './c2d_storage/' + hash
+      tempFolder: './c2d_storage/' // this is the base folder, each engine creates its own subfolder
     })
     count += 1
   }
diff --git a/src/utils/config/constants.ts b/src/utils/config/constants.ts
index 23f78c9cf..0f6cfda4d 100644
--- a/src/utils/config/constants.ts
+++ b/src/utils/config/constants.ts
@@ -68,12 +68,17 @@
   P2P_AUTODIALINTERVAL: 'p2pConfig.autoDialInterval',
   P2P_ENABLE_NETWORK_STATS: 'p2pConfig.enableNetworkStats',
   HTTP_CERT_PATH: 'httpCertPath',
-  HTTP_KEY_PATH: 'httpKeyPath'
+  HTTP_KEY_PATH: 'httpKeyPath',
+  ENABLE_BENCHMARK: 'enableBenchmark',
+  PERSISTENT_STORAGE: 'persistentStorage'
 } as const

 // Configuration defaults
 export const DEFAULT_RATE_LIMIT_PER_MINUTE = 30
 export const DEFAULT_MAX_CONNECTIONS_PER_MINUTE = 60 * 2 // 120 requests per minute
+export const BENCHMARK_MONITORING_ADDRESS = '0xC5ea7916f95D5a087A644f1Dc0f7d19955eC446F'
+export const SEPOLIA_CHAIN_ID = '11155111'
+export const USDC_TOKEN = '0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238'

 export const DEFAULT_BOOTSTRAP_ADDRESSES = [
   // OPF nodes
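For context on the two new ENV_TO_CONFIG_MAPPING entries: the values are dotted config paths, so mapEnvToConfig (its body is not shown in this diff) presumably copies each defined env var onto the nested config object. A sketch of that idea under those assumptions, reusing the lodash import already present in builder.ts:

import lodash from 'lodash'

// Sketch only: the real mapEnvToConfig may coerce or validate values as well.
function applyEnvMapping(env: NodeJS.ProcessEnv, mapping: Record<string, string>) {
  const config: Record<string, unknown> = {}
  for (const [envName, configPath] of Object.entries(mapping)) {
    if (env[envName] !== undefined) lodash.set(config, configPath, env[envName])
  }
  return config
}

// e.g. ENABLE_BENCHMARK=true lands at config.enableBenchmark, and the raw
// PERSISTENT_STORAGE JSON string at config.persistentStorage before schema parsing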
diff --git a/src/utils/config/schemas.ts b/src/utils/config/schemas.ts
index 7246e8de9..b5422c1c8 100644
--- a/src/utils/config/schemas.ts
+++ b/src/utils/config/schemas.ts
@@ -84,6 +84,64 @@ export const OceanNodeDBConfigSchema = z.object({
   dbType: z.string().nullable()
 })

+export const PersistentStorageConfigSchema = z
+  .object({
+    enabled: z.boolean().optional().default(false),
+    type: z.enum(['localfs', 's3']).optional().default('localfs'),
+    accessLists: jsonFromString(z.array(z.record(z.string(), z.array(z.string()))))
+      .optional()
+      .default([]),
+    options: z.any().optional()
+  })
+  .superRefine((data, ctx) => {
+    if (!data.enabled) return
+
+    if (data.type === 'localfs') {
+      if (!data.options || typeof data.options !== 'object') {
+        ctx.addIssue({
+          code: z.ZodIssueCode.custom,
+          message: 'persistentStorage.options must be an object for localfs',
+          path: ['options']
+        })
+        return
+      }
+      if (
+        typeof (data.options as any).folder !== 'string' ||
+        !(data.options as any).folder
+      ) {
+        ctx.addIssue({
+          code: z.ZodIssueCode.custom,
+          message: 'persistentStorage.options.folder is required for localfs',
+          path: ['options', 'folder']
+        })
+      }
+    }
+
+    if (data.type === 's3') {
+      if (!data.options || typeof data.options !== 'object') {
+        ctx.addIssue({
+          code: z.ZodIssueCode.custom,
+          message: 'persistentStorage.options must be an object for s3',
+          path: ['options']
+        })
+        return
+      }
+      const required = ['endpoint', 'objectKey', 'accessKeyId', 'secretAccessKey']
+      for (const key of required) {
+        if (
+          typeof (data.options as any)[key] !== 'string' ||
+          !(data.options as any)[key]
+        ) {
+          ctx.addIssue({
+            code: z.ZodIssueCode.custom,
+            message: `persistentStorage.options.${key} is required for s3`,
+            path: ['options', key]
+          })
+        }
+      }
+    }
+  })
+
 export const DockerRegistryAuthSchema = z
   .object({
     username: z.string().optional(),
@@ -109,6 +167,12 @@
 export const DockerRegistrysSchema = z.record(z.string(), DockerRegistryAuthSchema)

+const ResourceConstraintSchema = z.object({
+  id: z.string(),
+  min: z.number().optional(),
+  max: z.number().optional()
+})
+
 export const ComputeResourceSchema = z.object({
   id: z.string(),
   total: z.number().optional(),
@@ -121,7 +185,8 @@
   init: z.any().optional(),
   platform: z.string().optional(),
   memoryTotal: z.string().optional(),
-  driverVersion: z.string().optional()
+  driverVersion: z.string().optional(),
+  constraints: z.array(ResourceConstraintSchema).optional()
 })

 export const ComputeResourcesPricingInfoSchema = z.object({
@@ -135,6 +200,7 @@ export const ComputeEnvFeesSchema = z.object({
 })

 export const ComputeEnvironmentFreeOptionsSchema = z.object({
+  minJobDuration: z.number().int().optional().default(60),
   maxJobDuration: z.number().int().optional().default(3600),
   maxJobs: z.number().int().optional().default(3),
   resources: z.array(ComputeResourceSchema).optional(),
@@ -146,66 +212,67 @@
         .nullable()
         .optional()
     })
-    .optional()
+    .optional(),
+  allowImageBuild: z.boolean().optional().default(false)
 })

+export const C2DEnvironmentConfigSchema = z
+  .object({
+    id: z.string().optional(),
+    description: z.string().optional(),
+    storageExpiry: z.number().int().optional().default(604800),
+    minJobDuration: z.number().int().optional().default(60),
+    maxJobDuration: z.number().int().optional().default(3600),
+    maxJobs: z.number().int().optional(),
+    fees: z.record(z.string(), z.array(ComputeEnvFeesSchema)).optional(),
+    access: z
+      .object({
+        addresses: z.array(z.string()),
+        accessLists: z
+          .array(z.record(z.string(), z.array(z.string())))
+          .nullable()
+          .optional()
+      })
+      .optional(),
+    free: ComputeEnvironmentFreeOptionsSchema.optional(),
+    resources: z.array(ComputeResourceSchema).optional()
+  })
+  .refine(
+    (data) =>
+      (data.fees !== undefined && Object.keys(data.fees).length > 0) ||
+      (data.free !== undefined && data.free !== null),
+    {
+      message:
+        'Each environment must have either a non-empty "fees" configuration or a "free" configuration'
+    }
+  )
+  .refine((data) => data.storageExpiry >= data.maxJobDuration, {
+    message: '"storageExpiry" should be greater than or equal to "maxJobDuration"'
+  })
+  .refine(
+    (data) => {
+      if (!data.resources) return false
+      return data.resources.some((r) => r.id === 'disk' && r.total)
+    },
+    { message: 'There is no "disk" resource configured. This is mandatory' }
+  )
+
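Taken together, the refinements above make a "disk" resource with a total mandatory for every environment and require either a fees map or a free block. A quick sketch of how they behave (import path assumed):

import { C2DEnvironmentConfigSchema } from './schemas.js' // path assumed

// rejected: free block present, but no "disk" resource configured
const noDisk = C2DEnvironmentConfigSchema.safeParse({
  free: { maxJobs: 1 },
  resources: [{ id: 'cpu', total: 4 }]
})
console.log(noDisk.success) // false

// accepted: disk present; the storageExpiry/maxJobDuration defaults satisfy the >= check
const withDisk = C2DEnvironmentConfigSchema.safeParse({
  free: { maxJobs: 1 },
  resources: [{ id: 'disk', total: 100 }]
})
console.log(withDisk.success) // true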
 export const C2DDockerConfigSchema = z.array(
-  z
-    .object({
-      socketPath: z.string().optional(),
-      protocol: z.string().optional(),
-      host: z.string().optional(),
-      port: z.number().optional(),
-      caPath: z.string().optional(),
-      certPath: z.string().optional(),
-      keyPath: z.string().optional(),
-      resources: z.array(ComputeResourceSchema).optional(),
-      storageExpiry: z.number().int().optional().default(604800),
-      maxJobDuration: z.number().int().optional().default(3600),
-      minJobDuration: z.number().int().optional().default(60),
-      access: z
-        .object({
-          addresses: z.array(z.string()),
-          accessLists: z
-            .array(z.record(z.string(), z.array(z.string())))
-            .nullable()
-            .optional()
-        })
-        .optional(),
-      fees: z.record(z.string(), z.array(ComputeEnvFeesSchema)).optional(),
-      free: ComputeEnvironmentFreeOptionsSchema.optional(),
-      imageRetentionDays: z.number().int().min(1).optional().default(7),
-      imageCleanupInterval: z.number().int().min(3600).optional().default(86400) // min 1 hour, default 24 hours
-    })
-    .refine(
-      (data) =>
-        (data.fees !== undefined && Object.keys(data.fees).length > 0) ||
-        (data.free !== undefined && data.free !== null),
-      {
-        message:
-          'Each docker compute environment must have either a non-empty "fees" configuration or a "free" configuration'
-      }
-    )
-    .refine((data) => data.storageExpiry >= data.maxJobDuration, {
-      message: '"storageExpiry" should be greater than "maxJobDuration"'
-    })
-    .refine(
-      (data) => {
-        if (!data.resources) return false
-        return data.resources.some((r) => r.id === 'disk' && r.total)
-      },
-      { message: 'There is no "disk" resource configured. This is mandatory' }
-    )
-    .transform((data) => {
-      if (data.resources) {
-        for (const resource of data.resources) {
-          if (resource.id === 'disk' && resource.total) {
-            resource.type = 'disk'
-          }
-        }
-      }
-      return data
-    })
+  z.object({
+    socketPath: z.string().optional(),
+    protocol: z.string().optional(),
+    host: z.string().optional(),
+    port: z.number().optional(),
+    caPath: z.string().optional(),
+    certPath: z.string().optional(),
+    keyPath: z.string().optional(),
+    imageRetentionDays: z.number().int().min(1).optional().default(7),
+    imageCleanupInterval: z.number().int().min(3600).optional().default(86400), // min 1 hour, default 24 hours
+    scanImages: z.boolean().optional().default(false),
+    scanImageDBUpdateInterval: z.number().int().min(3600).optional().default(43200), // default 43200 (12 hours)
+    enableNetwork: z.boolean().optional().default(false),
+    environments: z.array(C2DEnvironmentConfigSchema).min(1)
+  })
 )
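After the restructuring, each array entry describes one Docker engine (connection plus housekeeping options, all defaulted) and must carry at least one environment. A minimal parse showing the filled defaults (sketch, import path assumed):

import { C2DDockerConfigSchema } from './schemas.js' // path assumed

const [engine] = C2DDockerConfigSchema.parse([
  {
    socketPath: '/var/run/docker.sock',
    environments: [{ free: { maxJobs: 1 }, resources: [{ id: 'disk', total: 50 }] }]
  }
])
console.log(engine.imageRetentionDays) // 7
console.log(engine.imageCleanupInterval) // 86400
console.log(engine.scanImages, engine.enableNetwork) // false false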
 export const C2DClusterInfoSchema = z.object({
@@ -312,6 +379,7 @@ export const OceanNodeConfigSchema = z
   INTERFACES: z.string().optional(),
   hasP2P: booleanFromString.optional().default(true),
   hasHttp: booleanFromString.optional().default(true),
+  enableBenchmark: booleanFromString.optional().default(false),
   p2pConfig: OceanNodeP2PConfigSchema.nullable().optional(),

   hasIndexer: booleanFromString.default(true),
@@ -321,6 +389,41 @@
   DB_PASSWORD: z.string().optional(),
   DB_TYPE: z.string().optional(),
   dbConfig: OceanNodeDBConfigSchema.optional(),
+  // Accept either an object (config file) or a JSON string (env var `PERSISTENT_STORAGE`),
+  // and validate the parsed value against the PersistentStorage schema.
+  persistentStorage: z
+    .preprocess((val) => {
+      if (val === undefined || val === null) return val
+      if (typeof val === 'string') {
+        const tryParse = (s: string) => {
+          try {
+            return JSON.parse(s)
+          } catch {
+            return undefined
+          }
+        }
+
+        // 1) Normal JSON string
+        const parsed = tryParse(val)
+        if (parsed !== undefined) {
+          // 2) Handle double-encoded JSON (e.g. "\"{...}\"")
+          if (typeof parsed === 'string') {
+            const parsedTwice = tryParse(parsed)
+            if (parsedTwice !== undefined) return parsedTwice
+          }
+          return parsed
+        }
+
+        // 3) Common docker-compose/shell mistake: single quotes inside JSON
+        const normalized = val.replace(/'/g, '"')
+        const parsedNormalized = tryParse(normalized)
+        if (parsedNormalized !== undefined) return parsedNormalized
+
+        return val
+      }
+      return val
+    }, PersistentStorageConfigSchema)
+    .optional(),
   FEE_AMOUNT: z.string().optional(),
   FEE_TOKENS: z.string().optional(),
diff --git a/src/utils/constants.ts b/src/utils/constants.ts
index 756e11b1e..7150e37b0 100644
--- a/src/utils/constants.ts
+++ b/src/utils/constants.ts
@@ -38,7 +38,13 @@ export const PROTOCOL_COMMANDS = {
   FETCH_CONFIG: 'fetchConfig',
   PUSH_CONFIG: 'pushConfig',
   GET_LOGS: 'getLogs',
-  JOBS: 'jobs'
+  JOBS: 'jobs',
+  PERSISTENT_STORAGE_CREATE_BUCKET: 'persistentStorageCreateBucket',
+  PERSISTENT_STORAGE_GET_BUCKETS: 'persistentStorageGetBuckets',
+  PERSISTENT_STORAGE_LIST_FILES: 'persistentStorageListFiles',
+  PERSISTENT_STORAGE_UPLOAD_FILE: 'persistentStorageUploadFile',
+  PERSISTENT_STORAGE_GET_FILE_OBJECT: 'persistentStorageGetFileObject',
+  PERSISTENT_STORAGE_DELETE_FILE: 'persistentStorageDeleteFile'
 }
 // more visible, keep then close to make sure we always update both
 export const SUPPORTED_PROTOCOL_COMMANDS: string[] = [
@@ -78,7 +84,13 @@
   PROTOCOL_COMMANDS.FETCH_CONFIG,
   PROTOCOL_COMMANDS.PUSH_CONFIG,
   PROTOCOL_COMMANDS.GET_LOGS,
-  PROTOCOL_COMMANDS.JOBS
+  PROTOCOL_COMMANDS.JOBS,
+  PROTOCOL_COMMANDS.PERSISTENT_STORAGE_CREATE_BUCKET,
+  PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_BUCKETS,
+  PROTOCOL_COMMANDS.PERSISTENT_STORAGE_LIST_FILES,
+  PROTOCOL_COMMANDS.PERSISTENT_STORAGE_UPLOAD_FILE,
+  PROTOCOL_COMMANDS.PERSISTENT_STORAGE_GET_FILE_OBJECT,
+  PROTOCOL_COMMANDS.PERSISTENT_STORAGE_DELETE_FILE
 ]

 export const MetadataStates = {
@@ -514,6 +526,11 @@
     name: 'HTTP_KEY_PATH',
     value: process.env.HTTP_KEY_PATH,
     required: false
+  },
+  PERSISTENT_STORAGE: {
+    name: 'PERSISTENT_STORAGE',
+    value: process.env.PERSISTENT_STORAGE,
+    required: false
   }
 }
 export const CONNECTION_HISTORY_DELETE_THRESHOLD = 300
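In practice, all of the following PERSISTENT_STORAGE encodings normalize to the same parsed config (illustrative values; for type 's3' the options would instead need endpoint, objectKey, accessKeyId and secretAccessKey):

const json = '{"enabled":true,"type":"localfs","options":{"folder":"/data/persistent"}}'
process.env.PERSISTENT_STORAGE = json // 1) plain JSON
process.env.PERSISTENT_STORAGE = JSON.stringify(json) // 2) double-encoded JSON
process.env.PERSISTENT_STORAGE = json.replace(/"/g, "'") // 3) single-quoted docker-compose/shell slip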
diff --git a/src/utils/credentials.ts b/src/utils/credentials.ts
index d54692b4f..b21c25f21 100644
--- a/src/utils/credentials.ts
+++ b/src/utils/credentials.ts
@@ -3,7 +3,7 @@ import { AccessListContract } from '../@types/OceanNode.js'
 import { CORE_LOGGER } from './logging/common.js'
 import { Credential, Credentials, MATCH_RULES } from '@oceanprotocol/ddo-js'
 import { CREDENTIALS_TYPES } from '../@types/DDO/Credentials.js'
-import { checkAddressOnAccessList } from './accessList.js'
+import { checkAddressOnAccessListWithSigner } from './accessList.js'
 import { isDefined } from './util.js'

 /**
@@ -203,7 +203,7 @@ export async function checkSingleCredential(
   try {
     // Check if the consumer address has tokens in the access list contract
-    const hasAccess = await checkAddressOnAccessList(
+    const hasAccess = await checkAddressOnAccessListWithSigner(
       accessListCredential.accessList,
       consumerAddress,
       signer
@@ -252,7 +252,7 @@ export async function checkCredentialOnAccessList(
   if (chainsListed.length > 0 && chainsListed.includes(chainId)) {
     let isAuthorized = false
     for (const accessListAddress of accessList[chainId]) {
-      const result = await checkAddressOnAccessList(
+      const result = await checkAddressOnAccessListWithSigner(
         accessListAddress,
         addressToCheck,
         signer
diff --git a/src/utils/file.ts b/src/utils/file.ts
index 79998c763..b05293640 100644
--- a/src/utils/file.ts
+++ b/src/utils/file.ts
@@ -1,9 +1,4 @@
-import {
-  ArweaveFileObject,
-  EncryptMethod,
-  IpfsFileObject,
-  UrlFileObject
-} from '../@types/fileObject.js'
+import { StorageObject, EncryptMethod } from '../@types/fileObject.js'
 import { OceanNode } from '../OceanNode.js'
 import { FindDdoHandler } from '../components/core/handler/ddoHandler.js'
 import { AssetUtils } from './asset.js'
@@ -16,7 +11,7 @@ export async function getFile(
   didOrDdo: string | DDO,
   serviceId: string,
   node: OceanNode
-): Promise {
+): Promise<StorageObject[]> {
   try {
     // 1. Get the DDO
     const ddo =
diff --git a/src/utils/version.ts b/src/utils/version.ts
new file mode 100644
index 000000000..470f95abe
--- /dev/null
+++ b/src/utils/version.ts
@@ -0,0 +1,7 @@
+import { createRequire } from 'module'
+
+const require = createRequire(import.meta.url)
+
+export function getPackageVersion(): string {
+  return process.env.npm_package_version ?? require('../../package.json').version
+}
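A usage sketch for the new helper: under npm scripts the version comes from the injected npm_package_version, while a direct node launch falls back to reading package.json through createRequire:

import { getPackageVersion } from './utils/version.js' // path relative to src/, assumed

console.log(`ocean-node v${getPackageVersion()}`)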