diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f0a9ada2e..04200f877 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -119,7 +119,7 @@ jobs: repository: 'oceanprotocol/barge' path: 'barge' - name: Login to Docker Hub - if: ${{ env.DOCKERHUB_PASSWORD && env.DOCKERHUB_USERNAME }} + if: ${{ env.DOCKERHUB_PASSWORDNONO && env.DOCKERHUB_USERNAMENONO }} run: | echo "Login to Docker Hub";echo "$DOCKERHUB_PASSWORD" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin env: @@ -169,10 +169,8 @@ jobs: IPFS_GATEWAY: http://172.15.0.16:8080/ ARWEAVE_GATEWAY: https://arweave.net/ RPCS: '{ "8996": {"rpc": "http://127.0.0.1:8545", "chainId": 8996, "network": "development", "chunkSize": 100}}' - DB_TYPE: 'typesense' - DB_URL: 'http://localhost:8108/?apiKey=xyz' - DB_USERNAME: 'elastic' - DB_PASSWORD: 'changeme' + DB_URL: 'http://localhost:9200' + DB_TYPE: 'elasticsearch' FEE_TOKENS: '{ "1": "0x967da4048cD07aB37855c090aAF366e4ce1b9F48", "137": "0x282d8efCe846A88B159800bd4130ad77443Fa1A1", "80001": "0xd8992Ed72C445c35Cb4A2be468568Ed1079357c8", "56": "0xDCe07662CA8EbC241316a15B611c89711414Dd1a" }' FEE_AMOUNT: '{ "amount": 1, "unit": "MB" }' ASSET_PURGATORY_URL: 'https://raw.githubusercontent.com/oceanprotocol/list-purgatory/main/list-assets.json' @@ -228,12 +226,44 @@ jobs: working-directory: ${{ github.workspace }}/barge run: | bash -x start_ocean.sh --no-node --with-typesense 2>&1 > start_ocean.log & - - run: npm ci - - run: npm run build - run: docker image ls - name: Delete default runner images run: | rm -rf /usr/share/swift/ + - name: Checkout Ocean CLI + uses: actions/checkout@v4 + with: + repository: 'oceanprotocol/ocean-cli' + path: 'ocean-cli' + - name: Checkout Ocean-js + uses: actions/checkout@v4 + with: + repository: 'oceanprotocol/ocean.js' + path: 'ocean.js' + ref: main + - name: Build ocean-js + working-directory: ${{ github.workspace }}/ocean.js + run: | + npm ci + npm run build + npm link + - name: Setup Ocean CLI + working-directory: ${{ github.workspace }}/ocean-cli + run: | + npm ci + npm link @oceanprotocol/lib + npm run build + - name: Checkout Ocean Node + uses: actions/checkout@v4 + with: + repository: 'OceanProtocolEnterprise/ocean-node' + path: 'ocean-node' + ref: ${{ github.event_name == 'pull_request' && github.head_ref || 'main' }} + - name: Build Ocean Node + working-directory: ${{ github.workspace }}/ocean-node + run: | + npm ci + npm run build - name: Wait for contracts deployment and C2D cluster to be ready working-directory: ${{ github.workspace }}/barge @@ -247,12 +277,6 @@ jobs: run: docker logs ocean-contracts-1 && docker logs ocean-typesense-1 if: ${{ failure() }} - - name: Checkout Ocean Node - uses: actions/checkout@v4 - with: - repository: 'OceanProtocolEnterprise/ocean-node' - path: 'ocean-node' - ref: ${{ github.event_name == 'pull_request' && github.head_ref || 'main' }} - name: Set DOCKER_REGISTRY_AUTHS from Docker Hub secrets if: env.DOCKERHUB_USERNAME && env.DOCKERHUB_PASSWORD run: | @@ -269,8 +293,6 @@ jobs: - name: Start Ocean Node working-directory: ${{ github.workspace }}/ocean-node run: | - npm ci - npm run build npm run start > ocean-node.log 2>&1 & env: PRIVATE_KEY: ${{ secrets.PRIVATE_KEY }} @@ -280,6 +302,7 @@ jobs: HTTP_API_PORT: 8001 RPCS: '{ "8996": {"rpc": "http://127.0.0.1:8545", "chainId": 8996, "network": "development", "chunkSize": 100} }' INDEXER_NETWORKS: '[8996]' + DB_URL: 'http://localhost:9200' FEE_TOKENS: '{ "1": "0x967da4048cD07aB37855c090aAF366e4ce1b9F48", "137": 
"0x282d8efCe846A88B159800bd4130ad77443Fa1A1", "80001": "0xd8992Ed72C445c35Cb4A2be468568Ed1079357c8", "56": "0xDCe07662CA8EbC241316a15B611c89711414Dd1a" }' FEE_AMOUNT: '{ "amount": 1, "unit": "MB" }' SKIP_FEE_TOKEN_VALIDATION: 'true' @@ -294,10 +317,11 @@ jobs: MAX_CONNECTIONS_PER_MINUTE: 320 DOCKER_COMPUTE_ENVIRONMENTS: '[{"socketPath":"/var/run/docker.sock","environments":[{"storageExpiry":604800,"maxJobDuration":3600,"minJobDuration":60,"resources":[{"id":"cpu","total":4,"max":4,"min":1,"type":"cpu"},{"id":"ram","total":10,"max":10,"min":1,"type":"ram"},{"id":"disk","total":10,"max":10,"min":0,"type":"disk"}],"fees":{"8996":[{"prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"maxJobs":3,"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]}]' DOCKER_REGISTRY_AUTHS: ${{ env.DOCKER_REGISTRY_AUTHS }} + PERSISTENT_STORAGE: '{"enabled": true, "type": "localfs", "options": {"folder": "/tmp/ocean-persistent-storage"}}' - name: Check Ocean Node is running run: | - for i in $(seq 1 90); do - if curl --output /dev/null --silent --max-time 1 --head --fail "http://localhost:8001"; then + for i in $(seq 1 12); do + if curl --output /dev/null --silent --head --fail "http://localhost:8001"; then echo "Ocean Node is up" exit 0 fi @@ -305,33 +329,6 @@ jobs: done echo "Ocean Node did not start in time" exit 1 - - name: Checkout Ocean CLI - uses: actions/checkout@v4 - with: - repository: 'oceanprotocol/ocean-cli' - path: 'ocean-cli' - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: 'v20.19.0' - - name: Checkout Ocean-js - uses: actions/checkout@v4 - with: - repository: 'oceanprotocol/ocean.js' - path: 'ocean.js' - ref: main - - name: Build ocean-js - working-directory: ${{ github.workspace }}/ocean.js - run: | - npm ci - npm run build - npm link - - name: Setup Ocean CLI - working-directory: ${{ github.workspace }}/ocean-cli - run: | - npm ci - npm link @oceanprotocol/lib - npm run build - name: Run system tests working-directory: ${{ github.workspace }}/ocean-cli run: npm run test:system diff --git a/CHANGELOG.md b/CHANGELOG.md index c3f7fd382..0e3424e8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,54 @@ All notable changes to this project will be documented in this file. Dates are d Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog). 
-#### [v3.0.0](https://github.com/oceanprotocol/ocean-node/compare/v2.1.1...v3.0.0) +#### [v3.0.6](https://github.com/oceanprotocol/ocean-node/compare/v3.0.5...v3.0.6) + +- fix fees on auto create benchmark env [`#1347`](https://github.com/oceanprotocol/ocean-node/pull/1347) + +#### [v3.0.5](https://github.com/oceanprotocol/ocean-node/compare/v3.0.4...v3.0.5) + +> 22 April 2026 + +- enableNetwork per env [`#1346`](https://github.com/oceanprotocol/ocean-node/pull/1346) +- Release 3.0.5 [`772a8f4`](https://github.com/oceanprotocol/ocean-node/commit/772a8f44795dab6563d818deb9a668e0b8ef2220) + +#### [v3.0.4](https://github.com/oceanprotocol/ocean-node/compare/v3.0.3...v3.0.4) + +> 22 April 2026 + +- fix access list issue [`#1345`](https://github.com/oceanprotocol/ocean-node/pull/1345) +- Release 3.0.4 [`01b6f9f`](https://github.com/oceanprotocol/ocean-node/commit/01b6f9f927926caf21494d7dabdc069db62b173d) + +#### [v3.0.3](https://github.com/oceanprotocol/ocean-node/compare/v3.0.2...v3.0.3) + +> 22 April 2026 + +- set benchmark env on base [`#1337`](https://github.com/oceanprotocol/ocean-node/pull/1337) +- parse multiaddrs if string [`#1342`](https://github.com/oceanprotocol/ocean-node/pull/1342) +- Release 3.0.3 [`ad9cecc`](https://github.com/oceanprotocol/ocean-node/commit/ad9ceccf703fc6129fa0a240f3212cf5ccfea0aa) +- set accessList [`a3cd730`](https://github.com/oceanprotocol/ocean-node/commit/a3cd730d8a1209edfecafe026a3351da659e4702) +- add benchmark validation [`3f94ea8`](https://github.com/oceanprotocol/ocean-node/commit/3f94ea8968539d74e40d0055fc025149ed5206a7) + +#### [v3.0.2](https://github.com/oceanprotocol/ocean-node/compare/v3.0.1...v3.0.2) + +> 21 April 2026 + +- fix: persistent storage required params to allow authToken [`#1341`](https://github.com/oceanprotocol/ocean-node/pull/1341) +- Bump undici and release-it [`#1340`](https://github.com/oceanprotocol/ocean-node/pull/1340) +- Release 3.0.2 [`8129284`](https://github.com/oceanprotocol/ocean-node/commit/8129284686017117e815ad5fee84e20c1e15f3bb) + +#### [v3.0.1](https://github.com/oceanprotocol/ocean-node/compare/v3.0.0...v3.0.1) + +> 20 April 2026 + +- check if c2d is configured [`#1336`](https://github.com/oceanprotocol/ocean-node/pull/1336) +- remove chainId [`#1338`](https://github.com/oceanprotocol/ocean-node/pull/1338) +- fix: compose node env yaml indentation [`#1335`](https://github.com/oceanprotocol/ocean-node/pull/1335) +- Release 3.0.1 [`1a25e48`](https://github.com/oceanprotocol/ocean-node/commit/1a25e48ad3410328b4710265ec434c074b28d78c) + +### [v3.0.0](https://github.com/oceanprotocol/ocean-node/compare/v2.1.1...v3.0.0) + +> 17 April 2026 - Bump basic-ftp from 5.2.1 to 5.3.0 [`#1332`](https://github.com/oceanprotocol/ocean-node/pull/1332) - fix(#1327): use container finishedAt to compute algo stop time on crash [`#1331`](https://github.com/oceanprotocol/ocean-node/pull/1331) diff --git a/docs/env.md b/docs/env.md index b0cf6e9af..8bf52eef7 100644 --- a/docs/env.md +++ b/docs/env.md @@ -1,145 +1,439 @@ -# Ocean Node Networking +# Environmental Variables + +Environmental variables are also tracked in `ENVIRONMENT_VARIABLES` within `src/utils/constants.ts`. Descriptions and example values are provided below: + +## Core + +- `PRIVATE_KEY` (Required): The private key for the node, required for node operations. Example: `"0x1d751ded5a32226054cd2e71261039b65afb9ee1c746d055dd699b1150a5befc"` +- `CONFIG_PATH`: Absolute path to JSON config file +- `RPCS`: JSON object defining RPC endpoints for various networks. 
Example: `"{ \"11155420\":{ \"rpc\":\"https://sepolia.optimism.io\", \"fallbackRPCs\": [\"https://public.stackup.sh/api/v1/node/optimism-sepolia\"], \"chainId\": 11155420, \"network\": \"optimism-sepolia\", \"chunkSize\": 1000 }}"` +- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` +- `IPFS_GATEWAY`: The gateway URL for IPFS, used for downloading files from IPFS. Example: `"https://ipfs.io/"` +- `ARWEAVE_GATEWAY`: The gateway URL for Arweave, used for downloading files from Arweave. Example: `"https://arweave.net/"` +- `LOAD_INITIAL_DDOS`: If set, the node will load initial DDOs from JSON files at startup. This is useful for testing or bootstrapping the network with predefined data. Example: `false` +- `FEE_TOKENS`: Mapping of chain IDs to token addresses for setting fees in the network. Example: `"{ \"1\": \"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\", ...}"` +- `FEE_AMOUNT`: Specifies the fee amount and unit (e.g., MB for megabytes). Example: `"{ \"amount\": 1, \"unit\": \"MB\" }"` +- `ADDRESS_FILE`: File location where Ocean contract addresses are saved. Example: `"ADDRESS_FILE=${HOME}/.ocean/ocean-contracts/artifacts/address.json"` +- `NODE_ENV`: Typically used to specify the environment (e.g., development, production) the node is running in. Example: `'development'` +- `AUTHORIZED_DECRYPTERS`: A JSON array of addresses that are authorized to decrypt data. Example: `"['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']"` +- `AUTHORIZED_DECRYPTERS_LIST`: AccessList contract addresses (per chain). If present, only accounts present on the given access lists can decrypt data. Example: `"{ \"8996\": [\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"] }"` +- `OPERATOR_SERVICE_URL`: Configures C2D cluster URLs for the node. Example: `"[\"http://example.c2d.cluster1.com\",\"http://example.cd2.cluster2.com\"]"` +- `INTERFACES`: Network interfaces the node supports, e.g., HTTP and P2P. By default, if not specified, both are supported. Example: `"[\"HTTP\",\"P2P\"]"` +- `ALLOWED_VALIDATORS`: Array of addresses for allowed validators to verify asset signatures before indexing. Example: `"[\"0x123\",\"0x456\"]"` +- `ALLOWED_VALIDATORS_LIST`: Array of access list addresses (per chain) for allowed validators to verify asset signatures before indexing. Example: `"{ \"8996\": [\"0x123\",\"0x456\"] }"` +- `INDEXER_INTERVAL`: Sets the interval in milliseconds for the indexer to crawl. The default is 30 seconds if not set. Example: `10000` +- `INDEXER_NETWORKS`: Specifies the networks the Indexer will crawl. If not set, the Indexer will index all networks defined in the RPCS environment variable. If set to an empty string, indexing will be disabled. Example: `[1, 137]` +- `ALLOWED_ADMINS`: Sets the public addresses of accounts which have access to admin endpoints, e.g. shutting down the node. Example: `"[\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"]"` +- `ALLOWED_ADMINS_LIST`: Array of access list addresses (per chain) for accounts that have access to admin endpoints. Example: `"{ \"8996\": [\"0x123\",\"0x456\"] }"` +- `RATE_DENY_LIST`: Blocked list of IPs and peer IDs. Example: `"{ \"peers\": [\"16Uiu2HAkuYfgjXoGcSSLSpRPD6XtUgV71t5RqmTmcqdbmrWY9MJo\"], \"ips\": [\"127.0.0.1\"] }"` +- `MAX_REQ_PER_MINUTE`: Number of requests per minute allowed by the same client (IP or Peer id).
Example: `30` +- `MAX_CONNECTIONS_PER_MINUTE`: Max number of requests allowed per minute (all clients). Example: `120` +- `MAX_CHECKSUM_LENGTH`: Defines the maximum file size (in MB) allowed when a file checksum is required. Example: `10` +- `IS_BOOTSTRAP`: Whether this node should act as a bootstrap node. Default is `false`. +- `AUTHORIZED_PUBLISHERS`: Authorized list of publishers. If present, Node will only index assets published by the accounts in the list. Example: `"[\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"]"` +- `AUTHORIZED_PUBLISHERS_LIST`: AccessList contract addresses (per chain). If present, Node will only index assets published by the accounts present on the given access lists. Example: `"{ \"8996\": [\"0x967da4048cD07aB37855c090aAF366e4ce1b9F48\",\"0x388C818CA8B9251b393131C08a736A67ccB19297\"] }"` +- `VALIDATE_UNSIGNED_DDO`: If set to `false`, the node will not validate unsigned DDOs and will request a signed message with the publisher address, nonce and signature. Default is `true`. Example: `false` +- `JWT_SECRET`: Secret used to sign JWT tokens. Default is `ocean-node-secret`. Example: `"my-secret-jwt-token"` +- `PERSISTENT_STORAGE`: Persistent storage config. See [persistent storage](persistentStorage.md). + +## Database + +- `DB_URL`: URL for connecting to the database. Required for running a database with the node. Example: `"http://localhost:8108/?apiKey=xyz"` +- `DB_USERNAME`: Username for database authentication. Optional if not using authentication. Example: `"elastic"` +- `DB_PASSWORD`: Password for database authentication. Optional if not using authentication. Example: `"password123"` +- `ELASTICSEARCH_REQUEST_TIMEOUT`: Request timeout in milliseconds for Elasticsearch operations. Default is `60000`. Example: `60000` +- `ELASTICSEARCH_PING_TIMEOUT`: Ping timeout in milliseconds for Elasticsearch health checks. Default is `5000`. Example: `5000` +- `ELASTICSEARCH_RESURRECT_STRATEGY`: Strategy for bringing failed Elasticsearch nodes back online. Options are 'ping', 'optimistic', or 'none'. Default is `ping`. Example: `"ping"` +- `ELASTICSEARCH_MAX_RETRIES`: Maximum number of retry attempts for failed Elasticsearch operations. Default is `5`. Example: `5` +- `ELASTICSEARCH_SNIFF_ON_START`: Enable cluster node discovery on Elasticsearch client startup. Default is `true`. Example: `true` +- `ELASTICSEARCH_SNIFF_INTERVAL`: Interval in milliseconds for periodic cluster health monitoring and node discovery. Set to 'false' to disable. Default is `30000`. Example: `30000` +- `ELASTICSEARCH_SNIFF_ON_CONNECTION_FAULT`: Enable automatic cluster node discovery when connection faults occur. Default is `true`. Example: `true` +- `ELASTICSEARCH_HEALTH_CHECK_INTERVAL`: Interval in milliseconds for proactive connection health monitoring. Default is `60000`. Example: `60000` + +## Payments + +- `ESCROW_CLAIM_TIMEOUT`: Amount of time reserved to claim an escrow payment, in seconds. Defaults to `3600`. Example: `3600` + +## Logs + +- `LOG_LEVEL`: Define the default log level. Example: `debug` +- `LOG_CONSOLE`: Write logs to the console. Default is `false`, but becomes `true` if neither `LOG_FILES` nor `LOG_DB` is set. +- `LOG_FILES`: Write logs to files. Default is `false` +- `LOG_DB`: Write logs to noSQL database. Default is `false` +- `UNSAFE_URLS`: Array of URLs or regular expressions to be excluded from access. Example: ["^.*(169.254.169.254).*","^.*(127.0.0.1).*"] + +## HTTP + +- `HTTP_API_PORT`: Port number for the HTTP API.
Example: `8000` +- `HTTP_CERT_PATH`: Absolute path to the TLS certificate file. If provided along with `HTTP_KEY_PATH`, the node will start an HTTPS server. Example: `"/etc/letsencrypt/live/example.com/fullchain.pem"` +- `HTTP_KEY_PATH`: Absolute path to the TLS private key file. If provided along with `HTTP_CERT_PATH`, the node will start an HTTPS server. Example: `"/etc/letsencrypt/live/example.com/privkey.pem"` + +## P2P + +- `P2P_ENABLE_IPV4`: Enable IPv4 connectivity. Defaults: `True` +- `P2P_ENABLE_IPV6`: Enable IPv6 connectivity. Defaults: `True` +- `P2P_ipV4BindAddress`: Bind address for IPV4. Defaults to `0.0.0.0`. Example: `"0.0.0.0"` +- `P2P_ipV4BindTcpPort`: Port used on IPv4 TCP connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` +- `P2P_ipV4BindWsPort`: Port used on IPv4 WS connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` +- `P2P_ipV6BindAddress`: Bind address for IPV6. Defaults to `::1`. Example: `"::1"` +- `P2P_ipV6BindTcpPort`: Port used on IPv6 TCP connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` +- `P2P_ipV6BindWsPort`: Port used on IPv6 WS connections. Defaults to `0` (Use whatever port is free. When running as docker, please set it explicitly). Example: `0` +- `P2P_ANNOUNCE_ADDRESSES`: List of addresses to announce to the network. Example: `"[\"/ip4/1.2.3.4/tcp/8000\"]"` + + To enable SNI (Server Name Indication) with autoTLS, include `/tls/ws` or `/tls/wss` addresses: + - `"["/ip4//tcp/9001/tls/ws"]"` - TLS WebSocket + - `"["/ip4//tcp/9005/tls/wss"]"` - TLS WebSocket Secure + +- `P2P_ANNOUNCE_PRIVATE`: Announce private IPs. Default: `True` +- `P2P_pubsubPeerDiscoveryInterval`: Interval (in ms) for discovery using pubsub. Defaults to `10000` (10 seconds). Example: `10000` +- `P2P_dhtMaxInboundStreams`: Maximum number of DHT inbound streams. Defaults to `500`. Example: `500` +- `P2P_dhtMaxOutboundStreams`: Maximum number of DHT outbound streams. Defaults to `500`. Example: `500` +- `P2P_DHT_FILTER`: Filter addresses in the DHT. 0 = No filter (default), 1 = Filter private addresses, 2 = Filter public addresses +- `P2P_mDNSInterval`: Interval (in ms) for discovery using mDNS. Defaults to `20000` (20 seconds). Example: `20000` +- `P2P_connectionsMaxParallelDials`: Maximum number of parallel dials. Defaults to `150`. Example: `150` +- `P2P_connectionsDialTimeout`: Timeout for dial commands. Defaults to `10000` (10 seconds). Example: `10000` +- `P2P_ENABLE_UPNP`: Enable UPNP gateway discovery. Default: `True` +- `P2P_ENABLE_AUTONAT`: Enable AutoNAT discovery. Default: `True` +- `P2P_ENABLE_CIRCUIT_RELAY_SERVER`: Enable Circuit Relay Server. It will help the network but increase your bandwidth usage. Should be disabled for edge nodes. Default: `True` +- `P2P_CIRCUIT_RELAYS`: Number of relay servers. Default: `0` +- `P2P_BOOTSTRAP_NODES` : List of bootstrap nodes. Defaults to OPF nodes. Example: ["/dns4/node3.oceanprotocol.com/tcp/9000/p2p/"] +- `P2P_BOOTSTRAP_TIMEOUT` : How long to wait before discovering bootstrap nodes. In ms. Default: 2000 ms +- `P2P_BOOTSTRAP_TAGNAME` : Tag a bootstrap peer with this name before "discovering" it. Default: 'bootstrap' +- `P2P_BOOTSTRAP_TAGVALUE` : The bootstrap peer tag will have this value (default: 50) +- `P2P_BOOTSTRAP_TTL` : Cause the bootstrap peer tag to be removed after this number of ms.
Default: 120000 ms +- `P2P_FILTER_ANNOUNCED_ADDRESSES`: CIDR filters to filter announced addresses. Default: ["172.15.0.0/24"] (docker ip range). Example: ["192.168.0.1/27"] +- `P2P_MIN_CONNECTIONS`: The minimum number of connections below which libp2p will start to dial peers from the peer book. Setting this to 0 disables this behaviour. Default: 1 +- `P2P_MAX_CONNECTIONS`: The maximum number of connections libp2p is willing to have before it starts pruning connections to reduce resource usage. Default: 300 +- `P2P_AUTODIALPEERRETRYTHRESHOLD`: When we've failed to dial a peer, do not autodial them again within this number of ms. Default: 1000 \* 120 +- `P2P_AUTODIALCONCURRENCY`: When dialling peers from the peer book to keep the number of open connections, add dials for this many peers to the dial queue at once. Default: 5 +- `P2P_MAXPEERADDRSTODIAL`: Maximum number of addresses allowed for a given peer before giving up. Default: 5 +- `P2P_AUTODIALINTERVAL`: Auto dial interval (miliseconds). Amount of time between close and open of new peer connection. Default: 5000 +- `P2P_ENABLE_NETWORK_STATS`: Enables 'getP2pNetworkStats' http endpoint. Since this contains private informations (like your ip addresses), this is disabled by default + +## Policy Server + +- `POLICY_SERVER_URL`: URI definition of PolicyServer, if any. See [the policy server documentation for more details](docs/PolicyServer.md). +- `POLICY_SERVER_API_KEY`: Optional API key sent by Ocean Node as `X-API-Key` when calling Policy Server. + +## Additional Nodes (Test Environments) + +- `NODE1_PRIVATE_KEY`: Used on test environments, specifically CI, represents the private key for node 1. Example: `"0xfd5c1ccea015b6d663618850824154a3b3fb2882c46cefb05b9a93fea8c3d215"` +- `NODE2_PRIVATE_KEY`: Used on test environments, specifically CI, represents the private key for node 2. Example: `"0x1263dc73bef43a9da06149c7e598f52025bf4027f1d6c13896b71e81bb9233fb"` + +## Cron Jobs + +- `CRON_DELETE_DB_LOGS`: Delete old logs from database Cron expression. Example: `0 0 * * *` (runs every day at midnight) +- `CRON_CLEANUP_C2D_STORAGE`: Clear c2d expired resources/storage and delete old jobs. Example: `*/5 * * * *` (runs every 5 minutes) + +## Compute + +The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable is used to configure Docker-based compute environments in Ocean Node. This guide will walk you through the options available for defining `DOCKER_COMPUTE_ENVIRONMENTS` and how to set it up correctly. For configuring compute environments and setting prices for each resource (including pricing units and examples), see [Compute pricing](compute-pricing.md). + +Example Configuration +The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of objects, where each object represents a Docker compute environment configuration. Below is an example configuration: + +`Disk` and `Ram` resources are always expressed in GB. -For other nodes (and browsers) to reach your node, it must be reachable at a stable, publicly routable address. Work through the options below in order — stop at the first one that applies to your setup. - -## Option 1: Static Public IP - -If your machine has a static public IP directly assigned to it (common in VPS/cloud environments), set `P2P_ANNOUNCE_ADDRESSES` to announce that address. The quickstart script does this automatically when you provide your IP or domain name. 
+```json +[ + { + "socketPath": "/var/run/docker.sock", + "scanImages": true, + "enableNetwork": false, + "imageRetentionDays": 7, + "imageCleanupInterval": 86400, + "resources": [ + { + "id": "disk", + "total": 10 + } + ], + "storageExpiry": 604800, + "maxJobDuration": 3600, + "minJobDuration": 60, + "access": { + "addresses": ["0x123", "0x456"], + "accessLists": [] + }, + "fees": { + "1": [ + { + "feeToken": "0x123", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + } + ] + }, + "free": { + "maxJobDuration": 60, + "minJobDuration": 10, + "maxJobs": 3, + "access": { + "addresses": [], + "accessLists": ["0x789"] + }, + "resources": [ + { + "id": "cpu", + "max": 1 + }, + { + "id": "ram", + "max": 1 + }, + { + "id": "disk", + "max": 1 + } + ] + } + } +] +``` -Example for a node with public IP `1.2.3.4`, using ports 9000 (TCP) and 9001 (WebSocket/TLS): +#### Configuration Options + +- **socketPath**: Path to the Docker socket (e.g., docker.sock). +- **scanImages**: Whether Docker images should be scanned for vulnerabilities using Trivy. If enabled and critical vulnerabilities are found, the C2D job is rejected. +- **scanImageDBUpdateInterval**: How often to update the vulnerability database, in seconds. Default: 43200 (12 hours) +- **enableNetwork**: Whether networking is enabled for algorithm containers. Default: false +- **imageRetentionDays** - how long docker images are kept, in days. Default: 7 +- **imageCleanupInterval** - how often to run cleanup for docker images, in seconds. Min: 3600 (1hour), Default: 86400 (24 hours) +- **paymentClaimInterval** - how often to run payment claiming, in seconds. Default: 3600 (1 hour) +- **enableBenchmark** - when set to `true`, the node will auto-create a benchmark compute environment at startup using the system's available resources (CPU, RAM, disk, GPUs). Default: `false` +- **storageExpiry**: Amount of seconds for storage expiry.(Mandatory) +- **maxJobDuration**: Maximum duration in seconds for a job.(Mandatory) +- **minJobDuration**: Minimum duration in seconds for a job.(Mandatory) +- **access**: Access control configuration for paid compute jobs. If both `addresses` and `accessLists` are empty, all addresses are allowed. + - **addresses**: Array of Ethereum addresses allowed to run compute jobs. If empty and no access lists are configured, all addresses are allowed. + - **accessLists**: Array of AccessList contract addresses. Users holding NFTs from these contracts can run compute jobs. Checked across all supported networks. +- **fees**: Fee structure for the compute environment. + - **feeToken**: Token address for the fee. + - **prices**: Array of resource pricing information. + - **id**: Resource type (e.g., `cpu`, `ram`, `disk`). + - **price**: Price per unit of the resource. +- **resources**: Array of resources available in the compute environment. + - **id**: Resource type (e.g., `cpu`, `ram`, `disk`). + - **total**: Total number of the resource available. + - **min**: Minimum number of the resource needed for a job. + - **max**: Maximum number of the resource for a job. +- **free**: Optional configuration for free jobs. + - **storageExpiry**: Amount of seconds for storage expiry for free jobs. + - **maxJobDuration**: Maximum duration in seconds for a free job. + - **minJobDuration**: Minimum duration in seconds for a free job. + - **maxJobs**: Maximum number of simultaneous free jobs. + - **allowImageBuild**: If building images is allowed on free envs. 
Default: false + - **access**: Access control configuration for free compute jobs. Works the same as the main `access` field. + - **addresses**: Array of Ethereum addresses allowed to run free compute jobs. + - **accessLists**: Array of AccessList contract addresses for free compute access control. + - **resources**: Array of resources available for free jobs. + - **id**: Resource type (e.g., `cpu`, `ram`, `disk`). + - **total**: Total number of the resource available. + - **min**: Minimum number of the resource needed for a job. + - **max**: Maximum number of the resource for a job. + +### Docker Registry Authentication + +- `DOCKER_REGISTRY_AUTHS`: JSON object mapping Docker registry URLs to authentication credentials. Used for accessing private Docker/OCI registries when validating and pulling Docker images. Each registry entry must provide either `username`+`password` or `auth`. Example: -```bash -P2P_ANNOUNCE_ADDRESSES='[ - "/ip4/1.2.3.4/tcp/9000", - "/ip4/1.2.3.4/tcp/9001/ws", - "/ip4/1.2.3.4/tcp/9001/tls/ws" -]' +```json +{ + "https://registry-1.docker.io": { + "username": "myuser", + "password": "mypassword" + }, + "https://ghcr.io": { + "username": "myuser", + "password": "ghp_..." + }, + "https://registry.gitlab.com": { + "auth": "glpat-..." + } +} ``` -The `/tls/ws` entry enables [AutoTLS](#tls-and-sni-server-name-indication) for node-to-browser communication. AutoTLS provisions a certificate and serves TLS at the transport layer on the WebSocket port, making it browser-compatible — no DNS setup required on your part. +**Configuration Options:** -## Option 2: Dynamic DNS (no static IP) +- **Registry URL** (key): The full registry URL including protocol (e.g., `https://registry-1.docker.io`, `https://ghcr.io`, `https://registry.gitlab.com`) +- **username** (optional): Username for registry authentication. Required if using password-based auth. +- **password** (optional): Password or personal access token for registry authentication. Required if using username-based auth. +- **auth** (optional): Authentication token (alternative to username+password). Required if not using username+password. -If your public IP changes (residential ISP, dynamic VPS), use a Dynamic DNS (DDNS) service to get a stable hostname that always resolves to your current IP. +**Notes:** -Popular free DDNS providers: [DuckDNS](https://www.duckdns.org/), [No-IP](https://www.noip.com/), [Dynu](https://www.dynu.com/). +- For Docker Hub (`registry-1.docker.io`), you can use your Docker Hub username and password, or a personal access token (PAT) as the password. +- For GitHub Container Registry (GHCR), use your GitHub username with a personal access token (PAT) as the password, or use a token directly. +- For GitLab Container Registry, use a personal access token (PAT) or deploy token. +- The registry URL must match exactly (including protocol) with the registry used in the Docker image reference. +- If no credentials are configured for a registry, the node will attempt unauthenticated access (works for public images only). -Once you have a hostname (e.g. 
`mynode.duckdns.org`), set up the DDNS client on your machine to keep it updated, then use the hostname in your announce addresses: +--- -```bash -P2P_ANNOUNCE_ADDRESSES='[ - "/dns4/mynode.duckdns.org/tcp/9000", - "/dns4/mynode.duckdns.org/tcp/9001/ws", - "/dns4/mynode.duckdns.org/tcp/9001/tls/ws" -]' -``` +## Private Docker Registries with Per-Job Authentication -## Option 3: Port Forwarding +In addition to node-level registry authentication via `DOCKER_REGISTRY_AUTHS`, you can provide encrypted Docker registry authentication credentials on a per-job basis. This allows different users to use different private registries or credentials for their compute jobs. -If you are behind a NAT router (home network), you need to forward the P2P ports from your router to the machine running the node. +### Overview -1. Find the local IP of your machine (e.g. `192.168.1.50`). -2. Log in to your router admin panel and add port forwarding rules: - - External TCP port `9000` → `192.168.1.50:9000` - - External TCP port `9001` → `192.168.1.50:9001` -3. Find your public IP (e.g. via `curl ifconfig.me`) or set up a DDNS hostname (see Option 2). -4. Set `P2P_ANNOUNCE_ADDRESSES` to your public IP or DDNS hostname as shown above. +The `encryptedDockerRegistryAuth` parameter allows you to securely provide Docker registry credentials that are: -If your router supports UPnP, the node can attempt to configure port forwarding automatically. Enable it with: +- Encrypted using ECIES (Elliptic Curve Integrated Encryption Scheme) with the node's public key +- Validated to ensure proper format (either `auth` string OR `username`+`password`) +- Used only for the specific compute job, overriding node-level configuration if provided -```bash -P2P_ENABLE_UPNP=true -``` +### Encryption Format -UPnP is not reliable on all routers and should not be relied on as the sole method. +The `encryptedDockerRegistryAuth` must be: -## Option 4: Circuit Relay (fallback) +1. A JSON object matching the Docker registry auth schema (see below) +2. Encrypted using ECIES with the node's public key +3. Hex-encoded as a string -If none of the above options are available (strict NAT, no port forwarding, no public IP), use a circuit relay. A relay node proxies traffic between peers, allowing your node to participate in the network without being directly reachable. +**Auth Schema Format:** -Enable the circuit relay client: +The decrypted JSON must follow this structure: -```bash -P2P_ENABLE_CIRCUIT_RELAY_CLIENT=true -P2P_CIRCUIT_RELAYS=1 +```json +{ + "username": "myuser", + "password": "mypassword" +} ``` -Note: circuit relay increases latency and bandwidth usage on the relay node. It should be a last resort — a node running only via relay is a burden on the network and will have degraded performance. - -Do not enable `P2P_ENABLE_CIRCUIT_RELAY_SERVER` on edge nodes; that setting is for well-connected nodes that want to help others. +OR ---- +```json +{ + "auth": "base64-encoded-username:password" +} +``` -## TLS and SNI (Server Name Indication) +OR (all fields present) -AutoTLS provisions TLS certificates for your node automatically, enabling P2P node-to-browser communication. It is always active internally — no DNS or certificate setup required on your part. For it to work, you must include a `/tls/ws` entry in `P2P_ANNOUNCE_ADDRESSES`, which the quickstart script does automatically. 
+```json +{ + "username": "myuser", + "password": "mypassword", + "auth": "base64-encoded-username:password" +} +``` -AutoTLS serves TLS at the transport layer on the WebSocket port, making it standard browser-compatible WSS — no separate port is needed. +**Validation Rules:** -Example `.env` / docker-compose entry: +- Either `auth` string must be provided (non-empty), OR +- Both `username` AND `password` must be provided (both non-empty) +- Empty strings are not accepted -```bash -P2P_ANNOUNCE_ADDRESSES='[ - "/ip4//tcp/9000", - "/ip4//tcp/9001/ws", - "/ip4//tcp/9001/tls/ws" -]' -``` +### Usage Examples -Or in `config.json`: +#### 1. Paid Compute Start (`POST /api/services/compute`) ```json { - "p2pConfig": { - "announceAddresses": [ - "/ip4//tcp/9000", - "/ip4//tcp/9001/ws", - "/ip4//tcp/9001/tls/ws" - ] - } + "command": "startCompute", + "consumerAddress": "0x...", + "signature": "...", + "nonce": "123", + "environment": "0x...", + "algorithm": { + "meta": { + "container": { + "image": "registry.example.com/myorg/myimage:latest" + } + } + }, + "datasets": [], + "payment": { ... }, + "encryptedDockerRegistryAuth": "0xdeadbeef..." // ECIES encrypted hex string } ``` -When a TLS certificate is provisioned successfully, you will see logs like: +#### 2. Free Compute Start (`POST /api/services/freeCompute`) -``` ------ A TLS certificate was provisioned ----- ------ TLS addresses: ----- -/ip4//tcp/9001/sni/... -/ip4//tcp/9001/sni/... ------ End of TLS addresses ----- +```json +{ + "command": "freeStartCompute", + "consumerAddress": "0x...", + "signature": "...", + "nonce": "123", + "environment": "0x...", + "algorithm": { + "meta": { + "container": { + "image": "ghcr.io/myorg/myimage:latest" + } + } + }, + "datasets": [], + "encryptedDockerRegistryAuth": "0xdeadbeef..." // ECIES encrypted hex string +} ``` -## Verifying Connectivity +#### 3. Initialize Compute -### Check how your node sees itself - -```bash -curl http://localhost:8000/getP2pPeer?peerId= -``` - -Look at the `addresses` array in the response. Are any of those IPs/hostnames reachable from outside your network? +The `initialize` command accepts `encryptedDockerRegistryAuth` as part of the command payload, as it validates the image ```json { - "addresses": [ - { "multiaddr": "/ip4/1.2.3.4/tcp/9000", "isCertified": false }, - { "multiaddr": "/ip4/1.2.3.4/tcp/9001/ws", "isCertified": false }, - { "multiaddr": "/ip4/1.2.3.4/tcp/9001/tls/ws", "isCertified": false } - ] + "command": "initialize", + "datasets": [...], + "algorithm": { + "meta": { + "container": { + "image": "registry.gitlab.com/myorg/myimage:latest" + } + } + }, + "environment": "0x...", + "payment": { ... }, + "consumerAddress": "0x...", + "maxJobDuration": 3600, + "encryptedDockerRegistryAuth": "0xdeadbeef..." // ECIES encrypted hex string } ``` -### Check how your node is seen by the network +### Encryption Process -Ask a known public node to report back what it knows about you: +To create `encryptedDockerRegistryAuth`, you need to: -```bash -curl https://cp1.oncompute.ai/getP2pPeer?peerId= -``` +1. **Prepare the auth JSON object:** + + ```json + { + "username": "myuser", + "password": "mypassword" + } + ``` + +2. **Get the node's public key** (available via the node's API or P2P interface) + +3. **Encrypt the JSON string** using ECIES with the node's public key + +4. 
**Hex-encode the encrypted result** + +### Behavior + +- **Priority**: If `encryptedDockerRegistryAuth` is provided, it takes precedence over node-level `DOCKER_REGISTRY_AUTHS` configuration for that specific job +- **Validation**: The encrypted auth is decrypted and validated before the job starts. Invalid formats will result in an error +- **Scope**: The credentials are used for: + - Validating the Docker image exists (during initialize) + - Pulling the Docker image (during job execution) +- **Security**: Credentials are encrypted and only decrypted by the node using its private key + +### Error Handling + +If `encryptedDockerRegistryAuth` is invalid, you'll receive an error: -If the response is empty or missing your public address, the node is not reachable from the outside. +- **Decryption failure**: `Invalid encryptedDockerRegistryAuth: failed to parse JSON - [error message]` +- **Schema validation failure**: `Invalid encryptedDockerRegistryAuth: Either 'auth' must be provided, or both 'username' and 'password' must be provided` -## All P2P Environment Variables +### Notes -See [env.md](env.md#p2p) for the full list of P2P configuration options. +- The `encryptedDockerRegistryAuth` parameter is optional. If not provided, the node will use `DOCKER_REGISTRY_AUTHS` configuration or attempt unauthenticated access +- The registry URL in the Docker image reference must match the registry you're authenticating to +- For Docker Hub, use `registry-1.docker.io` as the registry URL +- Credentials are stored encrypted in the job record and decrypted only when needed for image operations diff --git a/docs/networking.md b/docs/networking.md index b6621a57a..b0cf6e9af 100644 --- a/docs/networking.md +++ b/docs/networking.md @@ -6,59 +6,7 @@ For other nodes (and browsers) to reach your node, it must be reachable at a sta If your machine has a static public IP directly assigned to it (common in VPS/cloud environments), set `P2P_ANNOUNCE_ADDRESSES` to announce that address. The quickstart script does this automatically when you provide your IP or domain name. -<<<<<<< HEAD -- decide what IP version to use (IPV4 or/and IPv6). You should use both if available. -- decide if you want to filter private ips (if you run multiple nodes in a LAN or cloud environment, leave them on) -- if you already have an external ip configured on your machine, you are good to go. -- if you have a private ip, but an UPNP gateway, you should be fine as well. -- if you have a private ip and you can forward external ports from your gateway, use P2P_ANNOUNCE_ADDRESSES and let other nodes know your external IP/port. -- if you cannot forward ports on your gateway, the only choice is to use a circuit relay server (then all traffic will go through that node and it will proxy) - -## TLS and SNI (Server Name Indication) - -AutoTLS is used to provision TLS certificates for your node in order to allow P2P node-to-browser communication. 
-To enable SNI with Ocean Node's autoTLS feature, include `/tls/ws` or `/tls/wss` addresses in `P2P_ANNOUNCE_ADDRESSES`: - -Add to .env file - -```bash -export P2P_ANNOUNCE_ADDRESSES='[ - "/ip4//tcp/9000", - "/ip4//tcp/9001/tls/ws", - "/ip4//tcp/9005/tls/wss", -]' -``` - -Or in config.json file: - -```json -{ - "p2pConfig": { - "announceAddresses": [ - "/ip4//tcp/9000", - "/ip4//tcp/9001/tls/ws", - "/ip4//tcp/9005/tls/wss" - ] - } -} -``` - -When TLS certificates are provisioned, you should see logs like: - -``` ------ A TLS certificate was provisioned ----- ------ TLS addresses: ----- -/ip4//tcp/9001/sni/... -/ip4//tcp/9005/sni/... ------ End of TLS addresses ----- -``` - -In order to check connectivity, you can do the following: - -### On your node, check and observe how your node sees itself: -======= Example for a node with public IP `1.2.3.4`, using ports 9000 (TCP) and 9001 (WebSocket/TLS): ->>>>>>> 8719d64c2e23093acac0e30661979009d9ddadd9 ```bash P2P_ANNOUNCE_ADDRESSES='[ diff --git a/package-lock.json b/package-lock.json index c2aa87c70..b3522f86c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "ocean-node", - "version": "3.0.31", + "version": "3.1.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "ocean-node", - "version": "3.0.31", + "version": "3.1.0", "hasInstallScript": true, "license": "Apache-2.0", "dependencies": { diff --git a/package.json b/package.json index d7c434f94..ab701c163 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ocean-node", - "version": "3.0.31", + "version": "3.1.0", "description": "Ocean Node is used to run all core services in the Ocean stack", "author": "Ocean Protocol Foundation", "license": "Apache-2.0", diff --git a/scripts/ocean-node-quickstart.sh b/scripts/ocean-node-quickstart.sh index 67481d878..5280c45f6 100755 --- a/scripts/ocean-node-quickstart.sh +++ b/scripts/ocean-node-quickstart.sh @@ -707,6 +707,7 @@ services: # P2P_BOOTSTRAP_NODES: '' # P2P_FILTER_ANNOUNCED_ADDRESSES: '' DOCKER_COMPUTE_ENVIRONMENTS: '$DOCKER_COMPUTE_ENVIRONMENTS' + ENABLE_BENCHMARK: true $( if [ "$enable_tls" == "y" ]; then echo " HTTP_CERT_PATH: '/usr/src/app/certs/cert.pem'" @@ -772,4 +773,4 @@ echo -e "\e[1;32m4)\e[0m If using SSL/TLS with a custom domain name, make sure t echo "" echo -e "If your node is not reachable by other peers (NAT, no public IP, port forwarding issues)," echo -e "refer to the networking guide for help with Dynamic DNS, port forwarding, and circuit relay:" -echo -e "\e[1;34mhttps://github.com/oceanprotocol/ocean-node/blob/main/docs/networking.md\e[0m" \ No newline at end of file +echo -e "\e[1;34mhttps://github.com/oceanprotocol/ocean-node/blob/main/docs/networking.md\e[0m" diff --git a/src/@types/C2D/C2D.ts b/src/@types/C2D/C2D.ts index 55f2eae35..fb764c6c6 100644 --- a/src/@types/C2D/C2D.ts +++ b/src/@types/C2D/C2D.ts @@ -120,6 +120,7 @@ export interface ComputeEnvironmentBaseConfig { access: ComputeAccessList free?: ComputeEnvironmentFreeOptions platform: RunningPlatform + enableNetwork?: boolean // whether network is enabled for algorithm containers } export interface ComputeRuntimes { @@ -152,6 +153,7 @@ export interface C2DEnvironmentConfig { access?: ComputeAccessList free?: ComputeEnvironmentFreeOptions resources?: ComputeResource[] + enableNetwork?: boolean // whether network is enabled for algorithm containers } export interface C2DDockerConfig { @@ -168,7 +170,6 @@ export interface C2DDockerConfig { scanImages?: boolean scanImageDBUpdateInterval?: 
number // Default: 12 hours environments: C2DEnvironmentConfig[] - enableNetwork?: boolean // whether network is enabled for algorithm containers } export type ComputeResultType = diff --git a/src/@types/blockchain.ts b/src/@types/blockchain.ts index 36b158e24..0e67c6ada 100644 --- a/src/@types/blockchain.ts +++ b/src/@types/blockchain.ts @@ -5,6 +5,8 @@ export interface SupportedNetwork { chunkSize?: number startBlock?: number fallbackRPCs?: string[] + primaryRpcTimeout?: number + fallbackRpcTimeout?: number } export interface RPCS { diff --git a/src/OceanNode.ts b/src/OceanNode.ts index a8c9c0aac..4e23fedbe 100644 --- a/src/OceanNode.ts +++ b/src/OceanNode.ts @@ -1,7 +1,13 @@ import { OceanP2P } from './components/P2P/index.js' import { OceanProvider } from './components/Provider/index.js' import { OceanIndexer } from './components/Indexer/index.js' -import { OceanNodeConfig, P2PCommandResponse } from './@types/OceanNode.js' +import { + AccessListContract, + OceanNodeConfig, + P2PCommandResponse +} from './@types/OceanNode.js' +import { ValidateChainId } from './@types/commands.js' + import { Database } from './components/database/index.js' import { Escrow } from './components/core/utils/escrow.js' import { CoreHandlersRegistry } from './components/core/handler/coreHandlersRegistry.js' @@ -15,6 +21,8 @@ import { BlockchainRegistry } from './components/BlockchainRegistry/index.js' import { Blockchain } from './utils/blockchain.js' import { createPersistentStorage } from './components/persistentStorage/createPersistentStorage.js' import { PersistentStorageFactory } from './components/persistentStorage/PersistentStorageFactory.js' +import { isAddress, FallbackProvider, ethers } from 'ethers' +import { create256Hash } from './utils/crypt.js' export interface RequestLimiter { requester: string | string[] // IP address or peer ID @@ -40,6 +48,7 @@ export class OceanNode { private requestMap: Map private auth: Auth private persistentStorage: PersistentStorageFactory + private database: Database // eslint-disable-next-line no-useless-constructor private constructor( @@ -51,21 +60,15 @@ export class OceanNode { public keyManager?: KeyManager, public blockchainRegistry?: BlockchainRegistry ) { - if (keyManager) { - this.keyManager = keyManager - } else { - this.keyManager = new KeyManager(config) - } - if (blockchainRegistry) { - this.blockchainRegistry = blockchainRegistry - } else { - this.blockchainRegistry = new BlockchainRegistry(this.keyManager, config) - } - this.coreHandlers = CoreHandlersRegistry.getInstance(this) + this.keyManager = keyManager + this.blockchainRegistry = blockchainRegistry + this.coreHandlers = CoreHandlersRegistry.getInstance(this, true) this.requestMap = new Map() this.config = config + this.database = db + if (this.db && this.db?.authToken) { - this.auth = new Auth(this.db.authToken) + this.auth = new Auth(this.db.authToken, config) } if (node) { node.setCoreHandlers(this.coreHandlers) @@ -86,6 +89,7 @@ export class OceanNode { this.persistentStorage = null } } + this.addIndexer(indexer) } // Singleton instance @@ -104,10 +108,19 @@ export class OceanNode { if (!config) { throw new Error('KeyManager and BlockchainRegistry are required') } - keyManager = new KeyManager(config) - blockchainRegistry = new BlockchainRegistry(keyManager, config) + if (!keyManager) keyManager = new KeyManager(config) + if (!blockchainRegistry) + blockchainRegistry = new BlockchainRegistry(keyManager, config) } - // prepare compute engines + // teardown old instance if needed + 
this.instance?.tearDownAll().catch((err: unknown) => { + OCEAN_NODE_LOGGER.warn( + `Failed to tear down previous OceanNode instance: ${ + err instanceof Error ? err.message : String(err) + }` + ) + }) + OCEAN_NODE_LOGGER.debug('Creating new OceanNode instance') this.instance = new OceanNode( config, db, @@ -117,6 +130,8 @@ export class OceanNode { keyManager, blockchainRegistry ) + } else { + OCEAN_NODE_LOGGER.debug('Return cached OceanNode instance') } return this.instance } @@ -127,7 +142,34 @@ export class OceanNode { } public addIndexer(_indexer: OceanIndexer) { + const previous = this.indexer this.indexer = _indexer + if (previous) { + previous.stop().catch((err: unknown) => { + OCEAN_NODE_LOGGER.warn( + `Failed to stop replaced indexer: ${err instanceof Error ? err.message : String(err)}` + ) + }) + } + } + + public async tearDownAll() { + if (this.c2dEngines) { + await this.c2dEngines.stopAllEngines() + this.c2dEngines = null + } + if (this.indexer) { + await this.indexer.stop() + this.indexer = null + } + if (this.blockchainRegistry) { + this.blockchainRegistry.stop() + this.blockchainRegistry = null + } + if (OceanNode.instance === this) { + OceanNode.instance = null + } + OCEAN_NODE_LOGGER.debug('OceanNode instance stopped & cleared') } public async addC2DEngines() { @@ -161,11 +203,7 @@ export class OceanNode { return this.indexer } - public getDatabase(): Database { - return this.db - } - - public getC2DEngines(): C2DEngines { + public getC2DEngines(): C2DEngines | undefined { return this.c2dEngines } @@ -259,4 +297,103 @@ export class OceanNode { } } } + + getAdminAddresses(): { addresses: string[]; accessLists: any } { + const ret = { + addresses: [] as string[], + accessLists: undefined as AccessListContract | undefined + } + + if (this.config.allowedAdmins && this.config.allowedAdmins.length > 0) { + for (const admin of this.config.allowedAdmins) { + if (isAddress(admin) === true) { + ret.addresses.push(admin) + } + } + } + ret.accessLists = this.config.allowedAdminsList + return ret + } + + checkSupportedChainId(chainId: number): ValidateChainId { + if (!chainId || !(`${chainId.toString()}` in this.config.supportedNetworks)) { + OCEAN_NODE_LOGGER.error(`Chain ID ${chainId} is not supported`) + return { + validation: false, + networkRpc: '' + } + } + return { + validation: true, + networkRpc: this.config.supportedNetworks[chainId.toString()].rpc + } + } + + async getJsonRpcProvider(chainId: number): Promise { + const checkResult = this.checkSupportedChainId(chainId) + if (!checkResult.validation) { + return null + } + const blockchain = this.getBlockchain(chainId) + if (!blockchain) return null + return await blockchain.getProvider() + } + + hasP2PInterface() { + return this.config.hasP2P || false + } + + private dbInitPromise: Promise | null = null + async getDatabase(forceReload: boolean = false): Promise { + if (!this.database || forceReload) { + if (!this.dbInitPromise || forceReload) { + const { dbConfig } = this.config + if (dbConfig && dbConfig.url) { + this.dbInitPromise = Database.init(dbConfig).then((db) => { + this.database = db + return db + }) + } + } + return await this.dbInitPromise + } + return this.database + } + + async getValidationSignature(ddo: string): Promise { + try { + const hashedDDO = create256Hash(ddo) + const providerWallet = await this.keyManager.getEthWallet() + const messageHash = ethers.solidityPackedKeccak256( + ['bytes'], + [ethers.hexlify(ethers.toUtf8Bytes(hashedDDO))] + ) + const signed32Bytes = await providerWallet.signMessage( + new 
Uint8Array(ethers.toBeArray(messageHash)) + ) + const signatureSplitted = ethers.Signature.from(signed32Bytes) + const v = signatureSplitted.v <= 1 ? signatureSplitted.v + 27 : signatureSplitted.v + const r = ethers.hexlify(signatureSplitted.r) // 32 bytes + const s = ethers.hexlify(signatureSplitted.s) + return { hash: hashedDDO, publicKey: providerWallet.address, r, s, v } + } catch (error) { + OCEAN_NODE_LOGGER.logMessage(`Validation signature error: ${error}`, true) + return { hash: '', publicKey: '', r: '', s: '', v: '' } + } + } + + isRemoteDDO(ddo: any): boolean { + let keys + try { + keys = Object.keys(ddo) + } catch (e) { + return false + } + + if (keys.length === 1 && keys[0] === 'remote') { + return true + } + + return false + } } diff --git a/src/components/Auth/index.ts b/src/components/Auth/index.ts index a043bed5c..6b671d6fd 100644 --- a/src/components/Auth/index.ts +++ b/src/components/Auth/index.ts @@ -2,9 +2,8 @@ import { AuthToken, AuthTokenDatabase } from '../database/AuthTokenDatabase.js' import jwt from 'jsonwebtoken' import { checkNonce, NonceResponse } from '../core/utils/nonceHandler.js' import { OceanNode } from '../../OceanNode.js' -import { getConfiguration } from '../../utils/index.js' import { CommonValidation } from '../../utils/validators.js' - +import { OceanNodeConfig } from '../../@types/OceanNode.js' export interface AuthValidation { token?: string address?: string @@ -16,16 +15,18 @@ export interface AuthValidation { export class Auth { private authTokenDatabase: AuthTokenDatabase + private jwtSecret: string - public constructor(authTokenDatabase: AuthTokenDatabase) { + public constructor(authTokenDatabase: AuthTokenDatabase, config: OceanNodeConfig) { this.authTokenDatabase = authTokenDatabase + this.jwtSecret = config.jwtSecret } - public async getJwtSecret(): Promise { - const config = await getConfiguration() - return config.jwtSecret + public getJwtSecret(): string { + return this.jwtSecret } + // eslint-disable-next-line require-await async getJWTToken(address: string, nonce: string, createdAt: number): Promise { const jwtToken = jwt.sign( { @@ -33,7 +34,7 @@ export class Auth { nonce, createdAt }, - await this.getJwtSecret() + this.getJwtSecret() ) return jwtToken @@ -84,7 +85,8 @@ export class Auth { if (signature && address && nonce) { const oceanNode = OceanNode.getInstance() const nonceCheckResult: NonceResponse = await checkNonce( - oceanNode.getDatabase().nonce, + oceanNode.getConfig(), + (await oceanNode.getDatabase()).nonce, address, parseInt(nonce), signature, diff --git a/src/components/BlockchainRegistry/index.ts b/src/components/BlockchainRegistry/index.ts index c95f2ecba..9426d1275 100644 --- a/src/components/BlockchainRegistry/index.ts +++ b/src/components/BlockchainRegistry/index.ts @@ -39,11 +39,10 @@ export class BlockchainRegistry { // Get network configuration const networkConfig = supportedNetworks[chainId.toString()] - const { rpc } = networkConfig - const { fallbackRPCs } = networkConfig + if (!networkConfig.chainId) networkConfig.chainId = chainId // Create Blockchain instance with new constructor - const blockchain = new Blockchain(this.keyManager, rpc, chainId, fallbackRPCs) + const blockchain = new Blockchain(this.keyManager, networkConfig) // Cache the instance this.blockchains.set(chainId, blockchain) @@ -60,6 +59,11 @@ export class BlockchainRegistry { return Array.from(this.blockchains.values()) } + public stop() { + for (const blockchain of Array.from(this.blockchains.values())) blockchain.stop() + 
this.blockchains.clear() + } + /** * Remove a Blockchain instance from the registry. * Useful for cleanup or when a network is no longer supported. @@ -68,18 +72,11 @@ export class BlockchainRegistry { */ removeBlockchain(chainId: number): void { if (this.blockchains.has(chainId)) { + this.getBlockchain(chainId).stop() this.blockchains.delete(chainId) } } - /** - * Clear all Blockchain instances from the registry. - * Useful for cleanup or testing. - */ - clear(): void { - this.blockchains.clear() - } - /** * Get the number of initialized Blockchain instances */ diff --git a/src/components/Indexer/ChainIndexer.ts b/src/components/Indexer/ChainIndexer.ts index 66da722cb..80bd13674 100644 --- a/src/components/Indexer/ChainIndexer.ts +++ b/src/components/Indexer/ChainIndexer.ts @@ -5,7 +5,6 @@ import { LOG_LEVELS_STR } from '../../utils/logging/Logger.js' import { isDefined, sleep } from '../../utils/util.js' import { EVENTS, INDEXER_CRAWLING_EVENTS } from '../../utils/index.js' import { INDEXER_LOGGER } from '../../utils/logging/common.js' -import { getDatabase } from '../../utils/database.js' import { DEVELOPMENT_CHAIN_ID } from '../../utils/address.js' import { processBlocks, processChunkLogs } from './processor.js' import { Blockchain } from '../../utils/blockchain.js' @@ -16,6 +15,7 @@ import { retrieveChunkEvents } from './utils.js' import { OceanNodeConfig } from '../../@types/OceanNode.js' +import { Database } from '../database/index.js' export interface ReindexTask { txId: string @@ -36,15 +36,20 @@ export class ChainIndexer { private reindexQueue: ReindexTask[] = [] private eventEmitter: EventEmitter private blockchain: Blockchain + private db: Database constructor( blockchain: Blockchain, rpcDetails: SupportedNetwork, - eventEmitter: EventEmitter + eventEmitter: EventEmitter, + database: Database, + _config: OceanNodeConfig ) { this.blockchain = blockchain this.eventEmitter = eventEmitter this.rpcDetails = rpcDetails + this.db = database + this.config = _config } /** @@ -237,7 +242,8 @@ export class ChainIndexer { provider, this.blockchain.getSupportedChain(), startBlock, - blocksToProcess + blocksToProcess, + this.config ) INDEXER_LOGGER.debug( @@ -320,7 +326,7 @@ export class ChainIndexer { * Get the last indexed block from database */ private async getLastIndexedBlock(): Promise { - const { indexer } = await getDatabase() + const { indexer } = this.db try { const networkDetails = await indexer.retrieve(this.blockchain.getSupportedChain()) if (networkDetails && networkDetails.lastIndexedBlock) { @@ -352,7 +358,7 @@ export class ChainIndexer { return -1 } - const { indexer } = await getDatabase() + const { indexer } = this.db const updatedIndex = await indexer.update( this.blockchain.getSupportedChain(), block @@ -383,7 +389,7 @@ export class ChainIndexer { * Delete all assets from this chain */ private async deleteAllAssetsFromChain(): Promise { - const { ddo } = await getDatabase() + const { ddo } = this.db try { const numDeleted = await ddo.deleteAllAssetsFromChain( this.blockchain.getSupportedChain() @@ -449,7 +455,8 @@ export class ChainIndexer { logs, signer, provider, - this.blockchain.getSupportedChain() + this.blockchain.getSupportedChain(), + this.config ) // Emit event to clear from parent queue diff --git a/src/components/Indexer/index.ts b/src/components/Indexer/index.ts index 17460bf83..7bcc95fa3 100644 --- a/src/components/Indexer/index.ts +++ b/src/components/Indexer/index.ts @@ -35,11 +35,13 @@ import { BlockchainRegistry } from '../BlockchainRegistry/index.js' 
import { CommandStatus, JobStatus } from '../../@types/commands.js' import { buildJobIdentifier, getDeployedContractBlock } from './utils.js' import { create256Hash } from '../../utils/crypt.js' -import { getDatabase, isReachableConnection } from '../../utils/database.js' +import { isReachableConnection } from '../../utils/database.js' import { sleep } from '../../utils/util.js' import { isReindexingNeeded } from './version.js' import { getPackageVersion } from '../../utils/version.js' import { DB_EVENTS, ES_CONNECTION_EVENTS } from '../database/ElasticsearchConfigHelper.js' +import { OceanNodeConfig } from '../../@types/OceanNode.js' +import { clearEventProcessorCache } from './processor.js' /** * Event emitter for DDO (Data Descriptor Object) events @@ -79,6 +81,7 @@ let numCrawlAttempts = 0 */ export class OceanIndexer { private db: Database + private config: OceanNodeConfig private networks: RPCS private blockchainRegistry?: BlockchainRegistry private supportedChains: string[] @@ -88,14 +91,16 @@ export class OceanIndexer { private reconnectTimer: NodeJS.Timeout | null = null constructor( - db: Database, - supportedNetworks: RPCS, - blockchainRegistry: BlockchainRegistry + _db: Database, + _config: OceanNodeConfig, + _blockchainRegistry: BlockchainRegistry ) { - this.db = db - this.networks = supportedNetworks - this.blockchainRegistry = blockchainRegistry - this.supportedChains = Object.keys(supportedNetworks) + this.db = _db + this.config = _config + this.networks = this.config.indexingNetworks + this.blockchainRegistry = _blockchainRegistry + this.supportedChains = Object.keys(this.networks) + clearEventProcessorCache() INDEXING_QUEUE = [] this.setupDbConnectionListeners() this.startAllChainIndexers() @@ -145,10 +150,6 @@ export class OceanIndexer { INDEXER_LOGGER.info( 'Database connection stable - reinitialising DB and restarting all chain indexers' ) - const freshDb = await getDatabase(true) - if (freshDb) { - this.db = freshDb - } await this.startAllChainIndexers() }, 5000) @@ -292,7 +293,9 @@ export class OceanIndexer { const indexer = new ChainIndexer( blockchain, rpcDetails, - INDEXER_CRAWLING_EVENT_EMITTER + INDEXER_CRAWLING_EVENT_EMITTER, + this.db, + this.config ) INDEXER_LOGGER.log( @@ -597,4 +600,10 @@ export class OceanIndexer { INDEXER_LOGGER.info('No reindexing needed based on version check') } } + + public async stop(): Promise { + await this.stopAllChainIndexers() + INDEXER_CRAWLING_EVENT_EMITTER.removeAllListeners() + INDEXER_DDO_EVENT_EMITTER.removeAllListeners() + } } diff --git a/src/components/Indexer/processor.ts b/src/components/Indexer/processor.ts index 7518ed2a6..ada273397 100644 --- a/src/components/Indexer/processor.ts +++ b/src/components/Indexer/processor.ts @@ -1,7 +1,6 @@ import { ethers, Signer, FallbackProvider, Interface, getAddress } from 'ethers' import { BlocksEvents, ProcessingEvents } from '../../@types/blockchain.js' import { EVENTS } from '../../utils/constants.js' -import { getConfiguration } from '../../utils/config.js' import { INDEXER_LOGGER } from '../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../utils/logging/Logger.js' import { fetchEventFromTransaction } from '../../utils/util.js' @@ -23,6 +22,7 @@ import { import { findEventByKey } from './utils.js' import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' with { type: 'json' } import AccessListContract from 
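// --- Editor's sketch (not part of the patch): OceanIndexer lifecycle with injected config ---
// The constructor now takes (db, config, blockchainRegistry), derives the supported
// networks from config.indexingNetworks, clears the processor cache and starts all chain
// indexers; the new stop() undoes that. Project imports omitted; types as in this diff.
async function runIndexer(
  db: Database,
  config: OceanNodeConfig,
  registry: BlockchainRegistry
): Promise<void> {
  const indexer = new OceanIndexer(db, config, registry)   // starts crawling immediately
  // ... node runs ...
  await indexer.stop()                                     // stops indexers, drops listeners
}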
'@oceanprotocol/contracts/artifacts/contracts/accesslists/AccessList.sol/AccessList.json' with { type: 'json' } +import { OceanNodeConfig } from '../../@types/OceanNode.js' const EVENT_PROCESSOR_MAP: Record = { [EVENTS.METADATA_CREATED]: MetadataEventProcessor, @@ -41,7 +41,16 @@ const EVENT_PROCESSOR_MAP: Record = { const processorInstances = new Map() -function getEventProcessor(eventType: string, chainId: number): BaseEventProcessor { +/** Drop cached processors so they are recreated with the current config (tests, new indexer, etc.). */ +export function clearEventProcessorCache(): void { + processorInstances.clear() +} + +function getEventProcessor( + eventType: string, + chainId: number, + config: OceanNodeConfig +): BaseEventProcessor { const cacheKey = `${eventType}-${chainId}` if (!processorInstances.has(cacheKey)) { @@ -49,9 +58,13 @@ function getEventProcessor(eventType: string, chainId: number): BaseEventProcess if (!ProcessorClass) { throw new Error(`No processor found for event type: ${eventType}`) } - processorInstances.set(cacheKey, new ProcessorClass(chainId)) + INDEXER_LOGGER.debug( + 'Creating new Processor for event ' + eventType + 'with key ' + cacheKey + ) + processorInstances.set(cacheKey, new ProcessorClass(chainId, config)) + } else { + INDEXER_LOGGER.debug('Reusing cached processor for key ' + cacheKey) } - return processorInstances.get(cacheKey) } @@ -59,11 +72,12 @@ export const processChunkLogs = async ( logs: readonly ethers.Log[], signer: Signer, provider: FallbackProvider, - chainId: number + chainId: number, + config: OceanNodeConfig ): Promise => { const storeEvents: BlocksEvents = {} if (logs.length > 0) { - const { allowedValidators, allowedValidatorsList } = await getConfiguration() // getAllowedValidators() + const { allowedValidators, allowedValidatorsList } = config // getAllowedValidators() const checkMetadataValidated = allowedValidators.length > 0 || (allowedValidatorsList && Object.keys(allowedValidatorsList).length > 0) @@ -163,7 +177,7 @@ export const processChunkLogs = async ( if (event.type === EVENTS.TOKEN_URI_UPDATE) { storeEvents[event.type] = 'TOKEN_URI_UPDATE' } else { - const processor = getEventProcessor(event.type, chainId) + const processor = getEventProcessor(event.type, chainId, config) storeEvents[event.type] = await processor.processEvent( log, chainId, @@ -186,12 +200,13 @@ export const processBlocks = async ( provider: FallbackProvider, network: number, lastIndexedBlock: number, - count: number + count: number, + config: OceanNodeConfig ): Promise => { try { const events: any[] | BlocksEvents = blockLogs && blockLogs.length > 0 - ? await processChunkLogs(blockLogs, signer, provider, network) + ? 
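// --- Editor's sketch (not part of the patch): the config-aware event-processor cache ---
// Processors are cached per `${eventType}-${chainId}` and built with the injected
// OceanNodeConfig; clearEventProcessorCache() (called from OceanIndexer's constructor, and
// usable in tests) drops stale instances so they are recreated with fresh config.
import { ethers, Signer, FallbackProvider } from 'ethers'
import { clearEventProcessorCache, processChunkLogs } from './processor.js'  // path assumed

async function reindexChunk(
  logs: readonly ethers.Log[],
  signer: Signer,
  provider: FallbackProvider,
  chainId: number,
  config: OceanNodeConfig                        // OceanNodeConfig imported as in this diff
) {
  clearEventProcessorCache()                     // e.g. after the node's config changed
  return processChunkLogs(logs, signer, provider, chainId, config)
}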
await processChunkLogs(blockLogs, signer, provider, network, config) : [] return { lastBlock: lastIndexedBlock + count, diff --git a/src/components/Indexer/processors/BaseProcessor.ts b/src/components/Indexer/processors/BaseProcessor.ts index 32f0fcc57..95fa4439a 100644 --- a/src/components/Indexer/processors/BaseProcessor.ts +++ b/src/components/Indexer/processors/BaseProcessor.ts @@ -18,7 +18,6 @@ import { OceanNode } from '../../../OceanNode.js' import { EVENT_HASHES, PROTOCOL_COMMANDS } from '../../../utils/constants.js' import { timestampToDateTime } from '../../../utils/conversions.js' import { create256Hash } from '../../../utils/crypt.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { URLUtils } from '../../../utils/url.js' @@ -28,14 +27,26 @@ import { toString as uint8ArrayToString } from 'uint8arrays/to-string' import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' with { type: 'json' } import { fetchTransactionReceipt } from '../../core/utils/validateOrders.js' import { withRetrial } from '../utils.js' -import { createHash } from 'crypto' +import { OceanNodeConfig } from '../../../@types/OceanNode.js' +import { Database } from '../../../components/database/index.js' import { AbstractDdoDatabase } from '../../database/BaseDatabase.js' +import { createHash } from 'crypto' export abstract class BaseEventProcessor { protected networkId: number + private config: OceanNodeConfig - constructor(chainId: number) { + constructor(chainId: number, config: OceanNodeConfig) { this.networkId = chainId + this.config = config + } + + getConfig(): OceanNodeConfig { + return this.config + } + + async getDatabase(): Promise { + return await OceanNode.getInstance().getDatabase() } protected isValidDtAddressFromServices(services: any[]): boolean { @@ -157,8 +168,9 @@ export abstract class BaseEventProcessor { } protected async createOrUpdateDDO(ddo: VersionedDDO, method: string): Promise { + const db = await OceanNode.getInstance().getDatabase() try { - const { ddo: ddoDatabase, ddoState } = await getDatabase() + const { ddo: ddoDatabase, ddoState } = db if (ddo instanceof DeprecatedDDO) { const { id, nftAddress } = ddo.getDDOFields() await Promise.all([ddoDatabase.delete(id), ddoState.delete(id)]) @@ -181,7 +193,7 @@ export abstract class BaseEventProcessor { ) return saveDDO } catch (err) { - const { ddoState } = await getDatabase() + const { ddoState } = db const { id, nftAddress } = ddo.getDDOFields() const tx = ddo instanceof DeprecatedDDO diff --git a/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts b/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts index 2ade38b8c..58ea9bf34 100644 --- a/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts +++ b/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -48,7 +47,7 @@ export class DispenserActivatedEventProcessor extends BaseEventProcessor { const 
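// --- Editor's sketch (not part of the patch): what processors inherit from BaseEventProcessor ---
// Every concrete processor is now built with (chainId, config) and reaches the database
// through the node singleton, so the module-level getDatabase()/getConfiguration() imports
// removed in the hunks below are no longer needed. Both accessors are public:
async function inspectProcessor(processor: BaseEventProcessor) {
  const { allowedValidators } = processor.getConfig()        // injected OceanNodeConfig
  const { ddo, ddoState } = await processor.getDatabase()    // resolved via OceanNode
  return { allowedValidators, ddo, ddoState }
}
// e.g. inspectProcessor(new MetadataEventProcessor(8996, config))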
nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) try { - const { ddo: ddoDatabase } = await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( diff --git a/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts b/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts index a44123ef1..5cd2d9ccc 100644 --- a/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts +++ b/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts @@ -1,7 +1,7 @@ import { DDOManager, PriceType } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' + import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -48,7 +48,7 @@ export class DispenserCreatedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) try { - const { ddo: ddoDatabase } = await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( diff --git a/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts b/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts index 38e056edb..c0d96d5a9 100644 --- a/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts +++ b/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -48,7 +47,7 @@ export class DispenserDeactivatedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) try { - const { ddo: ddoDatabase } = await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( diff --git a/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts b/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts index f036caf65..b7a1548a2 100644 --- a/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts +++ b/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -58,7 +57,7 @@ export class ExchangeActivatedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) - const { ddo: 
ddoDatabase } = await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( diff --git a/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts b/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts index 9200ca897..6a4365093 100644 --- a/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts +++ b/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -54,7 +53,7 @@ export class ExchangeCreatedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) - const { ddo: ddoDatabase } = await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( diff --git a/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts b/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts index d5f02c15c..f3aa8e57d 100644 --- a/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts +++ b/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -59,8 +58,8 @@ export class ExchangeDeactivatedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) try { - const { ddo: ddoDatabase } = await getDatabase() - const ddo = await ddoDatabase.retrieve(did) + const { ddo: ddoDatabase } = await this.getDatabase() + const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( `Detected ExchangeDeactivated changed for ${did}, but it does not exists.` diff --git a/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts b/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts index 975fb6561..6e2df9eb0 100644 --- a/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts +++ b/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts @@ -1,7 +1,7 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' + import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -54,7 +54,7 @@ export class ExchangeRateChangedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) - const { ddo: ddoDatabase } = 
await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( diff --git a/src/components/Indexer/processors/MetadataEventProcessor.ts b/src/components/Indexer/processors/MetadataEventProcessor.ts index c8a690c1d..82b6162f1 100644 --- a/src/components/Indexer/processors/MetadataEventProcessor.ts +++ b/src/components/Indexer/processors/MetadataEventProcessor.ts @@ -6,9 +6,9 @@ import { MetadataStates } from '../../../utils/constants.js' import { deleteIndexedMetadataIfExists } from '../../../utils/asset.js' -import { getConfiguration } from '../../../utils/config.js' + import { checkCredentialOnAccessList } from '../../../utils/credentials.js' -import { getDatabase } from '../../../utils/database.js' + import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { asyncCallWithTimeout, streamToString } from '../../../utils/util.js' @@ -17,9 +17,10 @@ import { wasNFTDeployedByOurFactory, getPricingStatsForDddo, getDid } from '../u import { BaseEventProcessor } from './BaseProcessor.js' import ERC721Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC721Template.sol/ERC721Template.json' with { type: 'json' } import { Purgatory } from '../purgatory.js' -import { isRemoteDDO } from '../../core/utils/validateDdoHandler.js' import { Storage } from '../../storage/index.js' import { Readable } from 'stream' +import { getConfiguration } from '../../../utils/config.js' +import { isRemoteDDO } from '../../core/utils/validateDdoHandler.js' export class MetadataEventProcessor extends BaseEventProcessor { async processEvent( @@ -31,7 +32,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { ): Promise { let did = 'did:op' try { - const { ddo: ddoDatabase, ddoState } = await getDatabase() + const { ddo: ddoDatabase, ddoState } = await this.getDatabase() const wasDeployedByUs = await wasNFTDeployedByOurFactory( chainId, signer, @@ -75,8 +76,8 @@ export class MetadataEventProcessor extends BaseEventProcessor { `Delete DDO because Metadata state is ${metadataState}`, true ) - const { ddo: ddoDatabase } = await getDatabase() - const ddo = await ddoDatabase.retrieve(did) + const { ddo: ddoDatabase } = await this.getDatabase() + const ddo = await this.getDDO(ddoDatabase, event.address, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( `Detected MetadataState changed for ${did}, but it does not exists.` @@ -111,7 +112,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { return savedDDO } - const decryptedDDO = await this.decryptDDO( + const decryptDDO = await this.decryptDDO( decodedEventData.args[2], flag, owner, @@ -121,9 +122,9 @@ export class MetadataEventProcessor extends BaseEventProcessor { metadataHash, metadata ) - let ddo = await this.processDDO(decryptedDDO) + let ddo = await this.processDDO(decryptDDO) if ( - !isRemoteDDO(decryptedDDO) && + !isRemoteDDO(decryptDDO) && parseInt(flag) !== 2 && !this.checkDdoHash(ddo, metadataHash) ) { @@ -158,14 +159,8 @@ export class MetadataEventProcessor extends BaseEventProcessor { ) return } - // For remote DDOs, the on-chain hash covers the remote pointer payload, - // not the fetched document body, so only inline unencrypted DDOs can be - // revalidated here against metadataHash. 
- if ( - !isRemoteDDO(decryptedDDO) && - (parseInt(flag) & 2) === 0 && - !this.checkDdoHash(updatedDdo, metadataHash) - ) { + // for unencrypted DDOs + if ((parseInt(flag) & 2) === 0 && !this.checkDdoHash(updatedDdo, metadataHash)) { INDEXER_LOGGER.error('Unencrypted DDO hash does not match metadata hash.') await ddoState.update( this.networkId, @@ -179,12 +174,11 @@ export class MetadataEventProcessor extends BaseEventProcessor { } // check authorized publishers - const { authorizedPublishers, authorizedPublishersList } = await getConfiguration() + const { authorizedPublishers, authorizedPublishersList } = this.getConfig() if (authorizedPublishers.length > 0) { - // if is not there, do not index - const authorized: string[] = authorizedPublishers.filter((address) => - // do a case insensitive search - address.toLowerCase().includes(owner.toLowerCase()) + const ownerNormalized = getAddress(String(owner)) + const authorized: string[] = authorizedPublishers.filter( + (address) => getAddress(address).toLowerCase() === ownerNormalized.toLowerCase() ) if (!authorized.length) { INDEXER_LOGGER.error( @@ -246,6 +240,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { if (previousDdo) { previousDdoInstance = DDOManager.getDDOClass(previousDdo) } + if (eventName === EVENTS.METADATA_CREATED) { if ( previousDdoInstance && @@ -318,6 +313,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { } const from = decodedEventData.args[0].toString() let ddoUpdatedWithPricing + // we need to store the event data (either metadata created or update and is updatable) if ( [EVENTS.METADATA_CREATED, EVENTS.METADATA_UPDATED].includes(eventName) && @@ -388,21 +384,21 @@ export class MetadataEventProcessor extends BaseEventProcessor { ddoUpdatedWithPricing = ddoWithPricing } // always call, but only create instance once - const purgatory = await Purgatory.getInstance() + const purgatory = Purgatory.getInstance(this.getConfig()) // if purgatory is disabled just return false - const state = await this.getPurgatoryState(ddo, from, purgatory) - - ddoUpdatedWithPricing.updateFields({ - indexedMetadata: { purgatory: { state } } - }) - if (state === false) { + const updatedDDO = await this.updatePurgatoryStateDdo( + ddoUpdatedWithPricing, + from, + purgatory + ) + if (updatedDDO.getAssetFields().indexedMetadata.purgatory.state === false) { // TODO: insert in a different collection for purgatory DDOs const saveDDO = await this.createOrUpdateDDO(ddoUpdatedWithPricing, eventName) INDEXER_LOGGER.logMessage(`saved DDO: ${JSON.stringify(saveDDO)}`) return saveDDO } } catch (error) { - const { ddoState } = await getDatabase() + const { ddoState } = await this.getDatabase() await ddoState.update( this.networkId, did, @@ -419,45 +415,35 @@ export class MetadataEventProcessor extends BaseEventProcessor { } } - async getPurgatoryState( - ddo: any, - owner: string, - purgatory: Purgatory - ): Promise { - if (purgatory.isEnabled()) { - const state: boolean = - (await purgatory.isBannedAsset(ddo.id)) || - (await purgatory.isBannedAccount(owner)) - return state - } - return false - } - async updatePurgatoryStateDdo( ddo: VersionedDDO, owner: string, purgatory: Purgatory - ): Promise> { + ): Promise { if (!purgatory.isEnabled()) { - return ddo.updateFields({ + ddo.updateFields({ indexedMetadata: { purgatory: { state: false } } }) + + return ddo } const state: boolean = (await purgatory.isBannedAsset(ddo.getDid())) || (await purgatory.isBannedAccount(owner)) - return ddo.updateFields({ + ddo.updateFields({ 
indexedMetadata: { purgatory: { state } } }) + + return ddo } isUpdateable( diff --git a/src/components/Indexer/processors/MetadataStateEventProcessor.ts b/src/components/Indexer/processors/MetadataStateEventProcessor.ts index 70b27d929..9265f06bd 100644 --- a/src/components/Indexer/processors/MetadataStateEventProcessor.ts +++ b/src/components/Indexer/processors/MetadataStateEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider } from 'ethers' import { EVENTS, MetadataStates } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { BaseEventProcessor } from './BaseProcessor.js' @@ -31,8 +30,9 @@ export class MetadataStateEventProcessor extends BaseEventProcessor { const did = getDid(event.address, chainId) try { - const { ddo: ddoDatabase } = await getDatabase() + const { ddo: ddoDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, event.address, chainId) + if (!ddo) { INDEXER_LOGGER.logMessage( `Detected MetadataState changed for ${did}, but it does not exists.` diff --git a/src/components/Indexer/processors/OrderReusedEventProcessor.ts b/src/components/Indexer/processors/OrderReusedEventProcessor.ts index 9087cd54e..b71f5badd 100644 --- a/src/components/Indexer/processors/OrderReusedEventProcessor.ts +++ b/src/components/Indexer/processors/OrderReusedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider } from 'ethers' import { EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { @@ -36,7 +35,7 @@ export class OrderReusedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) try { - const { ddo: ddoDatabase, order: orderDatabase } = await getDatabase() + const { ddo: ddoDatabase, order: orderDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( @@ -44,7 +43,15 @@ export class OrderReusedEventProcessor extends BaseEventProcessor { ) return } + const existingOrder = await orderDatabase.retrieve(event.transactionHash) + if (existingOrder) { + INDEXER_LOGGER.logMessage( + `OrderReused already processed for tx ${event.transactionHash}, skipping duplicate` + ) + return ddo + } const ddoInstance = DDOManager.getDDOClass(ddo) + const storedDid = ddoInstance.getDid() if (!ddoInstance.getAssetFields().indexedMetadata) { ddoInstance.updateFields({ indexedMetadata: {} }) } @@ -102,7 +109,7 @@ export class OrderReusedEventProcessor extends BaseEventProcessor { payer, event.address, nftAddress, - did, + storedDid, startOrderId ) } catch (error) { diff --git a/src/components/Indexer/processors/OrderStartedEventProcessor.ts b/src/components/Indexer/processors/OrderStartedEventProcessor.ts index 110b3cbac..c96db6410 100644 --- a/src/components/Indexer/processors/OrderStartedEventProcessor.ts +++ b/src/components/Indexer/processors/OrderStartedEventProcessor.ts @@ -1,7 +1,6 @@ import { DDOManager } from '@oceanprotocol/ddo-js' import { ethers, Signer, FallbackProvider } from 'ethers' import { 
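// --- Editor's sketch (not part of the patch): the idempotency guard added to order events ---
// OrderReused (above) and OrderStarted (below) now look the order up by transaction hash
// and bail out early, so re-processing the same log cannot inflate the stats counters.
async function alreadyProcessed(
  orderDatabase: { retrieve(txId: string): Promise<unknown> },   // structural type, assumed
  txHash: string
): Promise<boolean> {
  const existingOrder = await orderDatabase.retrieve(txHash)
  return Boolean(existingOrder)
}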
EVENTS } from '../../../utils/constants.js' -import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { getDtContract, getDid, getPricesByDt } from '../utils.js' @@ -34,7 +33,7 @@ export class OrderStartedEventProcessor extends BaseEventProcessor { const nftAddress = await datatokenContract.getERC721Address() const did = getDid(nftAddress, chainId) try { - const { ddo: ddoDatabase, order: orderDatabase } = await getDatabase() + const { ddo: ddoDatabase, order: orderDatabase } = await this.getDatabase() const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) if (!ddo) { INDEXER_LOGGER.logMessage( @@ -42,28 +41,38 @@ export class OrderStartedEventProcessor extends BaseEventProcessor { ) return } + const existingOrder = await orderDatabase.retrieve(event.transactionHash) + if (existingOrder) { + INDEXER_LOGGER.logMessage( + `OrderStarted already processed for tx ${event.transactionHash}, skipping duplicate` + ) + return ddo + } const ddoInstance = DDOManager.getDDOClass(ddo) - if (!ddoInstance.getDDOData().indexedMetadata) { + const storedDid = ddoInstance.getDid() + if (!ddoInstance.getAssetFields().indexedMetadata) { ddoInstance.updateFields({ indexedMetadata: {} }) } - if (!Array.isArray(ddoInstance.getDDOData().indexedMetadata.stats)) { + + if (!Array.isArray(ddoInstance.getAssetFields().indexedMetadata.stats)) { ddoInstance.updateFields({ indexedMetadata: { stats: [] } }) } + if ( - ddoInstance.getDDOData().indexedMetadata.stats.length !== 0 && + ddoInstance.getAssetFields().indexedMetadata.stats.length !== 0 && ddoInstance .getDDOFields() .services[serviceIndex].datatokenAddress?.toLowerCase() === event.address?.toLowerCase() ) { - for (const stat of ddoInstance.getDDOData().indexedMetadata.stats) { + for (const stat of ddoInstance.getAssetFields().indexedMetadata.stats) { if (stat.datatokenAddress.toLowerCase() === event.address?.toLowerCase()) { stat.orders += 1 break } } - } else if (ddoInstance.getDDOData().indexedMetadata.stats.length === 0) { - const existingStats = ddoInstance.getDDOData().indexedMetadata.stats + } else if (ddoInstance.getAssetFields().indexedMetadata.stats.length === 0) { + const existingStats = ddoInstance.getAssetFields().indexedMetadata.stats existingStats.push({ datatokenAddress: event.address, name: await datatokenContract.name(), @@ -83,10 +92,10 @@ export class OrderStartedEventProcessor extends BaseEventProcessor { payer, ddoInstance.getDDOFields().services[serviceIndex].datatokenAddress, nftAddress, - did + storedDid ) INDEXER_LOGGER.logMessage( - `Found did ${did} for order starting on network ${chainId}` + `Found did ${storedDid} for order starting on network ${chainId}` ) const savedDDO = await this.createOrUpdateDDO(ddoInstance, EVENTS.ORDER_STARTED) return savedDDO diff --git a/src/components/Indexer/processors/index.ts b/src/components/Indexer/processors/index.ts index dc6501a82..912ab6aa2 100644 --- a/src/components/Indexer/processors/index.ts +++ b/src/components/Indexer/processors/index.ts @@ -1,3 +1,4 @@ +import { OceanNodeConfig } from '../../../@types' import { BaseEventProcessor } from './BaseProcessor' export * from './DispenserActivatedEventProcessor.js' @@ -13,4 +14,7 @@ export * from './OrderReusedEventProcessor.js' export * from './OrderStartedEventProcessor.js' export * from './BaseProcessor.js' -export type ProcessorConstructor = new (chainId: number) => BaseEventProcessor +export type 
ProcessorConstructor = new ( + chainId: number, + config: OceanNodeConfig +) => BaseEventProcessor diff --git a/src/components/Indexer/purgatory.ts b/src/components/Indexer/purgatory.ts index c5f607630..9ee8c6384 100644 --- a/src/components/Indexer/purgatory.ts +++ b/src/components/Indexer/purgatory.ts @@ -3,7 +3,7 @@ import { PurgatoryAccounts, PurgatoryAssets } from '../../@types/Purgatory.js' import { INDEXER_LOGGER } from '../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../utils/logging/Logger.js' import { URLUtils } from '../../utils/url.js' -import { getConfiguration } from '../../utils/index.js' +import { OceanNodeConfig } from '../../@types/OceanNode.js' export class Purgatory { private bannedAccounts: Array @@ -165,9 +165,8 @@ export class Purgatory { return this.enabled } - static async getInstance(): Promise { + static getInstance(config: OceanNodeConfig): Purgatory { if (!Purgatory.instance) { - const config = await getConfiguration() Purgatory.instance = new Purgatory( config.accountPurgatoryUrl, config.assetPurgatoryUrl diff --git a/src/components/P2P/handleProtocolCommands.ts b/src/components/P2P/handleProtocolCommands.ts index f94f6a2f5..20ebd839f 100644 --- a/src/components/P2P/handleProtocolCommands.ts +++ b/src/components/P2P/handleProtocolCommands.ts @@ -7,7 +7,6 @@ import { Command } from '../../@types/commands.js' import { P2PCommandResponse } from '../../@types/OceanNode' import { GENERIC_EMOJIS, LOG_LEVELS_STR } from '../../utils/logging/Logger.js' import { BaseHandler } from '../core/handler/handler.js' -import { getConfiguration } from '../../utils/index.js' import { checkGlobalConnectionsRateLimit, checkRequestsRateLimit @@ -89,7 +88,7 @@ export async function handleProtocolCommands(stream: Stream, connection: Connect } // Rate limiting and deny list checks (after reading command) - const configuration = await getConfiguration() + const configuration = this.getConfig() const { denyList } = configuration if (denyList.peers.includes(remotePeer.toString())) { diff --git a/src/components/P2P/index.ts b/src/components/P2P/index.ts index ca66a3a25..018c80c82 100644 --- a/src/components/P2P/index.ts +++ b/src/components/P2P/index.ts @@ -140,6 +140,10 @@ export class OceanP2P extends EventEmitter { return this.coreHandlers } + getConfig() { + return this._config + } + async start(options: any = null) { this._topic = 'oceanprotocol' this._libp2p = await this.createNode(this._config) @@ -751,7 +755,7 @@ export class OceanP2P extends EventEmitter { isNaN(timeout) || timeout === 0 ? AbortSignal.timeout(5000) : AbortSignal.timeout(timeout), - useCache: true, + useCache: false, useNetwork: true }) return data @@ -823,7 +827,7 @@ export class OceanP2P extends EventEmitter { const multiaddrs = multiAddrs?.length ? 
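// --- Editor's sketch (not part of the patch): the now-synchronous Purgatory singleton ---
// getInstance() takes the config explicitly instead of awaiting getConfiguration(), so
// MetadataEventProcessor can fetch it with Purgatory.getInstance(this.getConfig()).
async function isInPurgatory(
  config: OceanNodeConfig,
  did: string,
  owner: string
): Promise<boolean> {
  const purgatory = Purgatory.getInstance(config)
  if (!purgatory.isEnabled()) return false
  return (await purgatory.isBannedAsset(did)) || (await purgatory.isBannedAccount(owner))
}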
multiAddrs.map((addr) => multiaddr(addr)) - : (await this.getPeerMultiaddrs(peerName)) || [] + : await this.getPeerMultiaddrs(peerName) if (multiaddrs.length < 1) { const error = `Cannot find any address to dial for peer: ${peerId}` diff --git a/src/components/c2d/compute_engine_base.ts b/src/components/c2d/compute_engine_base.ts index 96708ee45..1fb7bea4e 100644 --- a/src/components/c2d/compute_engine_base.ts +++ b/src/components/c2d/compute_engine_base.ts @@ -20,7 +20,11 @@ import { C2DClusterType } from '../../@types/C2D/C2D.js' import { C2DDatabase } from '../database/C2DDatabase.js' import { Escrow } from '../core/utils/escrow.js' import { KeyManager } from '../KeyManager/index.js' -import { dockerRegistryAuth, dockerRegistrysAuth } from '../../@types/OceanNode.js' +import { + dockerRegistryAuth, + dockerRegistrysAuth, + OceanNodeConfig +} from '../../@types/OceanNode.js' import { ValidateParams } from '../httpRoutes/validateCommands.js' import { EncryptMethod } from '../../@types/fileObject.js' import { CORE_LOGGER } from '../../utils/logging/common.js' @@ -31,19 +35,21 @@ export abstract class C2DEngine { public escrow: Escrow public keyManager: KeyManager public dockerRegistryAuths: dockerRegistrysAuth + public config: OceanNodeConfig public constructor( cluster: C2DClusterInfo, db: C2DDatabase, escrow: Escrow, keyManager: KeyManager, - dockerRegistryAuths: dockerRegistrysAuth + config: OceanNodeConfig ) { this.clusterConfig = cluster this.db = db this.escrow = escrow this.keyManager = keyManager - this.dockerRegistryAuths = dockerRegistryAuths + this.config = config + this.dockerRegistryAuths = config?.dockerRegistrysAuth } getKeyManager(): KeyManager { @@ -656,6 +662,10 @@ export abstract class C2DEngine { return null } + public getConfig(): OceanNodeConfig { + return this.config + } + public async checkEncryptedDockerRegistryAuth( encryptedDockerRegistryAuth: string ): Promise { diff --git a/src/components/c2d/compute_engine_docker.ts b/src/components/c2d/compute_engine_docker.ts index 4721d60b7..cb8bd86e5 100755 --- a/src/components/c2d/compute_engine_docker.ts +++ b/src/components/c2d/compute_engine_docker.ts @@ -25,11 +25,7 @@ import type { C2DEnvironmentConfig, ComputeResourcesPricingInfo } from '../../@types/C2D/C2D.js' -import { - BASE_CHAIN_ID, - getConfiguration, - USDC_TOKEN_ADDRESS_BASE -} from '../../utils/config.js' +import { BASE_CHAIN_ID, USDC_TOKEN_ADDRESS_BASE } from '../../utils/config.js' import { C2DEngine } from './compute_engine_base.js' import { C2DDatabase } from '../database/C2DDatabase.js' import { Escrow } from '../core/utils/escrow.js' @@ -61,7 +57,7 @@ import { decryptFilesObject, omitDBComputeFieldsFromComputeJob } from './index.j import { ValidateParams } from '../httpRoutes/validateCommands.js' import { Service } from '@oceanprotocol/ddo-js' import { getOceanTokenAddressForChain } from '../../utils/address.js' -import { dockerRegistrysAuth, dockerRegistryAuth } from '../../@types/OceanNode.js' +import { dockerRegistryAuth, OceanNodeConfig } from '../../@types/OceanNode.js' import { EncryptMethod } from '../../@types/fileObject.js' import { getAddress, ZeroAddress } from 'ethers' import { AccessList } from '../../@types/AccessList.js' @@ -91,16 +87,15 @@ export class C2DEngineDocker extends C2DEngine { private trivyCachePath: string private cpuAllocations: Map = new Map() private envCpuCoresMap: Map = new Map() - private enableNetwork: boolean public constructor( clusterConfig: C2DClusterInfo, db: C2DDatabase, escrow: Escrow, keyManager: KeyManager, 
- dockerRegistryAuths: dockerRegistrysAuth + config: OceanNodeConfig ) { - super(clusterConfig, db, escrow, keyManager, dockerRegistryAuths) + super(clusterConfig, db, escrow, keyManager, config) this.docker = null if (clusterConfig.connection.socketPath) { @@ -115,7 +110,7 @@ export class C2DEngineDocker extends C2DEngine { this.paymentClaimInterval = clusterConfig.connection.paymentClaimInterval || 3600 // 1 hour this.scanImages = clusterConfig.connection.scanImages || false // default is not to scan images for now, until it's prod ready this.scanImageDBUpdateInterval = clusterConfig.connection.scanImageDBUpdateInterval - this.enableNetwork = clusterConfig.connection.enableNetwork ?? false + if ( clusterConfig.connection.protocol && clusterConfig.connection.host && @@ -210,10 +205,8 @@ export class C2DEngineDocker extends C2DEngine { } const gpuResources: ComputeResource[] = Array.from(gpuMap.values()) - const benchmarkPrices: ComputeResourcesPricingInfo[] = gpuResources.map((gpu) => ({ - id: gpu.id, - price: 1 - })) + const benchmarkPrices: ComputeResourcesPricingInfo[] = + gpuResources.length > 0 ? [{ id: gpuResources[0].id, price: 1 }] : [] const benchmarkFees: ComputeEnvFeesStructure = { [BASE_CHAIN_ID]: [{ feeToken: USDC_TOKEN_ADDRESS_BASE, prices: benchmarkPrices }] @@ -233,17 +226,18 @@ export class C2DEngineDocker extends C2DEngine { access: { addresses: [], accessLists: [ - { BASE_CHAIN_ID: [getAddress('0xcb7Db55Ca9Aa9C3b25F5Bc266da63317fa02086a')] } + { [BASE_CHAIN_ID]: [getAddress('0xcb7Db55Ca9Aa9C3b25F5Bc266da63317fa02086a')] } ] }, - fees: benchmarkFees + fees: benchmarkFees, + enableNetwork: true } envConfig.environments.push(benchmarkEnv) } public override async start() { - const config = await getConfiguration() + const config = this.getConfig() const envConfig = await this.getC2DConfig().connection if (!envConfig?.environments?.length) { CORE_LOGGER.warn( @@ -367,7 +361,8 @@ export class C2DEngineDocker extends C2DEngine { queMaxWaitTime: 0, queMaxWaitTimeFree: 0, runMaxWaitTime: 0, - runMaxWaitTimeFree: 0 + runMaxWaitTimeFree: 0, + enableNetwork: envDef.enableNetwork } if (envDef.storageExpiry !== undefined) env.storageExpiry = envDef.storageExpiry @@ -1257,9 +1252,10 @@ export class C2DEngineDocker extends C2DEngine { } } if ( + isFree && algorithm.meta.container && algorithm.meta.container.dockerfile && - !env.free.allowImageBuild + !env.free?.allowImageBuild ) { throw new Error(`Building image is not allowed for free jobs`) } @@ -1836,7 +1832,7 @@ export class C2DEngineDocker extends C2DEngine { } ] } - if (!this.enableNetwork) { + if (!env.enableNetwork) { hostConfig.NetworkMode = 'none' // no network inside the container } // disk @@ -2156,7 +2152,7 @@ export class C2DEngineDocker extends C2DEngine { const output = JSON.parse(decryptedOutput.toString()) as ComputeOutput const storage = Storage.getStorageClass( output.remoteStorage, - await getConfiguration() + this.getConfig() ) if ( @@ -2773,7 +2769,7 @@ export class C2DEngineDocker extends C2DEngine { private async uploadData( job: DBComputeJob ): Promise<{ status: C2DStatusNumber; statusText: C2DStatusText }> { - const config = await getConfiguration() + const config = this.getConfig() const ret = { status: C2DStatusNumber.RunningAlgorithm, statusText: C2DStatusText.RunningAlgorithm diff --git a/src/components/c2d/compute_engines.ts b/src/components/c2d/compute_engines.ts index f2da957a0..beab7002a 100644 --- a/src/components/c2d/compute_engines.ts +++ b/src/components/c2d/compute_engines.ts @@ -59,9 +59,7 @@ 
export class C2DEngines { cfg.connection.scanImageDBUpdateInterval = null } } - this.engines.push( - new C2DEngineDocker(cfg, db, escrow, keyManager, config.dockerRegistrysAuth) - ) + this.engines.push(new C2DEngineDocker(cfg, db, escrow, keyManager, config)) } } } diff --git a/src/components/core/admin/IndexingThreadHandler.ts b/src/components/core/admin/IndexingThreadHandler.ts index 3e8bdb13f..590a6a488 100644 --- a/src/components/core/admin/IndexingThreadHandler.ts +++ b/src/components/core/admin/IndexingThreadHandler.ts @@ -9,7 +9,6 @@ import { ValidateParams } from '../../httpRoutes/validateCommands.js' import { AdminCommandHandler } from './adminHandler.js' -import { checkSupportedChainId } from '../../../utils/blockchain.js' export class IndexingThreadHandler extends AdminCommandHandler { async validateAdminCommand(command: StartStopIndexingCommand): Promise { @@ -18,7 +17,7 @@ export class IndexingThreadHandler extends AdminCommandHandler { ![IndexingCommand.START_THREAD, IndexingCommand.STOP_THREAD].includes( command.action ) || - (command.chainId && !checkSupportedChainId(command.chainId)) + (command.chainId && !this.getOceanNode().checkSupportedChainId(command.chainId)) ) { return buildInvalidRequestMessage( `Missing or invalid "action" and/or "chainId" fields for command: "${command}".` diff --git a/src/components/core/admin/adminHandler.ts b/src/components/core/admin/adminHandler.ts index e335d2c99..ec2502fa2 100644 --- a/src/components/core/admin/adminHandler.ts +++ b/src/components/core/admin/adminHandler.ts @@ -6,14 +6,12 @@ import { buildRateLimitReachedResponse, buildInvalidParametersResponse } from '../../httpRoutes/validateCommands.js' -import { getAdminAddresses } from '../../../utils/auth.js' import { checkSingleCredential } from '../../../utils/credentials.js' import { CREDENTIALS_TYPES } from '../../../@types/DDO/Credentials.js' import { BaseHandler } from '../handler/handler.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' import { ReadableString } from '../../P2P/handleProtocolCommands.js' import { CommonValidation } from '../../../utils/validators.js' -import { getConfiguration } from '../../../utils/index.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' export abstract class AdminCommandHandler @@ -67,8 +65,7 @@ export abstract class AdminCommandHandler } } try { - const config = await getConfiguration() - const allowedAdmins = await getAdminAddresses(config) + const allowedAdmins = oceanNode.getAdminAddresses() const { addresses, accessLists } = allowedAdmins let allowed = await checkSingleCredential( diff --git a/src/components/core/admin/collectFeesHandler.ts b/src/components/core/admin/collectFeesHandler.ts index e101eab66..10a7dd504 100644 --- a/src/components/core/admin/collectFeesHandler.ts +++ b/src/components/core/admin/collectFeesHandler.ts @@ -11,7 +11,6 @@ import { buildInvalidRequestMessage, validateCommandParameters } from '../../httpRoutes/validateCommands.js' -import { getConfiguration, checkSupportedChainId } from '../../../utils/index.js' import { parseUnits, Contract, ZeroAddress, isAddress } from 'ethers' import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20Template.sol/ERC20Template.json' with { type: 'json' } import { CORE_LOGGER } from '../../../utils/logging/common.js' @@ -44,7 +43,7 @@ export class CollectFeesHandler extends AdminCommandHandler { if (!validation.valid) { return buildInvalidParametersResponse(validation) } - const config = await getConfiguration() + 
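// --- Editor's sketch (not part of the patch): Docker C2D engine built from the full config ---
// The engine constructor now takes OceanNodeConfig; registry auths come from
// config.dockerRegistrysAuth inside the base class, and getConfig() replaces the old
// getConfiguration() calls in start()/uploadData(). Project imports omitted.
function buildDockerEngine(
  cfg: C2DClusterInfo,
  db: C2DDatabase,
  escrow: Escrow,
  keyManager: KeyManager,
  config: OceanNodeConfig
): C2DEngineDocker {
  return new C2DEngineDocker(cfg, db, escrow, keyManager, config)
}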
const config = this.getOceanNode().getConfig() const keyManager = this.nodeInstance.getKeyManager() if (task.node && task.node !== keyManager.getPeerIdString()) { const msg: string = `Cannot run this command ${JSON.stringify( @@ -53,7 +52,7 @@ export class CollectFeesHandler extends AdminCommandHandler { CORE_LOGGER.error(msg) return buildErrorResponse(msg) } - const checkChainId = await checkSupportedChainId(task.chainId) + const checkChainId = this.getOceanNode().checkSupportedChainId(task.chainId) if (!checkChainId.validation) { return buildErrorResponse( `Chain ID ${task.chainId} is not supported in the node's config` diff --git a/src/components/core/admin/getLogsHandler.ts b/src/components/core/admin/getLogsHandler.ts index 7cb7f8a4a..9240552c3 100644 --- a/src/components/core/admin/getLogsHandler.ts +++ b/src/components/core/admin/getLogsHandler.ts @@ -20,7 +20,9 @@ export class GetLogsHandler extends AdminCommandHandler { } try { if (task.logId) { - const logs = await this.getOceanNode().getDatabase().logs.retrieveLog(task.logId) + const logs = await ( + await this.getOceanNode().getDatabase() + ).logs.retrieveLog(task.logId) if (logs) { return { status: { httpStatus: 200 }, @@ -40,9 +42,9 @@ export class GetLogsHandler extends AdminCommandHandler { const maxLogs = Math.min(task.maxLogs ?? 100, 1000) const { moduleName, level, page } = task - const logs = await this.getOceanNode() - .getDatabase() - .logs.retrieveMultipleLogs(startTime, endTime, maxLogs, moduleName, level, page) + const logs = await ( + await this.getOceanNode().getDatabase() + ).logs.retrieveMultipleLogs(startTime, endTime, maxLogs, moduleName, level, page) if (!logs || logs.length === 0) { const fileLogs = await readExceptionLogFiles( diff --git a/src/components/core/admin/pushConfigHandler.ts b/src/components/core/admin/pushConfigHandler.ts index 7a4839cb4..cb20c7e70 100644 --- a/src/components/core/admin/pushConfigHandler.ts +++ b/src/components/core/admin/pushConfigHandler.ts @@ -25,7 +25,7 @@ export class PushConfigHandler extends AdminCommandHandler { // Pre-validate the config fields using Zod schema try { - const currentConfig = await getConfiguration() + const currentConfig = this.getOceanNode().getConfig() const mergedConfig = { ...currentConfig, ...command.config } OceanNodeConfigSchema.parse(mergedConfig) diff --git a/src/components/core/admin/reindexChainHandler.ts b/src/components/core/admin/reindexChainHandler.ts index 10ac997bc..27ee87e46 100644 --- a/src/components/core/admin/reindexChainHandler.ts +++ b/src/components/core/admin/reindexChainHandler.ts @@ -9,7 +9,7 @@ import { } from '../../httpRoutes/validateCommands.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' -import { checkSupportedChainId } from '../../../utils/blockchain.js' + import { ReadableString } from '../../P2P/handleProtocolCommands.js' export class ReindexChainHandler extends AdminCommandHandler { @@ -28,7 +28,7 @@ export class ReindexChainHandler extends AdminCommandHandler { return buildInvalidParametersResponse(validation) } CORE_LOGGER.logMessage(`Reindexing chain command called`) - const checkChainId = await checkSupportedChainId(task.chainId) + const checkChainId = this.getOceanNode().checkSupportedChainId(task.chainId) if (!checkChainId.validation) { return buildErrorResponse( `Chain ID ${task.chainId} is not supported in the node's config` diff --git a/src/components/core/admin/reindexTxHandler.ts 
b/src/components/core/admin/reindexTxHandler.ts index f9c746da0..a040f3b32 100644 --- a/src/components/core/admin/reindexTxHandler.ts +++ b/src/components/core/admin/reindexTxHandler.ts @@ -10,7 +10,6 @@ import { AdminReindexTxCommand } from '../../../@types/commands.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { ReadableString } from '../../P2P/handleProtocolCommands.js' -import { checkSupportedChainId } from '../../../utils/blockchain.js' export class ReindexTxHandler extends AdminCommandHandler { async validate(command: AdminReindexTxCommand): Promise { @@ -31,7 +30,7 @@ export class ReindexTxHandler extends AdminCommandHandler { return buildInvalidParametersResponse(validation) } CORE_LOGGER.logMessage(`Reindexing tx...`) - const checkChainId = await checkSupportedChainId(task.chainId) + const checkChainId = this.getOceanNode().checkSupportedChainId(task.chainId) if (!checkChainId.validation) { return buildErrorResponse( `Chain ID ${task.chainId} is not supported in the node's config` diff --git a/src/components/core/compute/environments.ts b/src/components/core/compute/environments.ts index 95e8b0b6d..d82989c64 100644 --- a/src/components/core/compute/environments.ts +++ b/src/components/core/compute/environments.ts @@ -26,6 +26,15 @@ export class ComputeGetEnvironmentsHandler extends CommandHandler { } try { const computeEngines = this.getOceanNode().getC2DEngines() + if (!computeEngines) { + return { + stream: null, + status: { + httpStatus: 503, + error: 'Compute engines are not configured on this node' + } + } + } const result = await computeEngines.fetchEnvironments(task.chainId) CORE_LOGGER.logMessage( diff --git a/src/components/core/compute/initialize.ts b/src/components/core/compute/initialize.ts index d7a498abc..2d31192d8 100644 --- a/src/components/core/compute/initialize.ts +++ b/src/components/core/compute/initialize.ts @@ -12,7 +12,7 @@ import { isDataTokenTemplate4, isERC20Template4Active } from '../../../utils/asset.js' -import { verifyProviderFees, createProviderFee } from '../utils/feesHandler.js' +import { ProviderFees } from '../utils/feesHandler.js' import { validateOrderTransaction } from '../utils/validateOrders.js' import { EncryptMethod } from '../../../@types/fileObject.js' @@ -23,7 +23,7 @@ import { validateCommandParameters } from '../../httpRoutes/validateCommands.js' import { isAddress } from 'ethers' -import { getConfiguration, isPolicyServerConfigured } from '../../../utils/index.js' +import { isPolicyServerConfigured } from '../../../utils/index.js' import { sanitizeServiceFiles } from '../../../utils/util.js' import { FindDdoHandler } from '../handler/ddoHandler.js' import { isOrderingAllowedForAsset } from '../handler/downloadHandler.js' @@ -86,7 +86,7 @@ export class ComputeInitializeHandler extends CommandHandler { let resourcesNeeded try { const node = this.getOceanNode() - const config = await getConfiguration() + const config = node.getConfig() try { // split compute env (which is already in hash-envId format) and get the hash // then get env which might contain dashes as well @@ -219,7 +219,7 @@ export class ComputeInitializeHandler extends CommandHandler { const isValidOutput = await validateOutput( node, task.output, - await getConfiguration() + this.getOceanNode().getConfig() ) if (isValidOutput.status.httpStatus !== 200) { return isValidOutput @@ -301,9 +301,10 @@ export class ComputeInitializeHandler extends CommandHandler { } } } - const config 
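// --- Editor's sketch (not part of the patch): chain-id validation moved onto OceanNode ---
// Admin and compute handlers now call node.checkSupportedChainId() synchronously instead
// of the async helper from utils/blockchain.js; the result still carries a `validation` flag.
function ensureChainSupported(node: OceanNode, chainId: number): boolean {
  return node.checkSupportedChainId(chainId).validation
}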
= await getConfiguration() - const { chainId } = config.supportedNetworks[ddoChainId] const oceanNode = this.getOceanNode() + const config = oceanNode.getConfig() + const { chainId } = config.supportedNetworks[ddoChainId] + const blockchain = oceanNode.getBlockchain(chainId) if (!blockchain) { return { @@ -531,6 +532,7 @@ export class ComputeInitializeHandler extends CommandHandler { message: false } result.consumerAddress = env.consumerAddress + const fees = new ProviderFees(node) if ('transferTxId' in elem && elem.transferTxId) { // search for that compute env and see if it has access to dataset const paymentValidation = await validateOrderTransaction( @@ -546,7 +548,7 @@ export class ComputeInitializeHandler extends CommandHandler { if (paymentValidation.isValid === true) { // order is valid, so let's check providerFees result.validOrder = elem.transferTxId - validFee = await verifyProviderFees( + validFee = await fees.verifyProviderFees( elem.transferTxId, task.consumerAddress, provider, @@ -559,7 +561,11 @@ export class ComputeInitializeHandler extends CommandHandler { } if (validFee.isValid === false) { if (canDecrypt) { - result.providerFee = await createProviderFee(ddo, service, service.timeout) + result.providerFee = await fees.createProviderFee( + ddo, + service, + service.timeout + ) } else { // TO DO: Edge case when this asset is served by a remote provider. // We should connect to that provider and get the fee diff --git a/src/components/core/compute/startCompute.ts b/src/components/core/compute/startCompute.ts index b16efd916..ff68feaea 100644 --- a/src/components/core/compute/startCompute.ts +++ b/src/components/core/compute/startCompute.ts @@ -34,7 +34,7 @@ import { } from '../../../@types/C2D/C2D.js' // import { verifyProviderFees } from '../utils/feesHandler.js' import { validateOrderTransaction } from '../utils/validateOrders.js' -import { getConfiguration, isPolicyServerConfigured } from '../../../utils/index.js' +import { isPolicyServerConfigured } from '../../../utils/index.js' import { sanitizeServiceFiles } from '../../../utils/util.js' import { FindDdoHandler } from '../handler/ddoHandler.js' // import { ProviderFeeValidation } from '../../../@types/Fees.js' @@ -192,7 +192,7 @@ export class PaidComputeStartHandler extends CommonComputeHandler { } const { algorithm } = task - const config = await getConfiguration() + const config = node.getConfig() const accessGranted = await validateAccess( task.consumerAddress, @@ -283,9 +283,10 @@ export class PaidComputeStartHandler extends CommonComputeHandler { } } } - const config = await getConfiguration() - const { chainId } = config.supportedNetworks[ddoChainId] const oceanNode = this.getOceanNode() + const config = oceanNode.getConfig() + const { chainId } = config.supportedNetworks[ddoChainId] + const blockchain = oceanNode.getBlockchain(chainId) if (!blockchain) { return { @@ -604,11 +605,7 @@ export class PaidComputeStartHandler extends CommonComputeHandler { } } } - const isValidOutput = await validateOutput( - node, - task.output, - await getConfiguration() - ) + const isValidOutput = await validateOutput(node, task.output, node.getConfig()) if (isValidOutput.status.httpStatus !== 200) { return isValidOutput } @@ -763,11 +760,7 @@ export class FreeComputeStartHandler extends CommonComputeHandler { } } const node = this.getOceanNode() - const isValidOutput = await validateOutput( - node, - task.output, - await getConfiguration() - ) + const isValidOutput = await validateOutput(node, task.output, node.getConfig()) if 
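// --- Editor's sketch (not part of the patch): provider fees as a node-scoped service ---
// verifyProviderFees/createProviderFee are no longer free functions; they live on a
// ProviderFees instance constructed with the OceanNode, as the initialize handler above shows.
async function quoteProviderFee(node: OceanNode, ddo: any, service: Service) {
  const fees = new ProviderFees(node)
  // verifyProviderFees takes the transfer tx id, consumer address, provider, etc. (see above)
  return fees.createProviderFee(ddo, service, service.timeout)
}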
(isValidOutput.status.httpStatus !== 200) { return isValidOutput } @@ -818,9 +811,9 @@ export class FreeComputeStartHandler extends CommonComputeHandler { } } } - const config = await getConfiguration() - const { chainId } = config.supportedNetworks[ddoChainId] const oceanNode = this.getOceanNode() + const config = oceanNode.getConfig() + const { chainId } = config.supportedNetworks[ddoChainId] const blockchain = oceanNode.getBlockchain(chainId) if (!blockchain) { return { diff --git a/src/components/core/handler/authHandler.ts b/src/components/core/handler/authHandler.ts index 1ec6923c8..2a53a6aa1 100644 --- a/src/components/core/handler/authHandler.ts +++ b/src/components/core/handler/authHandler.ts @@ -32,7 +32,7 @@ export class CreateAuthTokenHandler extends CommandHandler { async handle(task: CreateAuthTokenCommand): Promise { const { address, nonce, signature } = task - const nonceDb = this.getOceanNode().getDatabase().nonce + const nonceDb = (await this.getOceanNode().getDatabase()).nonce const validationResponse = await this.verifyParamsAndRateLimits(task) if (this.shouldDenyTaskHandling(validationResponse)) { return validationResponse @@ -40,6 +40,7 @@ export class CreateAuthTokenHandler extends CommandHandler { try { const nonceCheckResult: NonceResponse = await checkNonce( + this.getOceanNode().getConfig(), nonceDb, address, parseInt(nonce), @@ -84,7 +85,7 @@ export class InvalidateAuthTokenHandler extends CommandHandler { async handle(task: InvalidateAuthTokenCommand): Promise { const { address, nonce, signature, token } = task - const nonceDb = this.getOceanNode().getDatabase().nonce + const nonceDb = (await this.getOceanNode().getDatabase()).nonce const validationResponse = await this.verifyParamsAndRateLimits(task) if (this.shouldDenyTaskHandling(validationResponse)) { return validationResponse @@ -92,6 +93,7 @@ export class InvalidateAuthTokenHandler extends CommandHandler { try { const isValid = await checkNonce( + this.getOceanNode().getConfig(), nonceDb, address, parseInt(nonce), diff --git a/src/components/core/handler/ddoHandler.ts b/src/components/core/handler/ddoHandler.ts index 7ef17b8f3..13f281219 100644 --- a/src/components/core/handler/ddoHandler.ts +++ b/src/components/core/handler/ddoHandler.ts @@ -18,12 +18,8 @@ import { ethers, isAddress } from 'ethers' import ERC721Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC721Template.sol/ERC721Template.json' with { type: 'json' } // import lzma from 'lzma-native' import lzmajs from 'lzma-purejs-requirejs' -import { getValidationSignature, isRemoteDDO } from '../utils/validateDdoHandler.js' -import { - getConfiguration, - hasP2PInterface, - isPolicyServerConfigured -} from '../../../utils/config.js' +import { isRemoteDDO } from '../utils/validateDdoHandler.js' +import { isPolicyServerConfigured } from '../../../utils/config.js' import { PolicyServer } from '../../policyServer/index.js' import { GetDdoCommand, @@ -93,7 +89,7 @@ export class DecryptDdoHandler extends CommandHandler { return validationResponse } const chainId = String(task.chainId) - const config = await getConfiguration() + const config = this.getOceanNode().getConfig() const supportedNetwork = config.supportedNetworks[chainId] // check if supported chainId @@ -402,7 +398,7 @@ export class GetDdoHandler extends CommandHandler { return validationResponse } try { - const database = this.getOceanNode().getDatabase() + const database = await this.getOceanNode().getDatabase() if (!database || !database.ddo) { CORE_LOGGER.error('DDO 
database is not available') return { @@ -451,7 +447,7 @@ export class FindDdoHandler extends CommandHandler { const p2pNode = node.getP2PNode() // if not P2P node just look on local DB - if (!hasP2PInterface || !p2pNode) { + if (!node.hasP2PInterface() || !p2pNode) { // Checking locally only... const ddoInf = await findDDOLocally(node, task.id) const result = ddoInf ? [ddoInf] : [] @@ -478,7 +474,7 @@ let processed = 0 let toProcess = 0 - const configuration = await getConfiguration() + const configuration = node.getConfig() // Checking locally... const ddoInfo = await findDDOLocally(node, task.id) @@ -527,7 +523,7 @@ // Store locally if indexer is enabled if (configuration.hasIndexer) { - const database = node.getDatabase() + const database = await node.getDatabase() if (database && database.ddo) { const ddoExistsLocally = await database.ddo.retrieve(ddo.id) if (!ddoExistsLocally) { @@ -668,7 +664,7 @@ // First try to find the DDO Locally if findDDO is not enforced if (!force) { try { - const database = node.getDatabase() + const database = await node.getDatabase() if (database && database.ddo) { const ddo = await database.ddo.retrieve(ddoId) return ddo as DDO @@ -760,7 +756,7 @@ export class ValidateDDOHandler extends CommandHandler { } } let shouldSign = false - const configuration = await getConfiguration() + const configuration = this.getOceanNode().getConfig() if (configuration.validateUnsignedDDO) { shouldSign = true } @@ -817,7 +813,9 @@ return { stream: shouldSign ? Readable.from( - JSON.stringify(await getValidationSignature(JSON.stringify(task.ddo))) + JSON.stringify( + await this.getOceanNode().getValidationSignature(JSON.stringify(task.ddo)) + ) ) : null, status: { httpStatus: 200 } @@ -904,7 +902,7 @@ async function checkIfDDOResponseIsLegit( } // 3) check if we support this network - const config = await getConfiguration() + const config = oceanNode.getConfig() const network = config.supportedNetworks[chainId.toString()] if (!network) { CORE_LOGGER.error( diff --git a/src/components/core/handler/downloadHandler.ts b/src/components/core/handler/downloadHandler.ts index cf587323a..35f9e93da 100644 --- a/src/components/core/handler/downloadHandler.ts +++ b/src/components/core/handler/downloadHandler.ts @@ -1,7 +1,7 @@ import { CommandHandler } from './handler.js' import { MetadataStates, PROTOCOL_COMMANDS } from '../../../utils/constants.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' -import { verifyProviderFees } from '../utils/feesHandler.js' +import { ProviderFees } from '../utils/feesHandler.js' import { FindDdoHandler } from './ddoHandler.js' import crypto from 'crypto' import { GENERIC_EMOJIS, LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' @@ -14,7 +14,7 @@ import { isERC20Template4Active } from '../../../utils/asset.js' import { Storage } from '../../storage/index.js' -import { getConfiguration, isPolicyServerConfigured } from '../../../utils/index.js' +import { isPolicyServerConfigured } from '../../../utils/index.js' import { checkCredentials } from '../../../utils/credentials.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { OceanNode } from '../../../OceanNode.js' @@ -60,7 +60,7 @@ export async function handleDownloadUrlCommand( ): Promise<P2PCommandResponse> { const encryptFile = !!task.aes_encrypted_key
CORE_LOGGER.logMessage('DownloadCommand requires file encryption? ' + encryptFile, true) - const config = await getConfiguration() + const config = node.getConfig() try { // Determine the type of storage and get a readable stream const storage = Storage.getStorageClass(task.fileObject, config) @@ -258,7 +258,7 @@ export class DownloadHandler extends CommandHandler { } // Initialize blockchain early (needed for credential checks with accessList) - const config = await getConfiguration() + const config = node.getConfig() const { chainId } = config.supportedNetworks[ddoChainId] let provider let blockchain @@ -382,7 +382,20 @@ export class DownloadHandler extends CommandHandler { // get all compute envs const computeAddrs: string[] = [] - const environments = await oceanNode.getC2DEngines().fetchEnvironments(ddo.chainId) + const c2dEngines = oceanNode.getC2DEngines() + if (!c2dEngines) { + const msg = + 'Compute engines are not configured on this node; cannot validate compute download' + CORE_LOGGER.logMessage(msg, true) + return { + stream: null, + status: { + httpStatus: 503, + error: msg + } + } + } + const environments = await c2dEngines.fetchEnvironments(ddo.chainId) for (const env of environments) computeAddrs.push(env.consumerAddress?.toLowerCase()) @@ -399,7 +412,8 @@ export class DownloadHandler extends CommandHandler { } } // 5. check that the provider fee transaction is valid - const validFee = await verifyProviderFees( + const fees = new ProviderFees(node) + const validFee = await fees.verifyProviderFees( task.transferTxId, task.consumerAddress, provider, @@ -506,40 +520,6 @@ export class DownloadHandler extends CommandHandler { const decryptedFilesString = Buffer.from(decryptedUrlBytes).toString() decryptedFileData = JSON.parse(decryptedFilesString) decriptedFileObject = decryptedFileData.files[task.fileIndex] - CORE_LOGGER.info(JSON.stringify(decriptedFileObject)) - } - - if (decriptedFileObject?.url && task.userData) { - const url = new URL(decriptedFileObject.url) - const userDataObj = - typeof task.userData === 'string' ? JSON.parse(task.userData) : task.userData - for (const [key, value] of Object.entries(userDataObj)) { - url.searchParams.append(key, String(value)) - } - decriptedFileObject.url = url.toString() - CORE_LOGGER.info('Appended userData to file url: ' + decriptedFileObject.url) - } - - if (decriptedFileObject?.url && task.userData) { - const url = new URL(decriptedFileObject.url) - const userDataObj = - typeof task.userData === 'string' ? JSON.parse(task.userData) : task.userData - for (const [key, value] of Object.entries(userDataObj)) { - url.searchParams.append(key, String(value)) - } - decriptedFileObject.url = url.toString() - CORE_LOGGER.info('Appended userData to file url: ' + decriptedFileObject.url) - } - - if (decriptedFileObject?.url && task.userData) { - const url = new URL(decriptedFileObject.url) - const userDataObj = - typeof task.userData === 'string' ? 
JSON.parse(task.userData) : task.userData - for (const [key, value] of Object.entries(userDataObj)) { - url.searchParams.append(key, String(value)) - } - decriptedFileObject.url = url.toString() - CORE_LOGGER.info('Appended userData to file url: ' + decriptedFileObject.url) } if (decriptedFileObject?.url && task.userData) { diff --git a/src/components/core/handler/encryptHandler.ts b/src/components/core/handler/encryptHandler.ts index 1b213ae85..300808b96 100644 --- a/src/components/core/handler/encryptHandler.ts +++ b/src/components/core/handler/encryptHandler.ts @@ -4,7 +4,7 @@ import { EncryptCommand, EncryptFileCommand } from '../../../@types/commands.js' import * as base58 from 'base58-js' import { Readable } from 'stream' import { Storage } from '../../storage/index.js' -import { getConfiguration, isPolicyServerConfigured } from '../../../utils/index.js' +import { isPolicyServerConfigured } from '../../../utils/index.js' import { PolicyServer } from '../../policyServer/index.js' import { EncryptMethod } from '../../../@types/fileObject.js' import { @@ -180,7 +180,7 @@ export class EncryptFileHandler extends CommandHandler { try { const oceanNode = this.getOceanNode() - const config = await getConfiguration() + const config = oceanNode.getConfig() const headers = { 'Content-Type': 'application/octet-stream', 'X-Encrypted-By': oceanNode.getKeyManager().getPeerId().toString(), diff --git a/src/components/core/handler/feesHandler.ts b/src/components/core/handler/feesHandler.ts index 41d81c483..0dbd44292 100644 --- a/src/components/core/handler/feesHandler.ts +++ b/src/components/core/handler/feesHandler.ts @@ -1,7 +1,7 @@ import { CommandHandler } from './handler.js' import { GetFeesCommand } from '../../../@types/commands.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' -import { createProviderFee } from '../utils/feesHandler.js' +import { ProviderFees } from '../utils/feesHandler.js' import { Readable } from 'stream' import { GENERIC_EMOJIS, LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { PROVIDER_LOGGER } from '../../../utils/logging/common.js' @@ -95,12 +95,12 @@ export class FeesHandler extends CommandHandler { } } - const nonceDB = this.getOceanNode().getDatabase().nonce + const nonceDB = (await this.getOceanNode().getDatabase()).nonce const nonceHandlerResponse = await getNonce(nonceDB, task.consumerAddress) const nonce = await streamToString(nonceHandlerResponse.stream as Readable) - + const feesClass = new ProviderFees(this.getOceanNode()) try { - const providerFee = await createProviderFee(ddo, service, validUntil) + const providerFee = await feesClass.createProviderFee(ddo, service, validUntil) if (providerFee) { const response: ProviderInitialize = { providerFee, diff --git a/src/components/core/handler/fileInfoHandler.ts b/src/components/core/handler/fileInfoHandler.ts index d11df081b..14649c5b8 100644 --- a/src/components/core/handler/fileInfoHandler.ts +++ b/src/components/core/handler/fileInfoHandler.ts @@ -13,7 +13,6 @@ import { validateCommandParameters } from '../../httpRoutes/validateCommands.js' import { getFile } from '../../../utils/file.js' -import { getConfiguration } from '../../../utils/index.js' async function formatMetadata( file: StorageObject, @@ -75,7 +74,7 @@ export class FileInfoHandler extends CommandHandler { } try { const oceanNode = this.getOceanNode() - const config = await getConfiguration() + const config = oceanNode.getConfig() let fileInfo = [] if (task.file && task.type) { diff --git 
a/src/components/core/handler/getJobs.ts b/src/components/core/handler/getJobs.ts index 21cdf18ed..df0bf05d5 100644 --- a/src/components/core/handler/getJobs.ts +++ b/src/components/core/handler/getJobs.ts @@ -22,7 +22,7 @@ export class GetJobsHandler extends CommandHandler { } try { - const { c2d } = this.getOceanNode().getDatabase() + const { c2d } = await this.getOceanNode().getDatabase() if (!c2d) { throw new Error('C2D database not initialized') } diff --git a/src/components/core/handler/handler.ts b/src/components/core/handler/handler.ts index 7936e1caa..4f6cd2503 100644 --- a/src/components/core/handler/handler.ts +++ b/src/components/core/handler/handler.ts @@ -11,7 +11,6 @@ import { buildRateLimitReachedResponse, ValidateParams } from '../../httpRoutes/validateCommands.js' -import { getConfiguration } from '../../../utils/index.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { ReadableString } from '../../P2P/handlers.js' import { CONNECTION_HISTORY_DELETE_THRESHOLD } from '../../../utils/constants.js' @@ -32,9 +31,11 @@ export abstract class BaseHandler implements ICommandHandler { } // TODO LOG, implement all handlers + // eslint-disable-next-line require-await async checkRateLimit(caller: string | string[]): Promise { - const requestMap = this.getOceanNode().getRequestMap() - const ratePerMinute = (await getConfiguration()).rateLimit + const node = this.getOceanNode() + const requestMap = node.getRequestMap() + const ratePerMinute = node.getConfig().rateLimit const requestTime = new Date().getTime() let isOK = true @@ -173,6 +174,15 @@ export abstract class CommandHandler } } + async getAddressFromToken(authToken: string): Promise { + const auth = this.getOceanNode().getAuth() + if (!auth) { + throw new Error('Auth not configured') + } + + return (await auth.validateToken(authToken)).address + } + async validateTokenOrSignature( authToken: string, address: string, diff --git a/src/components/core/handler/nonceHandler.ts b/src/components/core/handler/nonceHandler.ts index 18c27cc48..39787325d 100644 --- a/src/components/core/handler/nonceHandler.ts +++ b/src/components/core/handler/nonceHandler.ts @@ -29,6 +29,6 @@ export class NonceHandler extends CommandHandler { return validationResponse } const { address } = task - return getNonce(this.getOceanNode().getDatabase().nonce, address) + return getNonce((await this.getOceanNode().getDatabase()).nonce, address) } } diff --git a/src/components/core/handler/p2p.ts b/src/components/core/handler/p2p.ts index 6b4a58617..c00453aca 100644 --- a/src/components/core/handler/p2p.ts +++ b/src/components/core/handler/p2p.ts @@ -1,5 +1,4 @@ import { CommandHandler } from './handler.js' -import { getConfiguration } from '../../../utils/config.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' import { FindPeerCommand, @@ -130,9 +129,17 @@ export class GetP2PNetworkStatsHandler extends CommandHandler { return checks } try { - const config = await getConfiguration() + const node = this.getOceanNode() + const config = node.getConfig() if (config.p2pConfig.enableNetworkStats) { - const stats = this.getOceanNode().getP2PNode().getNetworkingStats() + const p2pNode = node.getP2PNode() + if (!p2pNode) { + return { + stream: null, + status: { httpStatus: 503, error: 'P2P Interface is disabled' } + } + } + const stats = p2pNode.getNetworkingStats() return { stream: Readable.from(JSON.stringify(stats)), status: { httpStatus: 200 } diff --git a/src/components/core/handler/persistentStorage.ts 
b/src/components/core/handler/persistentStorage.ts index 6c12da4ad..a3ff9ae8a 100644 --- a/src/components/core/handler/persistentStorage.ts +++ b/src/components/core/handler/persistentStorage.ts @@ -87,7 +87,9 @@ export class PersistentStorageCreateBucketHandler extends CommandHandler { let ownerNormalized: string try { - ownerNormalized = getAddress(task.consumerAddress) + ownerNormalized = task.consumerAddress + ? getAddress(task.consumerAddress) + : getAddress(await this.getAddressFromToken(task.authorization)) } catch { return { stream: null, @@ -190,7 +192,10 @@ export class PersistentStorageListFilesHandler extends CommandHandler { try { const storage = requirePersistentStorage(this) - const result = await storage.listFiles(task.bucketId, task.consumerAddress) + const ownerNormalized = task.consumerAddress + ? getAddress(task.consumerAddress) + : getAddress(await this.getAddressFromToken(task.authorization)) + const result = await storage.listFiles(task.bucketId, ownerNormalized) return { stream: Readable.from(JSON.stringify(result)), status: { httpStatus: 200, error: null } @@ -231,11 +236,15 @@ export class PersistentStorageGetFileObjectHandler extends CommandHandler { try { const storage = requirePersistentStorage(this) + const ownerNormalized = task.consumerAddress + ? getAddress(task.consumerAddress) + : getAddress(await this.getAddressFromToken(task.authorization)) const obj = await storage.getFileObject( task.bucketId, task.fileName, - task.consumerAddress + ownerNormalized ) + return { stream: Readable.from(JSON.stringify(obj)), status: { httpStatus: 200, error: null } @@ -285,12 +294,16 @@ export class PersistentStorageUploadFileHandler extends CommandHandler { status: { httpStatus: 403, error: 'Upload stream error' } } } + const ownerNormalized = task.consumerAddress + ? getAddress(task.consumerAddress) + : getAddress(await this.getAddressFromToken(task.authorization)) const result = await storage.uploadFile( task.bucketId, task.fileName, task.stream, - task.consumerAddress + ownerNormalized ) + return { stream: Readable.from(JSON.stringify(result)), status: { httpStatus: 200, error: null } @@ -331,7 +344,10 @@ export class PersistentStorageDeleteFileHandler extends CommandHandler { try { const storage = requirePersistentStorage(this) - await storage.deleteFile(task.bucketId, task.fileName, task.consumerAddress) + const ownerNormalized = task.consumerAddress + ? 
getAddress(task.consumerAddress) + : getAddress(await this.getAddressFromToken(task.authorization)) + await storage.deleteFile(task.bucketId, task.fileName, ownerNormalized) return { stream: Readable.from(JSON.stringify({ success: true })), status: { httpStatus: 200, error: null } diff --git a/src/components/core/handler/policyServer.ts b/src/components/core/handler/policyServer.ts index 053cc869a..98aad2ef6 100644 --- a/src/components/core/handler/policyServer.ts +++ b/src/components/core/handler/policyServer.ts @@ -32,9 +32,9 @@ export class PolicyServerPassthroughHandler extends CommandHandler { task.policyServerPassthrough.ddo = null // resolve DDO first try { - task.policyServerPassthrough.ddo = await this.getOceanNode() - .getDatabase() - .ddo.retrieve(task.policyServerPassthrough.documentId) + task.policyServerPassthrough.ddo = await ( + await this.getOceanNode().getDatabase() + ).ddo.retrieve(task.policyServerPassthrough.documentId) } catch (error) { // just log it CORE_LOGGER.warn( @@ -82,7 +82,7 @@ export class PolicyServerInitializeHandler extends CommandHandler { } // resolve DDO first try { - const database = this.getOceanNode().getDatabase() + const database = await this.getOceanNode().getDatabase() if (!database || !database.ddo) { return { stream: null, diff --git a/src/components/core/handler/queryHandler.ts b/src/components/core/handler/queryHandler.ts index 3e28a1d3c..2f31eb826 100644 --- a/src/components/core/handler/queryHandler.ts +++ b/src/components/core/handler/queryHandler.ts @@ -20,7 +20,7 @@ export class QueryHandler extends CommandHandler { return validationResponse } try { - const database = this.getOceanNode().getDatabase() + const database = await this.getOceanNode().getDatabase() if (!database || !database.ddo) { CORE_LOGGER.error('DDO database is not available') return { @@ -53,7 +53,7 @@ export class QueryDdoStateHandler extends QueryHandler { return buildInvalidParametersResponse(validation) } try { - const database = this.getOceanNode().getDatabase() + const database = await this.getOceanNode().getDatabase() if (!database || !database.ddoState) { CORE_LOGGER.error('DDO State database is not available') return { diff --git a/src/components/core/utils/feesHandler.ts b/src/components/core/utils/feesHandler.ts index b351e5cf8..46341f7d0 100644 --- a/src/components/core/utils/feesHandler.ts +++ b/src/components/core/utils/feesHandler.ts @@ -12,15 +12,10 @@ import { FeeTokens, ProviderFeeData, ProviderFeeValidation, - ProviderFees + ProviderFees as ProviderFeesResult } from '../../../@types/Fees' import { Service, DDOManager, Asset } from '@oceanprotocol/ddo-js' -import { - getDatatokenDecimals, - verifyMessage, - getJsonRpcProvider -} from '../../../utils/blockchain.js' -import { getConfiguration } from '../../../utils/config.js' +import { getDatatokenDecimals, verifyMessage } from '../../../utils/blockchain.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { getOceanArtifactsAdresses } from '../../../utils/address.js' @@ -28,229 +23,241 @@ import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/template import { fetchEventFromTransaction } from '../../../utils/util.js' import { fetchTransactionReceipt } from './validateOrders.js' -export function getEnvironmentPriceSchemaForResource( - prices: ComputeResourcesPricingInfo[], - id: string -): number { - for (const pr of prices) { - if (pr.id === id) { - return pr.price +export class ProviderFees { + private node: OceanNode + + constructor(_node: OceanNode) { + this.node 
= _node + } + + getEnvironmentPriceSchemaForResource( + prices: ComputeResourcesPricingInfo[], + id: string + ): number { + for (const pr of prices) { + if (pr.id === id) { + return pr.price + } } + return 0 } - return 0 -} -async function calculateProviderFeeAmount( - validUntil: number, - chainId: string - // asset?: DDO -): Promise { - // it's a download provider fee - // we should get asset file size, and do a proper fee management according to time - // something like estimated 3 downloads per day - const config = await getConfiguration() - const providerFeeAmount = config?.feeStrategy?.feeAmount?.amount || 0 - return providerFeeAmount -} -export async function createProviderFee( - asset: Asset, - service: Service, - validUntil: number -): Promise | undefined { - // round for safety - validUntil = Math.round(validUntil) - - const providerData = { - dt: service.datatokenAddress, - id: service.id + calculateProviderFeeAmount( + validUntil: number, + chainId: string + // asset?: DDO + ): number { + // it's a download provider fee + // we should get asset file size, and do a proper fee management according to time + // something like estimated 3 downloads per day + const providerFeeAmount = this.node.getConfig().feeStrategy?.feeAmount?.amount || 0 + return providerFeeAmount } - const ddoInstance = DDOManager.getDDOClass(asset) - const { chainId: assetChainId } = ddoInstance.getDDOFields() - const providerWallet = await getProviderWallet(String(assetChainId)) - const providerFeeAddress: string = providerWallet.address - let providerFeeAmount: number - let providerFeeAmountFormatted: BigNumberish - const providerFeeToken = await getProviderFeeToken(assetChainId) - if (providerFeeToken?.toLowerCase() === ZeroAddress) { - providerFeeAmount = 0 - } else { - providerFeeAmount = await calculateProviderFeeAmount( - validUntil, - String(asset.chainId) + + async createProviderFee( + asset: Asset, + service: Service, + validUntil: number + ): Promise | undefined { + // round for safety + validUntil = Math.round(validUntil) + + const providerData = { + dt: service.datatokenAddress, + id: service.id + } + const ddoInstance = DDOManager.getDDOClass(asset) + const { chainId: assetChainId } = ddoInstance.getDDOFields() + const providerWallet = await this.getProviderWallet(String(assetChainId)) + const providerFeeAddress: string = providerWallet.address + let providerFeeAmount: number + let providerFeeAmountFormatted: BigNumberish + const providerFeeToken = this.getProviderFeeToken(assetChainId) + if (providerFeeToken?.toLowerCase() === ZeroAddress) { + providerFeeAmount = 0 + } else { + providerFeeAmount = this.calculateProviderFeeAmount( + validUntil, + String(asset.chainId) + ) + } + + if (providerFeeToken && providerFeeToken?.toLowerCase() !== ZeroAddress) { + const blockchain = this.node.getBlockchain(Number(assetChainId)) + if (!blockchain) { + throw new Error(`Blockchain for chainId ${assetChainId} is not configured`) + } + const provider = await blockchain.getProvider() + const decimals = await getDatatokenDecimals(providerFeeToken, provider) + providerFeeAmountFormatted = parseUnits(providerFeeAmount.toString(10), decimals) + } else { + providerFeeAmountFormatted = BigInt(0) + } + const messageHash = ethers.solidityPackedKeccak256( + ['bytes', 'address', 'address', 'uint256', 'uint256'], + [ + ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), + ethers.getAddress(providerFeeAddress), + ethers.getAddress(providerFeeToken), + providerFeeAmountFormatted, + validUntil + ] ) - } - if 
(providerFeeToken && providerFeeToken?.toLowerCase() !== ZeroAddress) { - const provider = await getJsonRpcProvider(assetChainId) - const decimals = await getDatatokenDecimals(providerFeeToken, provider) - providerFeeAmountFormatted = parseUnits(providerFeeAmount.toString(10), decimals) - } else { - providerFeeAmountFormatted = BigInt(0) - } - const messageHash = ethers.solidityPackedKeccak256( - ['bytes', 'address', 'address', 'uint256', 'uint256'], - [ - ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), - ethers.getAddress(providerFeeAddress), - ethers.getAddress(providerFeeToken), - providerFeeAmountFormatted, + const signed32Bytes = await providerWallet.signMessage( + new Uint8Array(ethers.toBeArray(messageHash)) + ) // it already does the prefix = "\x19Ethereum Signed Message:\n32" + // OR just ethCrypto.sign(pk, signable_hash) + + // *** NOTE: provider.py *** + // signed = keys.ecdsa_sign(message_hash=signable_hash, private_key=pk) + + // For Solidity, we need the expanded-format of a signature + const signatureSplitted = ethers.Signature.from(signed32Bytes) + + // # make it compatible with last openzepellin https://github.com/OpenZeppelin/openzeppelin-contracts/pull/1622 + const v = signatureSplitted.v <= 1 ? signatureSplitted.v + 27 : signatureSplitted.v + const r = ethers.hexlify(signatureSplitted.r) // 32 bytes + const s = ethers.hexlify(signatureSplitted.s) + + const providerFee: ProviderFeeData = { + providerFeeAddress: ethers.getAddress(providerFeeAddress), + providerFeeToken: ethers.getAddress(providerFeeToken), + providerFeeAmount: providerFeeAmountFormatted, + providerData: ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), + v, + r, // 32 bytes => get it back: Buffer.from(providerFee.r).toString('hex')) + s, // 32 bytes validUntil - ] - ) - - const signed32Bytes = await providerWallet.signMessage( - new Uint8Array(ethers.toBeArray(messageHash)) - ) // it already does the prefix = "\x19Ethereum Signed Message:\n32" - // OR just ethCrypto.sign(pk, signable_hash) - - // *** NOTE: provider.py *** - // signed = keys.ecdsa_sign(message_hash=signable_hash, private_key=pk) - - // For Solidity, we need the expanded-format of a signature - const signatureSplitted = ethers.Signature.from(signed32Bytes) - - // # make it compatible with last openzepellin https://github.com/OpenZeppelin/openzeppelin-contracts/pull/1622 - const v = signatureSplitted.v <= 1 ? signatureSplitted.v + 27 : signatureSplitted.v - const r = ethers.hexlify(signatureSplitted.r) // 32 bytes - const s = ethers.hexlify(signatureSplitted.s) - - const providerFee: ProviderFeeData = { - providerFeeAddress: ethers.getAddress(providerFeeAddress), - providerFeeToken: ethers.getAddress(providerFeeToken), - providerFeeAmount: providerFeeAmountFormatted, - providerData: ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), - v, - r, // 32 bytes => get it back: Buffer.from(providerFee.r).toString('hex')) - s, // 32 bytes - validUntil - } - return JSON.parse( - JSON.stringify( - providerFee, - (key, value) => (typeof value === 'bigint' ? 
value.toString() : value) // return everything else unchanged - ) - ) -} -export async function verifyProviderFees( - txId: string, - userAddress: string, - provider: FallbackProvider, - service: Service -): Promise { - /* given a transaction, check if there is a valid provider fee event - * We could have multiple orders, for multiple assets & providers - */ - if (!txId) { - CORE_LOGGER.error('Invalid txId') - return { - isValid: false, - message: 'Invalid txId', - validUntil: 0 } + return JSON.parse( + JSON.stringify( + providerFee, + (key, value) => (typeof value === 'bigint' ? value.toString() : value) // return everything else unchanged + ) + ) } - const { chainId } = await provider.getNetwork() - const providerWallet = await getProviderWallet(String(chainId)) - const contractInterface = new Interface(ERC20Template.abi) - const now = Math.round(new Date().getTime() / 1000) - const txReceiptMined = await fetchTransactionReceipt(txId, provider) - const blockMined = await txReceiptMined.getBlock() - - if (!txReceiptMined) { - const message = `Tx receipt cannot be processed, because tx id ${txId} was not mined.` - CORE_LOGGER.error(message) - return { isValid: false, message, validUntil: 0 } - } + async verifyProviderFees( + txId: string, + userAddress: string, + provider: FallbackProvider, + service: Service + ): Promise { + /* given a transaction, check if there is a valid provider fee event + * We could have multiple orders, for multiple assets & providers + */ + if (!txId) { + CORE_LOGGER.error('Invalid txId') + return { + isValid: false, + message: 'Invalid txId', + validUntil: 0 + } + } - const providerFeesEvents = fetchEventFromTransaction( - txReceiptMined, - 'ProviderFee', - contractInterface - ) - - let foundValid = false - let providerData - for (const event of providerFeesEvents) { - const providerAddress = event.args[0]?.toLowerCase() - const validUntilContract = parseInt(event.args[7].toString()) - const utf = ethers.toUtf8String(event.args[3]) - - try { - providerData = JSON.parse(utf) - } catch (e) { - CORE_LOGGER.error('ProviderFee event JSON parsing failed') - continue + const { chainId } = await provider.getNetwork() + const providerWallet = await this.getProviderWallet(String(chainId)) + const contractInterface = new Interface(ERC20Template.abi) + const now = Math.round(new Date().getTime() / 1000) + const txReceiptMined = await fetchTransactionReceipt(txId, provider) + const blockMined = await txReceiptMined.getBlock() + + if (!txReceiptMined) { + const message = `Tx receipt cannot be processed, because tx id ${txId} was not mined.` + CORE_LOGGER.error(message) + return { isValid: false, message, validUntil: 0 } } - if ( - providerData && - providerAddress === providerWallet.address?.toLowerCase() && - providerData.id === service.id && - providerData.dt?.toLowerCase() === service.datatokenAddress?.toLowerCase() - ) { - if (validUntilContract !== 0) { - // check if it's expired - if (now - blockMined.timestamp <= validUntilContract) { + const providerFeesEvents = fetchEventFromTransaction( + txReceiptMined, + 'ProviderFee', + contractInterface + ) + + let foundValid = false + let providerData + for (const event of providerFeesEvents) { + const providerAddress = event.args[0]?.toLowerCase() + const validUntilContract = parseInt(event.args[7].toString()) + const utf = ethers.toUtf8String(event.args[3]) + + try { + providerData = JSON.parse(utf) + } catch (e) { + CORE_LOGGER.error('ProviderFee event JSON parsing failed') + continue + } + + if ( + providerData && + 
providerAddress === providerWallet.address?.toLowerCase() && + providerData.id === service.id && + providerData.dt?.toLowerCase() === service.datatokenAddress?.toLowerCase() + ) { + if (validUntilContract !== 0) { + // check if it's expired + if (now - blockMined.timestamp <= validUntilContract) { + foundValid = true + break + } + } else { foundValid = true break } - } else { - foundValid = true - break } } - } - if (!foundValid) { - const message = 'No valid providerFee events' - CORE_LOGGER.error(message) - return { isValid: false, message, validUntil: 0 } - } + if (!foundValid) { + const message = 'No valid providerFee events' + CORE_LOGGER.error(message) + return { isValid: false, message, validUntil: 0 } + } - return { - isValid: true, - message: 'Validation successful', - validUntil: providerData.timestamp + return { + isValid: true, + message: 'Validation successful', + validUntil: providerData.timestamp + } } -} -// TO DO - delete functions below, as they are used in the tests -// new provider create & verify -> see above :) - -// equiv to get_provider_fees -// *** NOTE: provider.py => get_provider_fees *** -export async function createFee( - asset: Asset, - validUntil: number, - computeEnv: string, - service: Service - // provider: OceanProvider // this node provider -): Promise | undefined { - // create providerData struct - const providerData = { - environment: computeEnv, // null for us now - timestamp: Date.now(), - dt: service.datatokenAddress, - id: service.id - } + // TO DO - delete functions below, as they are used in the tests + // new provider create & verify -> see above :) + + // equiv to get_provider_fees + // *** NOTE: provider.py => get_provider_fees *** + async createFee( + asset: Asset, + validUntil: number, + computeEnv: string, + service: Service + // provider: OceanProvider // this node provider + ): Promise | undefined { + // create providerData struct + const providerData = { + environment: computeEnv, // null for us now + timestamp: Date.now(), + dt: service.datatokenAddress, + id: service.id + } - // *** NOTE: provider.py *** - // provider_data = { - // "environment": compute_env, // null for us now - // "timestamp": datetime.now(timezone.utc).timestamp(), - // "dt": service.datatoken_address, - // "id": service.id, - // } - const providerWallet = await getProviderWallet(String(asset.chainId)) - const providerFeeAddress: string = providerWallet.address + // *** NOTE: provider.py *** + // provider_data = { + // "environment": compute_env, // null for us now + // "timestamp": datetime.now(timezone.utc).timestamp(), + // "dt": service.datatoken_address, + // "id": service.id, + // } + const providerWallet = await this.getProviderWallet(String(asset.chainId)) + const providerFeeAddress: string = providerWallet.address - // from env FEE_TOKENS - const providerFeeToken: string = await getProviderFeeToken(asset.chainId) + // from env FEE_TOKENS + const providerFeeToken: string = this.getProviderFeeToken(asset.chainId) - // from env FEE_AMOUNT - const providerFeeAmount: number = await getProviderFeeAmount() // TODO check decimals on contract? + // from env FEE_AMOUNT + const providerFeeAmount: number = this.getProviderFeeAmount() // TODO check decimals on contract? - /** https://github.com/ethers-io/ethers.js/issues/468 + /** https://github.com/ethers-io/ethers.js/issues/468 * * Also, keep in mind that signMessage can take in a string, * which is treated as a UTF-8 string, or an ArrayLike, which is treated like binary data. 
@@ -267,225 +274,227 @@ export async function createFee( let signature = await wallet.signMessage(messageHashBinary); */ - const messageHash = ethers.solidityPackedKeccak256( - ['bytes', 'address', 'address', 'uint256', 'uint256'], - [ - ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), - ethers.getAddress(providerFeeAddress), - ethers.getAddress(providerFeeToken), + const messageHash = ethers.solidityPackedKeccak256( + ['bytes', 'address', 'address', 'uint256', 'uint256'], + [ + ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), + ethers.getAddress(providerFeeAddress), + ethers.getAddress(providerFeeToken), + providerFeeAmount, + validUntil + ] + ) + + // *** NOTE: provider.py *** + // message_hash = Web3.solidityKeccak( + // ["bytes", "address", "address", "uint256", "uint256"], + // [ + // Web3.toHex(Web3.toBytes(text=provider_data)), + // Web3.toChecksumAddress(provider_fee_address), + // Web3.toChecksumAddress(provider_fee_token), + // provider_fee_amount, + // valid_until, + // ], + // ) + + // console.log('messageHash: ' + messageHash) + // 66 byte string, which represents 32 bytes of data + // ethers.toUtf8Bytes(messageHash).length) // 66 byte string + + // 32 bytes of data in Uint8Array + // console.log( + // 'messageHash bytes length Uint8Array: ', + // ethers.toBeArray(messageHash).length + // ) + + // const signableHash = ethers.solidityPackedKeccak256( + // ['bytes'], + // [ethers.toUtf8Bytes(messageHash)] + + // // OR ethers.utils.hashMessage(ethers.utils.concat([ hash, string, address ]) + // // https://github.com/ethers-io/ethers.js/issues/468 + // ) + + // *** NOTE: provider.py *** + // pk = keys.PrivateKey(provider_wallet.key) + // prefix = "\x19Ethereum Signed Message:\n32" + // signable_hash = Web3.solidityKeccak( + // ["bytes", "bytes"], [Web3.toBytes(text=prefix), Web3.toBytes(message_hash)] + // ) + + // Sign the string message + // const signed32Bytes = await providerWallet.signMessage(ethers.toBeArray(signableHash)) // it already does the prefix = "\x19Ethereum Signed Message:\n32" + // const signed32Bytes = await providerWallet.signMessage(ethers.hexlify(signableHash)) // it already does the prefix = "\x19Ethereum Signed Message:\n32" + const signed32Bytes = await providerWallet.signMessage( + new Uint8Array(ethers.toBeArray(messageHash)) + ) // it already does the prefix = "\x19Ethereum Signed Message:\n32" + // OR just ethCrypto.sign(pk, signable_hash) + + // *** NOTE: provider.py *** + // signed = keys.ecdsa_sign(message_hash=signable_hash, private_key=pk) + + // For Solidity, we need the expanded-format of a signature + const signatureSplitted = ethers.Signature.from(signed32Bytes) + // console.log( + // 'verify message:', + // await verifyMessage( + // ethers.toBeArray(signableHash), // 32 bytes again + // providerWallet.address, + // signed32Bytes + // ) + // ) + + // # make it compatible with last openzepellin https://github.com/OpenZeppelin/openzeppelin-contracts/pull/1622 + const v = signatureSplitted.v <= 1 ? 
signatureSplitted.v + 27 : signatureSplitted.v + const r = ethers.hexlify(signatureSplitted.r) // 32 bytes + const s = ethers.hexlify(signatureSplitted.s) + // ethers.hexlify(ethers.toUtf8Bytes(signatureSplitted.s)) + + // length 66 + // ethers.toUtf8Bytes(r).length + // length 32 + // ethers.toBeArray(r).length + + const providerFee: ProviderFeeData = { + providerFeeAddress: ethers.getAddress(providerFeeAddress), + providerFeeToken: ethers.getAddress(providerFeeToken), providerFeeAmount, + providerData: ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), + v, + r, // 32 bytes => get it back: Buffer.from(providerFee.r).toString('hex')) + s, // 32 bytes validUntil - ] - ) - - // *** NOTE: provider.py *** - // message_hash = Web3.solidityKeccak( - // ["bytes", "address", "address", "uint256", "uint256"], - // [ - // Web3.toHex(Web3.toBytes(text=provider_data)), - // Web3.toChecksumAddress(provider_fee_address), - // Web3.toChecksumAddress(provider_fee_token), - // provider_fee_amount, - // valid_until, - // ], - // ) - - // console.log('messageHash: ' + messageHash) - // 66 byte string, which represents 32 bytes of data - // ethers.toUtf8Bytes(messageHash).length) // 66 byte string + } - // 32 bytes of data in Uint8Array - // console.log( - // 'messageHash bytes length Uint8Array: ', - // ethers.toBeArray(messageHash).length - // ) - - // const signableHash = ethers.solidityPackedKeccak256( - // ['bytes'], - // [ethers.toUtf8Bytes(messageHash)] - - // // OR ethers.utils.hashMessage(ethers.utils.concat([ hash, string, address ]) - // // https://github.com/ethers-io/ethers.js/issues/468 - // ) - - // *** NOTE: provider.py *** - // pk = keys.PrivateKey(provider_wallet.key) - // prefix = "\x19Ethereum Signed Message:\n32" - // signable_hash = Web3.solidityKeccak( - // ["bytes", "bytes"], [Web3.toBytes(text=prefix), Web3.toBytes(message_hash)] - // ) - - // Sign the string message - // const signed32Bytes = await providerWallet.signMessage(ethers.toBeArray(signableHash)) // it already does the prefix = "\x19Ethereum Signed Message:\n32" - // const signed32Bytes = await providerWallet.signMessage(ethers.hexlify(signableHash)) // it already does the prefix = "\x19Ethereum Signed Message:\n32" - const signed32Bytes = await providerWallet.signMessage( - new Uint8Array(ethers.toBeArray(messageHash)) - ) // it already does the prefix = "\x19Ethereum Signed Message:\n32" - // OR just ethCrypto.sign(pk, signable_hash) - - // *** NOTE: provider.py *** - // signed = keys.ecdsa_sign(message_hash=signable_hash, private_key=pk) - - // For Solidity, we need the expanded-format of a signature - const signatureSplitted = ethers.Signature.from(signed32Bytes) - // console.log( - // 'verify message:', - // await verifyMessage( - // ethers.toBeArray(signableHash), // 32 bytes again - // providerWallet.address, - // signed32Bytes - // ) - // ) - - // # make it compatible with last openzepellin https://github.com/OpenZeppelin/openzeppelin-contracts/pull/1622 - const v = signatureSplitted.v <= 1 ? 
signatureSplitted.v + 27 : signatureSplitted.v - const r = ethers.hexlify(signatureSplitted.r) // 32 bytes - const s = ethers.hexlify(signatureSplitted.s) - // ethers.hexlify(ethers.toUtf8Bytes(signatureSplitted.s)) - - // length 66 - // ethers.toUtf8Bytes(r).length - // length 32 - // ethers.toBeArray(r).length - - const providerFee: ProviderFeeData = { - providerFeeAddress: ethers.getAddress(providerFeeAddress), - providerFeeToken: ethers.getAddress(providerFeeToken), - providerFeeAmount, - providerData: ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), - v, - r, // 32 bytes => get it back: Buffer.from(providerFee.r).toString('hex')) - s, // 32 bytes - validUntil + // *** NOTE: provider.py *** + // provider_fee = { + // "providerFeeAddress": provider_fee_address, + // "providerFeeToken": provider_fee_token, + // "providerFeeAmount": provider_fee_amount, + // "providerData": Web3.toHex(Web3.toBytes(text=provider_data)), + // # make it compatible with last openzepellin https://github.com/OpenZeppelin/openzeppelin-contracts/pull/1622 + // "v": (signed.v + 27) if signed.v <= 1 else signed.v, + // "r": Web3.toHex(Web3.toBytes(signed.r).rjust(32, b"\0")), + // "s": Web3.toHex(Web3.toBytes(signed.s).rjust(32, b"\0")), + // "validUntil": valid_until, + // } + + return providerFee + + // Example output: { + // providerFeeAddress: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260', + // providerFeeToken: '0xd8992Ed72C445c35Cb4A2be468568Ed1079357c8', + // providerFeeAmount: 1, + // providerData: '0x7b22656e7669726f6e6d656e74223a226e756c6c222c2274696d657374616d70223a313730313239363037303034352c226474223a22307864333166373464314435613833623839364164373436643936663738666436354230613636454266222c226964223a2231227d', + // v: 28, + // r: Uint8Array(32) [ + // 44, 122, 175, 12, 207, 253, 204, 162, + // 244, 36, 184, 29, 204, 27, 51, 43, + // 99, 245, 151, 28, 115, 46, 232, 250, + // 47, 77, 48, 84, 148, 8, 129, 91 + // ], + // s: Uint8Array(32) [ + // 50, 84, 82, 246, 84, 106, 73, 180, + // 118, 160, 230, 0, 229, 175, 234, 42, + // 222, 160, 107, 140, 141, 110, 89, 221, + // 27, 162, 190, 146, 142, 84, 145, 244 + // ] + // validUntil: 0 + // } } - // *** NOTE: provider.py *** - // provider_fee = { - // "providerFeeAddress": provider_fee_address, - // "providerFeeToken": provider_fee_token, - // "providerFeeAmount": provider_fee_amount, - // "providerData": Web3.toHex(Web3.toBytes(text=provider_data)), - // # make it compatible with last openzepellin https://github.com/OpenZeppelin/openzeppelin-contracts/pull/1622 - // "v": (signed.v + 27) if signed.v <= 1 else signed.v, - // "r": Web3.toHex(Web3.toBytes(signed.r).rjust(32, b"\0")), - // "s": Web3.toHex(Web3.toBytes(signed.s).rjust(32, b"\0")), - // "validUntil": valid_until, - // } - - return providerFee - - // Example output: { - // providerFeeAddress: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260', - // providerFeeToken: '0xd8992Ed72C445c35Cb4A2be468568Ed1079357c8', - // providerFeeAmount: 1, - // providerData: '0x7b22656e7669726f6e6d656e74223a226e756c6c222c2274696d657374616d70223a313730313239363037303034352c226474223a22307864333166373464314435613833623839364164373436643936663738666436354230613636454266222c226964223a2231227d', - // v: 28, - // r: Uint8Array(32) [ - // 44, 122, 175, 12, 207, 253, 204, 162, - // 244, 36, 184, 29, 204, 27, 51, 43, - // 99, 245, 151, 28, 115, 46, 232, 250, - // 47, 77, 48, 84, 148, 8, 129, 91 - // ], - // s: Uint8Array(32) [ - // 50, 84, 82, 246, 84, 106, 73, 180, - // 118, 160, 230, 0, 229, 175, 234, 42, - // 222, 
160, 107, 140, 141, 110, 89, 221, - // 27, 162, 190, 146, 142, 84, 145, 244 - // ] - // validUntil: 0 - // } -} + async checkFee( + txId: string, + chainId: number, + providerFeesData: ProviderFeeData + // message: string | Uint8Array // the message that was signed (fee structure) ? + ): Promise { + // checkFee function: given a txID, checks: + // the address that signed the fee signature = ocean-node address + // Do not check if amount, tokens, etc are a match, because it can be an old order and config was changed in the meantime + + const wallet = await this.getProviderWallet() + const nodeAddress = wallet.address + + // first check if these are a match + if ( + nodeAddress?.toLowerCase() !== providerFeesData.providerFeeAddress?.toLowerCase() + ) { + return false + } -export async function checkFee( - txId: string, - chainId: number, - providerFeesData: ProviderFeeData - // message: string | Uint8Array // the message that was signed (fee structure) ? -): Promise { - // checkFee function: given a txID, checks: - // the address that signed the fee signature = ocean-node address - // Do not check if amount, tokens, etc are a match, because it can be an old order and config was changed in the meantime - - const wallet = await getProviderWallet() - const nodeAddress = wallet.address - - // first check if these are a match - if (nodeAddress?.toLowerCase() !== providerFeesData.providerFeeAddress?.toLowerCase()) { - return false + const providerDataAsArray = ethers.toBeArray(providerFeesData.providerData) + const providerDataStr = Buffer.from(providerDataAsArray).toString('utf8') + const providerData = JSON.parse(providerDataStr) + + // done previously as ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), + // check signature stuff now + + const messageHash = ethers.solidityPackedKeccak256( + ['bytes', 'address', 'address', 'uint256', 'uint256'], + [ + ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), + ethers.getAddress(providerFeesData.providerFeeAddress), // signer address + ethers.getAddress(providerFeesData.providerFeeToken), // TODO check decimals on contract? + providerFeesData.providerFeeAmount, + providerFeesData.validUntil + ] + ) + + const signableHash = ethers.solidityPackedKeccak256( + ['bytes'], + [ethers.toUtf8Bytes(messageHash)] + ) + + const message = ethers.toBeArray(signableHash) // await wallet.signMessage() + + // and also check that we signed this message + return verifyMessage(message, nodeAddress, txId) + // before was only return await verifyMessage(message, nodeAddress, txId) } - const providerDataAsArray = ethers.toBeArray(providerFeesData.providerData) - const providerDataStr = Buffer.from(providerDataAsArray).toString('utf8') - const providerData = JSON.parse(providerDataStr) - - // done previously as ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), - // check signature stuff now - - const messageHash = ethers.solidityPackedKeccak256( - ['bytes', 'address', 'address', 'uint256', 'uint256'], - [ - ethers.hexlify(ethers.toUtf8Bytes(JSON.stringify(providerData))), - ethers.getAddress(providerFeesData.providerFeeAddress), // signer address - ethers.getAddress(providerFeesData.providerFeeToken), // TODO check decimals on contract? 
- providerFeesData.providerFeeAmount, - providerFeesData.validUntil - ] - ) - - const signableHash = ethers.solidityPackedKeccak256( - ['bytes'], - [ethers.toUtf8Bytes(messageHash)] - ) - - const message = ethers.toBeArray(signableHash) // await wallet.signMessage() - - // and also check that we signed this message - return verifyMessage(message, nodeAddress, txId) - // before was only return await verifyMessage(message, nodeAddress, txId) -} + // These core functions are provider related functions, maybe they will be on Provider + // this might be different between chains + /** + * Get the provider wallet + * @param chainId the chain id (not used now) + * @returns the wallet + */ + // eslint-disable-next-line require-await + async getProviderWallet(chainId?: string): Promise { + const keyManager = this.node.getKeyManager() + return keyManager.getEthWallet() + } -// These core functions are provider related functions, maybe they will be on Provider -// this might be different between chains -/** - * Get the provider wallet - * @param chainId the chain id (not used now) - * @returns the wallet - */ -// eslint-disable-next-line require-await -export async function getProviderWallet(chainId?: string): Promise { - const oceanNode = OceanNode.getInstance() - const keyManager = oceanNode.getKeyManager() - return keyManager.getEthWallet() -} -export async function getProviderWalletAddress(): Promise { - return (await this.getProviderWallet()).address -} + async getProviderWalletAddress(): Promise { + return (await this.getProviderWallet()).address + } -/** - * Get the fee token - * @param chainId the chain id - * @returns the token address - */ -export async function getProviderFeeToken(chainId: number): Promise { - const config = await getConfiguration() - const feeTokens = config?.feeStrategy?.feeTokens || [] - const result = feeTokens.filter((token: FeeTokens) => Number(token.chain) === chainId) - if (result.length === 0 && chainId === 8996) { - const localOceanToken = getOceanArtifactsAdresses().development.Ocean - return localOceanToken || ethers.ZeroAddress + /** + * Get the fee token + * @param chainId the chain id + * @returns the token address + */ + // eslint-disable-next-line require-await + getProviderFeeToken(chainId: number): string { + const feeTokens = this.node.getConfig().feeStrategy?.feeTokens || [] + const result = feeTokens.filter((token: FeeTokens) => Number(token.chain) === chainId) + if (result.length === 0 && chainId === 8996) { + const localOceanToken = getOceanArtifactsAdresses().development.Ocean + return localOceanToken || ethers.ZeroAddress + } + return result.length ? result[0].token : ethers.ZeroAddress } - return result.length ? 
result[0].token : ethers.ZeroAddress -} -/** - * get the fee amount (in MB or other units) - * @returns amount - */ -export async function getProviderFeeAmount(): Promise { - const config = await getConfiguration() - return config?.feeStrategy?.feeAmount?.amount || 0 + /** + * get the fee amount (in MB or other units) + * @returns amount + */ + getProviderFeeAmount(): number { + return this.node.getConfig().feeStrategy?.feeAmount?.amount || 0 + } + // https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L65-L74 + // https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L447-L508 + // https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L522 + // https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L589-L608 } -// https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L65-L74 -// https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L447-L508 -// https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L522 -// https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L589-L608 diff --git a/src/components/core/utils/findDdoHandler.ts b/src/components/core/utils/findDdoHandler.ts index 02693903f..20fb0722c 100644 --- a/src/components/core/utils/findDdoHandler.ts +++ b/src/components/core/utils/findDdoHandler.ts @@ -5,7 +5,6 @@ import { FindDDOResponse } from '../../../@types/index.js' import { Service } from '@oceanprotocol/ddo-js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { OceanNode } from '../../../OceanNode.js' -import { hasP2PInterface } from '../../../utils/config.js' /** * Check if the specified ddo is cached and if the cached version is recent enough @@ -58,7 +57,7 @@ export async function findDDOLocally( node: OceanNode, id: string ): Promise | undefined { - const database = node.getDatabase() + const database = await node.getDatabase() if (!database || !database.ddo) { CORE_LOGGER.log( LOG_LEVELS_STR.LEVEL_WARN, @@ -72,7 +71,7 @@ export async function findDDOLocally( if (ddo) { // node has ddo const p2pNode: OceanP2P = node.getP2PNode() - if (!p2pNode || !hasP2PInterface) { + if (!p2pNode || !node.hasP2PInterface()) { const peerId: string = node.getKeyManager().getPeerId().toString() return { id: ddo.id, diff --git a/src/components/core/utils/nonceHandler.ts b/src/components/core/utils/nonceHandler.ts index 9f5195153..44ecdea5c 100644 --- a/src/components/core/utils/nonceHandler.ts +++ b/src/components/core/utils/nonceHandler.ts @@ -1,5 +1,5 @@ import { ReadableString } from '../../P2P/handleProtocolCommands.js' -import { P2PCommandResponse } from '../../../@types/OceanNode.js' +import { OceanNodeConfig, P2PCommandResponse } from '../../../@types/OceanNode.js' import { ethers } from 'ethers' import { GENERIC_EMOJIS, LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { CORE_LOGGER, DATABASE_LOGGER } from '../../../utils/logging/common.js' @@ -10,7 +10,6 @@ import { PROTOCOL_COMMANDS } from '../../../utils/constants.js' import { NonceCommand } from '../../../@types/commands.js' import { streamToString } from '../../../utils/util.js' import { Readable } from 'node:stream' -import { getConfiguration } from '../../../utils/config.js' export function getDefaultErrorResponse(errorMessage: string): P2PCommandResponse { return { @@ -116,6 +115,7 @@ async function 
updateNonce( // get stored nonce for an address, update it on db, validate signature export async function checkNonce( + config: OceanNodeConfig, db: AbstractNonceDatabase, consumer: string, nonce: number, @@ -137,6 +137,7 @@ export async function checkNonce( consumer, signature, command, + config, chainId ) if (validate.valid) { @@ -185,6 +186,7 @@ async function validateNonceAndSignature( consumer: string, signature: string, command: string = null, + config: OceanNodeConfig, chainId?: string | null ): Promise { if (nonce <= existingNonce) { @@ -218,7 +220,6 @@ async function validateNonceAndSignature( // Try ERC-1271 (smart account) validation try { - const config = await getConfiguration() const targetChainId = chainId || Object.keys(config?.supportedNetworks || {})[0] if (targetChainId && config?.supportedNetworks?.[targetChainId]) { const provider = new ethers.JsonRpcProvider( diff --git a/src/components/core/utils/statusHandler.ts b/src/components/core/utils/statusHandler.ts index 222d40ca0..45e3c5f97 100644 --- a/src/components/core/utils/statusHandler.ts +++ b/src/components/core/utils/statusHandler.ts @@ -11,7 +11,6 @@ import { CORE_LOGGER } from '../../../utils/logging/common.js' import { OceanNode } from '../../../OceanNode.js' import { typesenseSchemas } from '../../database/TypesenseSchemas.js' import { SupportedNetwork } from '../../../@types/blockchain.js' -import { getAdminAddresses } from '../../../utils/auth.js' import HumanHasher from 'humanhash' import { getPackageVersion } from '../../../utils/version.js' @@ -75,7 +74,7 @@ async function getIndexerBlockInfo( ): Promise { let blockNr = '0' try { - const database = oceanNode.getDatabase() + const database = await oceanNode.getDatabase() if (!database || !database.indexer) { CORE_LOGGER.log( LOG_LEVELS_STR.LEVEL_WARN, @@ -136,7 +135,7 @@ export async function status( // uptime: process.uptime(), platform: platformInfo, codeHash: config.codeHash, - allowedAdmins: await getAdminAddresses() + allowedAdmins: oceanNode.getAdminAddresses() } } // need to update at least block info if available diff --git a/src/components/core/utils/validateDdoHandler.ts b/src/components/core/utils/validateDdoHandler.ts index 935070551..ec1aeff23 100644 --- a/src/components/core/utils/validateDdoHandler.ts +++ b/src/components/core/utils/validateDdoHandler.ts @@ -1,30 +1,3 @@ -import { ethers } from 'ethers' -import { CORE_LOGGER } from '../../../utils/logging/common.js' -import { create256Hash } from '../../../utils/crypt.js' -import { getProviderWallet } from './feesHandler.js' - -export async function getValidationSignature(ddo: string): Promise { - try { - const hashedDDO = create256Hash(ddo) - const providerWallet = await getProviderWallet() - const messageHash = ethers.solidityPackedKeccak256( - ['bytes'], - [ethers.hexlify(ethers.toUtf8Bytes(hashedDDO))] - ) - const signed32Bytes = await providerWallet.signMessage( - new Uint8Array(ethers.toBeArray(messageHash)) - ) - const signatureSplitted = ethers.Signature.from(signed32Bytes) - const v = signatureSplitted.v <= 1 ? 
signatureSplitted.v + 27 : signatureSplitted.v - const r = ethers.hexlify(signatureSplitted.r) // 32 bytes - const s = ethers.hexlify(signatureSplitted.s) - return { hash: hashedDDO, publicKey: providerWallet.address, r, s, v } - } catch (error) { - CORE_LOGGER.logMessage(`Validation signature error: ${error}`, true) - return { hash: '', publicKey: '', r: '', s: '', v: '' } - } -} - export function isRemoteDDO(ddo: any): boolean { let keys try { diff --git a/src/components/database/C2DDatabase.ts b/src/components/database/C2DDatabase.ts index 2fdaedbdc..b1af4db02 100755 --- a/src/components/database/C2DDatabase.ts +++ b/src/components/database/C2DDatabase.ts @@ -11,8 +11,7 @@ import { OceanNodeDBConfig } from '../../@types/OceanNode.js' import { TypesenseSchema } from './TypesenseSchemas.js' import { AbstractDatabase } from './BaseDatabase.js' import { OceanNode } from '../../OceanNode.js' -import { getDatabase } from '../../utils/database.js' -import { getConfiguration } from '../../utils/index.js' + import { generateUniqueID } from '../core/compute/utils.js' export class C2DDatabase extends AbstractDatabase { private provider: SQLiteCompute @@ -129,11 +128,8 @@ export class C2DDatabase extends AbstractDatabase { async cleanStorageExpiredJobs(): Promise { const allEnvironments: ComputeEnvironment[] = [] const currentTimestamp = Date.now() / 1000 - const config = await getConfiguration(true) - const allEngines = await OceanNode.getInstance( - config, - await getDatabase() - ).getC2DEngines().engines + const c2dEngines = OceanNode.getInstance().getC2DEngines() + const allEngines = c2dEngines ? c2dEngines.engines : [] let cleaned = 0 for (const engine of allEngines) { @@ -167,7 +163,6 @@ export class C2DDatabase extends AbstractDatabase { * @returns number of orphans */ async cleanOrphanJobs(existingEnvironments: ComputeEnvironment[]) { - const c2dDatabase = await (await getDatabase()).c2d let cleaned = 0 const envIds: string[] = existingEnvironments @@ -175,11 +170,11 @@ export class C2DDatabase extends AbstractDatabase { .map((env: any) => env.id) // Get all finished jobs from DB, not just from known environments - const allJobs: DBComputeJob[] = await c2dDatabase.getFinishedJobs() + const allJobs: DBComputeJob[] = await this.getFinishedJobs() for (const job of allJobs) { if (!job.environment || !envIds.includes(job.environment)) { - if (await c2dDatabase.deleteJob(job.jobId)) { + if (await this.deleteJob(job.jobId)) { cleaned++ } } diff --git a/src/components/httpRoutes/aquarius.ts b/src/components/httpRoutes/aquarius.ts index c958896e8..bd26efa12 100644 --- a/src/components/httpRoutes/aquarius.ts +++ b/src/components/httpRoutes/aquarius.ts @@ -9,7 +9,6 @@ import { HTTP_LOGGER } from '../../utils/logging/common.js' import { QueryCommand } from '../../@types/commands.js' import { DatabaseFactory } from '../database/DatabaseFactory.js' import { SearchQuery } from '../../@types/DDO/SearchQuery.js' -import { getConfiguration } from '../../utils/index.js' export const aquariusRoutes = express.Router() @@ -71,7 +70,7 @@ aquariusRoutes.post( return } - const config = await getConfiguration() + const config = req.oceanNode.getConfig() const queryStrategy = await DatabaseFactory.createMetadataQuery(config.dbConfig) const transformedQuery = queryStrategy.buildQuery(searchQuery) @@ -95,7 +94,7 @@ aquariusRoutes.post( aquariusRoutes.get(`${AQUARIUS_API_BASE_PATH}/state/ddo`, async (req, res) => { try { - const config = await getConfiguration() + const config = req.oceanNode.getConfig() const 
queryStrategy = await DatabaseFactory.createDdoStateQuery(config.dbConfig) const did = req.query.did ? String(req.query.did) : undefined diff --git a/src/components/httpRoutes/commands.ts b/src/components/httpRoutes/commands.ts index 1ff338364..565262930 100644 --- a/src/components/httpRoutes/commands.ts +++ b/src/components/httpRoutes/commands.ts @@ -3,7 +3,7 @@ import express, { Request, Response } from 'express' import { toString as uint8ArrayToString } from 'uint8arrays/to-string' import { HTTP_LOGGER } from '../../utils/logging/common.js' -import { hasP2PInterface } from '../../utils/config.js' + import { validateCommandParameters } from './validateCommands.js' import { Readable } from 'stream' @@ -76,7 +76,7 @@ directCommandRoute.post( HTTP_LOGGER.logMessage('Sending command : ' + JSON.stringify(req.body), true) const isLocalCommand = - !hasP2PInterface || + !req.oceanNode.hasP2PInterface() || !req.body.node || req.oceanNode.getP2PNode()?.isTargetPeerSelf(req.body.node) @@ -104,7 +104,7 @@ directCommandRoute.post( closedResponse = true res.end() - } else if (hasP2PInterface) { + } else if (req.oceanNode.hasP2PInterface()) { // Remote command - use P2P sendTo let { multiAddrs } = req.body if (typeof multiAddrs === 'string') { @@ -119,9 +119,11 @@ directCommandRoute.post( multiAddrs = [multiAddrs] } } + const response = await req.oceanNode .getP2PNode() .sendTo(req.body.node as string, JSON.stringify(req.body), multiAddrs) + res.status(response.status.httpStatus) if (response.status.headers) { res.header(response.status.headers) diff --git a/src/components/httpRoutes/dids.ts b/src/components/httpRoutes/dids.ts index 28b60f402..c98790eda 100644 --- a/src/components/httpRoutes/dids.ts +++ b/src/components/httpRoutes/dids.ts @@ -1,6 +1,5 @@ import express, { Request, Response } from 'express' import { sendMissingP2PResponse } from './index.js' -import { hasP2PInterface } from '../../utils/config.js' export const getProvidersForStringRoute = express.Router() getProvidersForStringRoute.get( @@ -11,7 +10,7 @@ getProvidersForStringRoute.get( res.sendStatus(400) return } - if (hasP2PInterface) { + if (req.oceanNode.hasP2PInterface()) { const providers = await req.oceanNode .getP2PNode() .getProvidersForString(req.query.input as string) diff --git a/src/components/httpRoutes/index.ts b/src/components/httpRoutes/index.ts index 184608f80..ad4c0f3dc 100644 --- a/src/components/httpRoutes/index.ts +++ b/src/components/httpRoutes/index.ts @@ -9,7 +9,6 @@ import { rootEndpointRoutes } from './rootEndpoint.js' import { fileInfoRoute } from './fileInfo.js' import { computeRoutes } from './compute.js' import { queueRoutes } from './queue.js' -// import { getConfiguration } from '../../utils/config.js' import { jobsRoutes } from './jobs.js' import { addMapping, allRoutesMapping, findPathName } from './routeUtils.js' import { PolicyServerPassthroughRoute } from './policyServer.js' diff --git a/src/components/httpRoutes/provider.ts b/src/components/httpRoutes/provider.ts index faef472d1..f53e64dd2 100644 --- a/src/components/httpRoutes/provider.ts +++ b/src/components/httpRoutes/provider.ts @@ -36,35 +36,39 @@ providerRoutes.post(`${SERVICES_API_BASE_PATH}/decrypt`, async (req, res) => { } }) -providerRoutes.post(`${SERVICES_API_BASE_PATH}/encrypt`, async (req, res) => { - try { - const data = req.body.toString() - if (!data) { - res.status(400).send('Missing required body') - return - } - const result = await new EncryptHandler(req.oceanNode).handle({ - blob: data, - encoding: 'string', - encryptionType: 
EncryptMethod.ECIES, - command: PROTOCOL_COMMANDS.ENCRYPT, - caller: req.caller, - nonce: req.query.nonce as string, - consumerAddress: req.query.consumerAddress as string, - signature: req.query.signature as string - }) - if (result.stream) { - const encryptedData = await streamToString(result.stream as Readable) - res.header('Content-Type', 'application/octet-stream') - res.status(200).send(encryptedData) - } else { - res.status(result.status.httpStatus).send(result.status.error) +providerRoutes.post( + `${SERVICES_API_BASE_PATH}/encrypt`, + express.raw({ limit: '25mb' }), + async (req, res) => { + try { + const data = req.body.toString() + if (!data) { + res.status(400).send('Missing required body') + return + } + const result = await new EncryptHandler(req.oceanNode).handle({ + blob: data, + encoding: 'string', + encryptionType: EncryptMethod.ECIES, + command: PROTOCOL_COMMANDS.ENCRYPT, + caller: req.caller, + nonce: req.query.nonce as string, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string + }) + if (result.stream) { + const encryptedData = await streamToString(result.stream as Readable) + res.header('Content-Type', 'application/octet-stream') + res.status(200).send(encryptedData) + } else { + res.status(result.status.httpStatus).send(result.status.error) + } + } catch (error) { + HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error: ${error}`) + res.status(500).send('Internal Server Error') } - } catch (error) { - HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error: ${error}`) - res.status(500).send('Internal Server Error') } -}) +) // There are two ways of encrypting a file: @@ -81,45 +85,30 @@ providerRoutes.post(`${SERVICES_API_BASE_PATH}/encrypt`, async (req, res) => { // Headers // X-Encrypted-By: our_node_id // X-Encrypted-Method: aes or ecies -providerRoutes.post(`${SERVICES_API_BASE_PATH}/encryptFile`, async (req, res) => { - const writeResponse = async ( - result: P2PCommandResponse, - encryptMethod: EncryptMethod - ) => { - if (result.stream) { - const encryptedData = await streamToString(result.stream as Readable) - res.set(result.status.headers) - res.status(200).send(encryptedData) - } else { - res.status(result.status.httpStatus).send(result.status.error) +providerRoutes.post( + `${SERVICES_API_BASE_PATH}/encryptFile`, + express.raw({ limit: '25mb', type: 'application/octet-stream' }), + express.json(), + async (req, res) => { + const writeResponse = async ( + result: P2PCommandResponse, + encryptMethod: EncryptMethod + ) => { + if (result.stream) { + const encryptedData = await streamToString(result.stream as Readable) + res.set(result.status.headers) + res.status(200).send(encryptedData) + } else { + res.status(result.status.httpStatus).send(result.status.error) + } } - } - const getEncryptedData = async ( - encryptMethod: EncryptMethod.AES | EncryptMethod.ECIES, - input: Buffer - ) => { - const result = await new EncryptFileHandler(req.oceanNode).handle({ - rawData: input, - encryptionType: encryptMethod, - command: PROTOCOL_COMMANDS.ENCRYPT_FILE, - caller: req.caller, - nonce: req.query.nonce as string, - consumerAddress: req.query.consumerAddress as string, - signature: req.query.signature as string - }) - return result - } - - try { - const encryptMethod: EncryptMethod = getEncryptMethodFromString( - req.query.encryptMethod as string - ) - let result: P2PCommandResponse - if (req.is('application/json')) { - // body as fileObject - result = await new EncryptFileHandler(req.oceanNode).handle({ - files: req.body as 
StorageObject, + const getEncryptedData = async ( + encryptMethod: EncryptMethod.AES | EncryptMethod.ECIES, + input: Buffer + ) => { + const result = await new EncryptFileHandler(req.oceanNode).handle({ + rawData: input, encryptionType: encryptMethod, command: PROTOCOL_COMMANDS.ENCRYPT_FILE, caller: req.caller, @@ -127,31 +116,53 @@ providerRoutes.post(`${SERVICES_API_BASE_PATH}/encryptFile`, async (req, res) => consumerAddress: req.query.consumerAddress as string, signature: req.query.signature as string }) - return await writeResponse(result, encryptMethod) - // raw data on body - } else if (req.is('application/octet-stream') || req.is('multipart/form-data')) { - if (req.is('application/octet-stream')) { - result = await getEncryptedData(encryptMethod, req.body) - return await writeResponse(result, encryptMethod) - } else { - // multipart/form-data - const data: Buffer[] = [] - req.on('data', function (chunk) { - data.push(chunk) + return result + } + + try { + const encryptMethod: EncryptMethod = getEncryptMethodFromString( + req.query.encryptMethod as string + ) + let result: P2PCommandResponse + if (req.is('application/json')) { + // body as fileObject + result = await new EncryptFileHandler(req.oceanNode).handle({ + files: req.body as StorageObject, + encryptionType: encryptMethod, + command: PROTOCOL_COMMANDS.ENCRYPT_FILE, + caller: req.caller, + nonce: req.query.nonce as string, + consumerAddress: req.query.consumerAddress as string, + signature: req.query.signature as string }) - req.on('end', async function () { - result = await getEncryptedData(encryptMethod, Buffer.concat(data)) + return await writeResponse(result, encryptMethod) + // raw data on body + } else if (req.is('application/octet-stream') || req.is('multipart/form-data')) { + if (req.is('application/octet-stream')) { + result = await getEncryptedData(encryptMethod, req.body) return await writeResponse(result, encryptMethod) - }) + } else { + // multipart/form-data + const data: Buffer[] = [] + req.on('data', function (chunk) { + data.push(chunk) + }) + req.on('end', async function () { + result = await getEncryptedData(encryptMethod, Buffer.concat(data)) + return await writeResponse(result, encryptMethod) + }) + } + } else { + res + .status(400) + .send('Invalid request (missing body data or invalid content-type)') } - } else { - res.status(400).send('Invalid request (missing body data or invalid content-type)') + } catch (error) { + HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error: ${error}`) + res.status(500).send('Internal Server Error') } - } catch (error) { - HTTP_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error: ${error}`) - res.status(500).send('Internal Server Error') } -}) +) providerRoutes.get(`${SERVICES_API_BASE_PATH}/initialize`, async (req, res) => { try { @@ -189,7 +200,7 @@ providerRoutes.get(`${SERVICES_API_BASE_PATH}/nonce`, async (req, res) => { res.status(400).send('Missing required parameter: "userAddress"') return } - const nonceDB = req.oceanNode.getDatabase().nonce + const nonceDB = (await req.oceanNode.getDatabase()).nonce const result = await getNonce(nonceDB, userAddress) if (result.stream) { res.json({ nonce: await streamToString(result.stream as Readable) }) diff --git a/src/components/httpRoutes/requestValidator.ts b/src/components/httpRoutes/requestValidator.ts index dd505fca4..a8a17de72 100644 --- a/src/components/httpRoutes/requestValidator.ts +++ b/src/components/httpRoutes/requestValidator.ts @@ -1,5 +1,4 @@ import { Request, Response } from 'express' -import { getConfiguration } from 
'../../utils/config.js' import { HTTP_LOGGER } from '../../utils/logging/common.js' import { OceanNodeConfig } from '../../@types/OceanNode.js' import { @@ -15,7 +14,7 @@ export const requestValidator = async function (req: Request, res: Response, nex req.socket.remoteAddress || '') as string - const configuration = await getConfiguration() + const configuration = req.oceanNode.getConfig() const ipValidation = await checkIP(requestIP, configuration) if (!ipValidation.valid) { diff --git a/src/components/httpRoutes/rootEndpoint.ts b/src/components/httpRoutes/rootEndpoint.ts index 7d447b057..6a0355f66 100644 --- a/src/components/httpRoutes/rootEndpoint.ts +++ b/src/components/httpRoutes/rootEndpoint.ts @@ -1,12 +1,11 @@ import express from 'express' import { HTTP_LOGGER } from '../../utils/logging/common.js' -import { getConfiguration } from '../../utils/index.js' import { getAllServiceEndpoints } from './index.js' import { getNodeOwnerInfo } from './utils.js' export const rootEndpointRoutes = express.Router() -rootEndpointRoutes.get('/', async (req, res) => { - const config = await getConfiguration() +rootEndpointRoutes.get('/', (req, res) => { + const config = req.oceanNode.getConfig() if (!config.supportedNetworks) { HTTP_LOGGER.warn(`Supported networks not defined`) } diff --git a/src/index.ts b/src/index.ts index e8c740875..14fa24301 100644 --- a/src/index.ts +++ b/src/index.ts @@ -23,24 +23,18 @@ import cors from 'cors' import { scheduleCronJobs } from './utils/cronjobs/scheduleCronJobs.js' import { requestValidator } from './components/httpRoutes/requestValidator.js' import { hasValidDBConfiguration } from './utils/database.js' -import { assertFeeTokensSupportedByOec } from './utils/feeTokenValidation.js' +import { assertConfiguredFeeTokensSupportedByOec } from './utils/feeTokenValidation.js' const app: Express = express() process.on('uncaughtException', (err) => { OCEAN_NODE_LOGGER.error(`Uncaught exception: ${err.message}`) - if (err?.stack) { - OCEAN_NODE_LOGGER.error(`Uncaught exception stack: ${err.stack}`) - } process.exit(1) }) process.on('unhandledRejection', (err) => { OCEAN_NODE_LOGGER.error( `Unhandled rejection: ${err instanceof Error ? err.message : String(err)}` ) - if (err instanceof Error && err.stack) { - OCEAN_NODE_LOGGER.error(`Unhandled rejection stack: ${err.stack}`) - } process.exit(1) }) @@ -103,15 +97,11 @@ if (!hasValidDBConfiguration(config.dbConfig)) { // KeyManager will determine provider type from config.keys.type and initialize in constructor const keyManager = new KeyManager(config) const blockchainRegistry = new BlockchainRegistry(keyManager, config) -if (config.skipFeeTokenValidation) { - OCEAN_NODE_LOGGER.warn('Skipping FEE_TOKENS validation against OEC contracts') -} else { - try { - await assertFeeTokensSupportedByOec(config, blockchainRegistry) - } catch (error) { - OCEAN_NODE_LOGGER.error(error instanceof Error ? error.message : String(error)) - process.exit(1) - } +try { + await assertConfiguredFeeTokensSupportedByOec(config, blockchainRegistry) +} catch (err) { + OCEAN_NODE_LOGGER.error(err instanceof Error ? 
err.message : String(err)) + process.exit(1) } if (config.hasP2P) { @@ -123,7 +113,7 @@ if (config.hasP2P) { await node.start() } if (config.hasIndexer && dbconn) { - indexer = new OceanIndexer(dbconn, config.indexingNetworks, blockchainRegistry) + indexer = new OceanIndexer(dbconn, config, blockchainRegistry) } if (dbconn) { provider = new OceanProvider(dbconn) @@ -140,7 +130,7 @@ const oceanNode = OceanNode.getInstance( keyManager, blockchainRegistry ) -oceanNode.addC2DEngines() +await oceanNode.addC2DEngines() function removeExtraSlashes(req: any, res: any, next: any) { req.url = req.url.replace(/\/{2,}/g, '/') @@ -148,14 +138,12 @@ function removeExtraSlashes(req: any, res: any, next: any) { } if (config.hasHttp) { - // allow up to 25Mb file upload - app.use(express.raw({ limit: '25mb' })) app.use(cors()) - app.use(requestValidator, (req, res, next) => { + app.use((req, res, next) => { req.caller = req.headers['x-forwarded-for'] || req.socket.remoteAddress req.oceanNode = oceanNode next() - }) + }, requestValidator) // Integrate static file serving middleware app.use(removeExtraSlashes) diff --git a/src/test/.env.test b/src/test/.env.test index b322075a3..5c8b01ed7 100644 --- a/src/test/.env.test +++ b/src/test/.env.test @@ -11,9 +11,7 @@ ARWEAVE_GATEWAY=https://arweave.net/ NODE1_PRIVATE_KEY=0xcb345bd2b11264d523ddaf383094e2675c420a17511c3102a53817f13474a7ff NODE2_PRIVATE_KEY=0x3634cc4a3d2694a1186a7ce545f149e022eea103cc254d18d08675104bb4b5ac INDEXER_INTERVAL=9000 -ACCESS_KEY_ID_S3=DO00XNBWKZJ6N2JBGQPR -SECRET_ACCESS_KEY_S3=x87PklXIyyboglsDoZdOGt7YNvPWMpIjO3lOszv43sI -ADDRESS_FILE=${HOME}/.ocean/ocean-contracts/artifacts/address.json +#ADDRESS_FILE=${HOME}/.ocean/ocean-contracts/artifacts/address.json LOG_LEVEL=debug DB_TYPE=elasticsearch SKIP_FEE_TOKEN_VALIDATION=true diff --git a/src/test/integration/accessLists.test.ts b/src/test/integration/accessLists.test.ts index 18e62fd7d..8debc2717 100644 --- a/src/test/integration/accessLists.test.ts +++ b/src/test/integration/accessLists.test.ts @@ -20,7 +20,7 @@ import { assert, expect } from 'chai' import { checkAddressOnAccessListWithSigner } from '../../utils/accessList.js' import { KeyManager } from '../../components/KeyManager/index.js' -describe('Should deploy some accessLists before all other tests.', () => { +describe('********** AccessLists tests', () => { let config: OceanNodeConfig let provider: JsonRpcProvider const mockSupportedNetworks: RPCS = getMockSupportedNetworks() @@ -139,7 +139,9 @@ describe('Should deploy some accessLists before all other tests.', () => { config = await getConfiguration() }) - + after(async () => { + await tearDownEnvironment(previousConfiguration) + }) it('should have some access lists', () => { expect(EXISTING_ACCESSLISTS.size > 0, 'Should have at least 1 accessList') }) @@ -201,8 +203,4 @@ describe('Should deploy some accessLists before all other tests.', () => { } } }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - }) }) diff --git a/src/test/integration/algorithmsAccess.test.ts b/src/test/integration/algorithmsAccess.test.ts index 677f7343c..2272f7a79 100644 --- a/src/test/integration/algorithmsAccess.test.ts +++ b/src/test/integration/algorithmsAccess.test.ts @@ -50,7 +50,7 @@ import { createHash } from 'crypto' import { getAlgoChecksums } from '../../components/core/compute/utils.js' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('Trusted algorithms Flow', () => { +describe('********** Trusted algorithms Flow', () => { let 
previousConfiguration: OverrideEnvConfig[] let config: OceanNodeConfig let dbconn: Database @@ -107,12 +107,17 @@ describe('Trusted algorithms Flow', () => { ) config = await getConfiguration(true) dbconn = await Database.init(config.dbConfig) - oceanNode = await OceanNode.getInstance(config, dbconn, null, null, null) - indexer = new OceanIndexer( + oceanNode = await OceanNode.getInstance( + config, dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry + null, + null, + null, + null, + null, + true ) + indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) oceanNode.addC2DEngines() @@ -130,7 +135,10 @@ describe('Trusted algorithms Flow', () => { publisherAccount ) }) - + after(async () => { + await tearDownEnvironment(previousConfiguration) + await oceanNode.tearDownAll() + }) it('Sets up compute envs', () => { assert(oceanNode, 'Failed to instantiate OceanNode') assert(config.c2dClusters, 'Failed to get c2dClusters') @@ -145,6 +153,7 @@ describe('Trusted algorithms Flow', () => { ) publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) const computeDatasetResult = await waitToIndex( + oceanNode, publishedComputeDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT @@ -157,6 +166,7 @@ describe('Trusted algorithms Flow', () => { }` ) const algoDatasetResult = await waitToIndex( + oceanNode, publishedAlgoDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT @@ -286,6 +296,7 @@ describe('Trusted algorithms Flow', () => { const txReceipt = await setMetaDataTx.wait() assert(txReceipt, 'set metadata failed') publishedComputeDataset = await waitToIndex( + oceanNode, publishedComputeDataset.ddo.id, EVENTS.METADATA_UPDATED, DEFAULT_TEST_TIMEOUT * 2, @@ -493,8 +504,4 @@ describe('Trusted algorithms Flow', () => { jobId = jobs[0].jobId assert(jobId) }) - after(async () => { - await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/auth.test.ts b/src/test/integration/auth.test.ts index 6fa20e852..03d01f8cd 100644 --- a/src/test/integration/auth.test.ts +++ b/src/test/integration/auth.test.ts @@ -24,7 +24,7 @@ import { expect } from 'chai' import { ValidateDDOHandler } from '../../components/core/handler/ddoHandler.js' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('Auth Token Integration Tests', () => { +describe('********** Auth Token Integration Tests', () => { let config: OceanNodeConfig let database: Database let provider: JsonRpcProvider @@ -49,13 +49,23 @@ describe('Auth Token Integration Tests', () => { config = await getConfiguration(true) database = await Database.init(config.dbConfig) - oceanNode = await OceanNode.getInstance(config, database) + oceanNode = await OceanNode.getInstance( + config, + database, + null, + null, + null, + null, + null, + true + ) provider = new JsonRpcProvider(mockSupportedNetworks['8996'].rpc) consumerAccount = (await provider.getSigner(1)) as Signer }) after(async () => { + await oceanNode.tearDownAll() await tearDownEnvironment(previousConfiguration) }) diff --git a/src/test/integration/compute.test.ts b/src/test/integration/compute.test.ts index fcf491e19..bd61f9c54 100644 --- a/src/test/integration/compute.test.ts +++ b/src/test/integration/compute.test.ts @@ -130,7 +130,7 @@ export async function waitForAllJobsToFinish( } } -describe('Compute', () => { +describe('********** Compute', () => { let previousConfiguration: OverrideEnvConfig[] let config: OceanNodeConfig 
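// Sketch (illustrative, not part of this patch): the setup/teardown shape the
// integration-test hunks in this diff converge on. The trailing `true` passed to
// OceanNode.getInstance() forces a fresh singleton (per the comment added in
// configAdmin.test.ts), OceanIndexer is constructed with the full OceanNodeConfig
// instead of config.indexingNetworks, waitToIndex() now takes the node as its first
// argument, and teardown goes through oceanNode.tearDownAll() rather than
// indexer.stopAllChainIndexers(). Import paths follow the test files touched in this
// diff; the env-override argument to setupEnvironment() is elided.
import { OceanNode } from '../../OceanNode.js'
import { OceanIndexer } from '../../components/Indexer/index.js'
import { Database } from '../../components/database/index.js'
import { getConfiguration } from '../../utils/config.js'
import { setupEnvironment, tearDownEnvironment } from '../utils/utils.js'

let oceanNode: OceanNode
let indexer: OceanIndexer
let previousConfiguration: any

before(async () => {
  previousConfiguration = await setupEnvironment(/* suite-specific env overrides */)
  const config = await getConfiguration(true) // force a config reload
  const dbconn = await Database.init(config.dbConfig)
  // final argument requests a brand-new singleton so config from an earlier test file
  // cannot leak into this suite
  oceanNode = await OceanNode.getInstance(config, dbconn, null, null, null, null, null, true)
  indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry)
  oceanNode.addIndexer(indexer)
  await oceanNode.addC2DEngines()
})

after(async () => {
  await oceanNode.tearDownAll() // replaces the old per-indexer stopAllChainIndexers()
  await tearDownEnvironment(previousConfiguration)
})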
let dbconn: Database @@ -208,11 +208,7 @@ describe('Compute', () => { } oceanNode = OceanNode.getInstance(config, dbconn, null, null, null, null, null, true) - indexer = new OceanIndexer( - dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry - ) + indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) await oceanNode.addC2DEngines() @@ -241,7 +237,10 @@ describe('Compute', () => { publisherAccount ) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('Sets up compute envs', () => { assert(oceanNode, 'Failed to instantiate OceanNode') assert(config.c2dClusters, 'Failed to get c2dClusters') @@ -253,6 +252,7 @@ describe('Compute', () => { publishedComputeDataset = await publishAsset(computeAsset, publisherAccount) publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) const computeDatasetResult = await waitToIndex( + oceanNode, publishedComputeDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT @@ -264,6 +264,7 @@ describe('Compute', () => { ) } const algoDatasetResult = await waitToIndex( + oceanNode, publishedAlgoDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT @@ -315,6 +316,7 @@ describe('Compute', () => { const txReceipt = await setMetaDataTx.wait() assert(txReceipt, 'set metadata failed') publishedComputeDataset = await waitToIndex( + oceanNode, publishedComputeDataset.ddo.id, EVENTS.METADATA_UPDATED, DEFAULT_TEST_TIMEOUT * 2, @@ -1491,6 +1493,7 @@ describe('Compute', () => { it('should getAlgoChecksums', async function () { const { ddo, wasTimeout } = await waitToIndex( + oceanNode, algoDDO.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT, @@ -1518,6 +1521,7 @@ describe('Compute', () => { it('should validateAlgoForDataset', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 10) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, algoDDO.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2, @@ -1534,6 +1538,7 @@ describe('Compute', () => { config ) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, datasetDDO.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2, @@ -2598,14 +2603,9 @@ describe('Compute', () => { ) }) }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - await indexer.stopAllChainIndexers() - }) }) -describe('Compute Access Restrictions', () => { +describe('********** Compute Access Restrictions', () => { let previousConfiguration: OverrideEnvConfig[] let config: OceanNodeConfig let dbconn: Database @@ -2748,11 +2748,7 @@ describe('Compute Access Restrictions', () => { null, true ) - const indexer = new OceanIndexer( - dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry - ) + const indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) await oceanNode.addC2DEngines() @@ -2760,17 +2756,22 @@ describe('Compute Access Restrictions', () => { publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) await waitToIndex( + oceanNode, publishedComputeDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT ) await waitToIndex( + oceanNode, publishedAlgoDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT ) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('Get compute environments with address restrictions', async () => { const getEnvironmentsTask = { command: PROTOCOL_COMMANDS.COMPUTE_GET_ENVIRONMENTS } const 
response = await new ComputeGetEnvironmentsHandler(oceanNode).handle( @@ -2804,10 +2805,6 @@ describe('Compute Access Restrictions', () => { const response = await new FreeComputeStartHandler(oceanNode).handle(command) assert(response.status.httpStatus === 403, 'Should get 403 access denied') }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - }) }) describe('Access List restrictions', () => { @@ -2943,11 +2940,7 @@ describe('Compute Access Restrictions', () => { null, true ) - const indexer = new OceanIndexer( - dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry - ) + const indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) await oceanNode.addC2DEngines() @@ -2955,17 +2948,22 @@ describe('Compute Access Restrictions', () => { publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) await waitToIndex( + oceanNode, publishedComputeDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT ) await waitToIndex( + oceanNode, publishedAlgoDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT ) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('Get compute environments with access list restrictions', async () => { const getEnvironmentsTask = { command: PROTOCOL_COMMANDS.COMPUTE_GET_ENVIRONMENTS } const response = await new ComputeGetEnvironmentsHandler(oceanNode).handle( @@ -3015,10 +3013,6 @@ describe('Compute Access Restrictions', () => { console.log(response) expect(response.status.httpStatus).to.not.equal(403) }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - }) }) describe('Payment Claim Timer and JobSettle Status', () => { @@ -3073,11 +3067,7 @@ describe('Compute Access Restrictions', () => { null, true ) - const indexer = new OceanIndexer( - dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry - ) + const indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) await oceanNode.addC2DEngines() @@ -3115,6 +3105,7 @@ describe('Compute Access Restrictions', () => { }) after(async () => { + await oceanNode.tearDownAll() await tearDownEnvironment(previousConfiguration) }) diff --git a/src/test/integration/configAdmin.test.ts b/src/test/integration/configAdmin.test.ts index 1a67667f0..1012aa91a 100644 --- a/src/test/integration/configAdmin.test.ts +++ b/src/test/integration/configAdmin.test.ts @@ -21,13 +21,15 @@ import { Readable } from 'stream' import { expect } from 'chai' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('Config Admin Endpoints Integration Tests', () => { +describe('********** Config Admin Endpoints Integration Tests', () => { let config: OceanNodeConfig let database: Database let adminAccount: Signer let nonAdminAccount: Signer let previousConfiguration: OverrideEnvConfig[] let oceanNode: OceanNode + let savedMaxReqPerMinute: string | undefined + let savedMaxConnectionsPerMinute: string | undefined const mockSupportedNetworks: RPCS = getMockSupportedNetworks() @@ -53,12 +55,41 @@ describe('Config Admin Endpoints Integration Tests', () => { ) ) + // buildMergedConfig merges env over file; these vars would override pushed rate limits. 
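// Sketch (illustrative, not part of this patch): why the two rate-limit variables get
// stashed at this point. buildMergedConfig() lets process.env win over file-based
// config, so a MAX_REQ_PER_MINUTE or MAX_CONNECTIONS_PER_MINUTE set in the surrounding
// environment would silently override the limits this suite pushes through the admin
// API. The helpers below are a generic form of the save/delete/restore dance in the
// hunk that follows; they are illustrative only.
function stashEnv(keys: string[]): Map<string, string | undefined> {
  const saved = new Map<string, string | undefined>()
  for (const key of keys) {
    saved.set(key, process.env[key])
    delete process.env[key] // ensure getConfiguration(true) does not pick it up
  }
  return saved
}

function restoreEnv(saved: Map<string, string | undefined>): void {
  for (const [key, value] of saved) {
    if (value === undefined) delete process.env[key]
    else process.env[key] = value
  }
}
// usage: const saved = stashEnv(['MAX_REQ_PER_MINUTE', 'MAX_CONNECTIONS_PER_MINUTE'])
// ...run the suite, then restoreEnv(saved) and reload with getConfiguration(true)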
+ savedMaxReqPerMinute = process.env.MAX_REQ_PER_MINUTE + savedMaxConnectionsPerMinute = process.env.MAX_CONNECTIONS_PER_MINUTE + delete process.env.MAX_REQ_PER_MINUTE + delete process.env.MAX_CONNECTIONS_PER_MINUTE + config = await getConfiguration(true) database = await Database.init(config.dbConfig) - oceanNode = OceanNode.getInstance(config, database) + // Force a new singleton so this suite sees env-based config (e.g. ALLOWED_ADMINS); + // an instance from an earlier test file would otherwise keep stale config. + oceanNode = OceanNode.getInstance( + config, + database, + null, + null, + null, + null, + null, + true + ) }) after(async () => { + if (savedMaxReqPerMinute !== undefined) { + process.env.MAX_REQ_PER_MINUTE = savedMaxReqPerMinute + } else { + delete process.env.MAX_REQ_PER_MINUTE + } + if (savedMaxConnectionsPerMinute !== undefined) { + process.env.MAX_CONNECTIONS_PER_MINUTE = savedMaxConnectionsPerMinute + } else { + delete process.env.MAX_CONNECTIONS_PER_MINUTE + } + await getConfiguration(true) + await oceanNode.tearDownAll() await tearDownEnvironment(previousConfiguration) }) diff --git a/src/test/integration/configDatabase.test.ts b/src/test/integration/configDatabase.test.ts index 401075947..7b10480e7 100644 --- a/src/test/integration/configDatabase.test.ts +++ b/src/test/integration/configDatabase.test.ts @@ -26,11 +26,12 @@ const emptyDBConfig: OceanNodeDBConfig = { dbType: null } -describe('Config Database', () => { +describe('********** Config Database', () => { let database: Database let oceanIndexer: OceanIndexer let initialVersionNull: any let previousConfiguration: OverrideEnvConfig[] + let oceanNode: OceanNode before(async () => { database = await Database.init(versionConfig) @@ -61,15 +62,24 @@ describe('Config Database', () => { initialVersionNull = await oceanIndexer.getDatabase().sqliteConfig.retrieveValue() assert(initialVersionNull.value === null, 'Initial version should be null') }) - - const oceanNode = await OceanNode.getInstance(await getConfiguration(true), database) - oceanIndexer = new OceanIndexer( + const config = await getConfiguration(true) + oceanNode = await OceanNode.getInstance( + config, database, - getMockSupportedNetworks(), - oceanNode.blockchainRegistry + null, + null, + null, + null, + null, + true ) + oceanIndexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(oceanIndexer) }) + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('check version DB instance of SQL Lite', () => { expect(database.sqliteConfig).to.be.instanceOf(SQLLiteConfigDatabase) @@ -105,10 +115,6 @@ describe('Config Database', () => { version = await oceanIndexer.getDatabase().sqliteConfig.retrieveValue() assert(version.value === updatedVersion, `Version should be ${updatedVersion}`) }) - after(async () => { - oceanIndexer.stopAllChainIndexers() - await tearDownEnvironment(previousConfiguration) - }) }) describe('VersionDatabase CRUD (without Elastic or Typesense config)', () => { diff --git a/src/test/integration/credentials.test.ts b/src/test/integration/credentials.test.ts index 6bd7d83ba..b2e2fed48 100644 --- a/src/test/integration/credentials.test.ts +++ b/src/test/integration/credentials.test.ts @@ -27,8 +27,7 @@ import { ENVIRONMENT_VARIABLES, EVENTS, PROTOCOL_COMMANDS, - getConfiguration, - printCurrentConfig + getConfiguration } from '../../utils/index.js' import { DownloadHandler } from '../../components/core/handler/downloadHandler.js' import { 
GetDdoHandler } from '../../components/core/handler/ddoHandler.js' @@ -67,7 +66,7 @@ import { ComputeGetEnvironmentsHandler } from '../../components/core/compute/env import { ComputeInitializeCommand } from '../../@types/commands.js' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('[Credentials Flow] - Should run a complete node flow.', () => { +describe('********** [Credentials Flow] - Should run a complete node flow.', () => { let config: OceanNodeConfig let oceanNode: OceanNode let provider: JsonRpcProvider @@ -137,12 +136,17 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { config = await getConfiguration(true) // Force reload the configuration const database = await Database.init(config.dbConfig) - oceanNode = OceanNode.getInstance(config, database) - const indexer = new OceanIndexer( + oceanNode = OceanNode.getInstance( + config, database, - config.indexingNetworks, - oceanNode.blockchainRegistry + null, + null, + null, + null, + null, + true ) + const indexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) await oceanNode.addC2DEngines() @@ -157,7 +161,10 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { ] consumerAddresses = await Promise.all(consumerAccounts.map((a) => a.getAddress())) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('should deploy accessList contract', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 2) let networkArtifacts = getOceanArtifactsAdressesByChainId(DEVELOPMENT_CHAIN_ID) @@ -223,6 +230,7 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { did = publishedDataset.ddo.id const { ddo, wasTimeout } = await waitToIndex( + oceanNode, did, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 3 @@ -232,6 +240,7 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { } didWithMatchAll = publishedDatasetWithMatchAll.ddo.id const resolvedDdoWithMatchAll = await waitToIndex( + oceanNode, didWithMatchAll, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 3 @@ -243,6 +252,7 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { computeDid = publishedComputeDataset.ddo.id const resolvedComputeDdo = await waitToIndex( + oceanNode, computeDid, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 3 @@ -255,6 +265,7 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { algoDid = publishedAlgo.ddo.id const resolvedAlgo = await waitToIndex( + oceanNode, algoDid, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 3 @@ -587,7 +598,6 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { const nonAuthorizedAccount = (await provider.getSigner(4)) as Signer const authorizedAccount = await publisherAccount.getAddress() - printCurrentConfig() expect( config.authorizedPublishers.length === 1 && config.authorizedPublishers[0] === authorizedAccount, @@ -601,6 +611,7 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { // will timeout const { ddo, wasTimeout } = await waitToIndex( + oceanNode, publishedDataset?.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT @@ -608,9 +619,4 @@ describe('[Credentials Flow] - Should run a complete node flow.', () => { assert(ddo === null && wasTimeout === true, 'DDO should NOT have been indexed') }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - 
oceanNode.getIndexer().stopAllChainIndexers() - }) }) diff --git a/src/test/integration/database.test.ts b/src/test/integration/database.test.ts index a47ee2114..519d378e0 100644 --- a/src/test/integration/database.test.ts +++ b/src/test/integration/database.test.ts @@ -25,7 +25,7 @@ const emptyDBConfig: OceanNodeDBConfig = { dbType: null } -describe('Database', () => { +describe('********** Database', () => { let database: Database before(async () => { @@ -37,7 +37,7 @@ describe('Database', () => { }) }) -describe('DdoDatabase CRUD', () => { +describe('********** DdoDatabase CRUD', () => { let database: Database const ddoWithInvalidDid = { hashType: 'sha256', @@ -87,7 +87,7 @@ describe('DdoDatabase CRUD', () => { }) }) -describe('NonceDatabase CRUD - SQL lite (With typesense DB config)', () => { +describe('********** NonceDatabase CRUD - SQL lite (With typesense DB config)', () => { let database: Database before(async () => { @@ -123,7 +123,7 @@ describe('NonceDatabase CRUD - SQL lite (With typesense DB config)', () => { }) }) -describe('NonceDatabase CRUD (without Elastic or Typesense config)', () => { +describe('********** NonceDatabase CRUD (without Elastic or Typesense config)', () => { let database: Database before(async () => { @@ -159,7 +159,7 @@ describe('NonceDatabase CRUD (without Elastic or Typesense config)', () => { }) }) -describe('IndexerDatabase CRUD', () => { +describe('********** IndexerDatabase CRUD', () => { let database: Database let existsPrevious: any = {} @@ -203,7 +203,7 @@ describe('IndexerDatabase CRUD', () => { }) }) -describe('OrderDatabase CRUD', () => { +describe('********** OrderDatabase CRUD', () => { let database: Database before(async () => { @@ -263,7 +263,7 @@ describe('OrderDatabase CRUD', () => { }) }) -describe('Typesense OrderDatabase CRUD', () => { +describe('********** Typesense OrderDatabase CRUD', () => { let database: AbstractOrderDatabase before(async () => { @@ -308,7 +308,7 @@ describe('Typesense OrderDatabase CRUD', () => { }) }) -describe('Elasticsearch OrderDatabase CRUD', () => { +describe('********** Elasticsearch OrderDatabase CRUD', () => { let database: AbstractOrderDatabase before(async () => { @@ -356,7 +356,7 @@ describe('Elasticsearch OrderDatabase CRUD', () => { }) }) -describe('DdoStateQuery', () => { +describe('********** DdoStateQuery', () => { it('should build Typesense query for did', async () => { const query = (await DatabaseFactory.createDdoStateQuery(typesenseConfig)).buildQuery( 'did:op:abc123' @@ -409,7 +409,7 @@ describe('DdoStateQuery', () => { }) }) -describe('MetadataQuery', () => { +describe('********** MetadataQuery', () => { it('should return a Typesense query when DB is Typesense and a Typesense query is passed', async () => { const typesenseQuery = { q: '*', diff --git a/src/test/integration/dockerRegistryAuth.test.ts b/src/test/integration/dockerRegistryAuth.test.ts index 394bbd1df..14d76e528 100644 --- a/src/test/integration/dockerRegistryAuth.test.ts +++ b/src/test/integration/dockerRegistryAuth.test.ts @@ -10,10 +10,14 @@ import { expect, assert } from 'chai' import { C2DEngineDocker } from '../../components/c2d/compute_engine_docker.js' import { C2DClusterInfo, C2DClusterType } from '../../@types/C2D/C2D.js' -import { dockerRegistrysAuth } from '../../@types/OceanNode.js' import { DockerRegistryAuthSchema } from '../../utils/config/schemas.js' +import { getConfiguration } from '../../utils/index.js' +describe('********** Docker Registry Authentication Integration Tests', () => { + let config: any + 
before(async () => { + config = await getConfiguration(true) + }) -describe('Docker Registry Authentication Integration Tests', () => { describe('Public registry access (no credentials)', () => { it('should successfully fetch manifest for public Docker Hub image', async () => { // Create minimal engine instance for testing @@ -32,7 +36,7 @@ describe('Docker Registry Authentication Integration Tests', () => { null as any, null as any, null as any, - {} // No auth config + config ) // Test with a well-known public image @@ -59,7 +63,7 @@ describe('Docker Registry Authentication Integration Tests', () => { null as any, null as any, null as any, - {} + config ) // Use a simple image reference that will default to Docker Hub @@ -73,7 +77,7 @@ describe('Docker Registry Authentication Integration Tests', () => { describe('Registry authentication configuration', () => { it('should store and retrieve username/password credentials', () => { - const testAuth: dockerRegistrysAuth = { + config.dockerRegistrysAuth = { 'https://registry-1.docker.io': { username: 'testuser', password: 'testpass', @@ -89,13 +93,12 @@ describe('Docker Registry Authentication Integration Tests', () => { }, tempFolder: '/tmp/test-docker-auth' } - const engineWithAuth = new C2DEngineDocker( clusterConfig, null as any, null as any, null as any, - testAuth + config ) // Verify that getDockerRegistryAuth returns the credentials @@ -109,7 +112,7 @@ describe('Docker Registry Authentication Integration Tests', () => { it('should use auth string when provided', () => { const preEncodedAuth = Buffer.from('testuser:testpass').toString('base64') - const testAuth: dockerRegistrysAuth = { + config.dockerRegistrysAuth = { 'https://registry-1.docker.io': { username: 'testuser', password: 'testpass', @@ -131,7 +134,7 @@ describe('Docker Registry Authentication Integration Tests', () => { null as any, null as any, null as any, - testAuth + config ) const auth = (engineWithAuth as any).getDockerRegistryAuth( @@ -150,13 +153,13 @@ describe('Docker Registry Authentication Integration Tests', () => { }, tempFolder: '/tmp/test-docker-3' } - + config.dockerRegistrysAuth = {} const dockerEngine = new C2DEngineDocker( clusterConfig, null as any, null as any, null as any, - {} + config ) const auth = (dockerEngine as any).getDockerRegistryAuth( @@ -166,7 +169,7 @@ describe('Docker Registry Authentication Integration Tests', () => { }) it('should handle multiple registry configurations', () => { - const testAuth: dockerRegistrysAuth = { + config.dockerRegistrysAuth = { 'https://registry-1.docker.io': { username: 'user1', password: 'pass1', @@ -193,7 +196,7 @@ describe('Docker Registry Authentication Integration Tests', () => { null as any, null as any, null as any, - testAuth + config ) const dockerHubAuth = (engineWithAuth as any).getDockerRegistryAuth( @@ -223,13 +226,14 @@ describe('Docker Registry Authentication Integration Tests', () => { }, tempFolder: '/tmp/test-docker-error' } + config.dockerRegistrysAuth = {} const dockerEngine = new C2DEngineDocker( clusterConfig, null as any, null as any, null as any, - {} + config ) try { diff --git a/src/test/integration/download.test.ts b/src/test/integration/download.test.ts index 0e6bfd47c..3aed54e58 100644 --- a/src/test/integration/download.test.ts +++ b/src/test/integration/download.test.ts @@ -45,7 +45,7 @@ import { genericDDO } from '../data/ddo.js' import { homedir } from 'os' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('[Download Flow] - Should run a 
complete node flow.', () => { +describe('********** [Download Flow] - Should run a complete node flow.', () => { let config: OceanNodeConfig let database: Database let oceanNode: OceanNode @@ -91,11 +91,7 @@ describe('[Download Flow] - Should run a complete node flow.', () => { config = await getConfiguration(true) // Force reload the configuration database = await Database.init(config.dbConfig) oceanNode = await OceanNode.getInstance(config, database) - indexer = new OceanIndexer( - database, - config.indexingNetworks, - oceanNode.blockchainRegistry - ) + indexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) let network = getOceanArtifactsAdressesByChainId(DEVELOPMENT_CHAIN_ID) @@ -109,7 +105,10 @@ describe('[Download Flow] - Should run a complete node flow.', () => { consumerAccount = (await provider.getSigner(1)) as Signer anotherConsumer = (await provider.getSigner(2)) as Signer }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('should get node status', async () => { const statusCommand = { command: PROTOCOL_COMMANDS.STATUS, @@ -179,6 +178,7 @@ describe('[Download Flow] - Should run a complete node flow.', () => { publishedDataset = await publishAsset(downloadAsset, publisherAccount) publishedDatasetWithCredentials = await publishAsset(genericDDO, publisherAccount) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, publishedDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 3 @@ -189,6 +189,7 @@ describe('[Download Flow] - Should run a complete node flow.', () => { } const { ddo: ddoWithCredentials, wasTimeout: wasTimeoutCredentials } = await waitToIndex( + oceanNode, publishedDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 3 @@ -316,8 +317,4 @@ describe('[Download Flow] - Should run a complete node flow.', () => { await doCheck() }) - after(async () => { - await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/elasticsearch.test.ts b/src/test/integration/elasticsearch.test.ts index 171b6b727..672772d86 100644 --- a/src/test/integration/elasticsearch.test.ts +++ b/src/test/integration/elasticsearch.test.ts @@ -19,7 +19,7 @@ const dbConfig = { } const elasticsearch: Database = await Database.init(dbConfig) -describe('Elastic Search', () => { +describe('********** Elastic Search', () => { it('Get instances of Elastic Search', () => { expect(elasticsearch.ddo).to.be.instanceOf(ElasticsearchDdoDatabase) expect(elasticsearch.indexer).to.be.instanceOf(ElasticsearchIndexerDatabase) @@ -30,7 +30,7 @@ describe('Elastic Search', () => { }) }) -describe('Elastic Search DDO collections', () => { +describe('********** Elastic Search DDO collections', () => { it('create document in ddo collection', async () => { const result = await elasticsearch.ddo.create(ddo) expect(result.result).to.equal('created') diff --git a/src/test/integration/encryptDecryptDDO.test.ts b/src/test/integration/encryptDecryptDDO.test.ts index 8f115727b..e1b099542 100644 --- a/src/test/integration/encryptDecryptDDO.test.ts +++ b/src/test/integration/encryptDecryptDDO.test.ts @@ -41,7 +41,7 @@ import { homedir } from 'os' import { OceanIndexer } from '../../components/Indexer/index.js' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('Should encrypt and decrypt DDO', () => { +describe('********** Should encrypt and decrypt DDO', () => { let database: Database let 
oceanNode: OceanNode let provider: JsonRpcProvider @@ -108,16 +108,25 @@ describe('Should encrypt and decrypt DDO', () => { ) const config = await getConfiguration() database = await Database.init(config.dbConfig) - oceanNode = OceanNode.getInstance(config, database) - // will be used later - indexer = new OceanIndexer( + oceanNode = OceanNode.getInstance( + config, database, - mockSupportedNetworks, - oceanNode.blockchainRegistry + null, + null, + null, + null, + null, + true ) + + // will be used later + indexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('should publish a dataset', async () => { const tx = await (factoryContract as any).createNftWithErc20( { @@ -413,9 +422,4 @@ describe('Should encrypt and decrypt DDO', () => { const stringDDO = JSON.stringify(genericAsset) expect(decryptedStringDDO).to.equal(stringDDO) }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/encryptFile.test.ts b/src/test/integration/encryptFile.test.ts index d65d10015..f26038652 100644 --- a/src/test/integration/encryptFile.test.ts +++ b/src/test/integration/encryptFile.test.ts @@ -19,7 +19,7 @@ import { import { Database } from '../../components/database/index.js' import { createHashForSignature, safeSign } from '../utils/signature.js' -describe('Encrypt File', () => { +describe('********** Encrypt File', () => { let config: OceanNodeConfig let oceanNode: OceanNode let previousConfiguration: OverrideEnvConfig[] @@ -35,11 +35,23 @@ describe('Encrypt File', () => { ) config = await getConfiguration(true) // Force reload the configuration const dbconn = await Database.init(config.dbConfig) - oceanNode = await OceanNode.getInstance(config, dbconn) + oceanNode = await OceanNode.getInstance( + config, + dbconn, + null, + null, + null, + null, + null, + true + ) const provider = new JsonRpcProvider('http://127.0.0.1:8545') anotherConsumerWallet = (await provider.getSigner(1)) as Signer }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('should encrypt files', async () => { const nonce = Date.now().toString() const messageHashBytes = createHashForSignature( @@ -167,8 +179,4 @@ describe('Encrypt File', () => { 'Unknown error: Invalid storage type: Unknown' ) }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - }) }) diff --git a/src/test/integration/getJobs.test.ts b/src/test/integration/getJobs.test.ts index 3311ec7f9..1a75edf41 100644 --- a/src/test/integration/getJobs.test.ts +++ b/src/test/integration/getJobs.test.ts @@ -66,7 +66,7 @@ function buildJob(overrides: Partial = {}): DBComputeJob { } } -describe('GetJobsHandler integration', () => { +describe('********** GetJobsHandler integration', () => { let previousConfiguration: OverrideEnvConfig[] let oceanNode: OceanNode let db: Database @@ -80,7 +80,16 @@ describe('GetJobsHandler integration', () => { previousConfiguration = await setupEnvironment(TEST_ENV_CONFIG_FILE) const config = await getConfiguration(true) db = await Database.init(config.dbConfig) - oceanNode = await OceanNode.getInstance(config, db) + oceanNode = await OceanNode.getInstance( + config, + db, + null, + null, + null, + null, + null, + true + ) handler = new GetJobsHandler(oceanNode) @@ -107,6 +116,7 @@ describe('GetJobsHandler 
integration', () => { }) after(async () => { + await oceanNode.tearDownAll() await tearDownEnvironment(previousConfiguration) }) diff --git a/src/test/integration/imageCleanup.test.ts b/src/test/integration/imageCleanup.test.ts index 31f60d561..625d9e2f2 100644 --- a/src/test/integration/imageCleanup.test.ts +++ b/src/test/integration/imageCleanup.test.ts @@ -18,7 +18,7 @@ import { KeyManager } from '../../components/KeyManager/index.js' import { C2DClusterInfo } from '../../@types/C2D/C2D.js' import Dockerode from 'dockerode' -describe('Docker Image Cleanup Integration Tests', () => { +describe('********** Docker Image Cleanup Integration Tests', () => { let envOverrides: OverrideEnvConfig[] let config: OceanNodeConfig let db: C2DDatabase @@ -196,7 +196,7 @@ describe('Docker Image Cleanup Integration Tests', () => { escrow = {} as Escrow keyManager = {} as KeyManager - dockerEngine = new C2DEngineDocker(clusterConfig, db, escrow, keyManager, {}) + dockerEngine = new C2DEngineDocker(clusterConfig, db, escrow, keyManager, config) }) it('should track image usage when image is pulled', async () => { diff --git a/src/test/integration/indexer.test.ts b/src/test/integration/indexer.test.ts index 2ef8df0aa..4709148a7 100644 --- a/src/test/integration/indexer.test.ts +++ b/src/test/integration/indexer.test.ts @@ -26,7 +26,7 @@ import { getOceanArtifactsAdresses, getOceanArtifactsAdressesByChainId } from '../../utils/address.js' -import { createFee } from '../../components/core/utils/feesHandler.js' + import { Asset, DDO } from '@oceanprotocol/ddo-js' import { DEFAULT_TEST_TIMEOUT, @@ -48,8 +48,9 @@ import { QueryCommand } from '../../@types/commands.js' import { getConfiguration } from '../../utils/config.js' import { EncryptMethod } from '../../@types/fileObject.js' import { deleteIndexedMetadataIfExists } from '../../utils/asset.js' +import { ProviderFees } from '../../components/core/utils/feesHandler.js' -describe('Indexer stores a new metadata events and orders.', () => { +describe('********** Indexer stores a new metadata events and orders.', () => { let database: Database let oceanNode: OceanNode let provider: JsonRpcProvider @@ -115,11 +116,7 @@ describe('Indexer stores a new metadata events and orders.', () => { null, true ) - indexer = new OceanIndexer( - database, - mockSupportedNetworks, - oceanNode.blockchainRegistry - ) + indexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) let artifactsAddresses = getOceanArtifactsAdressesByChainId(DEVELOPMENT_CHAIN_ID) if (!artifactsAddresses) { @@ -136,7 +133,10 @@ describe('Indexer stores a new metadata events and orders.', () => { publisherAccount ) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('instance Database', () => { expect(database).to.be.instanceOf(Database) }) @@ -224,6 +224,7 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should store the ddo in the database and return it ', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 2) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2 @@ -268,7 +269,6 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should store the ddo state in the db with no errors and retrieve it using did', async function () { const ddoState = await database.ddoState.retrieve(resolvedDDO.id) - console.log('ddoState: ', ddoState) assert(ddoState, 'ddoState not found') 
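// Sketch (illustrative, not part of this patch): C2DEngineDocker now takes the full
// node configuration as its last constructor argument (see the dockerRegistryAuth and
// imageCleanup test hunks above) and reads registry credentials from
// config.dockerRegistrysAuth rather than from a standalone auth map. The `null as any`
// placeholders mirror how dockerRegistryAuth.test.ts builds the engine; the cluster
// connection fields here are abbreviated assumptions.
import { C2DEngineDocker } from '../../components/c2d/compute_engine_docker.js'
import { getConfiguration } from '../../utils/index.js'

const config: any = await getConfiguration(true)
config.dockerRegistrysAuth = {
  'https://registry-1.docker.io': { username: 'testuser', password: 'testpass' }
}
const clusterConfig: any = {
  connection: { socketPath: '/var/run/docker.sock' }, // assumed shape
  tempFolder: '/tmp/test-docker-auth'
}
const dockerEngine = new C2DEngineDocker(
  clusterConfig, // C2DClusterInfo describing the docker cluster
  null as any,   // C2DDatabase stand-in, as in the test
  null as any,   // Escrow stand-in
  null as any,   // KeyManager stand-in
  config         // full config replaces the old dockerRegistrysAuth-only argument
)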
expect(resolvedDDO.id).to.equal(ddoState.did) expect(ddoState.valid).to.equal(true) @@ -335,6 +335,7 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should detect update event and store the udpdated ddo in the database', async function () { const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_UPDATED, DEFAULT_TEST_TIMEOUT, @@ -359,6 +360,7 @@ describe('Indexer stores a new metadata events and orders.', () => { this.timeout(DEFAULT_TEST_TIMEOUT * 3) const result = await nftContract.getMetaData() const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_UPDATED, DEFAULT_TEST_TIMEOUT * 3, @@ -383,6 +385,7 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should get the active state', async function () { const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_UPDATED, DEFAULT_TEST_TIMEOUT, @@ -409,8 +412,8 @@ describe('Indexer stores a new metadata events and orders.', () => { paymentCollector?.toLowerCase() === publisherAddress?.toLowerCase(), 'paymentCollector not correct' ) - - const feeData = await createFee( + const fees = new ProviderFees(oceanNode) + const feeData = await fees.createFee( resolvedDDO as DDO, 0, 'null', @@ -473,6 +476,7 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should get number of orders', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 4) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.ORDER_STARTED, DEFAULT_TEST_TIMEOUT * 4, @@ -511,7 +515,8 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should detect OrderReused event', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 2) - const feeData = await createFee( + const fees = new ProviderFees(oceanNode) + const feeData = await fees.createFee( resolvedDDO as DDO, 0, 'null', @@ -567,6 +572,7 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should increase number of orders', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 3) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.ORDER_REUSED, DEFAULT_TEST_TIMEOUT * 3, @@ -618,6 +624,7 @@ describe('Indexer stores a new metadata events and orders.', () => { expect(parseInt(result[2].toString())).to.equal(2) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_STATE, DEFAULT_TEST_TIMEOUT * 3, @@ -652,6 +659,7 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should store ddo reindex', async function () { const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT @@ -671,9 +679,4 @@ describe('Indexer stores a new metadata events and orders.', () => { expect(queue.length).to.be.equal(0) }, DEFAULT_TEST_TIMEOUT / 2) }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/logs.test.ts b/src/test/integration/logs.test.ts index 102c2b0b2..6e6a8c6df 100644 --- a/src/test/integration/logs.test.ts +++ b/src/test/integration/logs.test.ts @@ -19,7 +19,7 @@ import { getConfiguration } from '../../utils/index.js' let previousConfiguration: OverrideEnvConfig[] -describe('LogDatabase CRUD', () => { +describe('********** LogDatabase CRUD', () => { let database: Database let logger: CustomNodeLogger const logEntry = { @@ -166,7 +166,7 @@ describe('LogDatabase CRUD', () => { }) }) 
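// Sketch (illustrative, not part of this patch): the standalone createFee() and
// getProviderFeeToken() helpers from feesHandler.ts are replaced by a ProviderFees
// class bound to an OceanNode instance; the call shapes mirror the indexer.test.ts
// hunk above and the operationsDashboard.test.ts hunk below. The trailing createFee
// argument (a service entry) is an assumption, since the diff only shows the leading
// arguments.
import { OceanNode } from '../../OceanNode.js'
import { ProviderFees } from '../../components/core/utils/feesHandler.js'
import { DDO } from '@oceanprotocol/ddo-js'

async function providerFeesExample(oceanNode: OceanNode, ddo: DDO, chainId: number) {
  const fees = new ProviderFees(oceanNode)
  // provider fees for a DDO, as used when asserting ORDER_STARTED / ORDER_REUSED events
  const feeData = await fees.createFee(ddo, 0, 'null', (ddo as any).services[0])
  // fee token lookup, as used by the COLLECT_FEES admin command
  const feeToken = fees.getProviderFeeToken(chainId)
  return { feeData, feeToken }
}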
-describe('LogDatabase retrieveMultipleLogs with specific parameters', () => { +describe('********** LogDatabase retrieveMultipleLogs with specific parameters', () => { let database: Database // Assume start and end times are defined to bracket your test logs const startTime = new Date(Date.now() - 10000) // 10 seconds ago @@ -325,7 +325,7 @@ describe('LogDatabase retrieveMultipleLogs with specific parameters', () => { }) }) -describe('LogDatabase deleteOldLogs', () => { +describe('********** LogDatabase deleteOldLogs', () => { let database: Database const oldLogEntry = { timestamp: new Date().getTime() - 31 * 24 * 60 * 60 * 1000, // 31 days ago @@ -403,7 +403,7 @@ describe('LogDatabase deleteOldLogs', () => { await tearDownEnvironment(previousConfiguration) }) }) -describe('LogDatabase retrieveMultipleLogs with pagination', () => { +describe('********** LogDatabase retrieveMultipleLogs with pagination', () => { let database: Database const logCount = 10 // Total number of logs to insert and also the limit for logs per page diff --git a/src/test/integration/nonce.test.ts b/src/test/integration/nonce.test.ts index a39377ea4..575089954 100644 --- a/src/test/integration/nonce.test.ts +++ b/src/test/integration/nonce.test.ts @@ -3,7 +3,7 @@ import { ZeroAddress } from 'ethers' import { nonceSchema } from '../data/nonceSchema.js' import { Typesense, convertTypesenseConfig } from '../../components/database/typesense.js' -describe('handle nonce', () => { +describe('********** Nonce tests', () => { let typesense: Typesense let error: Error diff --git a/src/test/integration/operationsDashboard.test.ts b/src/test/integration/operationsDashboard.test.ts index c9c492963..7ca76e0ca 100644 --- a/src/test/integration/operationsDashboard.test.ts +++ b/src/test/integration/operationsDashboard.test.ts @@ -52,12 +52,13 @@ import { getCrawlingInterval } from '../../components/Indexer/utils.js' import { ReindexTask } from '../../components/Indexer/ChainIndexer.js' import { create256Hash } from '../../utils/crypt.js' import { CollectFeesHandler } from '../../components/core/admin/collectFeesHandler.js' -import { getProviderFeeToken } from '../../components/core/utils/feesHandler.js' + import { KeyManager } from '../../components/KeyManager/index.js' import { BlockchainRegistry } from '../../components/BlockchainRegistry/index.js' import { createHashForSignature, safeSign } from '../utils/signature.js' +import { ProviderFees } from '../../components/core/utils/feesHandler.js' -describe('Should test admin operations', () => { +describe('********** OperationsDashboard tests', () => { let config: OceanNodeConfig let oceanNode: OceanNode let publishedDataset: any @@ -110,14 +111,13 @@ describe('Should test admin operations', () => { keyManager, blockchainRegistry ) - indexer = new OceanIndexer( - dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry - ) + indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('validation should pass for stop node command', async () => { const nonce = Date.now().toString() const messageHashBytes = createHashForSignature( @@ -155,9 +155,10 @@ describe('Should test admin operations', () => { PROTOCOL_COMMANDS.COLLECT_FEES ) let signature = await safeSign(adminWallet, messageHashBytes) + const fees = new ProviderFees(oceanNode) const collectFeesCommand: AdminCollectFeesCommand = { command: 
PROTOCOL_COMMANDS.COLLECT_FEES, - tokenAddress: await getProviderFeeToken(DEVELOPMENT_CHAIN_ID), + tokenAddress: fees.getProviderFeeToken(DEVELOPMENT_CHAIN_ID), chainId: DEVELOPMENT_CHAIN_ID, tokenAmount: 0.01, destinationAddress: await destinationWallet.getAddress(), @@ -231,6 +232,7 @@ describe('Should test admin operations', () => { this.timeout(DEFAULT_TEST_TIMEOUT * 2) publishedDataset = await publishAsset(downloadAsset, adminWallet) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, publishedDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2 @@ -242,7 +244,7 @@ describe('Should test admin operations', () => { it('should pass for reindex tx command', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 2) - await waitToIndex(publishedDataset.ddo.did, EVENTS.METADATA_CREATED) + await waitToIndex(oceanNode, publishedDataset.ddo.did, EVENTS.METADATA_CREATED) const nonce = Date.now().toString() const messageHashBytes = createHashForSignature( await adminWallet.getAddress(), @@ -315,6 +317,7 @@ describe('Should test admin operations', () => { this.timeout(DEFAULT_TEST_TIMEOUT * 2) this.timeout(DEFAULT_TEST_TIMEOUT * 2) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, publishedDataset.ddo.did, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2 @@ -458,10 +461,4 @@ describe('Should test admin operations', () => { assert(responseStart.stream, 'Failed to get stream when starting thread') expect(responseStart.status.httpStatus).to.be.equal(200) }) - - after(async () => { - await tearDownEnvironment(previousConfiguration) - INDEXER_CRAWLING_EVENT_EMITTER.removeAllListeners() - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/persistentStorage.test.ts b/src/test/integration/persistentStorage.test.ts index 87cd4b8b3..7383d6d30 100644 --- a/src/test/integration/persistentStorage.test.ts +++ b/src/test/integration/persistentStorage.test.ts @@ -39,7 +39,7 @@ import { deployAndGetAccessListConfig } from '../utils/contracts.js' import { OceanNodeConfig, OceanNodeStatus } from '../../@types/OceanNode.js' import { KeyManager } from '../../components/KeyManager/index.js' -describe('Persistent storage handlers (integration)', function () { +describe('********** Persistent storage handlers (integration)', function () { this.timeout(DEFAULT_TEST_TIMEOUT) let previousConfiguration: OverrideEnvConfig[] @@ -116,6 +116,7 @@ describe('Persistent storage handlers (integration)', function () { }) after(async () => { + await oceanNode.tearDownAll() await tearDownEnvironment(previousConfiguration) // await fsp.rm(psRoot, { recursive: true, force: true }) }) diff --git a/src/test/integration/pricing.test.ts b/src/test/integration/pricing.test.ts index fed4f9d15..5467d6061 100644 --- a/src/test/integration/pricing.test.ts +++ b/src/test/integration/pricing.test.ts @@ -40,7 +40,7 @@ import { getConfiguration } from '../../utils/config.js' import { EncryptMethod } from '../../@types/fileObject.js' import { Asset } from '@oceanprotocol/ddo-js' -describe('Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => { +describe('********** Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => { let database: Database let oceanNode: OceanNode let provider: JsonRpcProvider @@ -84,12 +84,17 @@ describe('Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => const config = await getConfiguration(true) database = await Database.init(config.dbConfig) - oceanNode = await OceanNode.getInstance() - indexer = new OceanIndexer( + 
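For the collect-fees admin test just above, the fee token is now resolved through the `ProviderFees` instance and, per the diff, no longer awaited. A sketch of how the signed command appears to be assembled; any fields of `AdminCollectFeesCommand` beyond those visible in the diff (for example how `nonce` and `signature` are attached) are assumptions:

```typescript
// Sketch: building the signed COLLECT_FEES admin command, following the test above.
const nonce = Date.now().toString()
const messageHashBytes = createHashForSignature(
  await adminWallet.getAddress(),
  nonce,
  PROTOCOL_COMMANDS.COLLECT_FEES
)
const signature = await safeSign(adminWallet, messageHashBytes)

const fees = new ProviderFees(oceanNode)
const collectFeesCommand: AdminCollectFeesCommand = {
  command: PROTOCOL_COMMANDS.COLLECT_FEES,
  tokenAddress: fees.getProviderFeeToken(DEVELOPMENT_CHAIN_ID), // no longer awaited
  chainId: DEVELOPMENT_CHAIN_ID,
  tokenAmount: 0.01,
  destinationAddress: await destinationWallet.getAddress(),
  // the nonce/signature produced above are attached as well; their exact field names
  // on AdminCollectFeesCommand are not visible in this diff
  ...({ nonce, signature } as any)
}
```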
oceanNode = await OceanNode.getInstance( + config, database, - mockSupportedNetworks, - oceanNode.blockchainRegistry + null, + null, + null, + null, + null, + true ) + indexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) artifactsAddresses = getOceanArtifactsAdressesByChainId(DEVELOPMENT_CHAIN_ID) if (!artifactsAddresses) { @@ -109,7 +114,10 @@ describe('Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => delete genericAssetCloned.event delete genericAssetCloned.nft }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('instance Database', () => { expect(database).to.be.instanceOf(Database) }) @@ -209,6 +217,7 @@ describe('Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => it('should store the ddo in the database and return it ', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 3) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, assetDID, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2 @@ -346,6 +355,7 @@ describe('Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => it('should store the updated ddo in the database and return it ', async function () { this.timeout(DEFAULT_TEST_TIMEOUT * 3) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, genericAssetCloned.id, EVENTS.METADATA_UPDATED, DEFAULT_TEST_TIMEOUT * 2, @@ -374,8 +384,4 @@ describe('Publish pricing scehmas and assert ddo stats - FRE & Dispenser', () => ) } else expect(expectedTimeoutFailure(this.test.title)).to.be.equal(wasTimeout) }) - after(async () => { - await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/purgatory.test.ts b/src/test/integration/purgatory.test.ts index 0f9c8ffa0..588baa09e 100644 --- a/src/test/integration/purgatory.test.ts +++ b/src/test/integration/purgatory.test.ts @@ -6,11 +6,11 @@ import { setupEnvironment, tearDownEnvironment } from '../utils/utils.js' -import { ENVIRONMENT_VARIABLES } from '../../utils/index.js' - -describe('Purgatory test', () => { +import { ENVIRONMENT_VARIABLES, getConfiguration } from '../../utils/index.js' +describe('********** Purgatory test', () => { let purgatory: Purgatory let previousConfiguration: OverrideEnvConfig[] + let config: any before(async () => { // override and save configuration (always before calling getConfig()) @@ -27,8 +27,9 @@ describe('Purgatory test', () => { ] ) ) + config = await getConfiguration(true) - purgatory = await Purgatory.getInstance() + purgatory = await Purgatory.getInstance(config) }) it('instance Purgatory', () => { diff --git a/src/test/integration/testUtils.ts b/src/test/integration/testUtils.ts index b1d7b1053..cc13f4d1c 100644 --- a/src/test/integration/testUtils.ts +++ b/src/test/integration/testUtils.ts @@ -4,8 +4,9 @@ import { INDEXER_LOGGER } from '../../utils/logging/common.js' import { JsonRpcSigner, JsonRpcProvider, getBytes } from 'ethers' import { DEFAULT_TEST_TIMEOUT } from '../utils/utils.js' -import { getDatabase } from '../../utils/database.js' + import { Asset } from '@oceanprotocol/ddo-js' +import { OceanNode } from '../../OceanNode.js' // listen for indexer events export function addIndexerEventListener(eventName: string, ddoId: string, callback: any) { @@ -31,9 +32,9 @@ export function expectedTimeoutFailure(testName: string): boolean { return true } -async function getIndexedDDOFromDB(did: string): Promise { +async function getIndexedDDOFromDB(did: 
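Both the pricing and the order-validation suites now construct the node explicitly through `OceanNode.getInstance(...)`: configuration and database are passed in, components the test does not need are skipped with positional `null`s, and a trailing boolean (presumably "force a fresh instance"; the parameter names are not visible in this diff) closes the call. A sketch:

```typescript
// Sketch of the test-side node construction as shown in the diff.
const config = await getConfiguration(true) // force-reload the configuration
const database = await Database.init(config.dbConfig)

oceanNode = await OceanNode.getInstance(
  config,
  database,
  null, // remaining positional components (P2P, provider, ...) are skipped in these tests;
  null, // their exact order is not visible in this diff
  null,
  null,
  null,
  true // trailing flag, presumably requesting a fresh instance for test isolation
)
indexer = new OceanIndexer(database, config, oceanNode.blockchainRegistry)
oceanNode.addIndexer(indexer)
```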
string, node: OceanNode): Promise { try { - const database: Database = await getDatabase() + const database: Database = await node.getDatabase() const ddo = await database.ddo.retrieve(did) if (ddo) { return ddo @@ -50,27 +51,28 @@ export type WaitIndexResult = { } export const waitToIndex = async ( + node: OceanNode, did: string, eventName: string, testTimeout: number = DEFAULT_TEST_TIMEOUT, forceWaitForEvent?: boolean ): Promise => { if (!forceWaitForEvent) { - const ddo = await getIndexedDDOFromDB(did) + const ddo = await getIndexedDDOFromDB(did, node) if (ddo) return { ddo, wasTimeout: false } } return new Promise((resolve) => { const listener = addIndexerEventListener(eventName, did, async () => { clearTimeout(timeoutId) - const ddo = await getIndexedDDOFromDB(did) + const ddo = await getIndexedDDOFromDB(did, node) INDEXER_DDO_EVENT_EMITTER.removeListener(eventName, listener) resolve({ ddo, wasTimeout: false }) }) const timeoutId = setTimeout(async () => { try { - const ddo = await getIndexedDDOFromDB(did) + const ddo = await getIndexedDDOFromDB(did, node) INDEXER_DDO_EVENT_EMITTER.removeListener(eventName, listener) resolve({ ddo, wasTimeout: true }) } catch (error) { diff --git a/src/test/integration/transactionValidation.test.ts b/src/test/integration/transactionValidation.test.ts index 651be6af9..7ceb36ad2 100644 --- a/src/test/integration/transactionValidation.test.ts +++ b/src/test/integration/transactionValidation.test.ts @@ -25,7 +25,7 @@ import { tearDownEnvironment } from '../utils/utils.js' import { homedir } from 'os' -describe('validateOrderTransaction Function with Orders', () => { +describe('********** validateOrderTransaction Function with Orders', () => { let database: Database let oceanNode: OceanNode let provider: JsonRpcProvider @@ -69,12 +69,17 @@ describe('validateOrderTransaction Function with Orders', () => { config = await getConfiguration(true) // Force reload the configuration const dbconn = await Database.init(config.dbConfig) - oceanNode = await OceanNode.getInstance(config, dbconn) - indexer = new OceanIndexer( + oceanNode = await OceanNode.getInstance( + config, dbconn, - config.indexingNetworks, - oceanNode.blockchainRegistry + null, + null, + null, + null, + null, + true ) + indexer = new OceanIndexer(dbconn, config, oceanNode.blockchainRegistry) oceanNode.addIndexer(indexer) let network = getOceanArtifactsAdressesByChainId(DEVELOPMENT_CHAIN_ID) @@ -97,7 +102,10 @@ describe('validateOrderTransaction Function with Orders', () => { const { dbConfig } = await getConfiguration(true) database = await Database.init(dbConfig) }) - + after(async () => { + await oceanNode.tearDownAll() + await tearDownEnvironment(previousConfiguration) + }) it('Start instance of Database', () => { expect(database).to.be.instanceOf(Database) }) @@ -107,6 +115,7 @@ describe('validateOrderTransaction Function with Orders', () => { publishedDataset = await publishAsset(genericDDO, publisherAccount) const { ddo, wasTimeout } = await waitToIndex( + oceanNode, publishedDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT * 2 @@ -125,6 +134,7 @@ describe('validateOrderTransaction Function with Orders', () => { it('should get the active state', async function () { const { ddo, wasTimeout } = await waitToIndex( + oceanNode, publishedDataset.ddo.id, EVENTS.METADATA_CREATED, DEFAULT_TEST_TIMEOUT, @@ -237,8 +247,4 @@ describe('validateOrderTransaction Function with Orders', () => { 'Tx id used not valid, one of the NFT addresses, Datatoken address or the User address contract 
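The reworked `waitToIndex` helper above races an indexer event against a timeout: it returns early if the DDO is already in the node's database, otherwise it resolves on the matching event or, failing that, with whatever the database holds once the timeout fires (flagging `wasTimeout`). A self-contained sketch of that pattern, using a plain `EventEmitter` in place of the project's indexer emitter:

```typescript
import { EventEmitter } from 'node:events'

interface WaitResult<T> {
  value: T | null
  wasTimeout: boolean
}

// Generic version of the wait-or-timeout pattern used by waitToIndex().
function waitForEventOrTimeout<T>(
  emitter: EventEmitter,
  eventName: string,
  fetchCurrent: () => Promise<T | null>, // e.g. look the DDO up in the node's database
  timeoutMs: number
): Promise<WaitResult<T>> {
  return new Promise((resolve) => {
    const listener = async () => {
      clearTimeout(timeoutId)
      emitter.removeListener(eventName, listener)
      resolve({ value: await fetchCurrent(), wasTimeout: false })
    }
    const timeoutId = setTimeout(async () => {
      emitter.removeListener(eventName, listener)
      resolve({ value: await fetchCurrent(), wasTimeout: true })
    }, timeoutMs)
    emitter.on(eventName, listener)
  })
}
```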
address does not match.' ) }) - after(async () => { - await tearDownEnvironment(previousConfiguration) - indexer.stopAllChainIndexers() - }) }) diff --git a/src/test/integration/typesense.test.ts b/src/test/integration/typesense.test.ts index 8742284ca..b6ee12c4c 100644 --- a/src/test/integration/typesense.test.ts +++ b/src/test/integration/typesense.test.ts @@ -8,7 +8,7 @@ import { ddo } from '../data/ddo.js' import { expect } from 'chai' import { TypesenseSearchParams } from '../../@types/index.js' -describe('Typesense', () => { +describe('********** Typesense', () => { let typesense: Typesense before(() => { @@ -26,7 +26,7 @@ describe('Typesense', () => { }) }) -describe('Typesense collections', () => { +describe('********** Typesense collections', () => { let typesense: Typesense before(() => { @@ -76,7 +76,7 @@ describe('Typesense collections', () => { }) }) -describe('Typesense documents', () => { +describe('********** Typesense documents', () => { let typesense: Typesense before(() => { @@ -140,7 +140,7 @@ describe('Typesense documents', () => { }) }) -describe('Typesense documents', () => { +describe('********** Typesense documents', () => { let typesense: Typesense before(() => { diff --git a/src/test/unit/auth/token.test.ts b/src/test/unit/auth/token.test.ts index f185de0ba..a2a2c674f 100644 --- a/src/test/unit/auth/token.test.ts +++ b/src/test/unit/auth/token.test.ts @@ -15,7 +15,7 @@ describe('Auth Token Tests', () => { config = await getConfiguration(true) authTokenDatabase = await AuthTokenDatabase.create(config.dbConfig) wallet = new Wallet(process.env.PRIVATE_KEY) - auth = new Auth(authTokenDatabase) + auth = new Auth(authTokenDatabase, config) }) const getRandomNonce = () => { diff --git a/src/test/unit/compute.test.ts b/src/test/unit/compute.test.ts index 4e5120e90..a2766c478 100644 --- a/src/test/unit/compute.test.ts +++ b/src/test/unit/compute.test.ts @@ -282,6 +282,28 @@ describe('Compute Jobs Database', () => { expect(convertStringToArray(str)).to.deep.equal(expectedArray) }) + // it('should convert DBComputeJob to ComputeJob and omit internal DB data', () => { + // const source: any = completeDBComputeJob + // const output: ComputeJob = omitDBComputeFieldsFromComputeJob(source as DBComputeJob) + + // expect(Object.prototype.hasOwnProperty.call(output, 'clusterHash')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'configlogURL')).to.be.equal( + // false + // ) + // expect(Object.prototype.hasOwnProperty.call(output, 'publishlogURL')).to.be.equal( + // false + // ) + // expect(Object.prototype.hasOwnProperty.call(output, 'algologURL')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'outputsURL')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'algorithm')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'assets')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'isRunning')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'isStarted')).to.be.equal(false) + // expect(Object.prototype.hasOwnProperty.call(output, 'containerImage')).to.be.equal( + // false + // ) + // }) + it('should check manifest platform against local platform env', () => { const arch = os.machine() // ex: arm const platform = os.platform() // ex: linux diff --git a/src/test/unit/indexer/validation.test.ts b/src/test/unit/indexer/validation.test.ts index 0197f8286..1e1fd9d5d 100644 --- a/src/test/unit/indexer/validation.test.ts +++ 
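The commented-out compute test above exercises `omitDBComputeFieldsFromComputeJob`, which strips node-internal bookkeeping from a `DBComputeJob` before it is returned to callers as a `ComputeJob`. A hedged sketch of what such a helper typically looks like, using only the field names listed in the disabled assertions; the real implementation and type definitions are not part of this diff:

```typescript
// Field names taken from the disabled test above; an illustration, not the project's code.
const INTERNAL_DB_FIELDS = [
  'clusterHash',
  'configlogURL',
  'publishlogURL',
  'algologURL',
  'outputsURL',
  'algorithm',
  'assets',
  'isRunning',
  'isStarted',
  'containerImage'
] as const

function omitInternalDbFields<T extends Record<string, unknown>>(job: T): Partial<T> {
  const publicJob: Record<string, unknown> = { ...job }
  for (const field of INTERNAL_DB_FIELDS) {
    delete publicJob[field]
  }
  return publicJob as Partial<T>
}
```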
b/src/test/unit/indexer/validation.test.ts @@ -1,5 +1,4 @@ import { DDOExample, ddov5, ddov7, ddoValidationSignature } from '../../data/ddo.js' -import { getValidationSignature } from '../../../components/core/utils/validateDdoHandler.js' import { ENVIRONMENT_VARIABLES, getConfiguration } from '../../../utils/index.js' import { expect } from 'chai' import { @@ -113,7 +112,7 @@ describe('Schema validation tests', () => { const validationResult = await ddoInstance.validate() expect(validationResult[0]).to.eql(true) expect(validationResult[1]).to.eql({}) - const signatureResult = await getValidationSignature( + const signatureResult = await oceanNode.getValidationSignature( JSON.stringify(ddoValidationSignature) ) expect(signatureResult).to.eql({ diff --git a/src/test/unit/networking.test.ts b/src/test/unit/networking.test.ts index 8a88b2bda..2f76c6063 100644 --- a/src/test/unit/networking.test.ts +++ b/src/test/unit/networking.test.ts @@ -152,6 +152,7 @@ describe('Test rate limitations and deny list settings', () => { const keyManager = new KeyManager(config) const p2pNode = new OceanP2P(config, keyManager) await p2pNode.start() + node = OceanNode.getInstance(config, null, p2pNode, null, null, null, null, true) }) diff --git a/src/test/unit/ocean.test.ts b/src/test/unit/ocean.test.ts index 2de2f9dd8..d7667f23f 100644 --- a/src/test/unit/ocean.test.ts +++ b/src/test/unit/ocean.test.ts @@ -15,7 +15,6 @@ import { setupEnvironment, tearDownEnvironment } from '../utils/utils.js' -import { sleep } from '../../utils/util.js' let envOverrides: OverrideEnvConfig[] @@ -44,7 +43,7 @@ describe('Status command tests', async () => { const keyManager = new KeyManager(config) const blockchainRegistry = new BlockchainRegistry(keyManager, config) const oceanP2P = new OceanP2P(config, keyManager, db) - const oceanIndexer = new OceanIndexer(db, config.indexingNetworks, blockchainRegistry) + const oceanIndexer = new OceanIndexer(db, config, blockchainRegistry) const oceanProvider = new OceanProvider(db) const oceanNode = OceanNode.getInstance(config, db, oceanP2P) @@ -54,8 +53,7 @@ describe('Status command tests', async () => { await oceanIndexer.stopAllChainIndexers() }) - it('Ocean Node instance', async () => { - await sleep(3000) + it('Ocean Node instance', () => { expect(oceanNode).to.be.instanceOf(OceanNode) expect(config.supportedNetworks).to.eql({ '1': 'https://rpc.eth.gateway.fm', diff --git a/src/utils/auth.ts b/src/utils/auth.ts deleted file mode 100644 index 019bb8d82..000000000 --- a/src/utils/auth.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { isAddress } from 'ethers' -import { getConfiguration } from './index.js' -import { AccessListContract, OceanNodeConfig } from '../@types/OceanNode.js' - -export async function getAdminAddresses( - existingConfig?: OceanNodeConfig -): Promise<{ addresses: string[]; accessLists: any }> { - let config: OceanNodeConfig - const ret = { - addresses: [] as string[], - accessLists: undefined as AccessListContract | undefined - } - if (!existingConfig) { - config = await getConfiguration() - } else { - config = existingConfig - } - - if (config.allowedAdmins && config.allowedAdmins.length > 0) { - for (const admin of config.allowedAdmins) { - if (isAddress(admin) === true) { - ret.addresses.push(admin) - } - } - } - ret.accessLists = config.allowedAdminsList - return ret -} diff --git a/src/utils/blockchain.ts b/src/utils/blockchain.ts index 0bf9458f7..962246031 100644 --- a/src/utils/blockchain.ts +++ b/src/utils/blockchain.ts @@ -10,10 +10,8 @@ import { Wallet, 
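DDO validation signing moves from the standalone `getValidationSignature` helper onto the node instance. A short sketch of the flow the schema test above now follows; the shape of the returned signature object is not visible in this diff:

```typescript
// Sketch: validate a DDO and then ask the node for its validation signature.
const validationResult = await ddoInstance.validate()
expect(validationResult[0]).to.eql(true) // [isValid, errors]
expect(validationResult[1]).to.eql({})

// Previously an import from validateDdoHandler.js; now a method on the node.
const signatureResult = await oceanNode.getValidationSignature(
  JSON.stringify(ddoValidationSignature)
)
```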
TransactionReceipt } from 'ethers' -import { getConfiguration } from './config.js' import { CORE_LOGGER } from './logging/common.js' -import { ConnectionStatus } from '../@types/blockchain.js' -import { ValidateChainId } from '../@types/commands.js' +import { ConnectionStatus, SupportedNetwork } from '../@types/blockchain.js' import { KeyManager } from '../components/KeyManager/index.js' export class Blockchain { @@ -22,26 +20,23 @@ export class Blockchain { private provider: FallbackProvider private chainId: number private knownRPCs: string[] = [] + private primaryRpcTimeout: number + private fallbackRpcTimeout: number /** - * Constructor overloads: - * 1. New pattern: (rpc, chainId, signer, fallbackRPCs?) - signer provided by KeyManager - * 2. Old pattern: (rpc, chainId, config, fallbackRPCs?) - for backward compatibility + * Creates a new Blockchain instance utilizing KeyManager and a SupportedNetwork configuration */ - public constructor( - keyManager: KeyManager, - rpc: string, - chainId: number, - fallbackRPCs?: string[] - ) { - this.chainId = chainId + public constructor(keyManager: KeyManager, network: SupportedNetwork) { + this.chainId = network.chainId || 0 this.keyManager = keyManager - this.knownRPCs.push(rpc) - if (fallbackRPCs && fallbackRPCs.length > 0) { - this.knownRPCs.push(...fallbackRPCs) + this.knownRPCs.push(network.rpc) + if (network.fallbackRPCs && network.fallbackRPCs.length > 0) { + this.knownRPCs.push(...network.fallbackRPCs) } this.provider = undefined as undefined as FallbackProvider this.signer = undefined as unknown as Signer + this.primaryRpcTimeout = network.primaryRpcTimeout || 3000 + this.fallbackRpcTimeout = network.fallbackRpcTimeout || 1500 } public getSupportedChain(): number { @@ -56,45 +51,43 @@ export class Blockchain { return await this.signer.getAddress() } + public stop() { + if (this.provider) { + this.provider.providerConfigs.forEach((config) => { + // Each config contains a 'provider' property + config.provider.destroy() + }) + + // 2. Destroy the FallbackProvider itself + this.provider.destroy() + this.provider = null + } + } + + // eslint-disable-next-line require-await public async getProvider(force: boolean = false): Promise { - if (!this.provider) { + if (force || !this.provider?.providerConfigs?.length) { const configs: { provider: JsonRpcProvider priority: number stallTimeout: number }[] = [] - const PRIMARY_RPC_TIMEOUT = 3000 - const FALLBACK_RPC_TIMEOUT = 1500 for (let i = 0; i < this.knownRPCs.length; i++) { const rpc = this.knownRPCs[i] - const rpcProvider = new JsonRpcProvider(rpc) - if (!force) { - try { - const { chainId } = await rpcProvider.getNetwork() - if (chainId.toString() === this.chainId.toString()) { - // primary RPC gets lowest priority = is first to be called - configs.push({ - provider: rpcProvider, - priority: i + 1, - stallTimeout: i === 0 ? PRIMARY_RPC_TIMEOUT : FALLBACK_RPC_TIMEOUT - }) - } - } catch (error) { - CORE_LOGGER.error(`Error getting network for RPC ${rpc}: ${error}`) - } - } else { - configs.push({ - provider: rpcProvider, - priority: i + 1, - stallTimeout: i === 0 ? PRIMARY_RPC_TIMEOUT : FALLBACK_RPC_TIMEOUT - }) - } + const rpcProvider = new JsonRpcProvider(rpc, this.chainId, { + staticNetwork: true + }) + configs.push({ + provider: rpcProvider, + priority: i + 1, + stallTimeout: i === 0 ? this.primaryRpcTimeout : this.fallbackRpcTimeout + }) } // quorum=1: accept the first response to avoid calls to all configured rpcs this.provider = configs.length > 0 - ? 
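The `Blockchain` constructor above now takes the whole `SupportedNetwork` entry instead of separate `rpc`/`chainId`/`fallbackRPCs` arguments, and the RPC stall timeouts become configurable per network (defaulting to 3000 ms for the primary and 1500 ms for fallbacks). A sketch of constructing and shutting down an instance; only the fields the constructor reads in this diff are shown, any other `SupportedNetwork` fields are omitted:

```typescript
// Sketch: the fields below match those read by the new constructor.
const network: SupportedNetwork = {
  chainId: 8996,
  rpc: 'http://127.0.0.1:8545',
  fallbackRPCs: ['http://127.0.0.1:8546'], // optional
  primaryRpcTimeout: 3000, // optional, defaults shown in the diff
  fallbackRpcTimeout: 1500 // optional
}

const blockchain = new Blockchain(keyManager, network)
const provider = await blockchain.getProvider()
// ... use provider ...
blockchain.stop() // destroys each JsonRpcProvider and the FallbackProvider itself
```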
new FallbackProvider(configs, undefined, { quorum: 1 }) + ? new FallbackProvider(configs, this.chainId, { quorum: 1 }) : new FallbackProvider([]) } return this.provider @@ -265,28 +258,3 @@ export function getMessageHash(message: string): Uint8Array { const messageHashBytes = ethers.toBeArray(messageHash) return messageHashBytes } - -export async function checkSupportedChainId(chainId: number): Promise { - const config = await getConfiguration() - if (!chainId || !(`${chainId.toString()}` in config.supportedNetworks)) { - CORE_LOGGER.error(`Chain ID ${chainId} is not supported`) - return { - validation: false, - networkRpc: '' - } - } - return { - validation: true, - networkRpc: config.supportedNetworks[chainId.toString()].rpc - } -} - -export async function getJsonRpcProvider( - chainId: number -): Promise | null { - const checkResult = await checkSupportedChainId(chainId) - if (!checkResult.validation) { - return null - } - return new JsonRpcProvider(checkResult.networkRpc) -} diff --git a/src/utils/config.ts b/src/utils/config.ts index f8a538302..81f2c7d64 100644 --- a/src/utils/config.ts +++ b/src/utils/config.ts @@ -1,10 +1,7 @@ import { isDefined } from './util.js' -import { getConfiguration } from './config/builder.js' export * from './config/index.js' export function isPolicyServerConfigured(): boolean { return isDefined(process.env.POLICY_SERVER_URL) } - -export const hasP2PInterface = (await (await getConfiguration())?.hasP2P) || false diff --git a/src/utils/config/builder.ts b/src/utils/config/builder.ts index 37ef7e75f..0fc042b86 100644 --- a/src/utils/config/builder.ts +++ b/src/utils/config/builder.ts @@ -25,9 +25,8 @@ import lodash from 'lodash' let previousConfiguration: OceanNodeConfig = null -function create256Hash(input: string): string { - const result = crypto.createHash('sha256').update(input).digest('hex') - return '0x' + result +function createConfigHash(input: string): string { + return '0x' + crypto.createHash('sha256').update(input).digest('hex') } function mapEnvToConfig( @@ -134,7 +133,7 @@ export function buildC2DClusters( for (const theURL of clustersURLS) { clusters.push({ connection: theURL, - hash: create256Hash(String(count) + theURL), + hash: createConfigHash(String(count) + theURL), type: C2DClusterType.OPF_K8 }) count += 1 @@ -147,7 +146,7 @@ export function buildC2DClusters( if (dockerComputeEnvironments) { for (const dockerC2d of dockerComputeEnvironments) { if (dockerC2d.socketPath || dockerC2d.host) { - const hash = create256Hash( + const hash = createConfigHash( String(count) + JSON.stringify({ socketPath: dockerC2d.socketPath, diff --git a/src/utils/config/schemas.ts b/src/utils/config/schemas.ts index c0d97acf5..1831cb0eb 100644 --- a/src/utils/config/schemas.ts +++ b/src/utils/config/schemas.ts @@ -235,7 +235,8 @@ export const C2DEnvironmentConfigSchema = z }) .optional(), free: ComputeEnvironmentFreeOptionsSchema.optional(), - resources: z.array(ComputeResourceSchema).optional() + resources: z.array(ComputeResourceSchema).optional(), + enableNetwork: z.boolean().optional().default(false) }) .refine( (data) => @@ -270,7 +271,6 @@ export const C2DDockerConfigSchema = z.array( imageCleanupInterval: z.number().int().min(3600).optional().default(86400), // min 1 hour, default 24 hours scanImages: z.boolean().optional().default(false), scanImageDBUpdateInterval: z.number().int().min(3600).optional().default(43200), // default 43200 (12 hours) - enableNetwork: z.boolean().optional().default(false), environments: 
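The rewritten `getProvider` above no longer probes each RPC's chain id up front; it pins the network statically and lets a quorum-of-one `FallbackProvider` prefer the primary endpoint, falling back only when it stalls. The same assembly as a standalone ethers v6 snippet:

```typescript
import { JsonRpcProvider, FallbackProvider } from 'ethers'

function buildFallbackProvider(
  rpcs: string[],
  chainId: number,
  primaryStallMs = 3000,
  fallbackStallMs = 1500
): FallbackProvider {
  const configs = rpcs.map((rpc, i) => ({
    // staticNetwork avoids an eth_chainId round-trip on construction
    provider: new JsonRpcProvider(rpc, chainId, { staticNetwork: true }),
    priority: i + 1, // primary RPC first
    stallTimeout: i === 0 ? primaryStallMs : fallbackStallMs
  }))
  // quorum 1: accept the first successful response instead of querying every RPC
  return new FallbackProvider(configs, chainId, { quorum: 1 })
}

// Usage, mirroring the development network used in these tests:
const provider = buildFallbackProvider(['http://127.0.0.1:8545'], 8996)
```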
z.array(C2DEnvironmentConfigSchema).min(1) }) ) diff --git a/src/utils/cronjobs/p2pAnnounceC2D.ts b/src/utils/cronjobs/p2pAnnounceC2D.ts index 156a2ae80..62e8c13f5 100644 --- a/src/utils/cronjobs/p2pAnnounceC2D.ts +++ b/src/utils/cronjobs/p2pAnnounceC2D.ts @@ -1,11 +1,23 @@ import { OceanNode } from '../../OceanNode.js' +import { OCEAN_NODE_LOGGER } from '../logging/common.js' // import { P2P_LOGGER } from '../logging/common.js' const GB = 1024 * 1024 * 1024 // 1 GB in bytes export async function p2pAnnounceC2D(node: OceanNode) { const announce: any[] = [] const computeEngines = node.getC2DEngines() - const result = await computeEngines.fetchEnvironments() + if (!computeEngines) { + return + } + let result + try { + result = await computeEngines.fetchEnvironments() + } catch (err) { + OCEAN_NODE_LOGGER.error( + `p2pAnnounceC2D: failed to fetch environments: ${err instanceof Error ? err.message : String(err)}` + ) + return + } for (const env of result) { for (const resource of env.resources) { switch (resource.type) { @@ -92,10 +104,14 @@ export async function p2pAnnounceC2D(node: OceanNode) { } } } + const p2p = node.getP2PNode() + if (!p2p) { + return + } for (const obj of announce) { const res = { c2d: obj } - node.getP2PNode().advertiseString(JSON.stringify(res)) + p2p.advertiseString(JSON.stringify(res)) } } diff --git a/src/utils/cronjobs/p2pAnnounceDDOS.ts b/src/utils/cronjobs/p2pAnnounceDDOS.ts index 30cf716de..e361b6d95 100644 --- a/src/utils/cronjobs/p2pAnnounceDDOS.ts +++ b/src/utils/cronjobs/p2pAnnounceDDOS.ts @@ -6,11 +6,11 @@ import { P2P_LOGGER } from '../logging/common.js' export async function p2pAnnounceDDOS(node: OceanNode) { try { - const db = node.getDatabase() + const db = await node.getDatabase() const p2pNode = node.getP2PNode() - if (!db || !db.ddo) { + if (!db || !db.ddo || !p2pNode) { P2P_LOGGER.info( - `republishStoredDDOS() attempt aborted because there is no database!` + `republishStoredDDOS() attempt aborted because there is no database or P2P is not available!` ) return } diff --git a/src/utils/cronjobs/scheduleCronJobs.ts b/src/utils/cronjobs/scheduleCronJobs.ts index 776edc451..cbeccecc5 100644 --- a/src/utils/cronjobs/scheduleCronJobs.ts +++ b/src/utils/cronjobs/scheduleCronJobs.ts @@ -16,12 +16,12 @@ const REPUBLISH_INTERVAL_HOURS = 1000 * 60 * 60 * 4 // 4 hours export async function scheduleCronJobs(node: OceanNode) { await sleep(2000) // wait for 2 seconds to ensure the node is fully initialized try { - scheduleDeleteLogsJob(node.getDatabase()) + scheduleDeleteLogsJob(await node.getDatabase()) } catch (e) { OCEAN_NODE_LOGGER.error(`Error when deleting old logs: ${e.message}`) } try { - scheduleCleanExpiredC2DJobs(node.getDatabase()) + scheduleCleanExpiredC2DJobs(await node.getDatabase()) } catch (e) { OCEAN_NODE_LOGGER.error(`Error when deleting expired c2d jobs: ${e.message}`) } diff --git a/src/utils/database.ts b/src/utils/database.ts index b87de11a8..cad5d5745 100644 --- a/src/utils/database.ts +++ b/src/utils/database.ts @@ -1,24 +1,7 @@ import { OceanNodeDBConfig } from '../@types/OceanNode.js' -import { Database } from '../components/database/index.js' -import { getConfiguration } from './config.js' import { DB_TYPES } from './constants.js' import { URLUtils } from './url.js' -// lazy loading -let dbConnection: Database = null - -// lazy load env configuration and then db configuration -// we should be able to use this every where without dep cycle issues -export async function getDatabase(forceReload: boolean = false): Promise { - if 
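The C2D announce job above gains three guards: bail out when no compute engines are configured, wrap `fetchEnvironments()` so an engine or RPC failure only logs an error instead of killing the cron run, and skip advertising when P2P is disabled. Distilled, the pattern is as follows (the real job also aggregates each environment's resources before announcing, which is omitted here):

```typescript
// Sketch of the hardened cron-job shape used by p2pAnnounceC2D().
export async function announceComputeEnvironments(node: OceanNode): Promise<void> {
  const computeEngines = node.getC2DEngines()
  if (!computeEngines) return // node runs without C2D: nothing to announce

  let environments
  try {
    environments = await computeEngines.fetchEnvironments()
  } catch (err) {
    OCEAN_NODE_LOGGER.error(
      `announce: failed to fetch environments: ${err instanceof Error ? err.message : String(err)}`
    )
    return
  }

  const p2p = node.getP2PNode()
  if (!p2p) return // P2P interface disabled: nothing to advertise to

  for (const env of environments) {
    p2p.advertiseString(JSON.stringify({ c2d: env }))
  }
}
```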
(!dbConnection || forceReload) { - const { dbConfig } = await getConfiguration(true) - if (dbConfig && dbConfig.url) { - dbConnection = await Database.init(dbConfig) - } - } - return dbConnection -} - export function hasValidDBConfiguration(configuration: OceanNodeDBConfig): boolean { if (!configuration || !configuration.dbType) { return false diff --git a/src/utils/feeTokenValidation.ts b/src/utils/feeTokenValidation.ts index b1cbe359a..87f2f5e66 100644 --- a/src/utils/feeTokenValidation.ts +++ b/src/utils/feeTokenValidation.ts @@ -4,6 +4,7 @@ import type { OceanNodeConfig } from '../@types/OceanNode.js' import type { FeeTokens } from '../@types/Fees.js' import type { Blockchain } from './blockchain.js' import type { BlockchainRegistry } from '../components/BlockchainRegistry/index.js' +import type { ComputeEnvFeesStructure } from '../@types/C2D/C2D.js' import { getOceanArtifactsAdressesByChainId } from './address.js' import { CORE_LOGGER } from './logging/common.js' @@ -18,15 +19,63 @@ function formatFeeToken(token: UnsupportedFeeToken): string { return `chain=${token.chain}, token=${token.token}` } -export async function validateFeeTokensSupportedByOec( - config: OceanNodeConfig, +function dedupeFeeTokens(feeTokens: UnsupportedFeeToken[]): UnsupportedFeeToken[] { + const seen = new Set() + const unique: UnsupportedFeeToken[] = [] + + for (const feeToken of feeTokens) { + const key = `${feeToken.chain}:${feeToken.token.toLowerCase()}` + if (!seen.has(key)) { + seen.add(key) + unique.push(feeToken) + } + } + + return unique +} + +function addFeeTokensFromFees( + fees: ComputeEnvFeesStructure, + feeTokens: UnsupportedFeeToken[] +): void { + if (!fees) return + + for (const [chain, chainFees] of Object.entries(fees)) { + for (const fee of chainFees || []) { + if (fee.feeToken) { + feeTokens.push({ chain, token: fee.feeToken }) + } + } + } +} + +export function getDockerComputeFeeTokens( + config: OceanNodeConfig +): UnsupportedFeeToken[] { + const feeTokens: UnsupportedFeeToken[] = [] + + for (const dockerCompute of config?.dockerComputeEnvironments || []) { + addFeeTokensFromFees( + (dockerCompute as unknown as { fees?: ComputeEnvFeesStructure }).fees, + feeTokens + ) + + for (const environment of dockerCompute.environments || []) { + addFeeTokensFromFees(environment.fees as ComputeEnvFeesStructure, feeTokens) + } + } + + return dedupeFeeTokens(feeTokens) +} + +async function validateTokensSupportedByOec( + feeTokens: UnsupportedFeeToken[], blockchainRegistry: BlockchainRegistryLike ): Promise { - const feeTokens = config?.feeStrategy?.feeTokens || [] const unsupportedFeeTokens: UnsupportedFeeToken[] = [] for (const feeToken of feeTokens) { - const { chain, token } = feeToken as FeeTokens + const { chain, token } = feeToken const chainId = Number(chain) try { @@ -62,10 +111,33 @@ export async function validateFeeTokensSupportedByOec( return unsupportedFeeTokens } +export async function validateFeeTokensSupportedByOec( + config: OceanNodeConfig, + blockchainRegistry: BlockchainRegistryLike +): Promise { + const feeTokens = config?.feeStrategy?.feeTokens || [] + return await validateTokensSupportedByOec(feeTokens as FeeTokens[], blockchainRegistry) +} + +export async function validateDockerComputeFeeTokensSupportedByOec( + config: OceanNodeConfig, + blockchainRegistry: BlockchainRegistryLike +): Promise { + const feeTokens = getDockerComputeFeeTokens(config) + return await validateTokensSupportedByOec(feeTokens, blockchainRegistry) +} + export async function assertFeeTokensSupportedByOec( 
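The new helpers above gather every fee token referenced in `DOCKER_COMPUTE_ENVIRONMENTS`, both at the engine level and per environment, and de-duplicate them case-insensitively per chain before validation. A small usage sketch; the logger call and error formatting are illustrative:

```typescript
// Sketch: collecting and validating the Docker compute fee tokens.
const dockerFeeTokens = getDockerComputeFeeTokens(config)
// Each entry is { chain, token }, unique per `${chain}:${token.toLowerCase()}`.

const unsupported = await validateDockerComputeFeeTokensSupportedByOec(
  config,
  blockchainRegistry
)
if (unsupported.length > 0) {
  CORE_LOGGER.error(
    `Unsupported Docker compute fee token(s): ${unsupported
      .map((t) => `chain=${t.chain}, token=${t.token}`)
      .join('; ')}`
  )
}
```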
config: OceanNodeConfig, blockchainRegistry: BlockchainRegistryLike ): Promise { + if (config.skipFeeTokenValidation) { + CORE_LOGGER.warn( + 'Skipping fee token validation because skipFeeTokenValidation is enabled' + ) + return + } + const unsupportedFeeTokens = await validateFeeTokensSupportedByOec( config, blockchainRegistry @@ -79,3 +151,51 @@ export async function assertFeeTokensSupportedByOec( ) } } + +export async function assertDockerComputeFeeTokensSupportedByOec( + config: OceanNodeConfig, + blockchainRegistry: BlockchainRegistryLike +): Promise { + if (config.skipFeeTokenValidation) { + CORE_LOGGER.warn( + 'Skipping fee token validation because skipFeeTokenValidation is enabled' + ) + return + } + + const feeTokens = getDockerComputeFeeTokens(config) + if (feeTokens.length === 0) { + CORE_LOGGER.info( + 'No fee tokens configured in DOCKER_COMPUTE_ENVIRONMENTS. Skipping Docker compute fee token validation.' + ) + return + } + + const unsupportedFeeTokens = await validateTokensSupportedByOec( + feeTokens, + blockchainRegistry + ) + + if (unsupportedFeeTokens.length > 0) { + throw new Error( + `Unsupported fee token(s) configured in DOCKER_COMPUTE_ENVIRONMENTS: ${unsupportedFeeTokens + .map(formatFeeToken) + .join('; ')}` + ) + } +} + +export async function assertConfiguredFeeTokensSupportedByOec( + config: OceanNodeConfig, + blockchainRegistry: BlockchainRegistryLike +): Promise { + if (config.skipFeeTokenValidation) { + CORE_LOGGER.warn( + 'Skipping fee token validation because skipFeeTokenValidation is enabled' + ) + return + } + + await assertFeeTokensSupportedByOec(config, blockchainRegistry) + await assertDockerComputeFeeTokensSupportedByOec(config, blockchainRegistry) +}
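The new `assertConfiguredFeeTokensSupportedByOec` entry point runs both checks in sequence, and all three assertion helpers short-circuit with a warning when `config.skipFeeTokenValidation` is set. A hedged sketch of a call site; where and how the node actually invokes this is not part of this diff:

```typescript
// Sketch: failing fast at startup when a configured fee token is not supported.
try {
  await assertConfiguredFeeTokensSupportedByOec(config, blockchainRegistry)
} catch (err) {
  CORE_LOGGER.error(`Fee token validation failed: ${(err as Error).message}`)
  process.exit(1) // illustrative only; the node's actual reaction is not shown in this diff
}
```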