diff --git a/.claude/scheduled_tasks.lock b/.claude/scheduled_tasks.lock new file mode 100644 index 00000000000..23a22a47da8 --- /dev/null +++ b/.claude/scheduled_tasks.lock @@ -0,0 +1 @@ +{"sessionId":"d24b0e5d-b3be-44f7-9103-0a83513f7f13","pid":22016,"acquiredAt":1773312204314} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index b688815352b..92f9a974578 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1583,6 +1583,7 @@ dependencies = [ "drive-proof-verifier", "envy", "futures", + "grovedb-commitment-tree 4.0.0 (git+https://github.com/dashpay/grovedb?rev=7ecb8465fad750c7cddd5332adb6f97fcceb498b)", "hex", "http", "js-sys", @@ -1924,7 +1925,7 @@ dependencies = [ "dpp-json-convertible-derive", "env_logger 0.11.9", "getrandom 0.2.17", - "grovedb-commitment-tree", + "grovedb-commitment-tree 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "hex", "indexmap 2.13.0", "integer-encoding", @@ -1984,7 +1985,7 @@ dependencies = [ "dpp", "enum-map", "grovedb", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-epoch-based-storage-flags", "grovedb-path", "grovedb-storage", @@ -2032,7 +2033,7 @@ dependencies = [ "drive-proof-verifier", "envy", "file-rotate", - "grovedb-commitment-tree", + "grovedb-commitment-tree 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "hex", "indexmap 2.13.0", "integer-encoding", @@ -2308,6 +2309,18 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + 
[[package]] name = "fancy-regex" version = "0.13.0" @@ -2687,8 +2700,8 @@ dependencies = [ "bincode_derive", "blake3", "grovedb-bulk-append-tree", - "grovedb-commitment-tree", - "grovedb-costs", + "grovedb-commitment-tree 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-dense-fixed-sized-merkle-tree", "grovedb-element", "grovedb-merk", @@ -2722,7 +2735,7 @@ source = "git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808 dependencies = [ "bincode", "blake3", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-dense-fixed-sized-merkle-tree", "grovedb-merkle-mountain-range", "grovedb-query", @@ -2731,6 +2744,20 @@ dependencies = [ "thiserror 2.0.18", ] +[[package]] +name = "grovedb-commitment-tree" +version = "4.0.0" +source = "git+https://github.com/dashpay/grovedb?rev=7ecb8465fad750c7cddd5332adb6f97fcceb498b#7ecb8465fad750c7cddd5332adb6f97fcceb498b" +dependencies = [ + "blake3", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=7ecb8465fad750c7cddd5332adb6f97fcceb498b)", + "incrementalmerkletree", + "orchard", + "rusqlite", + "shardtree", + "thiserror 2.0.18", +] + [[package]] name = "grovedb-commitment-tree" version = "4.0.0" @@ -2738,7 +2765,7 @@ source = "git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808 dependencies = [ "blake3", "grovedb-bulk-append-tree", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-storage", "incrementalmerkletree", "orchard", @@ -2746,6 +2773,16 @@ dependencies = [ "thiserror 2.0.18", ] +[[package]] +name = "grovedb-costs" +version = "4.0.0" +source = 
"git+https://github.com/dashpay/grovedb?rev=7ecb8465fad750c7cddd5332adb6f97fcceb498b#7ecb8465fad750c7cddd5332adb6f97fcceb498b" +dependencies = [ + "integer-encoding", + "intmap", + "thiserror 2.0.18", +] + [[package]] name = "grovedb-costs" version = "4.0.0" @@ -2763,7 +2800,7 @@ source = "git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808 dependencies = [ "bincode", "blake3", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-query", "grovedb-storage", "thiserror 2.0.18", @@ -2789,7 +2826,7 @@ name = "grovedb-epoch-based-storage-flags" version = "4.0.0" source = "git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346#dd99ed1db0350e5f39127573808dd172c6bc2346" dependencies = [ - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "hex", "integer-encoding", "intmap", @@ -2807,7 +2844,7 @@ dependencies = [ "byteorder", "colored", "ed", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-element", "grovedb-path", "grovedb-query", @@ -2829,7 +2866,7 @@ source = "git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808 dependencies = [ "bincode", "blake3", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-storage", ] @@ -2849,7 +2886,7 @@ dependencies = [ "bincode", "byteorder", "ed", - "grovedb-costs", + "grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-storage", "hex", "indexmap 2.13.0", @@ -2863,7 +2900,7 @@ version = "4.0.0" source = "git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346#dd99ed1db0350e5f39127573808dd172c6bc2346" dependencies = [ "blake3", - "grovedb-costs", + 
"grovedb-costs 4.0.0 (git+https://github.com/dashpay/grovedb?rev=dd99ed1db0350e5f39127573808dd172c6bc2346)", "grovedb-path", "grovedb-visualize", "hex", @@ -3026,6 +3063,15 @@ dependencies = [ "foldhash 0.2.0", ] +[[package]] +name = "hashlink" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" +dependencies = [ + "hashbrown 0.16.1", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -3913,6 +3959,17 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "libsqlite3-sys" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.25" @@ -5851,6 +5908,16 @@ dependencies = [ "libc", ] +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rtoolbox" version = "0.0.3" @@ -5861,6 +5928,21 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rusqlite" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" +dependencies = [ + "bitflags 2.11.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", + "sqlite-wasm-rs", +] + [[package]] name = "rust_decimal" version = "1.40.0" @@ -6198,17 +6280,7 @@ dependencies = [ [[package]] name = "serde-wasm-bindgen" version = "0.5.0" -source = "git+https://github.com/QuantumExplorer/serde-wasm-bindgen?branch=feat%2Fnot_human_readable#121d1f7fbf62cb97f74b91626a1b23851098cc82" -dependencies = [ - "js-sys", - "serde", 
- "wasm-bindgen", -] - -[[package]] -name = "serde-wasm-bindgen" -version = "0.5.0" -source = "git+https://github.com/dashpay/serde-wasm-bindgen?branch=fix%2Fuint8array-to-bytes#0d3e1a8ff058b400bab3a8ececd2fb9581e8c287" +source = "git+https://github.com/dashpay/serde-wasm-bindgen?rev=0d3e1a8ff058b400bab3a8ececd2fb9581e8c287#0d3e1a8ff058b400bab3a8ececd2fb9581e8c287" dependencies = [ "js-sys", "serde", @@ -6604,6 +6676,18 @@ dependencies = [ "der", ] +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" +dependencies = [ + "cc", + "js-sys", + "rsqlite-vfs", + "wasm-bindgen", +] + [[package]] name = "sqlparser" version = "0.38.0" @@ -7984,7 +8068,7 @@ dependencies = [ "num_enum 0.7.5", "paste", "serde", - "serde-wasm-bindgen 0.5.0 (git+https://github.com/QuantumExplorer/serde-wasm-bindgen?branch=feat%2Fnot_human_readable)", + "serde-wasm-bindgen 0.5.0", "serde_json", "thiserror 2.0.18", "wasm-bindgen", @@ -8004,7 +8088,7 @@ dependencies = [ "hex", "js-sys", "serde", - "serde-wasm-bindgen 0.5.0 (git+https://github.com/dashpay/serde-wasm-bindgen?branch=fix%2Fuint8array-to-bytes)", + "serde-wasm-bindgen 0.5.0", "serde_json", "sha2", "thiserror 1.0.69", @@ -8088,7 +8172,7 @@ dependencies = [ "rs-dapi-client", "rs-sdk-trusted-context-provider", "serde", - "serde-wasm-bindgen 0.5.0 (git+https://github.com/dashpay/serde-wasm-bindgen?branch=fix%2Fuint8array-to-bytes)", + "serde-wasm-bindgen 0.5.0", "serde_json", "sha2", "thiserror 2.0.18", diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index da6e837f15e..07199eb0c16 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -21,6 +21,7 @@ - [Validation Pipeline](state-transitions/validation-pipeline.md) - [Transform Into Action](state-transitions/transform-into-action.md) - [Drive Operations](state-transitions/drive-operations.md) +- [Return 
Proofs](state-transitions/return-proofs.md) # Fees @@ -28,6 +29,10 @@ - [Platform Address Fees](fees/platform-address-fees.md) - [Shielded Transaction Fees](fees/shielded-fees.md) +# Shielded Pool + +- [Anchors and Spend Proofs](shielded-pool/anchors-and-proofs.md) + # Error Handling - [Consensus Errors](error-handling/consensus-errors.md) diff --git a/book/src/shielded-pool/anchors-and-proofs.md b/book/src/shielded-pool/anchors-and-proofs.md new file mode 100644 index 00000000000..7f26b789bb1 --- /dev/null +++ b/book/src/shielded-pool/anchors-and-proofs.md @@ -0,0 +1,233 @@ +# Anchors and Spend Proofs + +This chapter explains why anchors -- Merkle roots of the commitment tree -- are the +mechanism that makes shielded spend proofs work, and how the platform records and +validates them. + +## The Core Problem + +When a user spends a shielded note, they must prove to the network that the note +exists without revealing *which* note it is. This is the fundamental tension of a +privacy system: the network needs to be convinced that value is real, but the spender +needs to hide which specific entry they are spending. + +## What Is an Anchor? + +An **anchor** is the root hash of the Sinsemilla Merkle tree at a particular block +height. The platform maintains a commitment tree -- a binary Merkle tree of depth 32, +hashed with the Sinsemilla hash function -- that holds every note commitment (`cmx`) +ever created in the shielded pool. Each time a new note is created (via Shield, +ShieldedTransfer, or ShieldFromAssetLock), its `cmx` is appended to this tree. + +At the end of each block, after all state transitions have been applied, the platform +computes the current root hash (anchor) of the commitment tree. 
If the anchor changed +from the previous block (i.e., new notes were added), it is recorded on-chain: + +``` +Anchors tree: block_height (8 bytes BE) --> anchor_bytes (32 bytes) +``` + +The anchors tree lives at `[AddressBalances, "s", [6]]` -- a subtree of the shielded +credit pool. + +## Why Anchors Make Spend Proofs Possible + +A spend proof is a zero-knowledge proof that says: + +> "I know a note with value V at position P in the commitment tree, and I have the +> spending key that controls it. The tree root at the time I am referencing was A." + +The proof reveals *none* of those details to the verifier. It only reveals: + +1. A **nullifier** (a deterministic, unique tag derived from the note and spending + key). The nullifier prevents double-spending -- once published, the same note + cannot be spent again. +2. The **anchor** (A) that the proof was computed against. + +### The Trust Chain + +The anchor creates a chain of trust between the spender's private knowledge and the +public state: + +``` +Note exists in tree <-- Merkle witness proves inclusion <-- Anchor binds to state + (private) (inside ZK proof) (public, on-chain) +``` + +Step by step: + +1. **The spender knows a note at position P.** They have the note's value, recipient + address, randomness (rseed), and the spending key. This is all private. + +2. **The spender generates a Merkle witness.** A Merkle witness (authentication path) + is a sequence of 32 sibling hashes from the leaf at position P up to the root. + Given the leaf and the witness, anyone can recompute the root. The witness proves + that a specific leaf is part of the tree *at the root the witness was generated for*. + +3. 
**The spender builds a ZK proof.** The Halo 2 circuit takes as private inputs: + - The note (value, address, rseed, rho) + - The spending key + - The Merkle witness (32 sibling hashes) + + And as public inputs: + - The nullifier (derived deterministically from the note and key) + - The anchor (the root hash the witness was computed against) + + The circuit verifies internally that: + - The note commitment `cmx` is correctly derived from the note fields + - The Merkle witness is a valid path from `cmx` to the declared anchor + - The nullifier is correctly derived from the note and spending key + - The spending key matches the note's recipient address + + If all checks pass, the proof is valid. The verifier (the platform) learns the + nullifier and anchor, but nothing else. + +4. **The platform validates the anchor.** The platform checks that the anchor in the + proof matches a historical anchor that was actually recorded on-chain. This is + critical -- without this check, a spender could fabricate a commitment tree that + contains their fake note and produce a valid proof against that fake root. The + anchor check ties the proof to the *real* tree state. + +5. **The platform checks the nullifier.** If the nullifier has been seen before, the + spend is rejected (double-spend attempt). If it is new, it is recorded in the + nullifier tree to prevent future reuse. + +### Why This Preserves Privacy + +The anchor is a single 32-byte hash that represents the entire state of the +commitment tree at a point in time. Many notes share the same anchor (every note +that existed at that block height). The platform learns that the spender's note is +*somewhere* in the tree at that block height, but not *where* -- the tree could +contain millions of notes, and the proof reveals no information about position. + +Furthermore, the nullifier is deterministic but unlinkable to the note commitment. 
+Given a nullifier, you cannot determine which `cmx` it corresponds to without +knowing the spending key. This means: + +- You cannot link a spend to the transaction that created the note +- You cannot determine which of the millions of notes in the tree was spent +- You cannot even determine whether two spends came from the same wallet + (different notes produce different nullifiers, even from the same key) + +The only information leaked is the anchor's block height, which reveals an +upper bound on when the note was created. Using an older anchor widens the +anonymity set (more notes existed at that point), while using a very recent anchor +narrows it slightly. Clients should use recent-but-not-latest anchors for a good +balance of privacy and liveness. + +## The Full Lifecycle + +``` +1. Shield: Client deposits credits, platform appends cmx to commitment tree + | +2. Block end: Platform computes new anchor = root(commitment_tree) + If changed, stores (block_height -> anchor) in anchors tree + | +3. Sync: Client fetches notes, appends all cmx values to local tree + Client checkpoints at each block + | +4. Spend: Client picks a note and its position in the local tree + Client picks a historical anchor (must exist on-chain) + Client generates Merkle witness at that anchor's checkpoint + Client builds ZK proof with (note, witness, anchor) as inputs + | +5. Verify: Platform receives the state transition containing the proof + Platform checks: anchor in proof matches a recorded on-chain anchor + Platform checks: nullifier has not been seen before + Platform verifies: Halo 2 proof is valid + Platform records: nullifier in nullifier tree (prevents reuse) + Platform updates: pool balance (deducts value_balance) +``` + +## Why Historical Anchors Are Necessary + +The platform does not require spenders to use the *latest* anchor. Any anchor that +was ever recorded on-chain is valid. This is essential for two reasons: + +**1. 
Concurrency.** Between the time a client builds a proof and the time the +platform processes it, other transactions may have added notes to the tree, changing +the anchor. If only the latest anchor were valid, proofs would become stale almost +immediately. + +**2. Privacy.** If all spenders were forced to use the latest anchor, it would +reveal that their note was created before that block. With historical anchors, a +spender can reference an anchor from any past block, making it impossible to narrow +down when the note was created beyond "sometime before block N" where N can be +any block the spender chooses. + +The platform simply checks that the submitted anchor exists in the anchors tree. +There is no requirement for recency beyond the fact that the anchor must correspond +to a real state of the commitment tree. + +## Anchor Recording: Implementation + +The anchor recording happens in `record_shielded_pool_anchor_if_changed`, which +runs at the end of each block proposal (after all state transitions have been +processed): + +1. **Read the current anchor** from the CommitmentTree at + `[AddressBalances, "s", [1]]` using `commitment_tree_anchor()`. + +2. **Query the most recent stored anchor** from the anchors tree at + `[AddressBalances, "s", [6]]` (descending query, limit 1). + +3. **Compare.** If the current anchor differs from the latest stored anchor + (or no anchor has been stored yet and the tree is non-empty), store the new + anchor keyed by block height. + +This is a post-processing step, not a per-transaction step. Even if multiple +shielded transactions in the same block add notes, only one anchor is recorded +for the entire block. This keeps the anchors tree compact. 
+ +### Version Gating + +Anchor recording uses the standard `OptionalFeatureVersion` dispatch pattern: + +```rust +match platform_version.drive_abci.methods.block_end.record_shielded_pool_anchor { + None => Ok(()), // Protocol versions before v12 -- no shielded pool + Some(0) => self.record_shielded_pool_anchor_if_changed_v0(...), + Some(v) => Err(UnknownVersionMismatch { ... }), +} +``` + +Protocol versions 1--11 have this field set to `None`, so the function is a no-op. +Protocol version 12 (which introduces the shielded pool) sets it to `Some(0)`. + +## Client-Side: Generating Witnesses + +The client maintains a `ClientCommitmentTree` -- a local mirror of the on-chain +Sinsemilla tree. As the client syncs notes from the platform: + +1. Every `cmx` encountered is appended to the local tree (marked as `Retention::Marked` + for the client's own notes, `Retention::Ephemeral` for others). +2. After processing each block's notes, the client calls `tree.checkpoint(block_height)`. +3. To spend a note at position P, the client calls `tree.witness(position, 0)` to + obtain a `MerklePath` -- the 32-sibling authentication path. +4. The client calls `tree.anchor()` to get the current root hash, which must match + a historical anchor on the platform. + +The `ClientCommitmentTree` retains enough internal state to produce witnesses at any +checkpoint it has stored (up to its configured retention limit). This allows the +client to generate witnesses against past anchors, not just the latest one. + +## Security Properties + +| Property | How Anchors Help | +|---|---| +| **Soundness** | A proof against anchor A is only valid if the note actually exists in the tree at state A. A fake note would require finding a Sinsemilla hash collision. | +| **Privacy** | The anchor reveals only "the note existed at or before block N". The anonymity set is every note in the tree at that block. | +| **Double-spend prevention** | The nullifier (not the anchor) prevents double-spending. 
The anchor proves the note *exists*; the nullifier ensures it is spent only *once*. | +| **Liveness** | Historical anchors remain valid indefinitely, so proofs never expire due to tree state changes. | +| **Binding** | The anchor is included in the Halo 2 public inputs and in the bundle commitment. Changing the anchor after proof generation invalidates the proof. | + +## Relationship to Other Components + +- **Fees:** The fee is encoded in `value_balance` and bound to the proof via the + binding signature. See [Shielded Transaction Fees](../fees/shielded-fees.md). +- **Return proofs:** Platform return proofs for shielded transitions prove the + aggregate pool balance changed, not individual notes. See + [Return Proofs](../state-transitions/return-proofs.md). +- **Light client sync:** Clients fetch historical anchors via the `GetShieldedAnchors` + gRPC query to verify their local tree state matches the platform. See the + [Client Integration Guide](../../docs/SHIELDED_CLIENT_INTEGRATION.md). diff --git a/book/src/state-transitions/return-proofs.md b/book/src/state-transitions/return-proofs.md new file mode 100644 index 00000000000..f0f4e3a56a6 --- /dev/null +++ b/book/src/state-transitions/return-proofs.md @@ -0,0 +1,182 @@ +# Return Proofs + +After a state transition is confirmed in a block, clients can request a **return proof** — +a GroveDB Merkle proof demonstrating that the expected state changes were actually applied. +This lets light clients verify execution without trusting the node. + +## How It Works + +The flow is: + +1. Client broadcasts a state transition via DAPI. +2. Client calls `waitForStateTransitionResult` with `prove: true`. +3. The node waits for the transition to be included in a block. +4. Drive deserializes the transition and calls `prove_state_transition()`. +5. This builds a `PathQuery` describing which GroveDB paths/keys the transition affected. +6. GroveDB generates a Merkle proof covering exactly those paths. +7. 
The proof is returned to the client in the response. + +The client then calls `verify_state_transition_was_executed_with_proof()` to check the +proof against the known app hash (root hash). If verification succeeds, the client receives +a `StateTransitionProofResult` containing the verified data. + +## Key Design Decisions + +- **Minimal proofs.** Only the paths/keys affected by the transition are included, not the + entire state tree. This keeps proofs small. +- **Type-specific.** Each transition type proves different data — an identity create proves + the full identity, while a top-up only proves the new balance. +- **On-demand.** Proofs are generated after confirmation, not during validation. +- **Batch limitation.** Batch transitions (documents/tokens) currently support proofs only + for single-transition batches. +- **Limits removed.** All `PathQuery` limits are set to `None` before proof generation to + ensure the full result set is included. + +## What Each Transition Proves + +### Identity Transitions + +| Transition | What's Proved | Verified Result | +|---|---|---| +| **IdentityCreate** | Full identity: data, balance, nonce, all public keys | `VerifiedIdentity(Identity)` | +| **IdentityTopUp** | Balance and revision only | `VerifiedPartialIdentity { balance, revision }` | +| **IdentityCreditWithdrawal** | Balance only | `VerifiedPartialIdentity { balance }` | +| **IdentityUpdate** | All public keys | `VerifiedPartialIdentity { loaded_public_keys }` | +| **IdentityCreditTransfer** | Sender balance + recipient balance | `VerifiedBalanceTransfer(sender, recipient)` | + +The proof generation uses these Drive query helpers: + +- `Drive::full_identity_query()` — identity tree + balance + nonce + all key subtree +- `Drive::revision_and_balance_path_query()` — just balance and revision elements +- `Drive::identity_balance_query()` — just the balance element +- `Drive::identity_all_keys_query()` — identity key subtree + +For **IdentityCreditTransfer**, the sender 
and recipient balance queries are merged into +a single `PathQuery` via `PathQuery::merge()`. + +### Identity + Address Transitions + +| Transition | What's Proved | Verified Result | +|---|---|---| +| **IdentityCreditTransferToAddresses** | Identity balance/revision + recipient address balances | `VerifiedIdentityWithAddressInfos` | +| **IdentityCreateFromAddresses** | Full identity + all input/output address balances | `VerifiedIdentityFullWithAddressInfos` | +| **IdentityTopUpFromAddresses** | Identity balance/revision + input/output address balances | `VerifiedIdentityWithAddressInfos` | + +These combine `Drive::revision_and_balance_path_query()` (or `full_identity_query()` for +create) with `Drive::balances_for_clear_addresses_query()`, merged into a single proof. + +### Address Fund Transitions + +| Transition | What's Proved | Verified Result | +|---|---|---| +| **AddressFundsTransfer** | All input + output address balances | `VerifiedAddressInfos` | +| **AddressFundingFromAssetLock** | All input + output address balances | `VerifiedAddressInfos` | +| **AddressCreditWithdrawal** | Input addresses + output address balance | `VerifiedAddressInfos` | + +All use `Drive::balances_for_clear_addresses_query()`. Each address entry in the proof +contains its nonce and credit balance, allowing the client to verify post-transition +balances. + +### Data Contract Transitions + +| Transition | What's Proved | Verified Result | +|---|---|---| +| **DataContractCreate** | The contract itself | `VerifiedDataContract(DataContract)` | +| **DataContractUpdate** | The updated contract | `VerifiedDataContract(DataContract)` | + +The query depends on whether the contract keeps history: +- Historical: `Drive::fetch_historical_contracts_query()` +- Non-historical: `Drive::fetch_non_historical_contracts_query()` + +Verification reconstructs the contract from the proof and compares it field-by-field +against the state transition's contract data via `first_mismatch()`. 
+ +### Document Transitions (via Batch) + +| Operation | What's Proved | Verified Result | +|---|---|---| +| **Create** | The created document | `VerifiedDocuments({ id: Some(doc) })` | +| **Replace** | The replaced document | `VerifiedDocuments({ id: Some(doc) })` | +| **Delete** | Absence of the document | `VerifiedDocuments({ id: None })` | +| **Transfer** | The document (with new owner) | `VerifiedDocuments({ id: Some(doc) })` | +| **UpdatePrice** | The document (with new price) | `VerifiedDocuments({ id: Some(doc) })` | +| **Purchase** | The document (with new owner) | `VerifiedDocuments({ id: Some(doc) })` | + +All document operations use `SingleDocumentDriveQuery` to construct the path query. +For creates with prefunded voting balances, the query uses `Contested` status to look +up the document in the contested index tree instead of the regular document tree. + +Verification checks: +- **Create/Replace:** Reconstructs the expected document from the transition and compares + fields (ignoring time-based fields and transient fields). +- **Delete:** Asserts the document is absent from the proof. +- **Transfer/Purchase:** Verifies the document's `owner_id` matches the expected recipient. +- **UpdatePrice:** Verifies the document's `price` field matches the transition's price. + +### Token Transitions (via Batch) + +Token proof behavior depends on whether the token keeps historical documents for that +operation type. When history is enabled, the proof contains a historical document in the +token history contract. When disabled, the proof contains the raw state (balance, info, etc.). 
+ +| Operation | History Off | History On | +|---|---|---| +| **Mint** | Recipient token balance | Historical mint document | +| **Burn** | Owner token balance | Historical burn document | +| **Transfer** | Sender + recipient balances | Historical transfer document | +| **Freeze** | Frozen identity's token info | Historical freeze document | +| **Unfreeze** | Unfrozen identity's token info | Historical unfreeze document | +| **DirectPurchase** | Purchaser token balance | Historical purchase document | +| **SetPriceForDirectPurchase** | Token pricing schedule | Historical pricing document | +| **DestroyFrozenFunds** | Always historical document | — | +| **EmergencyAction** | Always historical document | — | +| **ConfigUpdate** | Always historical document | — | +| **Claim** | Always historical document | — | + +**Group actions** add an extra layer: when a token transition uses group consensus +(multi-sig), the proof also includes the group action's signer and total power, plus the +action status (active vs closed). The verified result becomes one of the +`VerifiedTokenGroupAction*` variants. + +### Masternode Vote + +| Transition | What's Proved | Verified Result | +|---|---|---| +| **MasternodeVote** | The vote poll state for the specific vote | `VerifiedMasternodeVote(Vote)` | + +Uses `IdentityBasedVoteDriveQuery` to construct the path query from the voter's ProTxHash +and the resource vote poll. Verification checks the vote exists and matches expectations. 
+ +### Shielded Transitions + +| Transition | Proof Generation | Proof Verification | +|---|---|---| +| **Shield** | Not yet supported | Verifies input address balances (`VerifiedAddressInfos`) | +| **Unshield** | Not yet supported | Verifies output address balance (`VerifiedAddressInfos`) | +| **ShieldedTransfer** | Not yet supported | Verifies shielded pool total balance (`VerifiedShieldedPoolState`) | +| **ShieldFromAssetLock** | Not yet supported | Verifies shielded pool total balance (`VerifiedShieldedPoolState`) | +| **ShieldedWithdrawal** | Not yet supported | Verifies shielded pool total balance (`VerifiedShieldedPoolState`) | + +Proof generation currently returns an error for all shielded transitions. The verification +side has been implemented in anticipation: + +- **Shield** verifies the input platform address balances were debited. +- **Unshield** verifies the output platform address balance was credited. +- **ShieldedTransfer, ShieldFromAssetLock, ShieldedWithdrawal** verify the shielded credit + pool's `total_balance` SumItem, confirming the pool balance changed as expected. + +Note that shielded proofs intentionally do **not** reveal which notes were created or spent +(that would break privacy). Only aggregate pool state or transparent address balances are +provable. 
+ +## Code Locations + +| Component | Path | +|---|---| +| Proof generation | `rs-drive/src/prove/prove_state_transition/v0/mod.rs` | +| Proof verification | `rs-drive/src/verify/state_transition/verify_state_transition_was_executed_with_proof/v0/mod.rs` | +| Proof result enum | `rs-dpp/src/state_transition/proof_result.rs` | +| DAPI wait service | `rs-dapi/src/services/platform_service/wait_for_state_transition_result.rs` | +| ABCI query handler | `rs-drive-abci/src/query/proofs/v0/mod.rs` | +| Shielded pool verify | `rs-drive/src/verify/shielded/verify_shielded_pool_state/v0/mod.rs` | +| Address balance verify | `rs-drive/src/verify/address_funds/verify_addresses_infos/v0/mod.rs` | diff --git a/docs/SHIELDED_CLIENT_INTEGRATION.md b/docs/SHIELDED_CLIENT_INTEGRATION.md new file mode 100644 index 00000000000..8fb338fe932 --- /dev/null +++ b/docs/SHIELDED_CLIENT_INTEGRATION.md @@ -0,0 +1,854 @@ +# Shielded Pool Client Integration Guide + +This guide explains how to build client applications that interact with Dash Platform's shielded credit pool. It covers key management, bundle construction, state transition creation, note tracking, and light client synchronization. + +For the protocol specification, see [DIP-0040](../dip/dip-0040.md) (shielded credit pool), [DIP-0041](../dip/dip-0041.md) (L1 bridge), and [DIP-0042](../dip/dip-0042.md) (light client syncing). + +## Table of Contents + +1. [Overview](#1-overview) +2. [Dependencies](#2-dependencies) +3. [Key Management](#3-key-management) +4. [Commitment Tree and Note Tracking](#4-commitment-tree-and-note-tracking) +5. [Platform Sighash](#5-platform-sighash) +6. [Building Shielded State Transitions](#6-building-shielded-state-transitions) + 1. [Shield (Type 15)](#61-shield-type-15) + 2. [ShieldFromAssetLock (Type 18)](#62-shieldfromassetlock-type-18) + 3. [ShieldedTransfer (Type 16)](#63-shieldedtransfer-type-16) + 4. [Unshield (Type 17)](#64-unshield-type-17) + 5. 
[ShieldedWithdrawal (Type 19)](#65-shieldedwithdrawal-type-19) +7. [Bundle Serialization](#7-bundle-serialization) +8. [Light Client Syncing](#8-light-client-syncing) +9. [Trial Decryption](#9-trial-decryption) +10. [Fee Model](#10-fee-model) +11. [Security Considerations](#11-security-considerations) + +--- + +## 1. Overview + +The shielded credit pool enables private value transfers on Dash Platform using the Zcash Orchard protocol. Five state transition types move credits between transparent and shielded domains: + +| Type ID | Name | Direction | When to Use | +|---------|------|-----------|-------------| +| 15 | Shield | Transparent -> Pool | Deposit platform credits into the shielded pool | +| 16 | ShieldedTransfer | Pool -> Pool | Transfer value privately within the pool | +| 17 | Unshield | Pool -> Transparent | Withdraw credits to a platform address | +| 18 | ShieldFromAssetLock | Core L1 -> Pool | Deposit directly from a Dash Core asset lock | +| 19 | ShieldedWithdrawal | Pool -> Core L1 | Withdraw credits to a Dash Core address | + +All five types carry Orchard bundle data: serialized actions, a Halo 2 zero-knowledge proof, and RedPallas signatures. The client constructs these bundles using the Orchard builder API, then wraps them in the appropriate state transition struct. + +### Cryptographic Primitives + +| Primitive | Purpose | +|-----------|---------| +| Halo 2 | Zero-knowledge proof system (no trusted setup) | +| RedPallas | Re-randomizable Schnorr signatures on the Pallas curve | +| Sinsemilla | Hash-based commitment scheme for the Merkle tree | +| BLAKE2b-256 | Bundle commitment computation (per ZIP-244) | +| SHA-256 | Platform sighash computation | + +--- + +## 2. 
Dependencies + +The Dash Platform SDK (`dash-sdk`) re-exports all necessary Orchard and commitment tree types behind the `shielded` feature: + +```toml +[dependencies] +dash-sdk = { version = "3", features = ["shielded"] } +``` + +The `shielded` feature enables `ClientCommitmentTree` for wallet-side note tracking and Merkle witness generation, as well as the Orchard builder for constructing shielded bundles. All Orchard types are re-exported from `dash_sdk::grovedb_commitment_tree`: + +```rust +use dash_sdk::grovedb_commitment_tree::{ + // Builder + Builder, BundleType, + // Key management + SpendingKey, FullViewingKey, IncomingViewingKey, OutgoingViewingKey, + SpendAuthorizingKey, Scope, + // Bundle types + Bundle, Authorized, Flags, Action, + // Memo types (Dash uses 36-byte memos, not Zcash 512-byte) + DashMemo, NoteBytesData, + // Proof creation/verification + ProvingKey, VerifyingKey, + // Note types + Note, NoteValue, PaymentAddress, + ExtractedNoteCommitment, Nullifier, Rho, TransmittedNoteCiphertext, + // Tree types + Anchor, MerklePath, MerkleHashOrchard, + // Client tree + ClientCommitmentTree, Position, Retention, +}; +``` + +For the platform sighash, use the re-exported `dpp`: + +```rust +use dash_sdk::dpp::shielded::compute_platform_sighash; +``` + +Alternatively, for projects that don't use the full SDK, the crate can be used directly: + +```toml +[dependencies] +grovedb-commitment-tree = { version = "4", features = ["client"] } +``` + +--- + +## 3. 
Key Management + +### Key Hierarchy + +The Orchard key hierarchy derives all keys from a single 32-byte spending key: + +``` +SpendingKey (sk) + | + +-- SpendAuthorizingKey (ask) -- signs spend actions + | + +-- FullViewingKey (fvk) -- derives all viewing keys + addresses + | + +-- IncomingViewingKey (ivk) -- detects incoming notes (trial decryption) + | + +-- OutgoingViewingKey (ovk) -- recovers sent notes (wallet recovery) + | + +-- PaymentAddress -- derived per-contact diversified address +``` + +### Creating Keys + +```rust +use grovedb_commitment_tree::{ + SpendingKey, FullViewingKey, SpendAuthorizingKey, + IncomingViewingKey, OutgoingViewingKey, Scope, +}; + +// Generate or load a 32-byte spending key seed +let sk = SpendingKey::from_bytes(seed_bytes) + .expect("invalid spending key bytes"); + +// Derive all other keys +let fvk = FullViewingKey::from(&sk); +let ask = SpendAuthorizingKey::from(&sk); + +// Derive payment addresses (use different indices for different contacts) +let default_address = fvk.address_at(0u32, Scope::External); +let contact_address = fvk.address_at(1u32, Scope::External); + +// Viewing keys for note detection +let ivk: IncomingViewingKey = fvk.to_ivk(Scope::External); +let ovk: OutgoingViewingKey = fvk.to_ovk(Scope::External); +``` + +### Key Storage + +- **SpendingKey**: Must be stored encrypted. This is the master secret -- anyone who obtains it can spend all shielded funds. +- **FullViewingKey**: Allows detecting all incoming and outgoing notes. Store securely but does not enable spending. +- **IncomingViewingKey**: Allows detecting only incoming notes. Safe to share with a watch-only server for filtered sync (DIP-0043). +- **PaymentAddress**: Safe to share publicly. Give a unique diversified address to each contact for privacy. + +--- + +## 4. 
Commitment Tree and Note Tracking + +### Server-Side Storage: BulkAppendTree + +On the platform, encrypted notes are stored in a **CommitmentTree** element backed by a **BulkAppendTree** — a two-level append-only authenticated data structure: + +``` +CommitmentTree (epoch_size = 2048) + | + +-- Buffer (dense Merkle tree, up to 2048 entries) + | Entries 0..2047 of the current epoch + | + +-- MMR (Merkle Mountain Range of completed epochs) + Epoch 0: entries 0..2047 (immutable blob, CDN-cacheable) + Epoch 1: entries 2048..4095 (immutable blob, CDN-cacheable) + ... +``` + +When the buffer fills (2048 notes), all entries are compacted into an immutable epoch blob and appended to the MMR. This gives: +- **O(1) append** for new notes +- **O(log n) authenticated reads** by global position +- **CDN-cacheable epoch blobs** for bulk syncing (completed epochs never change) + +Each note is stored as `cmx (32 bytes) || encrypted_note (216 bytes)` = 248 bytes, accessed by its global position (0-indexed `u64`). + +Separately, the CommitmentTree maintains a **Sinsemilla frontier** in auxiliary storage, used to compute the Orchard anchor (Merkle root) at the end of each block. + +### Client-Side: ClientCommitmentTree + +The `ClientCommitmentTree` maintains a local copy of the on-chain Sinsemilla Merkle tree (depth 32). 
It supports: + +- Appending note commitments as they appear on-chain +- Checkpointing after each block +- Generating Merkle witnesses (authentication paths) for spending notes + +```rust +use grovedb_commitment_tree::{ClientCommitmentTree, Retention, Position, Anchor, MerklePath}; + +// Create a new client tree (retain up to 1000 checkpoints) +let mut tree = ClientCommitmentTree::new(1000); + +// Append notes as they appear on-chain (in global position order) +// Use Retention::Marked for notes belonging to this wallet (need witnesses later) +// Use Retention::Ephemeral for notes belonging to other wallets +tree.append(cmx_bytes, Retention::Marked)?; // Our note +tree.append(other_cmx, Retention::Ephemeral)?; // Someone else's note + +// Checkpoint after each block +tree.checkpoint(block_height)?; + +// Get the current anchor (Merkle root) +let anchor: Anchor = tree.anchor()?; + +// Generate a witness for spending a note at a known position +let merkle_path: MerklePath = tree.witness(position, 0)? + .expect("witness should exist for marked leaf"); +``` + +The `ClientCommitmentTree` tracks the Sinsemilla tree only — it does not replicate the BulkAppendTree structure. The server stores notes in the BulkAppendTree for efficient retrieval; the client appends cmx values to its Sinsemilla tree for witness generation. 
+ +### Wallet Note State + +A wallet tracks each note through its lifecycle: + +``` +Created (cmx appended to tree) + | + +-- Unspent (nullifier not seen on-chain) + | | + | +-- Spendable (witness available at current anchor) + | + +-- Spent (nullifier published on-chain) +``` + +For each detected note, store: + +| Field | Source | Purpose | +|-------|--------|---------| +| `Note` | Trial decryption | The note object (value, address, rho, rseed) | +| `Position` | Tree append order (= global position) | Location in commitment tree (for witness generation) | +| `cmx` | `ExtractedNoteCommitment::from(note.commitment())` | For tree tracking | +| `nullifier` | Known from note + spending key | To detect when the note is spent | +| `block_height` | Block where cmx appeared | For sync tracking | + +--- + +## 5. Platform Sighash + +The **platform sighash** cryptographically binds Orchard bundle data to platform-specific transparent fields. It is the hash that all Orchard signatures commit to. + +``` +sighash = SHA-256("DashPlatformSighash" || bundle_commitment || extra_data) +``` + +Where: +- `"DashPlatformSighash"` is a fixed 19-byte ASCII domain separator +- `bundle_commitment` is the 32-byte BLAKE2b-256 Orchard bundle commitment (per ZIP-244), covering: flags, value_balance, anchor, and all action fields (nullifier, rk, cmx, cv_net, encrypted_note) -- but NOT signatures or proof +- `extra_data` varies by transition type: + +| Transition | extra_data | Rationale | +|------------|------------|-----------| +| Shield | empty (`&[]`) | Witness signatures already authenticate inputs | +| ShieldFromAssetLock | empty (`&[]`) | Asset lock proof authenticates the source | +| ShieldedTransfer | empty (`&[]`) | No transparent fields exist | +| Unshield | `output_address.to_bytes() \|\| amount.to_le_bytes()` | Binds destination and amount to the proof | +| ShieldedWithdrawal | `output_script \|\| amount.to_le_bytes()` | Binds Core script and amount to the proof | + +### Computing the 
Sighash + +```rust +use dpp::shielded::compute_platform_sighash; + +// After building the bundle but before signing: +let bundle_commitment: [u8; 32] = unauthorized_bundle.commitment().into(); + +// For Shield, ShieldFromAssetLock, or ShieldedTransfer (no extra data): +let sighash = compute_platform_sighash(&bundle_commitment, &[]); + +// For Unshield (bind output_address and amount): +let mut extra_data = output_address.to_bytes(); +extra_data.extend_from_slice(&amount.to_le_bytes()); +let sighash = compute_platform_sighash(&bundle_commitment, &extra_data); + +// For ShieldedWithdrawal (bind output_script and amount): +let mut extra_data = output_script.to_bytes(); +extra_data.extend_from_slice(&amount.to_le_bytes()); +let sighash = compute_platform_sighash(&bundle_commitment, &extra_data); +``` + +The same sighash must be computed identically on both the signing (client) and verification (platform) sides. If any transparent field is modified after signing, verification will fail. + +--- + +## 6. Building Shielded State Transitions + +### Common Pattern + +All shielded transitions follow the same five-step pattern: + +1. **Create an Orchard builder** with the appropriate flags and anchor +2. **Add spends and/or outputs** to the builder +3. **Build, prove, and sign** the bundle using the platform sighash +4. **Serialize the bundle** into platform format (`SerializedAction` structs) +5. **Wrap in a state transition** and broadcast + +### ProvingKey Caching + +The `ProvingKey` takes approximately 30 seconds to build. Cache it for the lifetime of the application: + +```rust +use std::sync::OnceLock; +use grovedb_commitment_tree::ProvingKey; + +static PROVING_KEY: OnceLock<ProvingKey> = OnceLock::new(); + +fn get_proving_key() -> &'static ProvingKey { + PROVING_KEY.get_or_init(ProvingKey::build) +} +``` + +### 6.1 Shield (Type 15) + +Deposits credits from transparent platform addresses into the shielded pool. This is an **output-only** bundle (no spends). 
+ +```rust +use grovedb_commitment_tree::{ + Builder, BundleType, Flags as OrchardFlags, Anchor, + SpendingKey, FullViewingKey, NoteValue, Scope, +}; +use dpp::shielded::compute_platform_sighash; +use dpp::state_transition::state_transitions::shielded::shield_transition::ShieldTransition; + +// 1. Setup keys and recipient +let sk = SpendingKey::from_bytes(seed)?; +let fvk = FullViewingKey::from(&sk); +let recipient = fvk.address_at(0u32, Scope::External); + +// 2. Build output-only bundle (spends disabled for shielding) +let anchor = Anchor::empty_tree(); // No spends, so anchor is unused +let mut builder = Builder::<DashMemo>::new( + BundleType::Transactional { + flags: OrchardFlags::SPENDS_DISABLED, + bundle_required: false, + }, + anchor, +); + +let shield_amount: u64 = 100_000; // Credits to shield +builder.add_output( + None, // No outgoing viewing key needed + recipient, + NoteValue::from_raw(shield_amount), + [0u8; 36], // 36-byte structured memo +)?; + +// 3. Build -> prove -> sign +let pk = get_proving_key(); +let mut rng = rand::rngs::OsRng; +let (unauthorized, _) = builder + .build::<i64>(&mut rng)? + .expect("bundle should be present"); + +let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); +let sighash = compute_platform_sighash(&bundle_commitment, &[]); +let proven = unauthorized.create_proof(pk, &mut rng)?; +let bundle = proven.apply_signatures(rng, sighash, &[])?; // No spend auth keys + +// 4. Serialize bundle (see Section 7) +let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + +// 5. 
Create state transition +// `inputs` = map of platform addresses contributing credits +// `signer` = signs the address witnesses +let transition = ShieldTransition::try_from_bundle_with_signer( + inputs, // BTreeMap + actions, + flags, + value_balance, // Negative (credits flow INTO pool) + anchor_bytes, + proof_bytes, + binding_sig, + fee_strategy, // Which inputs pay fees + signer, // Signs address witnesses + user_fee_increase, + platform_version, +)?; +``` + +**Key points:** +- `value_balance` will be **negative** (credits flow into the pool) +- The shield amount equals `|value_balance|` +- Fees are paid from the transparent platform address inputs +- No `SpendAuthorizingKey` needed (empty `&[]` for signatures) + +### 6.2 ShieldFromAssetLock (Type 18) + +Deposits credits directly from a Dash Core asset lock proof. Identical bundle construction to Shield, but the funding source is a core asset lock instead of platform address balances. + +```rust +use dpp::state_transition::state_transitions::shielded::shield_from_asset_lock_transition::ShieldFromAssetLockTransition; + +// Bundle construction is identical to Shield (output-only, empty sighash) +// ... (same builder/prove/sign steps as Shield) ... + +// Create state transition with asset lock proof +let transition = ShieldFromAssetLockTransition::try_from_asset_lock_with_bundle( + asset_lock_proof, // From Dash Core + asset_lock_private_key_bytes, // Signs the asset lock + actions, + flags, + value_balance, + anchor_bytes, + proof_bytes, + binding_sig, + user_fee_increase, + platform_version, +)?; +``` + +### 6.3 ShieldedTransfer (Type 16) + +Transfers value privately within the shielded pool. **Spends** an existing note and creates a new output note. The ZK proof is the sole authorization. + +```rust +use dpp::state_transition::state_transitions::shielded::shielded_transfer_transition::ShieldedTransferTransition; + +// 1. 
Get a spendable note with its Merkle witness +let (note, merkle_path, anchor) = wallet.take_spendable_note()?; +let note_value = note.value().inner(); + +// 2. Build spend + output bundle +let mut builder = Builder::<DashMemo>::new(BundleType::DEFAULT, anchor); +builder.add_spend(fvk.clone(), note, merkle_path)?; +builder.add_output( + None, + recipient_address, // The recipient's payment address + NoteValue::from_raw(note_value), // Transfer full value (no fee from pool) + memo_bytes, // [u8; 36] structured memo +)?; + +// 3. Build -> prove -> sign (needs SpendAuthorizingKey for the spend) +let pk = get_proving_key(); +let mut rng = rand::rngs::OsRng; +let (unauthorized, _) = builder.build::<i64>(&mut rng)?.expect("bundle present"); + +let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); +let sighash = compute_platform_sighash(&bundle_commitment, &[]); // No extra_data +let proven = unauthorized.create_proof(pk, &mut rng)?; +let bundle = proven.apply_signatures(rng, sighash, &[ask])?; // Spend auth key required + +// 4. Serialize and create transition +let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + +let transition = ShieldedTransferTransition::try_from_bundle( + actions, + flags, + value_balance as u64, // 0 for pure transfer, >0 if paying fees from pool + anchor_bytes, + proof_bytes, + binding_sig, + platform_version, +)?; +``` + +**Key points:** +- The `anchor` must match a historical anchor stored on-chain (not `Anchor::empty_tree()`) +- `value_balance` is 0 for a pure private transfer (all value stays in the pool) +- `value_balance` > 0 means that amount is extracted from the pool as a fee +- The `SpendAuthorizingKey` (`ask`) must be provided to `apply_signatures` + +### 6.4 Unshield (Type 17) + +Withdraws credits from the shielded pool to a transparent platform address. Spends a note and delivers part of the value to a transparent address. 
+ +```rust +use dpp::state_transition::state_transitions::shielded::unshield_transition::UnshieldTransition; + +// 1. Get a spendable note +let (note, merkle_path, anchor) = wallet.take_spendable_note()?; +let note_value = note.value().inner(); + +// 2. Decide amounts +let unshield_amount = note_value / 2; // Amount going to transparent address +let change_amount = note_value - unshield_amount; // Change staying in pool + +// 3. Build bundle: spend note, output change back to self +let mut builder = Builder::<DashMemo>::new(BundleType::DEFAULT, anchor); +builder.add_spend(fvk.clone(), note, merkle_path)?; +builder.add_output( + None, + self_address, // Change goes back to our shielded address + NoteValue::from_raw(change_amount), + [0u8; 36], // 36-byte structured memo +)?; + +// 4. Build -> prove -> sign WITH extra_data binding +let pk = get_proving_key(); +let mut rng = rand::rngs::OsRng; +let (unauthorized, _) = builder.build::<i64>(&mut rng)?.expect("bundle present"); + +let output_address = PlatformAddress::P2pkh(recipient_hash); +let mut extra_data = output_address.to_bytes(); +extra_data.extend_from_slice(&unshield_amount.to_le_bytes()); + +let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); +let sighash = compute_platform_sighash(&bundle_commitment, &extra_data); +let proven = unauthorized.create_proof(pk, &mut rng)?; +let bundle = proven.apply_signatures(rng, sighash, &[ask])?; + +// 5. 
Serialize and create transition +let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + +let transition = UnshieldTransition::try_from_bundle( + output_address, + unshield_amount, + actions, + flags, + value_balance, // Positive (credits flow OUT of pool) + anchor_bytes, + proof_bytes, + binding_sig, + platform_version, +)?; +``` + +**Key points:** +- `value_balance` must be **positive** (credits flow out of the pool) +- `value_balance >= amount` (the difference is the fee paid from the pool) +- `output_address` and `amount` are bound to the sighash -- they cannot be modified after signing +- The `output_address` is a `PlatformAddress` (P2pkh or P2sh, not a Core address) + +### 6.5 ShieldedWithdrawal (Type 19) + +Withdraws credits from the shielded pool to a Dash Core L1 address. Similar to Unshield but targets a Core script instead of a platform address. + +```rust +use dpp::state_transition::state_transitions::shielded::shielded_withdrawal_transition::ShieldedWithdrawalTransition; + +// Bundle construction similar to Unshield, but with output_script in extra_data +let mut extra_data = output_script.to_bytes(); +extra_data.extend_from_slice(&withdrawal_amount.to_le_bytes()); + +let sighash = compute_platform_sighash(&bundle_commitment, &extra_data); +// ... prove and sign as usual ... + +let transition = ShieldedWithdrawalTransition::try_from_bundle( + withdrawal_amount, + actions, + flags, + value_balance, + anchor_bytes, + proof_bytes, + binding_sig, + core_fee_per_byte, // Core transaction fee rate + pooling, // Pooling strategy (Never, Standard, etc.) + output_script, // Dash Core output script (e.g., P2PKH) + platform_version, +)?; +``` + +--- + +## 7. 
Bundle Serialization + +After building and signing an Orchard bundle, decompose it into the platform serialization format: + +```rust +use dpp::shielded::SerializedAction; +use grovedb_commitment_tree::{Bundle, Authorized, DashMemo}; + +fn serialize_authorized_bundle( + bundle: &Bundle<Authorized, i64, DashMemo>, +) -> (Vec<SerializedAction>, u8, i64, [u8; 32], Vec<u8>, [u8; 64]) { + let actions: Vec<SerializedAction> = bundle.actions().iter().map(|action| { + let enc = action.encrypted_note(); + let mut encrypted_note = Vec::with_capacity(216); // 32 + 104 + 80 + encrypted_note.extend_from_slice(&enc.epk_bytes); + encrypted_note.extend_from_slice(enc.enc_ciphertext.as_ref()); + encrypted_note.extend_from_slice(&enc.out_ciphertext); + + SerializedAction { + nullifier: action.nullifier().to_bytes(), + rk: <[u8; 32]>::from(action.rk()), + cmx: action.cmx().to_bytes(), + encrypted_note, + cv_net: action.cv_net().to_bytes(), + spend_auth_sig: <[u8; 64]>::from(action.authorization()), + } + }).collect(); + + let flags = bundle.flags().to_byte(); + let value_balance = *bundle.value_balance(); + let anchor = bundle.anchor().to_bytes(); + let proof = bundle.authorization().proof().as_ref().to_vec(); + let binding_sig = <[u8; 64]>::from(bundle.authorization().binding_signature()); + + (actions, flags, value_balance, anchor, proof, binding_sig) +} +``` + +### SerializedAction Fields + +| Field | Size | Description | +|-------|------|-------------| +| `nullifier` | 32 bytes | Unique tag preventing double-spends | +| `rk` | 32 bytes | Randomized spend validating key (RedPallas) | +| `cmx` | 32 bytes | Extracted note commitment for the new output | +| `encrypted_note` | 216 bytes | `epk` (32) + `enc_ciphertext` (104) + `out_ciphertext` (80) | +| `cv_net` | 32 bytes | Pedersen value commitment | +| `spend_auth_sig` | 64 bytes | RedPallas spend authorization signature | + +--- + +## 8. Light Client Syncing + +Light clients must synchronize with the on-chain shielded pool state. The protocol is specified in [DIP-0042](../dip/dip-0042.md). 
The key gRPC queries are: + +### Available Queries + +| Query | Returns | Purpose | +|-------|---------|---------| +| `GetShieldedPoolState` | Pool parameters, total balance, note count | Initial state check | +| `GetShieldedEncryptedNotes` | Encrypted notes by global position range | Note discovery via trial decryption | +| `GetShieldedAnchors` | Historical anchors by block height | Verify spend witnesses | +| `GetShieldedNullifiers` | Published nullifiers | Detect spent notes | + +### GetShieldedEncryptedNotes + +Notes are indexed by **global position** (a monotonically increasing `u64`). The request takes: + +| Field | Type | Description | +|-------|------|-------------| +| `start_index` | `u64` | First global position to fetch (0-based) | +| `count` | `u32` | Maximum number of notes to return | +| `prove` | `bool` | Whether to return a GroveDB proof (V1) instead of raw data | + +**Non-proved response** (`prove = false`): Returns a list of `EncryptedNote { nullifier, cmx, encrypted_note }` for each position in the requested range (the `nullifier` of the creating action is included because trial decryption needs it; see Section 9.2). The response stops early if the position is past the end of the tree. + +**Proved response** (`prove = true`): Returns a GroveDB **V1 proof** (supports BulkAppendTree subqueries). The client verifies using: + +```rust +use grovedb::GroveDb; +use grovedb::VerifyOptions; + +let (root_hash, result_set) = GroveDb::verify_query_with_options( + &proof_bytes, + &path_query, // Same PathQuery structure as the server used + VerifyOptions { + absence_proofs_for_non_existing_searched_keys: false, + verify_proof_succinctness: false, + include_empty_trees_in_result: false, + }, + grove_version, +)?; +``` + +V1 proofs authenticate BulkAppendTree entries by global position range. A single proof covers all requested positions efficiently (epoch blobs + buffer entries). + +### Sync Flow + +``` +1. Query pool state to get current note count and latest block height +2. Determine the last synced position (wallet state) +3. 
Fetch notes in batches by position range: + a. GetShieldedEncryptedNotes(start_index = last_synced + 1, count = batch_size) + b. Trial-decrypt each note with IncomingViewingKey + c. For decrypted notes: record (Note, Position, cmx) in wallet + d. Append ALL cmx values to ClientCommitmentTree (Marked for ours, Ephemeral for others) + e. Repeat until fewer than batch_size notes returned (caught up) +4. Checkpoint the ClientCommitmentTree at the current sync point +5. Query nullifiers to detect which of our notes have been spent +6. Remove spent notes from the spendable set +``` + +### Epoch-Based Bulk Syncing + +The BulkAppendTree's epoch structure (epoch_size = 2048) enables efficient bulk syncing: + +- **Completed epochs** (positions 0..2047, 2048..4095, ...) are immutable blobs that never change +- These can be served from CDN/cache without re-querying the state tree +- A client syncing from scratch can download completed epoch blobs in parallel +- Only the current (partial) buffer needs fresh queries from platform nodes + +### Sync Strategies + +| Strategy | Description | Best For | +|----------|-------------|----------| +| Sequential | Process every position in order | Simple implementation, full history | +| Warp Sync | Scan notes first, compute witnesses later | Fast initial sync (10-100x faster) | +| Spend-Before-Sync | Use server-provided witness for immediate spending | Spending before full sync completes | +| Epoch-Parallel | Download completed epoch blobs concurrently | Initial sync on fast connections | + +--- + +## 9. Trial Decryption + +Light clients discover their notes by attempting to decrypt every encrypted note on-chain. The Orchard protocol uses standard `try_note_decryption` from `zcash_note_encryption`, parameterized with `OrchardDomain`. 
+ +### 9.1 Decrypting from Bundle Actions + +When you have a full `Bundle` (e.g., from a state transition you submitted or received via P2P), use `try_note_decryption` directly on each action: + +```rust +use dash_sdk::grovedb_commitment_tree::{ + OrchardDomain, DashMemo, IncomingViewingKey, Note, PaymentAddress, + try_note_decryption, Bundle, Authorized, Action, +}; + +fn scan_bundle_for_owned_notes( + ivk: &IncomingViewingKey, + bundle: &Bundle<Authorized, i64, DashMemo>, +) -> Vec<(Note, PaymentAddress, [u8; 36])> { + let mut found = Vec::new(); + for action in bundle.actions() { + // OrchardDomain binds the decryption to this action's Rho (nullifier-derived) + let domain = OrchardDomain::<DashMemo>::for_action(action); + // Action implements ShieldedOutput<OrchardDomain<DashMemo>>, + // so it can be passed directly to try_note_decryption + if let Some((note, address, memo)) = try_note_decryption(&domain, ivk, action) { + found.push((note, address, memo)); + } + } + found +} +``` + +### 9.2 Decrypting from RPC Encrypted Notes + +When syncing from the `GetShieldedEncryptedNotes` RPC, each entry includes: +- `nullifier` (32 bytes) -- the nullifier from the action that created this note (needed for Rho derivation) +- `cmx` (32 bytes) -- the extracted note commitment +- `encrypted_note` (216 bytes) -- `epk (32) || enc_ciphertext (104) || out_ciphertext (80)` + +The nullifier is essential because `OrchardDomain` uses `Rho::from_nf_old(nullifier)` to validate `RandomSeed` and construct the `Note` during decryption. + +#### Compact Trial Decryption (Fast Scanning) + +Compact decryption only uses the first 52 bytes of the enc_ciphertext (version + diversifier + value + rseed). 
It's faster for scanning but does not recover the memo: + +```rust +use dash_sdk::grovedb_commitment_tree::{ + OrchardDomain, DashMemo, IncomingViewingKey, Note, + CompactAction, try_compact_note_decryption, + ExtractedNoteCommitment, Nullifier, EphemeralKeyBytes, + COMPACT_NOTE_SIZE, +}; + +/// Attempt compact trial decryption on an entry from GetShieldedEncryptedNotes. +fn try_compact_decrypt( + ivk: &IncomingViewingKey, + nullifier_bytes: &[u8; 32], + cmx_bytes: &[u8; 32], + encrypted_note: &[u8], +) -> Option<Note> { + let nf = Nullifier::from_bytes(nullifier_bytes).into()?; + let cmx = ExtractedNoteCommitment::from_bytes(cmx_bytes).into()?; + let epk_bytes: [u8; 32] = encrypted_note[0..32].try_into().ok()?; + + let enc_compact: [u8; COMPACT_NOTE_SIZE] = + encrypted_note[32..32 + COMPACT_NOTE_SIZE].try_into().ok()?; + + let compact = CompactAction::from_parts(nf, cmx, EphemeralKeyBytes(epk_bytes), enc_compact); + let domain = OrchardDomain::<DashMemo>::for_compact_action(&compact); + let (note, _address) = try_compact_note_decryption(&domain, ivk, &compact)?; + Some(note) +} +``` + +#### Full Sync Loop + +```rust +// Fetch notes from the RPC +let response = client.get_shielded_encrypted_notes(start_index, count, false).await?; +for (pos, entry) in response.entries.iter().enumerate() { + let position = start_index + pos as u64; + let nf: [u8; 32] = entry.nullifier.as_slice().try_into()?; + let cmx: [u8; 32] = entry.cmx.as_slice().try_into()?; + + if let Some(note) = try_compact_decrypt(&ivk, &nf, &cmx, &entry.encrypted_note) { + // This note belongs to us -- mark position in commitment tree for future spending + tree.mark_position(position); + wallet.add_note(note, position); + } + + // Always append the cmx to the commitment tree (even for non-owned notes) + tree.append(cmx); +} +``` + +### 9.3 Integration with ClientCommitmentTree + +After detecting an owned note via trial decryption, mark it in the `ClientCommitmentTree`: + +```rust +// After successful decryption at 
position `pos`: +tree.mark_position(pos); +``` + +This ensures the tree retains the witness (Merkle path) for this note, enabling future spend proofs. + +Trial decryption is the core privacy guarantee: the server cannot determine which notes belong to which client. The client downloads all encrypted notes and tests each one locally. + +--- + +## 10. Fee Model + +Fees vary by transition type: + +| Transition | Fee Source | Calculation | +|------------|-----------|-------------| +| Shield | Platform address inputs | Standard fee model (deducted from input addresses) | +| ShieldFromAssetLock | Asset lock value | `asset_lock_value - shield_amount` | +| ShieldedTransfer | Shielded pool | `value_balance` (extracted from pool, can be 0) | +| Unshield | Shielded pool | `value_balance - amount` (extracted from pool) | +| ShieldedWithdrawal | Shielded pool | `value_balance - amount` (extracted from pool) | + +For Shield, fees are deducted from the transparent platform address inputs using the standard fee model with `user_fee_increase` as a multiplier (0 = 100% of base fee, 1 = 101%, etc.). For ShieldFromAssetLock, the fee is `asset_lock_value - shield_amount`, validated against the minimum fee with `user_fee_increase` applied. For ShieldedTransfer, Unshield, and ShieldedWithdrawal, fees are cryptographically locked by the Orchard binding signature -- the client chooses the fee at bundle construction time by setting the `value_balance` appropriately. + +--- + +## 11. Security Considerations + +### Sighash Binding + +The platform sighash cryptographically binds transparent fields to the Orchard proof. For Unshield and ShieldedWithdrawal, the `output_address`/`output_script` and `amount` are included in `extra_data`. If an attacker modifies these fields, the binding signature and spend authorization signatures will fail verification. + +**Always compute the sighash correctly.** Using wrong `extra_data` will produce a valid-looking bundle that the platform will reject. 
+ +### Anchor Freshness + +Spend-based transitions (ShieldedTransfer, Unshield, ShieldedWithdrawal) must reference a **historical anchor** stored on-chain. The platform rejects: +- `Anchor::empty_tree()` (all zeros) for spend transitions +- Anchors that don't match any recorded on-chain anchor + +Build spend bundles using the anchor from your `ClientCommitmentTree`, which must be in sync with the on-chain state. + +### Nullifier Uniqueness + +Each nullifier can only be published once. If a client submits a transition containing a nullifier that already exists in the on-chain nullifier set, the transition will be rejected (double-spend prevention). + +### Key Security + +- Never transmit the `SpendingKey` or `SpendAuthorizingKey` over the network +- The `ProvingKey` and `VerifyingKey` are deterministic and public -- safe to share +- Diversified addresses (different `address_at` indices) are unlinkable to each other without the `FullViewingKey` + +### Value Conservation + +The Orchard binding signature mathematically guarantees that no credits are created or destroyed: + +``` +sum(input_values) = sum(output_values) + value_balance +``` + +The platform verifies this constraint via the binding signature without learning any individual values. 
diff --git a/packages/dapi-grpc/protos/platform/v0/platform.proto b/packages/dapi-grpc/protos/platform/v0/platform.proto index f602871e1cc..46b43821857 100644 --- a/packages/dapi-grpc/protos/platform/v0/platform.proto +++ b/packages/dapi-grpc/protos/platform/v0/platform.proto @@ -2215,6 +2215,7 @@ message GetMostRecentShieldedAnchorResponse { oneof version { GetMostRecentShieldedAnchorResponseV0 v0 = 1; } } + message GetShieldedPoolStateRequest { message GetShieldedPoolStateRequestV0 { bool prove = 1; diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/Platform.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/Platform.ts index 873fe62c53d..0539c8365f8 100644 --- a/packages/js-dash-sdk/src/SDK/Client/Platform/Platform.ts +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/Platform.ts @@ -34,6 +34,12 @@ import resolveNameByRecord from './methods/names/resolveByRecord'; import searchName from './methods/names/search'; import broadcastStateTransition from './broadcastStateTransition'; +import shieldMethod from './methods/shielded/shield'; +import shieldedTransferMethod from './methods/shielded/shieldedTransfer'; +import unshieldMethod from './methods/shielded/unshield'; +import shieldFromAssetLockMethod from './methods/shielded/shieldFromAssetLock'; +import shieldedWithdrawalMethod from './methods/shielded/shieldedWithdrawal'; + import logger, { ConfigurableLogger } from '../../../logger'; import Fetcher from './Fetcher'; import NonceManager from './NonceManager/NonceManager'; @@ -98,6 +104,14 @@ interface DataContracts { history: Function, } +interface Shielded { + shield: Function, + shieldedTransfer: Function, + unshield: Function, + shieldFromAssetLock: Function, + shieldedWithdrawal: Function, +} + /** * Class for Dash Platform * @@ -117,6 +131,11 @@ export class Platform { public documents: Records; + /** + * Shielded pool operations (shield, transfer, unshield) + */ + public shielded: Shielded; + /** * @param {Function} get - get identities from the 
platform * @param {Function} register - register identities on the platform @@ -180,6 +199,13 @@ export class Platform { resolveByRecord: resolveNameByRecord.bind(this), search: searchName.bind(this), }; + this.shielded = { + shield: shieldMethod.bind(this), + shieldedTransfer: shieldedTransferMethod.bind(this), + unshield: unshieldMethod.bind(this), + shieldFromAssetLock: shieldFromAssetLockMethod.bind(this), + shieldedWithdrawal: shieldedWithdrawalMethod.bind(this), + }; this.identities = { register: registerIdentity.bind(this), get: getIdentity.bind(this), diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shield.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shield.ts new file mode 100644 index 00000000000..5391cb205e8 --- /dev/null +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shield.ts @@ -0,0 +1,38 @@ +import broadcastStateTransition from '../../broadcastStateTransition'; +import { Platform } from '../../Platform'; +import { IStateTransitionResult } from '../../IStateTransitionResult'; + +/** + * Broadcast a shield transition (transparent platform addresses -> shielded pool). + * + * The Orchard bundle, platform address witnesses, and full state transition + * must be built externally (e.g., via rs-sdk's `shield_funds()` or a native + * wallet library) and serialized to platform binary format. 
+ * + * @param serializedTransition - Platform-serialized ShieldTransition bytes + * @returns Broadcast result + */ +export async function shield( + this: Platform, + serializedTransition: Uint8Array, +): Promise { + this.logger.debug('[Shielded#shield] Broadcasting shield transition'); + await this.initialize(); + + const { dpp } = this; + + const transition = dpp.stateTransition.createFromBuffer( + serializedTransition, + {}, + ); + + const result = await broadcastStateTransition(this, await transition, { + skipValidation: true, + }); + + this.logger.silly('[Shielded#shield] Broadcasted ShieldTransition'); + + return result; +} + +export default shield; diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldFromAssetLock.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldFromAssetLock.ts new file mode 100644 index 00000000000..95e4b1cd1a8 --- /dev/null +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldFromAssetLock.ts @@ -0,0 +1,39 @@ +import broadcastStateTransition from '../../broadcastStateTransition'; +import { Platform } from '../../Platform'; +import { IStateTransitionResult } from '../../IStateTransitionResult'; + +/** + * Broadcast a shield-from-asset-lock transition (core asset lock -> shielded pool). + * + * The Orchard bundle, asset lock proof, ECDSA signature, and full state + * transition must be built externally (e.g., via rs-sdk's + * `shield_from_asset_lock()` or a native wallet library) and serialized + * to platform binary format. 
+ * + * @param serializedTransition - Platform-serialized ShieldFromAssetLockTransition bytes + * @returns Broadcast result + */ +export async function shieldFromAssetLock( + this: Platform, + serializedTransition: Uint8Array, +): Promise { + this.logger.debug('[Shielded#shieldFromAssetLock] Broadcasting shield from asset lock'); + await this.initialize(); + + const { dpp } = this; + + const transition = dpp.stateTransition.createFromBuffer( + serializedTransition, + {}, + ); + + const result = await broadcastStateTransition(this, await transition, { + skipValidation: true, + }); + + this.logger.silly('[Shielded#shieldFromAssetLock] Broadcasted ShieldFromAssetLockTransition'); + + return result; +} + +export default shieldFromAssetLock; diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldedTransfer.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldedTransfer.ts new file mode 100644 index 00000000000..81f68a4aaff --- /dev/null +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldedTransfer.ts @@ -0,0 +1,41 @@ +import broadcastStateTransition from '../../broadcastStateTransition'; +import { Platform } from '../../Platform'; +import { IStateTransitionResult } from '../../IStateTransitionResult'; + +/** + * Broadcast a shielded transfer (shielded-to-shielded) transition. + * + * The Orchard bundle and full state transition must be built externally + * (e.g., via rs-sdk's `transfer_shielded()` or a native wallet library) + * and serialized to platform binary format before passing here. + * + * Authentication is provided entirely by Orchard spend authorization + * signatures within the bundle — no identity signing needed. 
+ * + * @param serializedTransition - Platform-serialized ShieldedTransferTransition bytes + * @returns Broadcast result + */ +export async function shieldedTransfer( + this: Platform, + serializedTransition: Uint8Array, +): Promise { + this.logger.debug('[Shielded#shieldedTransfer] Broadcasting shielded transfer'); + await this.initialize(); + + const { dpp } = this; + + const transition = dpp.stateTransition.createFromBuffer( + serializedTransition, + {}, + ); + + const result = await broadcastStateTransition(this, await transition, { + skipValidation: true, + }); + + this.logger.silly('[Shielded#shieldedTransfer] Broadcasted ShieldedTransferTransition'); + + return result; +} + +export default shieldedTransfer; diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldedWithdrawal.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldedWithdrawal.ts new file mode 100644 index 00000000000..fb6e47b63bf --- /dev/null +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/shieldedWithdrawal.ts @@ -0,0 +1,41 @@ +import broadcastStateTransition from '../../broadcastStateTransition'; +import { Platform } from '../../Platform'; +import { IStateTransitionResult } from '../../IStateTransitionResult'; + +/** + * Broadcast a shielded withdrawal transition (shielded pool -> core L1 address). + * + * The Orchard bundle and full state transition must be built externally + * (e.g., via rs-sdk's `withdraw_shielded()` or a native wallet library) + * and serialized to platform binary format. + * + * The platform sighash binds `outputScript || amount` to prevent + * an attacker from substituting a different L1 destination or amount. 
+ * + * @param serializedTransition - Platform-serialized ShieldedWithdrawalTransition bytes + * @returns Broadcast result + */ +export async function shieldedWithdrawal( + this: Platform, + serializedTransition: Uint8Array, +): Promise { + this.logger.debug('[Shielded#shieldedWithdrawal] Broadcasting shielded withdrawal'); + await this.initialize(); + + const { dpp } = this; + + const transition = dpp.stateTransition.createFromBuffer( + serializedTransition, + {}, + ); + + const result = await broadcastStateTransition(this, await transition, { + skipValidation: true, + }); + + this.logger.silly('[Shielded#shieldedWithdrawal] Broadcasted ShieldedWithdrawalTransition'); + + return result; +} + +export default shieldedWithdrawal; diff --git a/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/unshield.ts b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/unshield.ts new file mode 100644 index 00000000000..95eb4636601 --- /dev/null +++ b/packages/js-dash-sdk/src/SDK/Client/Platform/methods/shielded/unshield.ts @@ -0,0 +1,41 @@ +import broadcastStateTransition from '../../broadcastStateTransition'; +import { Platform } from '../../Platform'; +import { IStateTransitionResult } from '../../IStateTransitionResult'; + +/** + * Broadcast an unshield transition (shielded pool -> platform address). + * + * The Orchard bundle and full state transition must be built externally + * (e.g., via rs-sdk's `unshield_funds()` or a native wallet library) + * and serialized to platform binary format. + * + * The platform sighash binds `outputAddress || amount` to prevent + * an attacker from substituting a different destination or amount. 
+ * + * @param serializedTransition - Platform-serialized UnshieldTransition bytes + * @returns Broadcast result + */ +export async function unshield( + this: Platform, + serializedTransition: Uint8Array, +): Promise { + this.logger.debug('[Shielded#unshield] Broadcasting unshield transition'); + await this.initialize(); + + const { dpp } = this; + + const transition = dpp.stateTransition.createFromBuffer( + serializedTransition, + {}, + ); + + const result = await broadcastStateTransition(this, await transition, { + skipValidation: true, + }); + + this.logger.silly('[Shielded#unshield] Broadcasted UnshieldTransition'); + + return result; +} + +export default unshield; diff --git a/packages/rs-dapi-client/src/transport/grpc.rs b/packages/rs-dapi-client/src/transport/grpc.rs index a9f85020f3c..02b01f66cd8 100644 --- a/packages/rs-dapi-client/src/transport/grpc.rs +++ b/packages/rs-dapi-client/src/transport/grpc.rs @@ -479,6 +479,7 @@ impl_transport_request_grpc!( get_most_recent_shielded_anchor ); + // rpc getShieldedPoolState(GetShieldedPoolStateRequest) returns (GetShieldedPoolStateResponse); impl_transport_request_grpc!( platform_proto::GetShieldedPoolStateRequest, diff --git a/packages/rs-dpp/src/address_funds/platform_address.rs b/packages/rs-dpp/src/address_funds/platform_address.rs index 7e6708903e6..86671ef3fca 100644 --- a/packages/rs-dpp/src/address_funds/platform_address.rs +++ b/packages/rs-dpp/src/address_funds/platform_address.rs @@ -563,6 +563,204 @@ impl PlatformAddress { } } +// --------------------------------------------------------------------------- +// Orchard shielded payment address (requires `shielded-bundle-building`) +// --------------------------------------------------------------------------- + +/// Size of the Orchard diversifier (11 bytes). +#[cfg(feature = "shielded-bundle-building")] +pub const ORCHARD_DIVERSIFIER_SIZE: usize = 11; +/// Size of the Orchard diversified transmission key pk_d (32 bytes, Pallas curve point). 
+#[cfg(feature = "shielded-bundle-building")] +pub const ORCHARD_PKD_SIZE: usize = 32; +/// Total size of a raw Orchard payment address (43 bytes = diversifier + pk_d). +#[cfg(feature = "shielded-bundle-building")] +pub const ORCHARD_ADDRESS_SIZE: usize = ORCHARD_DIVERSIFIER_SIZE + ORCHARD_PKD_SIZE; + +/// An Orchard shielded payment address. +/// +/// Composed of a diversifier (11 bytes) and a diversified transmission key (32 bytes). +/// The diversifier enables a single spending key to derive an unlimited number of +/// unlinkable payment addresses. Only the holder of the corresponding FullViewingKey +/// (or IncomingViewingKey) can link diversified addresses to the same wallet. +/// +/// Bech32m encoding uses type byte `0x10`, producing addresses that start with `z`: +/// - Mainnet: `dash1z...` +/// - Testnet: `tdash1z...` +/// +/// The raw Orchard address format matches Zcash Orchard (43 bytes), but the +/// string encoding is Dash-specific (no F4Jumble, no Unified Address wrapper). +/// +/// Use [`From`] to convert from the `orchard` crate's native type, +/// or [`to_payment_address()`](OrchardAddress::to_payment_address) to convert back +/// (with pk_d validation). +/// +/// Requires the `shielded-bundle-building` feature. +#[cfg(feature = "shielded-bundle-building")] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct OrchardAddress { + /// 11-byte diversifier derived from the FullViewingKey with an index. + diversifier: [u8; ORCHARD_DIVERSIFIER_SIZE], + /// 32-byte diversified transmission key (point on the Pallas curve). + pk_d: [u8; ORCHARD_PKD_SIZE], +} + +#[cfg(feature = "shielded-bundle-building")] +impl OrchardAddress { + /// Type byte for Orchard addresses in bech32m encoding (user-facing). + /// Produces 'z' as the first bech32 character. + pub const ORCHARD_TYPE: u8 = 0x10; + + /// Creates an OrchardAddress from its raw components. 
+ pub fn from_parts( + diversifier: [u8; ORCHARD_DIVERSIFIER_SIZE], + pk_d: [u8; ORCHARD_PKD_SIZE], + ) -> Self { + Self { diversifier, pk_d } + } + + /// Creates an OrchardAddress from a 43-byte raw address. + /// + /// The first 11 bytes are the diversifier, the next 32 are pk_d. + /// No validation is performed on pk_d; use [`From`] + /// for a pre-validated address. + pub fn from_raw_bytes(bytes: &[u8; ORCHARD_ADDRESS_SIZE]) -> Self { + let mut diversifier = [0u8; ORCHARD_DIVERSIFIER_SIZE]; + let mut pk_d = [0u8; ORCHARD_PKD_SIZE]; + diversifier.copy_from_slice(&bytes[..ORCHARD_DIVERSIFIER_SIZE]); + pk_d.copy_from_slice(&bytes[ORCHARD_DIVERSIFIER_SIZE..]); + Self { diversifier, pk_d } + } + + /// Returns the raw 43-byte address (diversifier || pk_d). + pub fn to_raw_bytes(&self) -> [u8; ORCHARD_ADDRESS_SIZE] { + let mut bytes = [0u8; ORCHARD_ADDRESS_SIZE]; + bytes[..ORCHARD_DIVERSIFIER_SIZE].copy_from_slice(&self.diversifier); + bytes[ORCHARD_DIVERSIFIER_SIZE..].copy_from_slice(&self.pk_d); + bytes + } + + /// Returns the 11-byte diversifier. + pub fn diversifier(&self) -> &[u8; ORCHARD_DIVERSIFIER_SIZE] { + &self.diversifier + } + + /// Returns the 32-byte diversified transmission key. + pub fn pk_d(&self) -> &[u8; ORCHARD_PKD_SIZE] { + &self.pk_d + } + + /// Encodes the OrchardAddress as a bech32m string for the specified network. + /// + /// Format: `1` + /// - Data: type_byte (0x10) || diversifier (11 bytes) || pk_d (32 bytes) + /// - Total payload: 44 bytes + /// - Checksum: bech32m (BIP-350) + /// + /// # Example + /// ```ignore + /// let address = OrchardAddress::from_raw_bytes(&raw_bytes); + /// let encoded = address.to_bech32m_string(Network::Dash); + /// // Returns something like "dash1z..." 
+ /// ``` + pub fn to_bech32m_string(&self, network: Network) -> String { + let hrp_str = PlatformAddress::hrp_for_network(network); + let hrp = Hrp::parse(hrp_str).expect("HRP is valid"); + + let mut payload = Vec::with_capacity(1 + ORCHARD_ADDRESS_SIZE); + payload.push(Self::ORCHARD_TYPE); + payload.extend_from_slice(&self.diversifier); + payload.extend_from_slice(&self.pk_d); + + bech32::encode::(hrp, &payload).expect("encoding should succeed") + } + + /// Converts this address to an Orchard [`PaymentAddress`](grovedb_commitment_tree::PaymentAddress). + /// + /// Returns an error if `pk_d` is not a valid Pallas curve point. + pub fn to_payment_address( + &self, + ) -> Result { + crate::shielded::builder::orchard_address_to_payment_address(self) + } + + /// Decodes a bech32m-encoded Orchard address string. + /// + /// # Returns + /// - `Ok((OrchardAddress, Network))` - The decoded address and its network + /// - `Err(ProtocolError)` - If the address is invalid + pub fn from_bech32m_string(s: &str) -> Result<(Self, Network), ProtocolError> { + let (hrp, data) = + bech32::decode(s).map_err(|e| ProtocolError::DecodingError(format!("{}", e)))?; + + let hrp_lower = hrp.as_str().to_ascii_lowercase(); + let network = match hrp_lower.as_str() { + s if s == PLATFORM_HRP_MAINNET => Network::Dash, + s if s == PLATFORM_HRP_TESTNET => Network::Testnet, + _ => { + return Err(ProtocolError::DecodingError(format!( + "invalid HRP '{}': expected '{}' or '{}'", + hrp, PLATFORM_HRP_MAINNET, PLATFORM_HRP_TESTNET + ))) + } + }; + + // Validate payload: 1 type byte + 11 diversifier + 32 pk_d = 44 bytes + if data.len() != 1 + ORCHARD_ADDRESS_SIZE { + return Err(ProtocolError::DecodingError(format!( + "invalid Orchard address length: expected {} bytes, got {}", + 1 + ORCHARD_ADDRESS_SIZE, + data.len() + ))); + } + + if data[0] != Self::ORCHARD_TYPE { + return Err(ProtocolError::DecodingError(format!( + "invalid Orchard address type byte: expected 0x{:02x}, got 0x{:02x}", + 
Self::ORCHARD_TYPE, + data[0] + ))); + } + + let mut diversifier = [0u8; ORCHARD_DIVERSIFIER_SIZE]; + let mut pk_d = [0u8; ORCHARD_PKD_SIZE]; + diversifier.copy_from_slice(&data[1..1 + ORCHARD_DIVERSIFIER_SIZE]); + pk_d.copy_from_slice(&data[1 + ORCHARD_DIVERSIFIER_SIZE..]); + + Ok((Self { diversifier, pk_d }, network)) + } +} + +/// Infallible conversion from the orchard crate's `PaymentAddress` to `OrchardAddress`. +/// +/// Extracts the raw 43 bytes (diversifier || pk_d) from the validated address. +#[cfg(feature = "shielded-bundle-building")] +impl From for OrchardAddress { + fn from(addr: grovedb_commitment_tree::PaymentAddress) -> Self { + Self::from_raw_bytes(&addr.to_raw_address_bytes()) + } +} + +/// Infallible conversion from a reference to `PaymentAddress`. +#[cfg(feature = "shielded-bundle-building")] +impl From<&grovedb_commitment_tree::PaymentAddress> for OrchardAddress { + fn from(addr: &grovedb_commitment_tree::PaymentAddress) -> Self { + Self::from_raw_bytes(&addr.to_raw_address_bytes()) + } +} + +#[cfg(feature = "shielded-bundle-building")] +impl std::fmt::Display for OrchardAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Orchard(d={}, pk_d={})", + hex::encode(self.diversifier), + hex::encode(self.pk_d) + ) + } +} + impl std::fmt::Display for PlatformAddress { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -1321,4 +1519,149 @@ mod tests { assert_eq!(p2pkh_decoded, p2pkh); assert_eq!(p2sh_decoded, p2sh); } + + // ======================== + // Orchard address tests (require shielded-bundle-building feature) + // ======================== + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_address_from_parts_roundtrip() { + let diversifier = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, + ]; + let pk_d = [0xAB; 32]; + let address = OrchardAddress::from_parts(diversifier, pk_d); + + assert_eq!(address.diversifier(), 
&diversifier); + assert_eq!(address.pk_d(), &pk_d); + + let raw = address.to_raw_bytes(); + assert_eq!(raw.len(), 43); + assert_eq!(&raw[..11], &diversifier); + assert_eq!(&raw[11..], &pk_d[..]); + + let recovered = OrchardAddress::from_raw_bytes(&raw); + assert_eq!(recovered, address); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_bech32m_mainnet_roundtrip() { + let diversifier = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, + ]; + let pk_d = [0xAB; 32]; + let address = OrchardAddress::from_parts(diversifier, pk_d); + + let encoded = address.to_bech32m_string(Network::Dash); + assert!( + encoded.starts_with("dash1z"), + "Orchard mainnet address should start with 'dash1z', got: {}", + encoded + ); + + let (decoded, network) = + OrchardAddress::from_bech32m_string(&encoded).expect("decoding should succeed"); + assert_eq!(decoded, address); + assert_eq!(network, Network::Dash); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_bech32m_testnet_roundtrip() { + let diversifier = [0xFF; 11]; + let pk_d = [0x42; 32]; + let address = OrchardAddress::from_parts(diversifier, pk_d); + + let encoded = address.to_bech32m_string(Network::Testnet); + assert!( + encoded.starts_with("tdash1z"), + "Orchard testnet address should start with 'tdash1z', got: {}", + encoded + ); + + let (decoded, network) = + OrchardAddress::from_bech32m_string(&encoded).expect("decoding should succeed"); + assert_eq!(decoded, address); + assert_eq!(network, Network::Testnet); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_bech32m_wrong_type_byte_fails() { + // Manually construct an address with P2PKH type byte (0xb0) but 44-byte payload + let hrp = Hrp::parse("dash").unwrap(); + let mut payload = vec![PlatformAddress::P2PKH_TYPE]; // Wrong type byte + payload.extend_from_slice(&[0u8; 43]); + let encoded = bech32::encode::(hrp, &payload).unwrap(); + + let result = 
OrchardAddress::from_bech32m_string(&encoded); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("invalid Orchard address type byte")); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_bech32m_wrong_length_fails() { + // Too short (only 20 bytes instead of 43) + let hrp = Hrp::parse("dash").unwrap(); + let mut payload = vec![OrchardAddress::ORCHARD_TYPE]; + payload.extend_from_slice(&[0u8; 20]); + let encoded = bech32::encode::(hrp, &payload).unwrap(); + + let result = OrchardAddress::from_bech32m_string(&encoded); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("invalid Orchard address length")); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_and_platform_addresses_are_distinguishable() { + // Verify that the type bytes produce distinct prefixes + let p2pkh = PlatformAddress::P2pkh([0xAB; 20]); + let p2sh = PlatformAddress::P2sh([0xAB; 20]); + let orchard = OrchardAddress::from_parts([0xAB; 11], [0xAB; 32]); + + let p2pkh_enc = p2pkh.to_bech32m_string(Network::Dash); + let p2sh_enc = p2sh.to_bech32m_string(Network::Dash); + let orchard_enc = orchard.to_bech32m_string(Network::Dash); + + // All three start with "dash1" but have different type-byte characters + assert!(p2pkh_enc.starts_with("dash1k"), "P2PKH: {}", p2pkh_enc); + assert!(p2sh_enc.starts_with("dash1s"), "P2SH: {}", p2sh_enc); + assert!( + orchard_enc.starts_with("dash1z"), + "Orchard: {}", + orchard_enc + ); + + // Cross-decoding should fail + assert!(PlatformAddress::from_bech32m_string(&orchard_enc).is_err()); + assert!(OrchardAddress::from_bech32m_string(&p2pkh_enc).is_err()); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_address_all_zeros() { + let address = OrchardAddress::from_parts([0u8; 11], [0u8; 32]); + let encoded = address.to_bech32m_string(Network::Dash); + let (decoded, _) = 
OrchardAddress::from_bech32m_string(&encoded).unwrap(); + assert_eq!(decoded, address); + } + + #[cfg(feature = "shielded-bundle-building")] + #[test] + fn test_orchard_address_display() { + let address = OrchardAddress::from_parts([0x01; 11], [0x02; 32]); + let display = format!("{}", address); + assert!(display.starts_with("Orchard(d=")); + assert!(display.contains("pk_d=")); + } } diff --git a/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs b/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs index 05dee2b8ab2..be18afa8680 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs +++ b/packages/rs-dpp/src/errors/consensus/basic/basic_error.rs @@ -81,7 +81,8 @@ use crate::consensus::basic::state_transition::{ ShieldedNoActionsError, ShieldedTooManyActionsError, ShieldedZeroAnchorError, StateTransitionMaxSizeExceededError, StateTransitionNotActiveError, TransitionNoInputsError, TransitionNoOutputsError, TransitionOverMaxInputsError, TransitionOverMaxOutputsError, - WithdrawalBalanceMismatchError, WithdrawalBelowMinAmountError, + UnshieldAmountZeroError, UnshieldValueBalanceBelowAmountError, WithdrawalBalanceMismatchError, + WithdrawalBelowMinAmountError, }; use crate::consensus::basic::{ IncompatibleProtocolVersionError, UnsupportedFeatureError, UnsupportedProtocolVersionError, @@ -673,6 +674,12 @@ pub enum BasicError { #[error(transparent)] ShieldedInvalidValueBalanceError(ShieldedInvalidValueBalanceError), + + #[error(transparent)] + UnshieldAmountZeroError(UnshieldAmountZeroError), + + #[error(transparent)] + UnshieldValueBalanceBelowAmountError(UnshieldValueBalanceBelowAmountError), } impl From for ConsensusError { diff --git a/packages/rs-dpp/src/errors/consensus/basic/state_transition/mod.rs b/packages/rs-dpp/src/errors/consensus/basic/state_transition/mod.rs index b9acc33f2a7..a5721bcdd4a 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/state_transition/mod.rs +++ 
b/packages/rs-dpp/src/errors/consensus/basic/state_transition/mod.rs @@ -24,6 +24,8 @@ mod transition_no_inputs_error; mod transition_no_outputs_error; mod transition_over_max_inputs_error; mod transition_over_max_outputs_error; +mod unshield_amount_zero_error; +mod unshield_value_balance_below_amount_error; mod withdrawal_balance_mismatch_error; mod withdrawal_below_min_amount_error; @@ -53,5 +55,7 @@ pub use transition_no_inputs_error::*; pub use transition_no_outputs_error::*; pub use transition_over_max_inputs_error::*; pub use transition_over_max_outputs_error::*; +pub use unshield_amount_zero_error::*; +pub use unshield_value_balance_below_amount_error::*; pub use withdrawal_balance_mismatch_error::*; pub use withdrawal_below_min_amount_error::*; diff --git a/packages/rs-dpp/src/errors/consensus/basic/state_transition/unshield_amount_zero_error.rs b/packages/rs-dpp/src/errors/consensus/basic/state_transition/unshield_amount_zero_error.rs new file mode 100644 index 00000000000..4d0a618c73f --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/state_transition/unshield_amount_zero_error.rs @@ -0,0 +1,37 @@ +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::errors::ProtocolError; +use bincode::{Decode, Encode}; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error("Unshield transition amount must be greater than zero")] +#[platform_serialize(unversioned)] +pub struct UnshieldAmountZeroError { + /* + + DO NOT CHANGE ORDER OF FIELDS WITHOUT INTRODUCING OF NEW VERSION + + */ +} + +impl UnshieldAmountZeroError { + pub fn new() -> Self { + Self {} + } +} + +impl Default for UnshieldAmountZeroError { + fn default() -> Self { + Self::new() + } +} + +impl From for ConsensusError { + fn from(err: UnshieldAmountZeroError) -> Self { + 
Self::BasicError(BasicError::UnshieldAmountZeroError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/state_transition/unshield_value_balance_below_amount_error.rs b/packages/rs-dpp/src/errors/consensus/basic/state_transition/unshield_value_balance_below_amount_error.rs new file mode 100644 index 00000000000..bbaae67bfb6 --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/state_transition/unshield_value_balance_below_amount_error.rs @@ -0,0 +1,44 @@ +use crate::consensus::basic::BasicError; +use crate::consensus::ConsensusError; +use crate::errors::ProtocolError; +use bincode::{Decode, Encode}; +use platform_serialization_derive::{PlatformDeserialize, PlatformSerialize}; +use thiserror::Error; + +#[derive( + Error, Debug, Clone, PartialEq, Eq, Encode, Decode, PlatformSerialize, PlatformDeserialize, +)] +#[error("value_balance ({value_balance}) must be >= amount ({amount})")] +#[platform_serialize(unversioned)] +pub struct UnshieldValueBalanceBelowAmountError { + /* + + DO NOT CHANGE ORDER OF FIELDS WITHOUT INTRODUCING OF NEW VERSION + + */ + value_balance: i64, + amount: u64, +} + +impl UnshieldValueBalanceBelowAmountError { + pub fn new(value_balance: i64, amount: u64) -> Self { + Self { + value_balance, + amount, + } + } + + pub fn value_balance(&self) -> i64 { + self.value_balance + } + + pub fn amount(&self) -> u64 { + self.amount + } +} + +impl From for ConsensusError { + fn from(err: UnshieldValueBalanceBelowAmountError) -> Self { + Self::BasicError(BasicError::UnshieldValueBalanceBelowAmountError(err)) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/codes.rs b/packages/rs-dpp/src/errors/consensus/codes.rs index ed24cbc15aa..dff4f34aaa0 100644 --- a/packages/rs-dpp/src/errors/consensus/codes.rs +++ b/packages/rs-dpp/src/errors/consensus/codes.rs @@ -237,6 +237,8 @@ impl ErrorWithCode for BasicError { Self::ShieldedEmptyProofError(_) => 10820, Self::ShieldedZeroAnchorError(_) => 10821, 
Self::ShieldedInvalidValueBalanceError(_) => 10822, + Self::UnshieldAmountZeroError(_) => 10823, + Self::UnshieldValueBalanceBelowAmountError(_) => 10824, Self::ShieldedTooManyActionsError(_) => 10825, } } diff --git a/packages/rs-dpp/src/shielded/builder.rs b/packages/rs-dpp/src/shielded/builder.rs new file mode 100644 index 00000000000..878ee41c015 --- /dev/null +++ b/packages/rs-dpp/src/shielded/builder.rs @@ -0,0 +1,611 @@ +//! Convenience builders for constructing shielded state transitions. +//! +//! These functions encapsulate the full Orchard bundle construction pipeline: +//! builder configuration, proof generation, signature application, +//! and serialization into platform state transitions. +//! +//! Requires the `shielded-bundle-building` feature, which pulls in +//! `grovedb-commitment-tree` (and transitively the `orchard` crate). +//! +//! # Example +//! +//! ```ignore +//! use dpp::shielded::builder::*; +//! use grovedb_commitment_tree::{SpendingKey, FullViewingKey, Scope, ProvingKey}; +//! +//! // Derive recipient address +//! let sk = SpendingKey::from_bytes(seed)?; +//! let fvk = FullViewingKey::from(&sk); +//! let recipient = OrchardAddress::from_raw_bytes( +//! &fvk.address_at(0, Scope::External).to_raw_address_bytes(), +//! ); +//! +//! // Build a shield transition +//! let pk = ProvingKey::build(); +//! let st = build_shield_transition( +//! &recipient, shield_amount, inputs, fee_strategy, +//! &signer, 0, &pk, [0u8; 36], platform_version, +//! )?; +//! 
``` + +use std::collections::BTreeMap; + +use grovedb_commitment_tree::{ + Anchor, Authorized, Builder, Bundle, BundleType, DashMemo, Flags as OrchardFlags, + FullViewingKey, MerklePath, Note, NoteValue, PaymentAddress, ProvingKey, SpendAuthorizingKey, +}; +use rand::rngs::OsRng; + +use crate::address_funds::AddressFundsFeeStrategy; +use crate::address_funds::{OrchardAddress, PlatformAddress}; +use crate::fee::Credits; +use crate::identity::core_script::CoreScript; +use crate::identity::signer::Signer; +use crate::prelude::{AddressNonce, UserFeeIncrease}; +use crate::shielded::{compute_minimum_shielded_fee, compute_platform_sighash, SerializedAction}; +use crate::state_transition::shield_from_asset_lock_transition::methods::ShieldFromAssetLockTransitionMethodsV0; +use crate::state_transition::shield_from_asset_lock_transition::ShieldFromAssetLockTransition; +use crate::state_transition::shield_transition::methods::ShieldTransitionMethodsV0; +use crate::state_transition::shield_transition::ShieldTransition; +use crate::state_transition::shielded_transfer_transition::methods::ShieldedTransferTransitionMethodsV0; +use crate::state_transition::shielded_transfer_transition::ShieldedTransferTransition; +use crate::state_transition::shielded_withdrawal_transition::methods::ShieldedWithdrawalTransitionMethodsV0; +use crate::state_transition::shielded_withdrawal_transition::ShieldedWithdrawalTransition; +use crate::state_transition::unshield_transition::methods::UnshieldTransitionMethodsV0; +use crate::state_transition::unshield_transition::UnshieldTransition; +use crate::state_transition::StateTransition; +use crate::withdrawal::Pooling; +use crate::ProtocolError; +use platform_version::version::PlatformVersion; + +/// A note that can be spent in a shielded transaction, paired with its +/// Merkle inclusion path in the commitment tree. +pub struct SpendableNote { + /// The Orchard note to spend. 
+ pub note: Note, + /// Merkle path proving the note's commitment exists in the tree. + pub merkle_path: MerklePath, +} + +/// Converts an [`OrchardAddress`] to an Orchard [`PaymentAddress`]. +/// +/// Returns an error if `pk_d` is not a valid Pallas curve point. +pub fn orchard_address_to_payment_address( + address: &OrchardAddress, +) -> Result { + let raw = address.to_raw_bytes(); + Option::from(PaymentAddress::from_raw_address_bytes(&raw)).ok_or_else(|| { + ProtocolError::DecodingError( + "OrchardAddress pk_d is not a valid Pallas curve point".to_string(), + ) + }) +} + +/// Serializes an authorized Orchard bundle into the raw fields used by +/// state transition constructors. +/// +/// Returns `(actions, flags, value_balance, anchor, proof, binding_signature)`. +pub fn serialize_authorized_bundle( + bundle: &Bundle, +) -> (Vec, u8, i64, [u8; 32], Vec, [u8; 64]) { + let actions: Vec = bundle + .actions() + .iter() + .map(|action| { + let enc = action.encrypted_note(); + let mut encrypted_note = Vec::with_capacity(216); + encrypted_note.extend_from_slice(&enc.epk_bytes); + encrypted_note.extend_from_slice(enc.enc_ciphertext.as_ref()); + encrypted_note.extend_from_slice(&enc.out_ciphertext); + SerializedAction { + nullifier: action.nullifier().to_bytes(), + rk: <[u8; 32]>::from(action.rk()), + cmx: action.cmx().to_bytes(), + encrypted_note, + cv_net: action.cv_net().to_bytes(), + spend_auth_sig: <[u8; 64]>::from(action.authorization()), + } + }) + .collect(); + let flags = bundle.flags().to_byte(); + let value_balance = *bundle.value_balance(); + let anchor = bundle.anchor().to_bytes(); + let proof = bundle.authorization().proof().as_ref().to_vec(); + let binding_sig = <[u8; 64]>::from(bundle.authorization().binding_signature()); + (actions, flags, value_balance, anchor, proof, binding_sig) +} + +// --------------------------------------------------------------------------- +// Internal helpers +// 
--------------------------------------------------------------------------- + +/// Builds an output-only Orchard bundle (no spends). +/// +/// Used by Shield and ShieldFromAssetLock transitions where funds enter +/// the shielded pool from transparent sources. +fn build_output_only_bundle( + recipient: &OrchardAddress, + amount: u64, + memo: [u8; 36], + proving_key: &ProvingKey, +) -> Result, ProtocolError> { + let payment_address = orchard_address_to_payment_address(recipient)?; + let anchor = Anchor::empty_tree(); + let mut builder = Builder::::new( + BundleType::Transactional { + flags: OrchardFlags::SPENDS_DISABLED, + bundle_required: false, + }, + anchor, + ); + + builder + .add_output(None, payment_address, NoteValue::from_raw(amount), memo) + .map_err(|e| ProtocolError::Generic(format!("failed to add output: {:?}", e)))?; + + prove_and_sign_bundle(builder, proving_key, &[], &[]) +} + +/// Builds a spend+output Orchard bundle. +/// +/// Used by ShieldedTransfer, Unshield, and ShieldedWithdrawal where funds +/// are spent from existing notes. 
+fn build_spend_bundle( + spends: Vec, + recipient: &OrchardAddress, + output_amount: u64, + memo: [u8; 36], + fvk: &FullViewingKey, + ask: &SpendAuthorizingKey, + anchor: Anchor, + proving_key: &ProvingKey, + extra_sighash_data: &[u8], +) -> Result, ProtocolError> { + let payment_address = orchard_address_to_payment_address(recipient)?; + + let mut builder = Builder::::new(BundleType::DEFAULT, anchor); + + for spend in spends { + builder + .add_spend(fvk.clone(), spend.note, spend.merkle_path) + .map_err(|e| ProtocolError::Generic(format!("failed to add spend: {:?}", e)))?; + } + + builder + .add_output( + None, + payment_address, + NoteValue::from_raw(output_amount), + memo, + ) + .map_err(|e| ProtocolError::Generic(format!("failed to add output: {:?}", e)))?; + + prove_and_sign_bundle(builder, proving_key, &[ask.clone()], extra_sighash_data) +} + +/// Takes a configured Builder, generates the proof, computes the platform +/// sighash, and applies signatures. +fn prove_and_sign_bundle( + builder: Builder, + proving_key: &ProvingKey, + signing_keys: &[SpendAuthorizingKey], + extra_sighash_data: &[u8], +) -> Result, ProtocolError> { + let mut rng = OsRng; + + let (unauthorized, _) = builder + .build::(&mut rng) + .map_err(|e| ProtocolError::Generic(format!("failed to build bundle: {:?}", e)))? 
+ .ok_or_else(|| ProtocolError::Generic("bundle was empty after build".to_string()))?; + + let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); + let sighash = compute_platform_sighash(&bundle_commitment, extra_sighash_data); + + let proven = unauthorized + .create_proof(proving_key, &mut rng) + .map_err(|e| ProtocolError::Generic(format!("failed to create proof: {:?}", e)))?; + + proven + .apply_signatures(rng, sighash, signing_keys) + .map_err(|e| ProtocolError::Generic(format!("failed to apply signatures: {:?}", e))) +} + +// --------------------------------------------------------------------------- +// Public builder functions +// --------------------------------------------------------------------------- + +/// Builds a Shield state transition (transparent platform addresses -> shielded pool). +/// +/// Constructs an output-only Orchard bundle (no spends), proves it, signs the +/// transparent input witnesses, and returns a ready-to-broadcast `StateTransition`. +/// +/// # Parameters +/// - `recipient` - Orchard address to receive the shielded note +/// - `shield_amount` - Amount of credits to shield +/// - `inputs` - Platform address inputs with their nonces and balances +/// - `fee_strategy` - How to deduct fees from the transparent inputs +/// - `signer` - Signs each input address witness (ECDSA) +/// - `user_fee_increase` - Fee multiplier (0 = 100% base fee) +/// - `proving_key` - Halo 2 proving key (cache with `OnceLock` — ~30s to build) +/// - `memo` - 36-byte structured memo for the recipient (4-byte type tag + 32-byte payload) +/// - `platform_version` - Protocol version +pub fn build_shield_transition>( + recipient: &OrchardAddress, + shield_amount: u64, + inputs: BTreeMap, + fee_strategy: AddressFundsFeeStrategy, + signer: &S, + user_fee_increase: UserFeeIncrease, + proving_key: &ProvingKey, + memo: [u8; 36], + platform_version: &PlatformVersion, +) -> Result { + if fee_strategy.is_empty() { + return Err(ProtocolError::Generic( + 
"fee_strategy must have at least one step".to_string(), + )); + } + + let bundle = build_output_only_bundle(recipient, shield_amount, memo, proving_key)?; + let (actions, flags, value_balance, anchor, proof, binding_sig) = + serialize_authorized_bundle(&bundle); + + ShieldTransition::try_from_bundle_with_signer( + inputs, + actions, + flags, + value_balance, + anchor, + proof, + binding_sig, + fee_strategy, + signer, + user_fee_increase, + platform_version, + ) +} + +/// Builds a ShieldFromAssetLock state transition (core asset lock -> shielded pool). +/// +/// Like Shield, constructs an output-only Orchard bundle. The funds come from +/// a core asset lock proof rather than platform address inputs. +/// +/// # Parameters +/// - `recipient` - Orchard address to receive the shielded note +/// - `shield_amount` - Amount of credits to shield (from the asset lock) +/// - `asset_lock_proof` - Proof that funds are locked on core chain +/// - `asset_lock_private_key` - Private key for the asset lock (signs the transition) +/// - `user_fee_increase` - Fee multiplier (0 = 100% base fee) +/// - `proving_key` - Halo 2 proving key +/// - `memo` - 36-byte structured memo for the recipient (4-byte type tag + 32-byte payload) +/// - `platform_version` - Protocol version +pub fn build_shield_from_asset_lock_transition( + recipient: &OrchardAddress, + shield_amount: u64, + asset_lock_proof: crate::prelude::AssetLockProof, + asset_lock_private_key: &[u8], + user_fee_increase: UserFeeIncrease, + proving_key: &ProvingKey, + memo: [u8; 36], + platform_version: &PlatformVersion, +) -> Result { + let bundle = build_output_only_bundle(recipient, shield_amount, memo, proving_key)?; + let (actions, flags, value_balance, anchor, proof, binding_sig) = + serialize_authorized_bundle(&bundle); + + ShieldFromAssetLockTransition::try_from_asset_lock_with_bundle( + asset_lock_proof, + asset_lock_private_key, + actions, + flags, + value_balance, + anchor, + proof, + binding_sig, + user_fee_increase, 
+ platform_version, + ) +} + +/// Builds a ShieldedTransfer state transition (shielded pool -> shielded pool). +/// +/// Spends existing notes and creates a new note for the recipient. The shielded +/// fee is deducted from the spent notes. Any remaining change is returned to +/// the `change_address`. +/// +/// # Parameters +/// - `spends` - Notes to spend with their Merkle paths +/// - `recipient` - Orchard address to receive the transferred note +/// - `transfer_amount` - Amount to transfer to the recipient +/// - `change_address` - Orchard address for change output (if any) +/// - `fvk` - Full viewing key for spend authorization +/// - `ask` - Spend authorizing key for RedPallas signatures +/// - `anchor` - Merkle root of the commitment tree +/// - `proving_key` - Halo 2 proving key +/// - `memo` - 36-byte structured memo for the recipient (4-byte type tag + 32-byte payload) +/// - `fee` - Optional fee override; if `None`, the minimum fee is computed automatically. +/// If `Some`, must be >= the minimum fee. +/// - `platform_version` - Protocol version +pub fn build_shielded_transfer_transition( + spends: Vec, + recipient: &OrchardAddress, + transfer_amount: u64, + change_address: &OrchardAddress, + fvk: &FullViewingKey, + ask: &SpendAuthorizingKey, + anchor: Anchor, + proving_key: &ProvingKey, + memo: [u8; 36], + fee: Option, + platform_version: &PlatformVersion, +) -> Result { + let total_spent: u64 = spends.iter().map(|s| s.note.value().inner()).sum(); + + // Conservative action count: at least (spends, 2) since we always have + // a recipient output and likely a change output. 
+ let num_actions = spends.len().max(2); + let min_fee = compute_minimum_shielded_fee(num_actions, platform_version); + let effective_fee = match fee { + Some(f) if f < min_fee => { + return Err(ProtocolError::Generic(format!( + "fee {} is below minimum required fee {}", + f, min_fee + ))); + } + Some(f) => f, + None => min_fee, + }; + + let required = transfer_amount + .checked_add(effective_fee) + .ok_or_else(|| ProtocolError::Generic("fee + transfer_amount overflows u64".to_string()))?; + if required > total_spent { + return Err(ProtocolError::Generic(format!( + "transfer amount {} + fee {} = {} exceeds total spendable value {}", + transfer_amount, effective_fee, required, total_spent + ))); + } + + let change_amount = total_spent - required; + + let recipient_payment = orchard_address_to_payment_address(recipient)?; + + let mut builder = Builder::::new(BundleType::DEFAULT, anchor); + + for spend in spends { + builder + .add_spend(fvk.clone(), spend.note, spend.merkle_path) + .map_err(|e| ProtocolError::Generic(format!("failed to add spend: {:?}", e)))?; + } + + // Primary output to recipient + builder + .add_output( + None, + recipient_payment, + NoteValue::from_raw(transfer_amount), + memo, + ) + .map_err(|e| ProtocolError::Generic(format!("failed to add output: {:?}", e)))?; + + // Change output (if any) + if change_amount > 0 { + let change_payment = orchard_address_to_payment_address(change_address)?; + builder + .add_output( + None, + change_payment, + NoteValue::from_raw(change_amount), + [0u8; 36], + ) + .map_err(|e| ProtocolError::Generic(format!("failed to add change output: {:?}", e)))?; + } + + // ShieldedTransfer has no extra_data in sighash + let bundle = prove_and_sign_bundle(builder, proving_key, &[ask.clone()], &[])?; + let (actions, flags, value_balance, anchor_bytes, proof, binding_sig) = + serialize_authorized_bundle(&bundle); + + // value_balance = effective_fee (the amount leaving the shielded pool as fee) + 
ShieldedTransferTransition::try_from_bundle( + actions, + flags, + value_balance as u64, + anchor_bytes, + proof, + binding_sig, + platform_version, + ) +} + +/// Builds an Unshield state transition (shielded pool -> platform address). +/// +/// Spends existing notes and sends part of the value to a transparent platform +/// address. The shielded fee is deducted from the spent notes. Any remaining +/// value is returned to the shielded `change_address`. +/// +/// # Parameters +/// - `spends` - Notes to spend with their Merkle paths +/// - `output_address` - Platform address to receive the unshielded funds +/// - `unshield_amount` - Amount to unshield to the platform address +/// - `change_address` - Orchard address for change output +/// - `fvk` - Full viewing key for spend authorization +/// - `ask` - Spend authorizing key for RedPallas signatures +/// - `anchor` - Merkle root of the commitment tree +/// - `proving_key` - Halo 2 proving key +/// - `memo` - 36-byte structured memo for the change output (4-byte type tag + 32-byte payload) +/// - `fee` - Optional fee override; if `None`, the minimum fee is computed automatically. +/// If `Some`, must be >= the minimum fee. +/// - `platform_version` - Protocol version +pub fn build_unshield_transition( + spends: Vec, + output_address: PlatformAddress, + unshield_amount: u64, + change_address: &OrchardAddress, + fvk: &FullViewingKey, + ask: &SpendAuthorizingKey, + anchor: Anchor, + proving_key: &ProvingKey, + memo: [u8; 36], + fee: Option, + platform_version: &PlatformVersion, +) -> Result { + let total_spent: u64 = spends.iter().map(|s| s.note.value().inner()).sum(); + + // Conservative action count: at least (spends, 1) since we have a change output. 
+ let num_actions = spends.len().max(1); + let min_fee = compute_minimum_shielded_fee(num_actions, platform_version); + let effective_fee = match fee { + Some(f) if f < min_fee => { + return Err(ProtocolError::Generic(format!( + "fee {} is below minimum required fee {}", + f, min_fee + ))); + } + Some(f) => f, + None => min_fee, + }; + + let required = unshield_amount + .checked_add(effective_fee) + .ok_or_else(|| ProtocolError::Generic("fee + unshield_amount overflows u64".to_string()))?; + if required > total_spent { + return Err(ProtocolError::Generic(format!( + "unshield amount {} + fee {} = {} exceeds total spendable value {}", + unshield_amount, effective_fee, required, total_spent + ))); + } + + let change_amount = total_spent - required; + + // Unshield extra_data = output_address.to_bytes() || amount.to_le_bytes() + let mut extra_sighash_data = output_address.to_bytes(); + extra_sighash_data.extend_from_slice(&unshield_amount.to_le_bytes()); + + let bundle = build_spend_bundle( + spends, + change_address, + change_amount, + memo, + fvk, + ask, + anchor, + proving_key, + &extra_sighash_data, + )?; + + let (actions, flags, value_balance, anchor_bytes, proof, binding_sig) = + serialize_authorized_bundle(&bundle); + + UnshieldTransition::try_from_bundle( + output_address, + unshield_amount, + actions, + flags, + value_balance, + anchor_bytes, + proof, + binding_sig, + platform_version, + ) +} + +/// Builds a ShieldedWithdrawal state transition (shielded pool -> core L1 address). +/// +/// Spends existing notes and withdraws value to a core chain script output. +/// The shielded fee is deducted from the spent notes. Any remaining value is +/// returned to the shielded `change_address`. 
+/// +/// # Parameters +/// - `spends` - Notes to spend with their Merkle paths +/// - `withdrawal_amount` - Amount to withdraw to the core chain +/// - `output_script` - Core chain script to receive the funds +/// - `core_fee_per_byte` - Core chain fee rate +/// - `pooling` - Withdrawal pooling strategy +/// - `change_address` - Orchard address for change output +/// - `fvk` - Full viewing key for spend authorization +/// - `ask` - Spend authorizing key for RedPallas signatures +/// - `anchor` - Merkle root of the commitment tree +/// - `proving_key` - Halo 2 proving key +/// - `memo` - 36-byte structured memo for the change output (4-byte type tag + 32-byte payload) +/// - `fee` - Optional fee override; if `None`, the minimum fee is computed automatically. +/// If `Some`, must be >= the minimum fee. +/// - `platform_version` - Protocol version +pub fn build_shielded_withdrawal_transition( + spends: Vec, + withdrawal_amount: u64, + output_script: CoreScript, + core_fee_per_byte: u32, + pooling: Pooling, + change_address: &OrchardAddress, + fvk: &FullViewingKey, + ask: &SpendAuthorizingKey, + anchor: Anchor, + proving_key: &ProvingKey, + memo: [u8; 36], + fee: Option, + platform_version: &PlatformVersion, +) -> Result { + let total_spent: u64 = spends.iter().map(|s| s.note.value().inner()).sum(); + + // Conservative action count: at least (spends, 1) since we have a change output. 
+ let num_actions = spends.len().max(1); + let min_fee = compute_minimum_shielded_fee(num_actions, platform_version); + let effective_fee = match fee { + Some(f) if f < min_fee => { + return Err(ProtocolError::Generic(format!( + "fee {} is below minimum required fee {}", + f, min_fee + ))); + } + Some(f) => f, + None => min_fee, + }; + + let required = withdrawal_amount + .checked_add(effective_fee) + .ok_or_else(|| { + ProtocolError::Generic("fee + withdrawal_amount overflows u64".to_string()) + })?; + if required > total_spent { + return Err(ProtocolError::Generic(format!( + "withdrawal amount {} + fee {} = {} exceeds total spendable value {}", + withdrawal_amount, effective_fee, required, total_spent + ))); + } + + let change_amount = total_spent - required; + + // ShieldedWithdrawal extra_data = output_script.as_bytes() || amount.to_le_bytes() + let mut extra_sighash_data = output_script.as_bytes().to_vec(); + extra_sighash_data.extend_from_slice(&withdrawal_amount.to_le_bytes()); + + let bundle = build_spend_bundle( + spends, + change_address, + change_amount, + memo, + fvk, + ask, + anchor, + proving_key, + &extra_sighash_data, + )?; + + let (actions, flags, value_balance, anchor_bytes, proof, binding_sig) = + serialize_authorized_bundle(&bundle); + + ShieldedWithdrawalTransition::try_from_bundle( + withdrawal_amount, + actions, + flags, + value_balance, + anchor_bytes, + proof, + binding_sig, + core_fee_per_byte, + pooling, + output_script, + platform_version, + ) +} diff --git a/packages/rs-drive-abci/src/query/service.rs b/packages/rs-drive-abci/src/query/service.rs index 5bf6259fa6d..71ce2e44a1e 100644 --- a/packages/rs-drive-abci/src/query/service.rs +++ b/packages/rs-drive-abci/src/query/service.rs @@ -923,6 +923,7 @@ impl PlatformService for QueryService { .await } + async fn get_shielded_pool_state( &self, request: Request, diff --git a/packages/rs-drive-abci/tests/strategy_tests/strategy.rs b/packages/rs-drive-abci/tests/strategy_tests/strategy.rs 
index 3f1d5eeeeae..01758718b98 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/strategy.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/strategy.rs @@ -3,28 +3,25 @@ use crate::query::QueryStrategy; use dpp::block::block_info::BlockInfo; use dpp::dashcore::{Network, PrivateKey}; use dpp::dashcore::{ProTxHash, QuorumHash}; -// TODO: Re-enable when OperationType has shielded variants -// use dpp::shielded::{compute_platform_sighash, SerializedAction}; +use dpp::shielded::{compute_platform_sighash, SerializedAction}; use dpp::state_transition::identity_topup_transition::methods::IdentityTopUpTransitionMethodsV0; -// TODO: Re-enable when OperationType has shielded variants -// use dpp::state_transition::shield_from_asset_lock_transition::methods::ShieldFromAssetLockTransitionMethodsV0; -// use dpp::state_transition::shield_from_asset_lock_transition::ShieldFromAssetLockTransition; -// use dpp::state_transition::shield_transition::methods::ShieldTransitionMethodsV0; -// use dpp::state_transition::shield_transition::ShieldTransition; -// use dpp::state_transition::shielded_transfer_transition::methods::ShieldedTransferTransitionMethodsV0; -// use dpp::state_transition::shielded_transfer_transition::ShieldedTransferTransition; -// use dpp::state_transition::shielded_withdrawal_transition::methods::ShieldedWithdrawalTransitionMethodsV0; -// use dpp::state_transition::shielded_withdrawal_transition::ShieldedWithdrawalTransition; -// use dpp::state_transition::unshield_transition::methods::UnshieldTransitionMethodsV0; -// use dpp::state_transition::unshield_transition::UnshieldTransition; +use dpp::state_transition::shield_from_asset_lock_transition::methods::ShieldFromAssetLockTransitionMethodsV0; +use dpp::state_transition::shield_from_asset_lock_transition::ShieldFromAssetLockTransition; +use dpp::state_transition::shield_transition::methods::ShieldTransitionMethodsV0; +use dpp::state_transition::shield_transition::ShieldTransition; +use 
dpp::state_transition::shielded_transfer_transition::methods::ShieldedTransferTransitionMethodsV0; +use dpp::state_transition::shielded_transfer_transition::ShieldedTransferTransition; +use dpp::state_transition::shielded_withdrawal_transition::methods::ShieldedWithdrawalTransitionMethodsV0; +use dpp::state_transition::shielded_withdrawal_transition::ShieldedWithdrawalTransition; +use dpp::state_transition::unshield_transition::methods::UnshieldTransitionMethodsV0; +use dpp::state_transition::unshield_transition::UnshieldTransition; use dpp::ProtocolError; -// TODO: Re-enable when OperationType has shielded variants -// use grovedb_commitment_tree::{ -// Anchor, Authorized as OrchardAuthorized, Builder, Bundle, BundleType, -// ClientMemoryCommitmentTree, DashMemo, ExtractedNoteCommitment, Flags as OrchardFlags, -// FullViewingKey, MerklePath, Note, NoteValue, Position, ProvingKey, RandomSeed, Retention, Rho, -// Scope, SpendAuthorizingKey, SpendingKey, -// }; +use grovedb_commitment_tree::{ + Anchor, Authorized as OrchardAuthorized, Builder, Bundle, BundleType, + ClientMemoryCommitmentTree, DashMemo, ExtractedNoteCommitment, Flags as OrchardFlags, + FullViewingKey, MerklePath, Note, NoteValue, Position, ProvingKey, RandomSeed, Retention, Rho, + Scope, SpendAuthorizingKey, SpendingKey, +}; use dpp::dashcore::secp256k1::SecretKey; use dpp::data_contract::document_type::random_document::CreateRandomDocument; @@ -129,8 +126,7 @@ use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet}; use std::ops::RangeInclusive; use std::str::FromStr; -// TODO: Re-enable when OperationType has shielded variants -// use std::sync::OnceLock; +use std::sync::OnceLock; use strategy_tests::transitions::{ create_identity_credit_transfer_to_addresses_transition, create_identity_credit_transfer_to_addresses_transition_with_outputs, @@ -141,38 +137,142 @@ use strategy_tests::transitions::{ use strategy_tests::Strategy; use tenderdash_abci::proto::abci::{ExecTxResult, 
ValidatorSetUpdate}; -// TODO: Re-enable when OperationType has shielded variants -// /// Cached Orchard proving key for strategy tests (~30s to build, reused across tests). -// static TEST_PROVING_KEY: OnceLock = OnceLock::new(); -// -// fn get_proving_key() -> &'static ProvingKey { -// TEST_PROVING_KEY.get_or_init(ProvingKey::build) -// } -// -// /// Deterministic Orchard spending key seed used throughout all shielded strategy tests. -// const TEST_SK_BYTES: [u8; 32] = [0u8; 32]; - -/// Stub type for shielded pool state in strategy tests. -/// TODO: Re-enable full implementation when OperationType has shielded variants. -/// The full implementation with commitment tree tracking, spendable notes, and -/// Orchard key management is commented out below until the shielded OperationType -/// variants (Shield, ShieldFromAssetLock, ShieldedTransfer, Unshield, -/// ShieldedWithdrawal) are added back to the OperationType enum. -pub struct ShieldedState; - -// TODO: Re-enable when OperationType has shielded variants -// Original ShieldedState had fields: tree (ClientMemoryCommitmentTree), -// spendable_notes, checkpoint_counter, sk, fvk, ask, rho_counter -// and methods: new(), record_shielded_note(), checkpoint(), -// take_spendable_note(), has_spendable_notes() -// -// Also commented out: serialize_authorized_bundle() function and -// the 5 helper methods on NetworkStrategy: -// create_shield_transition() -// create_shield_from_asset_lock_transition() -// create_shielded_transfer_transition() -// create_unshield_transition() -// create_shielded_withdrawal_transition() +/// Cached Orchard proving key for strategy tests (~30s to build, reused across tests). +static TEST_PROVING_KEY: OnceLock = OnceLock::new(); + +fn get_proving_key() -> &'static ProvingKey { + TEST_PROVING_KEY.get_or_init(ProvingKey::build) +} + +/// Deterministic Orchard spending key seed used throughout all shielded strategy tests. 
+const TEST_SK_BYTES: [u8; 32] = [0u8; 32]; + +/// Tracks shielded pool state locally for strategy tests. +/// +/// After each block, successful Shield/ShieldFromAssetLock transitions append their +/// output note commitments to this tree. Spend-based transitions (ShieldedTransfer, +/// Unshield, ShieldedWithdrawal) then pick notes from here to build spend bundles +/// with valid Merkle witnesses. +pub struct ShieldedState { + /// Local commitment tree mirroring the on-chain tree. + pub tree: ClientMemoryCommitmentTree, + /// Spendable notes: (Note, Position in commitment tree). + /// Notes are removed once spent. + pub spendable_notes: Vec<(Note, Position)>, + /// Monotonically increasing checkpoint ID. + pub checkpoint_counter: u32, + /// Cached spending key derived from TEST_SK_BYTES. + #[allow(dead_code)] + pub sk: SpendingKey, + /// Cached full viewing key derived from sk. + pub fvk: FullViewingKey, + /// Cached spend authorizing key for signing spend bundles. + pub ask: SpendAuthorizingKey, + /// Counter for generating unique rho values for notes. + pub rho_counter: u64, +} + +impl ShieldedState { + pub fn new() -> Self { + let sk = SpendingKey::from_bytes(TEST_SK_BYTES).unwrap(); + let fvk = FullViewingKey::from(&sk); + let ask = SpendAuthorizingKey::from(&sk); + Self { + tree: ClientMemoryCommitmentTree::new(1000), + spendable_notes: Vec::new(), + checkpoint_counter: 0, + sk, + fvk, + ask, + rho_counter: 1, // Start at 1 to avoid zero rho + } + } + + /// Record a note that was output by a successful shield transition. + /// + /// `value` is the shielded amount in credits. + /// The note is reconstructed deterministically using the test spending key + /// and a unique rho derived from `rho_counter`. 
+ pub fn record_shielded_note(&mut self, value: u64) { + let recipient = self.fvk.address_at(0u32, Scope::External); + + // Create a deterministic rho from the counter + let mut rho_bytes = [0u8; 32]; + rho_bytes[..8].copy_from_slice(&self.rho_counter.to_le_bytes()); + self.rho_counter += 1; + + let rho = Rho::from_bytes(&rho_bytes).unwrap(); + let rseed = RandomSeed::from_bytes([42u8; 32], &rho).unwrap(); + let note = Note::from_parts(recipient, NoteValue::from_raw(value), rho, rseed).unwrap(); + + // Append to commitment tree + let cmx = ExtractedNoteCommitment::from(note.commitment()); + let cmx_bytes: [u8; 32] = cmx.to_bytes(); + self.tree.append(cmx_bytes, Retention::Marked).unwrap(); + + let position = self.tree.max_leaf_position().unwrap().unwrap(); + self.spendable_notes.push((note, position)); + + tracing::debug!( + value, + position = u64::from(position), + "Recorded spendable shielded note" + ); + } + + /// Create a checkpoint after processing a block. + pub fn checkpoint(&mut self) { + self.tree.checkpoint(self.checkpoint_counter).unwrap(); + self.checkpoint_counter += 1; + } + + /// Take a spendable note (removes it from the pool). + /// Returns (Note, MerklePath, Anchor) if a note is available. + pub fn take_spendable_note(&mut self) -> Option<(Note, MerklePath, Anchor)> { + if self.spendable_notes.is_empty() { + return None; + } + let (note, position) = self.spendable_notes.remove(0); + let merkle_path = self.tree.witness(position, 0).ok()??; + let anchor = self.tree.anchor().ok()?; + Some((note, merkle_path, anchor)) + } + + /// Check if any spendable notes exist. 
+ pub fn has_spendable_notes(&self) -> bool { + !self.spendable_notes.is_empty() + } +} + +fn serialize_authorized_bundle( + bundle: &Bundle, +) -> (Vec, u8, i64, [u8; 32], Vec, [u8; 64]) { + let actions: Vec = bundle + .actions() + .iter() + .map(|action| { + let enc = action.encrypted_note(); + let mut encrypted_note = Vec::with_capacity(216); + encrypted_note.extend_from_slice(&enc.epk_bytes); + encrypted_note.extend_from_slice(enc.enc_ciphertext.as_ref()); + encrypted_note.extend_from_slice(&enc.out_ciphertext); + SerializedAction { + nullifier: action.nullifier().to_bytes(), + rk: <[u8; 32]>::from(action.rk()), + cmx: action.cmx().to_bytes(), + encrypted_note, + cv_net: action.cv_net().to_bytes(), + spend_auth_sig: <[u8; 64]>::from(action.authorization()), + } + }) + .collect(); + let flags = bundle.flags().to_byte(); + let value_balance = *bundle.value_balance(); + let anchor = bundle.anchor().to_bytes(); + let proof = bundle.authorization().proof().as_ref().to_vec(); + let binding_sig = <[u8; 64]>::from(bundle.authorization().binding_signature()); + (actions, flags, value_balance, anchor, proof, binding_sig) +} #[derive(Clone, Debug, Default)] pub struct MasternodeListChangesStrategy { @@ -669,7 +769,7 @@ impl NetworkStrategy { instant_lock_quorums: &Quorums, rng: &mut StdRng, platform_version: &PlatformVersion, - _shielded_state: &mut Option, // TODO: Re-enable when OperationType has shielded variants + shielded_state: &mut Option, ) -> (Vec, Vec) { let mut maybe_state = None; let mut operations = vec![]; @@ -1889,103 +1989,102 @@ impl NetworkStrategy { operations.push(batch_transition); } - // TODO: Re-enable when OperationType has shielded variants - // OperationType::Shield(amount_range) => { - // for _i in 0..count { - // let Some(state_transition) = self.create_shield_transition( - // current_addresses_with_balance, - // amount_range, - // signer, - // rng, - // platform_version, - // ) else { - // break; - // }; - // // Record the shielded note for 
potential future spends. - // // The value is |-value_balance| since value_balance is negative - // // for shield transitions (money flowing into the pool). - // if let StateTransition::Shield(ref shield) = state_transition { - // let shielded_value = match shield { - // ShieldTransition::V0(v0) => (-v0.amount) as u64, - // }; - // let state = shielded_state.get_or_insert_with(ShieldedState::new); - // state.record_shielded_note(shielded_value); - // state.checkpoint(); - // } - // operations.push(state_transition); - // } - // } - // OperationType::ShieldFromAssetLock(amount_range) => { - // for _i in 0..count { - // let Some(state_transition) = self - // .create_shield_from_asset_lock_transition( - // amount_range, - // rng, - // instant_lock_quorums, - // &platform.config, - // platform_version, - // ) - // else { - // break; - // }; - // // Record the shielded note for potential future spends - // if let StateTransition::ShieldFromAssetLock(ref shield) = - // state_transition - // { - // let shielded_value = match shield { - // ShieldFromAssetLockTransition::V0(v0) => { - // (-v0.amount) as u64 - // } - // }; - // let state = shielded_state.get_or_insert_with(ShieldedState::new); - // state.record_shielded_note(shielded_value); - // state.checkpoint(); - // } - // operations.push(state_transition); - // } - // } - // OperationType::ShieldedTransfer(amount_range) => { - // for _i in 0..count { - // let Some(state_transition) = self.create_shielded_transfer_transition( - // amount_range, - // rng, - // shielded_state, - // platform_version, - // ) else { - // break; - // }; - // operations.push(state_transition); - // } - // } - // OperationType::Unshield(amount_range) => { - // for _i in 0..count { - // let Some(state_transition) = self.create_unshield_transition( - // current_addresses_with_balance, - // amount_range, - // rng, - // shielded_state, - // platform_version, - // ) else { - // break; - // }; - // operations.push(state_transition); - // } - // } - 
// OperationType::ShieldedWithdrawal(amount_range) => { - // for _i in 0..count { - // let Some(state_transition) = self - // .create_shielded_withdrawal_transition( - // amount_range, - // rng, - // shielded_state, - // platform_version, - // ) - // else { - // break; - // }; - // operations.push(state_transition); - // } - // } + OperationType::Shield(amount_range) => { + for _i in 0..count { + let Some(state_transition) = self.create_shield_transition( + current_addresses_with_balance, + amount_range, + signer, + rng, + platform_version, + ) else { + break; + }; + // Record the shielded note for potential future spends. + // The value is |-value_balance| since value_balance is negative + // for shield transitions (money flowing into the pool). + if let StateTransition::Shield(ref shield) = state_transition { + let shielded_value = match shield { + ShieldTransition::V0(v0) => (-v0.amount) as u64, + }; + let state = shielded_state.get_or_insert_with(ShieldedState::new); + state.record_shielded_note(shielded_value); + state.checkpoint(); + } + operations.push(state_transition); + } + } + OperationType::ShieldFromAssetLock(amount_range) => { + for _i in 0..count { + let Some(state_transition) = self + .create_shield_from_asset_lock_transition( + amount_range, + rng, + instant_lock_quorums, + &platform.config, + platform_version, + ) + else { + break; + }; + // Record the shielded note for potential future spends + if let StateTransition::ShieldFromAssetLock(ref shield) = + state_transition + { + let shielded_value = match shield { + ShieldFromAssetLockTransition::V0(v0) => { + (-v0.amount) as u64 + } + }; + let state = shielded_state.get_or_insert_with(ShieldedState::new); + state.record_shielded_note(shielded_value); + state.checkpoint(); + } + operations.push(state_transition); + } + } + OperationType::ShieldedTransfer(amount_range) => { + for _i in 0..count { + let Some(state_transition) = self.create_shielded_transfer_transition( + amount_range, + rng, + 
shielded_state, + platform_version, + ) else { + break; + }; + operations.push(state_transition); + } + } + OperationType::Unshield(amount_range) => { + for _i in 0..count { + let Some(state_transition) = self.create_unshield_transition( + current_addresses_with_balance, + amount_range, + rng, + shielded_state, + platform_version, + ) else { + break; + }; + operations.push(state_transition); + } + } + OperationType::ShieldedWithdrawal(amount_range) => { + for _i in 0..count { + let Some(state_transition) = self + .create_shielded_withdrawal_transition( + amount_range, + rng, + shielded_state, + platform_version, + ) + else { + break; + }; + operations.push(state_transition); + } + } _ => {} } } @@ -2608,437 +2707,436 @@ impl NetworkStrategy { Some(funding_transition) } - // TODO: Re-enable when OperationType has shielded variants - // /// Build a Shield state transition (transparent addresses -> shielded pool). - // /// - // /// Creates an output-only Orchard bundle (no spends) with a real Halo 2 proof, - // /// signs the address input witnesses, and returns the transition. - // fn create_shield_transition( - // &mut self, - // current_addresses_with_balance: &mut AddressesWithBalance, - // amount_range: &AmountRange, - // signer: &mut SimpleSigner, - // rng: &mut StdRng, - // platform_version: &PlatformVersion, - // ) -> Option { - // // 1. Pick input addresses with sufficient balances - // let inputs = - // current_addresses_with_balance.take_random_amounts_with_range(amount_range, rng)?; - // - // let total_input: Credits = inputs.values().map(|(_, credits)| credits).sum(); - // - // tracing::debug!(?inputs, total_input, "Preparing shield transition"); - // - // // 2. Create deterministic Orchard recipient (same key each time is fine for testing) - // let sk = SpendingKey::from_bytes([0u8; 32]).unwrap(); - // let fvk = FullViewingKey::from(&sk); - // let recipient = fvk.address_at(0u32, Scope::External); - // - // // 3. 
Build output-only Orchard bundle (shield = outputs only, no spends) - // let anchor = Anchor::empty_tree(); - // let mut builder = Builder::::new( - // BundleType::Transactional { - // flags: OrchardFlags::SPENDS_DISABLED, - // bundle_required: false, - // }, - // anchor, - // ); - // - // // Use total_input as the shielded value (fee will be deducted from inputs) - // // value_balance will be negative (money flowing into the pool) - // let shield_value = total_input; - // builder - // .add_output( - // None, - // recipient, - // NoteValue::from_raw(shield_value), - // [0u8; 36], - // ) - // .expect("expected to add output"); - // - // // 4. Build -> prove -> sign - // let pk = get_proving_key(); - // let mut bundle_rng = rand::rngs::OsRng; - // let (unauthorized, _) = builder - // .build::(&mut bundle_rng) - // .expect("expected to build bundle") - // .expect("expected bundle to be present"); - // - // let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); - // let sighash = compute_platform_sighash(&bundle_commitment, &[]); - // let proven = unauthorized - // .create_proof(pk, &mut bundle_rng) - // .expect("expected to create proof"); - // let bundle = proven - // .apply_signatures(bundle_rng, sighash, &[]) - // .expect("expected to apply signatures"); - // - // // 5. Decompose bundle into platform serialization fields - // let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = - // serialize_authorized_bundle(&bundle); - // - // // 6. 
Build ShieldTransition with signed address witnesses - // let fee_strategy: AddressFundsFeeStrategy = - // vec![AddressFundsFeeStrategyStep::DeductFromInput(0)].into(); - // - // let shield_transition = ShieldTransition::try_from_bundle_with_signer( - // inputs, - // actions, - // flags, - // value_balance, - // anchor_bytes, - // proof_bytes, - // binding_sig, - // fee_strategy, - // signer, - // 0, - // platform_version, - // ) - // .expect("expected to create shield transition"); - // - // tracing::debug!("Shield transition successfully built and signed"); - // - // Some(shield_transition) - // } - // - // /// Build a ShieldFromAssetLock state transition (core asset lock -> shielded pool). - // /// - // /// Like Shield, this is output-only (no spends). The funds come from a core - // /// asset lock proof rather than platform address inputs. - // fn create_shield_from_asset_lock_transition( - // &mut self, - // amount_range: &AmountRange, - // rng: &mut StdRng, - // instant_lock_quorums: &Quorums, - // platform_config: &PlatformConfig, - // platform_version: &PlatformVersion, - // ) -> Option { - // // 1. Create asset lock proof - // let (asset_lock_proof, asset_lock_private_key, funded_amount) = self - // .create_asset_lock_proof_with_amount( - // rng, - // amount_range, - // instant_lock_quorums, - // platform_config, - // platform_version, - // ); - // - // tracing::debug!(funded_amount, "Preparing shield from asset lock transition"); - // - // // 2. Create deterministic Orchard recipient - // let sk = SpendingKey::from_bytes(TEST_SK_BYTES).unwrap(); - // let fvk = FullViewingKey::from(&sk); - // let recipient = fvk.address_at(0u32, Scope::External); - // - // // 3. 
Build output-only Orchard bundle (same as Shield) - // let anchor = Anchor::empty_tree(); - // let mut builder = Builder::::new( - // BundleType::Transactional { - // flags: OrchardFlags::SPENDS_DISABLED, - // bundle_required: false, - // }, - // anchor, - // ); - // - // builder - // .add_output( - // None, - // recipient, - // NoteValue::from_raw(funded_amount), - // [0u8; 36], - // ) - // .expect("expected to add output"); - // - // // 4. Build -> prove -> sign - // let pk = get_proving_key(); - // let mut bundle_rng = rand::rngs::OsRng; - // let (unauthorized, _) = builder - // .build::(&mut bundle_rng) - // .expect("expected to build bundle") - // .expect("expected bundle to be present"); - // - // let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); - // let sighash = compute_platform_sighash(&bundle_commitment, &[]); - // let proven = unauthorized - // .create_proof(pk, &mut bundle_rng) - // .expect("expected to create proof"); - // let bundle = proven - // .apply_signatures(bundle_rng, sighash, &[]) - // .expect("expected to apply signatures"); - // - // // 5. Decompose bundle - // let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = - // serialize_authorized_bundle(&bundle); - // - // // 6. Build ShieldFromAssetLockTransition - // let transition = ShieldFromAssetLockTransition::try_from_asset_lock_with_bundle( - // asset_lock_proof, - // asset_lock_private_key.as_slice(), - // actions, - // flags, - // value_balance, - // anchor_bytes, - // proof_bytes, - // binding_sig, - // 0, - // platform_version, - // ) - // .expect("expected to create shield from asset lock transition"); - // - // tracing::debug!("ShieldFromAssetLock transition successfully built and signed"); - // - // Some(transition) - // } - // - // /// Build a ShieldedTransfer state transition (shielded pool -> shielded pool). - // /// - // /// Spends an existing note and creates a new note with the same value. 
- // /// Requires notes from prior Shield or ShieldFromAssetLock transitions. - // fn create_shielded_transfer_transition( - // &mut self, - // _amount_range: &AmountRange, - // _rng: &mut StdRng, - // shielded_state: &mut Option, - // platform_version: &PlatformVersion, - // ) -> Option { - // let state = shielded_state.as_mut()?; - // if !state.has_spendable_notes() { - // tracing::debug!("No spendable notes available for shielded transfer"); - // return None; - // } - // - // let (note, merkle_path, anchor) = state.take_spendable_note()?; - // let note_value = note.value().inner(); - // - // tracing::debug!(note_value, "Building shielded transfer bundle"); - // - // let fvk = state.fvk.clone(); - // let ask = state.ask.clone(); - // let recipient = fvk.address_at(0u32, Scope::External); - // - // // Build bundle: spend note -> output same value (value_balance = 0) - // let mut builder = Builder::::new(BundleType::DEFAULT, anchor); - // builder - // .add_spend(fvk, note, merkle_path) - // .expect("expected to add spend"); - // builder - // .add_output(None, recipient, NoteValue::from_raw(note_value), [0u8; 36]) - // .expect("expected to add output"); - // - // let pk = get_proving_key(); - // let mut bundle_rng = rand::rngs::OsRng; - // let (unauthorized, _) = builder - // .build::(&mut bundle_rng) - // .expect("expected to build bundle") - // .expect("expected bundle to be present"); - // - // // Shielded transfer has no extra_data in sighash - // let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); - // let sighash = compute_platform_sighash(&bundle_commitment, &[]); - // let proven = unauthorized - // .create_proof(pk, &mut bundle_rng) - // .expect("expected to create proof"); - // let bundle = proven - // .apply_signatures(bundle_rng, sighash, &[ask]) - // .expect("expected to apply signatures"); - // - // let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = - // serialize_authorized_bundle(&bundle); - // - // // 
value_balance should be 0 (all value stays in pool) - // // Cast i64 to u64 for the ShieldedTransferTransition API - // let transition = ShieldedTransferTransition::try_from_bundle( - // actions, - // flags, - // value_balance as u64, - // anchor_bytes, - // proof_bytes, - // binding_sig, - // platform_version, - // ) - // .expect("expected to create shielded transfer transition"); - // - // tracing::debug!("ShieldedTransfer transition successfully built"); - // - // Some(transition) - // } - // - // /// Build an Unshield state transition (shielded pool -> platform address). - // /// - // /// Spends an existing note and sends the value to a platform address. - // /// Requires notes from prior Shield or ShieldFromAssetLock transitions. - // fn create_unshield_transition( - // &mut self, - // _current_addresses_with_balance: &mut AddressesWithBalance, - // _amount_range: &AmountRange, - // _rng: &mut StdRng, - // shielded_state: &mut Option, - // platform_version: &PlatformVersion, - // ) -> Option { - // let state = shielded_state.as_mut()?; - // if !state.has_spendable_notes() { - // tracing::debug!("No spendable notes available for unshield"); - // return None; - // } - // - // let (note, merkle_path, anchor) = state.take_spendable_note()?; - // let note_value = note.value().inner(); - // - // tracing::debug!(note_value, "Building unshield bundle"); - // - // let fvk = state.fvk.clone(); - // let ask = state.ask.clone(); - // let recipient = fvk.address_at(0u32, Scope::External); - // - // // Spend full note, output half back to pool, unshield the other half - // let unshield_amount = note_value / 2; - // let change_amount = note_value - unshield_amount; - // - // // Build bundle: spend note -> output change (value_balance = unshield_amount) - // let mut builder = Builder::::new(BundleType::DEFAULT, anchor); - // builder - // .add_spend(fvk, note, merkle_path) - // .expect("expected to add spend"); - // builder - // .add_output( - // None, - // recipient, - // 
NoteValue::from_raw(change_amount), - // [0u8; 36], - // ) - // .expect("expected to add output"); - // - // let pk = get_proving_key(); - // let mut bundle_rng = rand::rngs::OsRng; - // let (unauthorized, _) = builder - // .build::(&mut bundle_rng) - // .expect("expected to build bundle") - // .expect("expected bundle to be present"); - // - // // Unshield extra_data = output_address.to_bytes() || amount.to_le_bytes() - // let output_address = PlatformAddress::P2pkh([42u8; 20]); - // let amount = unshield_amount; - // let mut extra_sighash_data = output_address.to_bytes(); - // extra_sighash_data.extend_from_slice(&amount.to_le_bytes()); - // - // let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); - // let sighash = compute_platform_sighash(&bundle_commitment, &extra_sighash_data); - // let proven = unauthorized - // .create_proof(pk, &mut bundle_rng) - // .expect("expected to create proof"); - // let bundle = proven - // .apply_signatures(bundle_rng, sighash, &[ask]) - // .expect("expected to apply signatures"); - // - // let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = - // serialize_authorized_bundle(&bundle); - // - // let transition = UnshieldTransition::try_from_bundle( - // output_address, - // amount, - // actions, - // flags, - // value_balance, - // anchor_bytes, - // proof_bytes, - // binding_sig, - // platform_version, - // ) - // .expect("expected to create unshield transition"); - // - // tracing::debug!(amount, "Unshield transition successfully built"); - // - // Some(transition) - // } - // - // /// Build a ShieldedWithdrawal state transition (shielded pool -> core L1 address). - // /// - // /// Spends an existing note and withdraws the value to a core script. - // /// Requires notes from prior Shield or ShieldFromAssetLock transitions. 
- // fn create_shielded_withdrawal_transition( - // &mut self, - // _amount_range: &AmountRange, - // _rng: &mut StdRng, - // shielded_state: &mut Option, - // platform_version: &PlatformVersion, - // ) -> Option { - // let state = shielded_state.as_mut()?; - // if !state.has_spendable_notes() { - // tracing::debug!("No spendable notes available for shielded withdrawal"); - // return None; - // } - // - // let (note, merkle_path, anchor) = state.take_spendable_note()?; - // let note_value = note.value().inner(); - // - // tracing::debug!(note_value, "Building shielded withdrawal bundle"); - // - // let fvk = state.fvk.clone(); - // let ask = state.ask.clone(); - // let recipient = fvk.address_at(0u32, Scope::External); - // - // // Spend full note, output half back to pool, withdraw the other half - // let withdrawal_amount = note_value / 2; - // let change_amount = note_value - withdrawal_amount; - // - // // Build bundle: spend note -> output change (value_balance = withdrawal_amount) - // let mut builder = Builder::::new(BundleType::DEFAULT, anchor); - // builder - // .add_spend(fvk, note, merkle_path) - // .expect("expected to add spend"); - // builder - // .add_output( - // None, - // recipient, - // NoteValue::from_raw(change_amount), - // [0u8; 36], - // ) - // .expect("expected to add output"); - // - // let pk = get_proving_key(); - // let mut bundle_rng = rand::rngs::OsRng; - // let (unauthorized, _) = builder - // .build::(&mut bundle_rng) - // .expect("expected to build bundle") - // .expect("expected bundle to be present"); - // - // // ShieldedWithdrawal extra_data = output_script.as_bytes() || amount.to_le_bytes() - // let output_script = CoreScript::new_p2pkh([7u8; 20]); - // let amount = withdrawal_amount; - // let mut extra_sighash_data = output_script.as_bytes().to_vec(); - // extra_sighash_data.extend_from_slice(&amount.to_le_bytes()); - // - // let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); - // let sighash = 
compute_platform_sighash(&bundle_commitment, &extra_sighash_data); - // let proven = unauthorized - // .create_proof(pk, &mut bundle_rng) - // .expect("expected to create proof"); - // let bundle = proven - // .apply_signatures(bundle_rng, sighash, &[ask]) - // .expect("expected to apply signatures"); - // - // let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = - // serialize_authorized_bundle(&bundle); - // - // let transition = ShieldedWithdrawalTransition::try_from_bundle( - // amount, - // actions, - // flags, - // value_balance, - // anchor_bytes, - // proof_bytes, - // binding_sig, - // 1, // core_fee_per_byte - // Pooling::Never, - // output_script, - // platform_version, - // ) - // .expect("expected to create shielded withdrawal transition"); - // - // tracing::debug!(amount, "ShieldedWithdrawal transition successfully built"); - // - // Some(transition) - // } + /// Build a Shield state transition (transparent addresses -> shielded pool). + /// + /// Creates an output-only Orchard bundle (no spends) with a real Halo 2 proof, + /// signs the address input witnesses, and returns the transition. + fn create_shield_transition( + &mut self, + current_addresses_with_balance: &mut AddressesWithBalance, + amount_range: &AmountRange, + signer: &mut SimpleSigner, + rng: &mut StdRng, + platform_version: &PlatformVersion, + ) -> Option { + // 1. Pick input addresses with sufficient balances + let inputs = + current_addresses_with_balance.take_random_amounts_with_range(amount_range, rng)?; + + let total_input: Credits = inputs.values().map(|(_, credits)| credits).sum(); + + tracing::debug!(?inputs, total_input, "Preparing shield transition"); + + // 2. Create deterministic Orchard recipient (same key each time is fine for testing) + let sk = SpendingKey::from_bytes([0u8; 32]).unwrap(); + let fvk = FullViewingKey::from(&sk); + let recipient = fvk.address_at(0u32, Scope::External); + + // 3. 
Build output-only Orchard bundle (shield = outputs only, no spends) + let anchor = Anchor::empty_tree(); + let mut builder = Builder::::new( + BundleType::Transactional { + flags: OrchardFlags::SPENDS_DISABLED, + bundle_required: false, + }, + anchor, + ); + + // Use total_input as the shielded value (fee will be deducted from inputs) + // value_balance will be negative (money flowing into the pool) + let shield_value = total_input; + builder + .add_output( + None, + recipient, + NoteValue::from_raw(shield_value), + [0u8; 36], + ) + .expect("expected to add output"); + + // 4. Build -> prove -> sign + let pk = get_proving_key(); + let mut bundle_rng = rand::rngs::OsRng; + let (unauthorized, _) = builder + .build::(&mut bundle_rng) + .expect("expected to build bundle") + .expect("expected bundle to be present"); + + let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); + let sighash = compute_platform_sighash(&bundle_commitment, &[]); + let proven = unauthorized + .create_proof(pk, &mut bundle_rng) + .expect("expected to create proof"); + let bundle = proven + .apply_signatures(bundle_rng, sighash, &[]) + .expect("expected to apply signatures"); + + // 5. Decompose bundle into platform serialization fields + let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + + // 6. Build ShieldTransition with signed address witnesses + let fee_strategy: AddressFundsFeeStrategy = + vec![AddressFundsFeeStrategyStep::DeductFromInput(0)].into(); + + let shield_transition = ShieldTransition::try_from_bundle_with_signer( + inputs, + actions, + flags, + value_balance, + anchor_bytes, + proof_bytes, + binding_sig, + fee_strategy, + signer, + 0, + platform_version, + ) + .expect("expected to create shield transition"); + + tracing::debug!("Shield transition successfully built and signed"); + + Some(shield_transition) + } + + /// Build a ShieldFromAssetLock state transition (core asset lock -> shielded pool). 
+ /// + /// Like Shield, this is output-only (no spends). The funds come from a core + /// asset lock proof rather than platform address inputs. + fn create_shield_from_asset_lock_transition( + &mut self, + amount_range: &AmountRange, + rng: &mut StdRng, + instant_lock_quorums: &Quorums, + platform_config: &PlatformConfig, + platform_version: &PlatformVersion, + ) -> Option { + // 1. Create asset lock proof + let (asset_lock_proof, asset_lock_private_key, funded_amount) = self + .create_asset_lock_proof_with_amount( + rng, + amount_range, + instant_lock_quorums, + platform_config, + platform_version, + ); + + tracing::debug!(funded_amount, "Preparing shield from asset lock transition"); + + // 2. Create deterministic Orchard recipient + let sk = SpendingKey::from_bytes(TEST_SK_BYTES).unwrap(); + let fvk = FullViewingKey::from(&sk); + let recipient = fvk.address_at(0u32, Scope::External); + + // 3. Build output-only Orchard bundle (same as Shield) + let anchor = Anchor::empty_tree(); + let mut builder = Builder::::new( + BundleType::Transactional { + flags: OrchardFlags::SPENDS_DISABLED, + bundle_required: false, + }, + anchor, + ); + + builder + .add_output( + None, + recipient, + NoteValue::from_raw(funded_amount), + [0u8; 36], + ) + .expect("expected to add output"); + + // 4. Build -> prove -> sign + let pk = get_proving_key(); + let mut bundle_rng = rand::rngs::OsRng; + let (unauthorized, _) = builder + .build::(&mut bundle_rng) + .expect("expected to build bundle") + .expect("expected bundle to be present"); + + let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); + let sighash = compute_platform_sighash(&bundle_commitment, &[]); + let proven = unauthorized + .create_proof(pk, &mut bundle_rng) + .expect("expected to create proof"); + let bundle = proven + .apply_signatures(bundle_rng, sighash, &[]) + .expect("expected to apply signatures"); + + // 5. 
Decompose bundle + let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + + // 6. Build ShieldFromAssetLockTransition + let transition = ShieldFromAssetLockTransition::try_from_asset_lock_with_bundle( + asset_lock_proof, + asset_lock_private_key.as_slice(), + actions, + flags, + value_balance, + anchor_bytes, + proof_bytes, + binding_sig, + 0, + platform_version, + ) + .expect("expected to create shield from asset lock transition"); + + tracing::debug!("ShieldFromAssetLock transition successfully built and signed"); + + Some(transition) + } + + /// Build a ShieldedTransfer state transition (shielded pool -> shielded pool). + /// + /// Spends an existing note and creates a new note with the same value. + /// Requires notes from prior Shield or ShieldFromAssetLock transitions. + fn create_shielded_transfer_transition( + &mut self, + _amount_range: &AmountRange, + _rng: &mut StdRng, + shielded_state: &mut Option, + platform_version: &PlatformVersion, + ) -> Option { + let state = shielded_state.as_mut()?; + if !state.has_spendable_notes() { + tracing::debug!("No spendable notes available for shielded transfer"); + return None; + } + + let (note, merkle_path, anchor) = state.take_spendable_note()?; + let note_value = note.value().inner(); + + tracing::debug!(note_value, "Building shielded transfer bundle"); + + let fvk = state.fvk.clone(); + let ask = state.ask.clone(); + let recipient = fvk.address_at(0u32, Scope::External); + + // Build bundle: spend note -> output same value (value_balance = 0) + let mut builder = Builder::::new(BundleType::DEFAULT, anchor); + builder + .add_spend(fvk, note, merkle_path) + .expect("expected to add spend"); + builder + .add_output(None, recipient, NoteValue::from_raw(note_value), [0u8; 36]) + .expect("expected to add output"); + + let pk = get_proving_key(); + let mut bundle_rng = rand::rngs::OsRng; + let (unauthorized, _) = builder + .build::(&mut bundle_rng) + 
.expect("expected to build bundle") + .expect("expected bundle to be present"); + + // Shielded transfer has no extra_data in sighash + let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); + let sighash = compute_platform_sighash(&bundle_commitment, &[]); + let proven = unauthorized + .create_proof(pk, &mut bundle_rng) + .expect("expected to create proof"); + let bundle = proven + .apply_signatures(bundle_rng, sighash, &[ask]) + .expect("expected to apply signatures"); + + let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + + // value_balance should be 0 (all value stays in pool) + // Cast i64 to u64 for the ShieldedTransferTransition API + let transition = ShieldedTransferTransition::try_from_bundle( + actions, + flags, + value_balance as u64, + anchor_bytes, + proof_bytes, + binding_sig, + platform_version, + ) + .expect("expected to create shielded transfer transition"); + + tracing::debug!("ShieldedTransfer transition successfully built"); + + Some(transition) + } + + /// Build an Unshield state transition (shielded pool -> platform address). + /// + /// Spends an existing note and sends the value to a platform address. + /// Requires notes from prior Shield or ShieldFromAssetLock transitions. 
+ fn create_unshield_transition( + &mut self, + _current_addresses_with_balance: &mut AddressesWithBalance, + _amount_range: &AmountRange, + _rng: &mut StdRng, + shielded_state: &mut Option, + platform_version: &PlatformVersion, + ) -> Option { + let state = shielded_state.as_mut()?; + if !state.has_spendable_notes() { + tracing::debug!("No spendable notes available for unshield"); + return None; + } + + let (note, merkle_path, anchor) = state.take_spendable_note()?; + let note_value = note.value().inner(); + + tracing::debug!(note_value, "Building unshield bundle"); + + let fvk = state.fvk.clone(); + let ask = state.ask.clone(); + let recipient = fvk.address_at(0u32, Scope::External); + + // Spend full note, output half back to pool, unshield the other half + let unshield_amount = note_value / 2; + let change_amount = note_value - unshield_amount; + + // Build bundle: spend note -> output change (value_balance = unshield_amount) + let mut builder = Builder::::new(BundleType::DEFAULT, anchor); + builder + .add_spend(fvk, note, merkle_path) + .expect("expected to add spend"); + builder + .add_output( + None, + recipient, + NoteValue::from_raw(change_amount), + [0u8; 36], + ) + .expect("expected to add output"); + + let pk = get_proving_key(); + let mut bundle_rng = rand::rngs::OsRng; + let (unauthorized, _) = builder + .build::(&mut bundle_rng) + .expect("expected to build bundle") + .expect("expected bundle to be present"); + + // Unshield extra_data = output_address.to_bytes() || amount.to_le_bytes() + let output_address = PlatformAddress::P2pkh([42u8; 20]); + let amount = unshield_amount; + let mut extra_sighash_data = output_address.to_bytes(); + extra_sighash_data.extend_from_slice(&amount.to_le_bytes()); + + let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); + let sighash = compute_platform_sighash(&bundle_commitment, &extra_sighash_data); + let proven = unauthorized + .create_proof(pk, &mut bundle_rng) + .expect("expected to create proof"); + 
let bundle = proven + .apply_signatures(bundle_rng, sighash, &[ask]) + .expect("expected to apply signatures"); + + let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + + let transition = UnshieldTransition::try_from_bundle( + output_address, + amount, + actions, + flags, + value_balance, + anchor_bytes, + proof_bytes, + binding_sig, + platform_version, + ) + .expect("expected to create unshield transition"); + + tracing::debug!(amount, "Unshield transition successfully built"); + + Some(transition) + } + + /// Build a ShieldedWithdrawal state transition (shielded pool -> core L1 address). + /// + /// Spends an existing note and withdraws the value to a core script. + /// Requires notes from prior Shield or ShieldFromAssetLock transitions. + fn create_shielded_withdrawal_transition( + &mut self, + _amount_range: &AmountRange, + _rng: &mut StdRng, + shielded_state: &mut Option, + platform_version: &PlatformVersion, + ) -> Option { + let state = shielded_state.as_mut()?; + if !state.has_spendable_notes() { + tracing::debug!("No spendable notes available for shielded withdrawal"); + return None; + } + + let (note, merkle_path, anchor) = state.take_spendable_note()?; + let note_value = note.value().inner(); + + tracing::debug!(note_value, "Building shielded withdrawal bundle"); + + let fvk = state.fvk.clone(); + let ask = state.ask.clone(); + let recipient = fvk.address_at(0u32, Scope::External); + + // Spend full note, output half back to pool, withdraw the other half + let withdrawal_amount = note_value / 2; + let change_amount = note_value - withdrawal_amount; + + // Build bundle: spend note -> output change (value_balance = withdrawal_amount) + let mut builder = Builder::::new(BundleType::DEFAULT, anchor); + builder + .add_spend(fvk, note, merkle_path) + .expect("expected to add spend"); + builder + .add_output( + None, + recipient, + NoteValue::from_raw(change_amount), + [0u8; 36], + ) + 
.expect("expected to add output"); + + let pk = get_proving_key(); + let mut bundle_rng = rand::rngs::OsRng; + let (unauthorized, _) = builder + .build::(&mut bundle_rng) + .expect("expected to build bundle") + .expect("expected bundle to be present"); + + // ShieldedWithdrawal extra_data = output_script.as_bytes() || amount.to_le_bytes() + let output_script = CoreScript::new_p2pkh([7u8; 20]); + let amount = withdrawal_amount; + let mut extra_sighash_data = output_script.as_bytes().to_vec(); + extra_sighash_data.extend_from_slice(&amount.to_le_bytes()); + + let bundle_commitment: [u8; 32] = unauthorized.commitment().into(); + let sighash = compute_platform_sighash(&bundle_commitment, &extra_sighash_data); + let proven = unauthorized + .create_proof(pk, &mut bundle_rng) + .expect("expected to create proof"); + let bundle = proven + .apply_signatures(bundle_rng, sighash, &[ask]) + .expect("expected to apply signatures"); + + let (actions, flags, value_balance, anchor_bytes, proof_bytes, binding_sig) = + serialize_authorized_bundle(&bundle); + + let transition = ShieldedWithdrawalTransition::try_from_bundle( + amount, + actions, + flags, + value_balance, + anchor_bytes, + proof_bytes, + binding_sig, + 1, // core_fee_per_byte + Pooling::Never, + output_script, + platform_version, + ) + .expect("expected to create shielded withdrawal transition"); + + tracing::debug!(amount, "ShieldedWithdrawal transition successfully built"); + + Some(transition) + } } pub enum StrategyRandomness { diff --git a/packages/rs-drive-abci/tests/strategy_tests/test_cases/mod.rs b/packages/rs-drive-abci/tests/strategy_tests/test_cases/mod.rs index 8f0537b8c14..ae385fd7531 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/test_cases/mod.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/test_cases/mod.rs @@ -6,8 +6,7 @@ mod core_update_tests; mod data_contract_history_tests; mod identity_and_document_tests; mod identity_transfer_tests; -// TODO: re-enable once OperationType shielded 
variants are implemented -// mod shielded_tests; +mod shielded_tests; mod token_tests; mod top_up_tests; mod update_identities_tests; diff --git a/packages/rs-drive-abci/tests/strategy_tests/test_cases/shielded_tests.rs b/packages/rs-drive-abci/tests/strategy_tests/test_cases/shielded_tests.rs index 96e0cf4c792..af97a207237 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/test_cases/shielded_tests.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/test_cases/shielded_tests.rs @@ -1,6 +1,5 @@ // Feature-gated because shielded strategy tests are long-running (ZK proof generation). // Run with: cargo test -p drive-abci --features __shielded_strategy_tests -// TODO: Add shielded variants to OperationType enum to enable these tests. #[cfg(feature = "__shielded_strategy_tests")] #[cfg(test)] mod tests { diff --git a/packages/rs-drive-proof-verifier/src/proof.rs b/packages/rs-drive-proof-verifier/src/proof.rs index 18f597914af..6b6cb561cc5 100644 --- a/packages/rs-drive-proof-verifier/src/proof.rs +++ b/packages/rs-drive-proof-verifier/src/proof.rs @@ -2472,6 +2472,7 @@ impl FromProof for MostRecentShiel } } + impl FromProof for ShieldedEncryptedNotes { type Request = platform::GetShieldedEncryptedNotesRequest; type Response = platform::GetShieldedEncryptedNotesResponse; diff --git a/packages/rs-drive-proof-verifier/src/types.rs b/packages/rs-drive-proof-verifier/src/types.rs index 27f824260a7..8dfe89b3578 100644 --- a/packages/rs-drive-proof-verifier/src/types.rs +++ b/packages/rs-drive-proof-verifier/src/types.rs @@ -840,6 +840,7 @@ pub struct ShieldedAnchors(pub Vec<[u8; 32]>); )] pub struct MostRecentShieldedAnchor(pub [u8; 32]); + /// Status of a single nullifier (spent or unspent) #[derive(Debug, Clone)] #[cfg_attr( diff --git a/packages/rs-drive/src/drive/saved_block_transactions/cleanup_expired_nullifiers/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/cleanup_expired_nullifiers/mod.rs new file mode 100644 index 00000000000..a3288990f1b --- 
/dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/cleanup_expired_nullifiers/mod.rs @@ -0,0 +1,49 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; + +impl Drive { + /// Cleans up expired compacted nullifier entries. + /// + /// This function queries the nullifier expiration time tree for entries with + /// expiration time <= current_block_time_ms, then deletes: + /// 1. The corresponding compacted nullifier entries + /// 2. The expiration time entries themselves + /// + /// # Arguments + /// * `current_block_time_ms` - The current block time in milliseconds + /// * `transaction` - Optional database transaction + /// * `platform_version` - The platform version + /// + /// # Returns + /// * `Ok(usize)` - The number of compacted entries that were cleaned up + /// * `Err` - An error occurred + pub fn cleanup_expired_nullifiers( + &self, + current_block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .saved_block_transactions + .cleanup_expired_nullifiers + { + 0 => self.cleanup_expired_nullifiers_v0( + current_block_time_ms, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "cleanup_expired_nullifiers".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/cleanup_expired_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/cleanup_expired_nullifiers/v0/mod.rs new file mode 100644 index 00000000000..e9a48f2e051 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/cleanup_expired_nullifiers/v0/mod.rs @@ -0,0 +1,95 @@ +use crate::drive::Drive; +use crate::error::Error; +use 
crate::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; +use crate::util::batch::GroveDbOpBatch; +use dpp::ProtocolError; +use grovedb::query_result_type::QueryResultType; +use grovedb::{PathQuery, Query, QueryItem, SizedQuery, TransactionArg}; +use platform_version::version::PlatformVersion; + +impl Drive { + /// Version 0 implementation of cleaning up expired compacted nullifier entries. + /// + /// Queries for all expiration entries with time <= current_block_time_ms, + /// then deletes the corresponding compacted entries and the expiration entries. + pub(super) fn cleanup_expired_nullifiers_v0( + &self, + current_block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let expiration_path = Self::saved_nullifiers_expiration_time_path_vec(); + + // Query all entries with expiration time <= current_block_time_ms + let mut query = Query::new(); + // Range from 0 to current_block_time_ms (inclusive) + query.insert_item(QueryItem::RangeToInclusive( + ..=current_block_time_ms.to_be_bytes().to_vec(), + )); + + let path_query = + PathQuery::new(expiration_path.clone(), SizedQuery::new(query, None, None)); + + let (results, _) = self.grove_get_path_query( + &path_query, + transaction, + QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + )?; + + let key_elements = results.to_key_elements(); + + if key_elements.is_empty() { + return Ok(0); + } + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let mut batch = GroveDbOpBatch::new(); + let mut total_cleaned = 0usize; + + let compacted_path = Self::saved_compacted_block_transactions_nullifiers_path_vec(); + + for (expiration_key, element) in key_elements { + // Get the vec of block ranges from the element + let grovedb::Element::Item(serialized_ranges, _) = element else { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "expected item element for expiration 
block ranges".to_string(), + ), + ))); + }; + + // Deserialize the vec of block ranges + let (ranges, _): (Vec<(u64, u64)>, usize) = + bincode::decode_from_slice(&serialized_ranges, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot decode expiration block ranges: {}", + e + )))) + })?; + + // Delete each compacted nullifier entry + for (start_block, end_block) in &ranges { + let mut compacted_key = Vec::with_capacity(16); + compacted_key.extend_from_slice(&start_block.to_be_bytes()); + compacted_key.extend_from_slice(&end_block.to_be_bytes()); + + batch.add_delete(compacted_path.clone(), compacted_key); + total_cleaned += 1; + } + + // Delete the expiration entry itself + batch.add_delete(expiration_path.clone(), expiration_key); + } + + if !batch.is_empty() { + self.grove_apply_batch(batch, false, transaction, &platform_version.drive)?; + } + + Ok(total_cleaned) + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/compact_nullifiers/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/compact_nullifiers/mod.rs new file mode 100644 index 00000000000..d85d2815fa1 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/compact_nullifiers/mod.rs @@ -0,0 +1,61 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; + +/// One week in milliseconds (used for compacted nullifier expiration) +pub const ONE_WEEK_IN_MS: u64 = 7 * 24 * 60 * 60 * 1000; + +impl Drive { + /// Compacts nullifiers from recent blocks, including the current block, + /// into a single compacted entry. + /// + /// This function drains all entries from the nullifiers tree, concatenates them + /// with the provided current block's nullifiers, and stores the result in + /// the compacted nullifiers tree with a (start_block, end_block) key. 
+ /// + /// Also stores the expiration time (current block time + 1 week) in the + /// nullifiers expiration time tree. + /// + /// # Arguments + /// * `current_nullifiers` - The current block's nullifiers to include + /// * `current_block_height` - The height of the current block + /// * `current_block_time_ms` - The current block time in milliseconds + /// * `transaction` - Optional database transaction + /// * `platform_version` - The platform version + /// + /// # Returns + /// * `Ok((start, end))` - The block range that was compacted + /// * `Err` - An error occurred + pub fn compact_nullifiers_with_current_block( + &self, + current_nullifiers: &[[u8; 32]], + current_block_height: u64, + current_block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result<(u64, u64), Error> { + match platform_version + .drive + .methods + .saved_block_transactions + .compact_nullifiers + { + 0 => self.compact_nullifiers_with_current_block_v0( + current_nullifiers, + current_block_height, + current_block_time_ms, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "compact_nullifiers_with_current_block".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/compact_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/compact_nullifiers/v0/mod.rs new file mode 100644 index 00000000000..849c6d7d037 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/compact_nullifiers/v0/mod.rs @@ -0,0 +1,194 @@ +use crate::drive::saved_block_transactions::compact_nullifiers::ONE_WEEK_IN_MS; +use crate::drive::Drive; +use crate::error::Error; +use crate::util::batch::grovedb_op_batch::GroveDbOpBatchV0Methods; +use crate::util::batch::GroveDbOpBatch; +use crate::util::grove_operations::DirectQueryType; +use dpp::ProtocolError; +use 
grovedb::query_result_type::QueryResultType; +use grovedb::{Element, PathQuery, Query, SizedQuery, TransactionArg}; +use grovedb_path::SubtreePath; +use platform_version::version::PlatformVersion; + +impl Drive { + /// Version 0 implementation of compacting nullifiers including a current block. + /// + /// Drains all entries from the nullifiers count sum tree, + /// concatenates them with the provided current block's nullifiers, + /// and stores the result in the compacted nullifiers tree + /// with a (start_block, end_block) key. + /// + /// Also stores the expiration time (current block time + 1 week) in the + /// nullifiers expiration time tree with the same (start_block, end_block) key. + /// + /// Returns the range of blocks that were compacted (start_block, end_block). + pub(super) fn compact_nullifiers_with_current_block_v0( + &self, + current_nullifiers: &[[u8; 32]], + current_block_height: u64, + current_block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result<(u64, u64), Error> { + let path = Self::saved_block_transactions_nullifiers_path_vec(); + + // Query all entries from the nullifiers tree + let mut query = Query::new(); + query.insert_all(); + + let path_query = PathQuery::new(path.clone(), SizedQuery::new(query, None, None)); + + let (results, _) = self.grove_get_path_query( + &path_query, + transaction, + QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + )?; + + let key_elements = results.to_key_elements(); + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + // Track block range - start with current block + let mut start_block: u64 = current_block_height; + let mut end_block: u64 = current_block_height; + + // Concatenate all nullifiers together (no merge semantics needed) + let mut combined_nullifiers: Vec<[u8; 32]> = Vec::new(); + let mut keys_to_delete: Vec> = Vec::new(); + + // Process stored blocks in chronological 
order (ascending by block height) + // Keys are big-endian u64, so they're already in ascending order + for (key, element) in key_elements { + // Parse block height from key (8 bytes, big-endian) + let height_bytes: [u8; 8] = key.clone().try_into().map_err(|_| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization( + "invalid block height key length".to_string(), + ))) + })?; + let block_height = u64::from_be_bytes(height_bytes); + + // Track start and end blocks + if block_height < start_block { + start_block = block_height; + } + if block_height > end_block { + end_block = block_height; + } + + // Get the serialized data from the ItemWithSumItem element + let Element::ItemWithSumItem(serialized_data, _, _) = element else { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "expected item with sum item element for nullifiers".to_string(), + ), + ))); + }; + + // Deserialize the nullifier list + let (block_nullifiers, _): (Vec<[u8; 32]>, usize) = + bincode::decode_from_slice(&serialized_data, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot decode nullifiers: {}", + e + )))) + })?; + + // Simply concatenate - no merge semantics needed for nullifiers + combined_nullifiers.extend(block_nullifiers); + + keys_to_delete.push(key); + } + + // Append the current block's nullifiers + combined_nullifiers.extend_from_slice(current_nullifiers); + + // Serialize the combined nullifiers + let serialized = bincode::encode_to_vec(&combined_nullifiers, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot encode compacted nullifiers: {}", + e + )))) + })?; + + // Create the compacted key: (start_block, end_block) as 16 bytes + let mut compacted_key = Vec::with_capacity(16); + compacted_key.extend_from_slice(&start_block.to_be_bytes()); + compacted_key.extend_from_slice(&end_block.to_be_bytes()); + + // Build batch operations + let 
mut batch = GroveDbOpBatch::new(); + + // Delete all original entries from the count sum tree + for key in keys_to_delete { + batch.add_delete(path.clone(), key); + } + + // Insert the compacted entry as a plain Item (not ItemWithSumItem) + batch.add_insert( + Self::saved_compacted_block_transactions_nullifiers_path_vec(), + compacted_key.clone(), + Element::new_item(serialized), + ); + + // Calculate expiration time (current block time + 1 week) + let expiration_time_ms = current_block_time_ms.saturating_add(ONE_WEEK_IN_MS); + let expiration_key = expiration_time_ms.to_be_bytes().to_vec(); + + // Check if an entry with this expiration time already exists + // If so, we need to append to the existing vec of block ranges + let expiration_path = Self::saved_nullifiers_expiration_time_path(); + + let mut drive_operations = vec![]; + let existing_ranges = self.grove_get_raw_optional( + SubtreePath::from(expiration_path.as_ref()), + &expiration_key, + DirectQueryType::StatefulDirectQuery, + transaction, + &mut drive_operations, + &platform_version.drive, + )?; + + let expiration_value = if let Some(Element::Item(existing_data, _)) = existing_ranges { + // Deserialize existing vec of block ranges and append the new one + let (mut ranges, _): (Vec<(u64, u64)>, usize) = + bincode::decode_from_slice(&existing_data, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot decode expiration block ranges: {}", + e + )))) + })?; + ranges.push((start_block, end_block)); + bincode::encode_to_vec(&ranges, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot encode expiration block ranges: {}", + e + )))) + })? 
+ } else { + // No existing entry, create new vec with single range + let ranges: Vec<(u64, u64)> = vec![(start_block, end_block)]; + bincode::encode_to_vec(&ranges, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot encode expiration block ranges: {}", + e + )))) + })? + }; + + // Store in the expiration tree: key = expiration_time, value = vec of (start_block, end_block) + batch.add_insert( + Self::saved_nullifiers_expiration_time_path_vec(), + expiration_key, + Element::new_item(expiration_value), + ); + + // Apply the batch + self.grove_apply_batch(batch, false, transaction, &platform_version.drive)?; + + Ok((start_block, end_block)) + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/fetch_compacted_nullifiers/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/fetch_compacted_nullifiers/mod.rs new file mode 100644 index 00000000000..49892a3f085 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/fetch_compacted_nullifiers/mod.rs @@ -0,0 +1,85 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; + +pub use v0::CompactedNullifierChanges; + +impl Drive { + /// Fetches compacted nullifier changes starting from a given block height. 
+ /// + /// # Arguments + /// * `start_block_height` - The block height to start fetching from + /// * `limit` - Optional maximum number of compacted entries to return + /// * `transaction` - Optional database transaction + /// * `platform_version` - The platform version + /// + /// # Returns + /// A vector of (start_block, end_block, nullifiers) tuples + pub fn fetch_compacted_nullifier_changes( + &self, + start_block_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .saved_block_transactions + .fetch_nullifiers + { + 0 => self.fetch_compacted_nullifier_changes_v0( + start_block_height, + limit, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "fetch_compacted_nullifier_changes".to_string(), + known_versions: vec![0], + received: version, + })), + } + } + + /// Proves compacted nullifier changes starting from a given block height. 
+ /// + /// # Arguments + /// * `start_block_height` - The block height to start from + /// * `limit` - Optional maximum number of compacted entries to prove + /// * `transaction` - Optional database transaction + /// * `platform_version` - The platform version + /// + /// # Returns + /// A grovedb proof + pub fn prove_compacted_nullifier_changes( + &self, + start_block_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version + .drive + .methods + .saved_block_transactions + .fetch_nullifiers + { + 0 => self.prove_compacted_nullifier_changes_v0( + start_block_height, + limit, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "prove_compacted_nullifier_changes".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/fetch_compacted_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/fetch_compacted_nullifiers/v0/mod.rs new file mode 100644 index 00000000000..d95e5c5a3b3 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/fetch_compacted_nullifiers/v0/mod.rs @@ -0,0 +1,277 @@ +use crate::drive::Drive; +use crate::error::Error; +use dpp::ProtocolError; +use grovedb::query_result_type::QueryResultType; +use grovedb::{Element, PathQuery, Query, SizedQuery, TransactionArg}; +use platform_version::version::PlatformVersion; + +/// Result type for fetched compacted nullifier changes +/// Each entry is (start_block, end_block, nullifiers) +pub type CompactedNullifierChanges = Vec<(u64, u64, Vec<[u8; 32]>)>; + +impl Drive { + /// Version 0 implementation of fetching compacted nullifier changes. + /// + /// Retrieves all compacted nullifier change records where `end_block >= start_block_height`. 
+ /// This includes ranges that contain `start_block_height` (e.g., range 400-600 when querying + /// from block 505) as well as ranges that start after `start_block_height`. + /// + /// Returns a vector of (start_block, end_block, nullifiers) tuples. + pub(super) fn fetch_compacted_nullifier_changes_v0( + &self, + start_block_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let path = Self::saved_compacted_block_transactions_nullifiers_path_vec(); + + // Keys are 16 bytes: (start_block, end_block), both big-endian. + // We want ranges where end_block >= start_block_height, which includes: + // - Ranges that contain start_block_height (e.g., 400-600 contains 505) + // - Ranges that start at or after start_block_height + // + // Strategy: + // 1. First query: descending from (start_block_height, u64::MAX) with limit 1 + // to find any range where start_block <= start_block_height that might contain it + // 2. Second query: ascending from (start_block_height, 0) to get ranges + // that start at or after start_block_height + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let mut compacted_changes = Vec::new(); + let limit_usize = limit.map(|l| l as usize); + + // Query 1: Find if there's a range containing start_block_height + // Query descending from (start_block_height, u64::MAX) with limit 1 + let mut desc_end_key = Vec::with_capacity(16); + desc_end_key.extend_from_slice(&start_block_height.to_be_bytes()); + desc_end_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + let mut desc_query = Query::new_with_direction(false); // descending + desc_query.insert_range_to_inclusive(..=desc_end_key); + + let desc_path_query = + PathQuery::new(path.clone(), SizedQuery::new(desc_query, Some(1), None)); + + let (desc_results, _) = self.grove_get_path_query( + &desc_path_query, + transaction, + QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + 
&platform_version.drive, + )?; + + // Check if we found a range that contains start_block_height + if let Some((key, element)) = desc_results.to_key_elements().into_iter().next() { + if key.len() != 16 { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "invalid compacted block key length, expected 16 bytes".to_string(), + ), + ))); + } + + let start_block = u64::from_be_bytes( + key[0..8] + .try_into() + .expect("slice is exactly 8 bytes from a 16-byte key"), + ); + let end_block = u64::from_be_bytes( + key[8..16] + .try_into() + .expect("slice is exactly 8 bytes from a 16-byte key"), + ); + + // Only include if end_block >= start_block_height (range contains our block) + if end_block >= start_block_height { + let Element::Item(serialized_data, _) = element else { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "expected item element for compacted nullifiers".to_string(), + ), + ))); + }; + + let (nullifiers, _): (Vec<[u8; 32]>, usize) = + bincode::decode_from_slice(&serialized_data, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot decode compacted nullifiers: {}", + e + )))) + })?; + + compacted_changes.push((start_block, end_block, nullifiers)); + } + } + + // Check if we've already hit the limit + if let Some(l) = limit_usize { + if compacted_changes.len() >= l { + return Ok(compacted_changes); + } + } + + // Query 2: Get ranges that start at or after start_block_height (ascending) + // Always use (start_block_height, 0) for consistent proof verification + // The result may overlap with descending query if descending found a range + // starting exactly at start_block_height - we dedupe below + let mut asc_start_key = Vec::with_capacity(16); + asc_start_key.extend_from_slice(&start_block_height.to_be_bytes()); + asc_start_key.extend_from_slice(&0u64.to_be_bytes()); + + let mut asc_query = Query::new(); + 
asc_query.insert_range_from(asc_start_key..); + + let asc_path_query = PathQuery::new(path, SizedQuery::new(asc_query, limit, None)); + + let (asc_results, _) = self.grove_get_path_query( + &asc_path_query, + transaction, + QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + )?; + + // Track the start_block from descending query to avoid duplicates + let desc_start_block = compacted_changes.first().map(|(start, _, _)| *start); + + for (key, element) in asc_results.to_key_elements() { + // Check if we've reached the limit + if let Some(l) = limit_usize { + if compacted_changes.len() >= l { + break; + } + } + + if key.len() != 16 { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "invalid compacted block key length, expected 16 bytes".to_string(), + ), + ))); + } + + let start_block = u64::from_be_bytes( + key[0..8] + .try_into() + .expect("slice is exactly 8 bytes from a 16-byte key"), + ); + let end_block = u64::from_be_bytes( + key[8..16] + .try_into() + .expect("slice is exactly 8 bytes from a 16-byte key"), + ); + + // Skip if this is the same range we got from descending query + if Some(start_block) == desc_start_block { + continue; + } + + let Element::Item(serialized_data, _) = element else { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "expected item element for compacted nullifiers".to_string(), + ), + ))); + }; + + let (nullifiers, _): (Vec<[u8; 32]>, usize) = + bincode::decode_from_slice(&serialized_data, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot decode compacted nullifiers: {}", + e + )))) + })?; + + compacted_changes.push((start_block, end_block, nullifiers)); + } + + Ok(compacted_changes) + } + + /// Version 0 implementation for proving compacted nullifier changes. + /// + /// Uses a two-step approach: + /// 1. 
First query (non-proving): descending to find any range containing start_block_height + /// 2. Second query (proving): ascending from the found start_block or start_block_height + /// + /// This ensures the proof covers all relevant ranges efficiently. + pub(super) fn prove_compacted_nullifier_changes_v0( + &self, + start_block_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let path = Self::saved_compacted_block_transactions_nullifiers_path_vec(); + + // Step 1: Non-proving descending query to find any range containing start_block_height + let mut desc_end_key = Vec::with_capacity(16); + desc_end_key.extend_from_slice(&start_block_height.to_be_bytes()); + desc_end_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + let mut desc_query = Query::new_with_direction(false); // descending + desc_query.insert_range_to_inclusive(..=desc_end_key); + + let desc_path_query = + PathQuery::new(path.clone(), SizedQuery::new(desc_query, Some(1), None)); + + let (desc_results, _) = self.grove_get_path_query( + &desc_path_query, + transaction, + QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + )?; + + // Determine the actual start key for the proved query + // If we found a containing range, use its exact key + // Otherwise use (start_block_height, start_block_height) since end_block >= start_block always + let start_key = if let Some((key, _)) = desc_results.to_key_elements().into_iter().next() { + if key.len() == 16 { + let end_block = u64::from_be_bytes( + key[8..16] + .try_into() + .expect("slice is exactly 8 bytes from a 16-byte key"), + ); + // If this range contains start_block_height, use its exact key + if end_block >= start_block_height { + key + } else { + // No containing range, use (start_block_height, start_block_height) + let mut key = Vec::with_capacity(16); + key.extend_from_slice(&start_block_height.to_be_bytes()); + 
key.extend_from_slice(&start_block_height.to_be_bytes()); + key + } + } else { + let mut key = Vec::with_capacity(16); + key.extend_from_slice(&start_block_height.to_be_bytes()); + key.extend_from_slice(&start_block_height.to_be_bytes()); + key + } + } else { + let mut key = Vec::with_capacity(16); + key.extend_from_slice(&start_block_height.to_be_bytes()); + key.extend_from_slice(&start_block_height.to_be_bytes()); + key + }; + + // Step 2: Proved ascending query from start_key + + let mut query = Query::new(); + query.insert_range_from(start_key..); + + let path_query = PathQuery::new(path, SizedQuery::new(query, limit, None)); + + self.grove_get_proved_path_query( + &path_query, + transaction, + &mut vec![], + &platform_version.drive, + ) + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/fetch_nullifiers/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/fetch_nullifiers/mod.rs new file mode 100644 index 00000000000..fb7ed9694ae --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/fetch_nullifiers/mod.rs @@ -0,0 +1,85 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; + +pub use v0::NullifierChangesPerBlock; + +impl Drive { + /// Fetches recent nullifier changes starting from a given block height. 
+ /// + /// # Arguments + /// * `start_height` - The block height to start fetching from + /// * `limit` - Optional maximum number of blocks to return + /// * `transaction` - Optional database transaction + /// * `platform_version` - The platform version + /// + /// # Returns + /// A vector of (block_height, nullifiers) tuples + pub fn fetch_recent_nullifier_changes( + &self, + start_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + match platform_version + .drive + .methods + .saved_block_transactions + .fetch_nullifiers + { + 0 => self.fetch_recent_nullifier_changes_v0( + start_height, + limit, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "fetch_recent_nullifier_changes".to_string(), + known_versions: vec![0], + received: version, + })), + } + } + + /// Proves recent nullifier changes starting from a given block height. + /// + /// # Arguments + /// * `start_height` - The block height to start from + /// * `limit` - Optional maximum number of blocks to prove + /// * `transaction` - Optional database transaction + /// * `platform_version` - The platform version + /// + /// # Returns + /// A grovedb proof + pub fn prove_recent_nullifier_changes( + &self, + start_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + match platform_version + .drive + .methods + .saved_block_transactions + .fetch_nullifiers + { + 0 => self.prove_recent_nullifier_changes_v0( + start_height, + limit, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "prove_recent_nullifier_changes".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/fetch_nullifiers/v0/mod.rs 
b/packages/rs-drive/src/drive/saved_block_transactions/fetch_nullifiers/v0/mod.rs new file mode 100644 index 00000000000..c44662fa8a3 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/fetch_nullifiers/v0/mod.rs @@ -0,0 +1,101 @@ +use crate::drive::Drive; +use crate::error::Error; +use dpp::ProtocolError; +use grovedb::query_result_type::QueryResultType; +use grovedb::{Element, PathQuery, Query, SizedQuery, TransactionArg}; +use platform_version::version::PlatformVersion; + +/// Result type for fetched nullifier changes per block +pub type NullifierChangesPerBlock = Vec<(u64, Vec<[u8; 32]>)>; + +impl Drive { + /// Version 0 implementation of fetching nullifier changes from a start height. + /// + /// Retrieves all nullifier change records from `start_height` onwards. + /// Returns a vector of (block_height, nullifiers) tuples. + pub(super) fn fetch_recent_nullifier_changes_v0( + &self, + start_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let path = Self::saved_block_transactions_nullifiers_path_vec(); + + // Create a range query starting from the specified height + let mut query = Query::new(); + query.insert_range_from(start_height.to_be_bytes().to_vec()..); + + let path_query = PathQuery::new(path, SizedQuery::new(query, limit, None)); + + let (results, _) = self.grove_get_path_query( + &path_query, + transaction, + QueryResultType::QueryKeyElementPairResultType, + &mut vec![], + &platform_version.drive, + )?; + + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let mut nullifier_changes = Vec::new(); + + for (key, element) in results.to_key_elements() { + // Parse block height from key (8 bytes, big-endian) + let height_bytes: [u8; 8] = key.try_into().map_err(|_| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization( + "invalid block height key length".to_string(), + ))) + })?; + let block_height = 
u64::from_be_bytes(height_bytes); + + // Get the serialized data from the ItemWithSumItem element + let Element::ItemWithSumItem(serialized_data, _, _) = element else { + return Err(Error::Protocol(Box::new( + ProtocolError::CorruptedSerialization( + "expected item with sum item element for nullifiers".to_string(), + ), + ))); + }; + + // Deserialize the nullifier list + let (nullifiers, _): (Vec<[u8; 32]>, usize) = + bincode::decode_from_slice(&serialized_data, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot decode nullifiers: {}", + e + )))) + })?; + + nullifier_changes.push((block_height, nullifiers)); + } + + Ok(nullifier_changes) + } + + /// Version 0 implementation for proving nullifier changes from a start height. + pub(super) fn prove_recent_nullifier_changes_v0( + &self, + start_height: u64, + limit: Option, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result, Error> { + let path = Self::saved_block_transactions_nullifiers_path_vec(); + + // Create a range query starting from the specified height + let mut query = Query::new(); + query.insert_range_from(start_height.to_be_bytes().to_vec()..); + + let path_query = PathQuery::new(path, SizedQuery::new(query, limit, None)); + + self.grove_get_proved_path_query( + &path_query, + transaction, + &mut vec![], + &platform_version.drive, + ) + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/store_nullifiers/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/store_nullifiers/mod.rs new file mode 100644 index 00000000000..ece1654af51 --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/store_nullifiers/mod.rs @@ -0,0 +1,55 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use grovedb::TransactionArg; +use platform_version::version::PlatformVersion; + +impl Drive { + /// Stores nullifiers for a block in the 
SavedBlockTransactions tree. + /// + /// This method serializes the nullifiers using bincode and stores + /// them keyed by block height. If compaction thresholds are exceeded, it will + /// compact existing entries along with the current block's data and store + /// an expiration time for the compacted entry. + /// + /// # Parameters + /// - `nullifiers`: The nullifiers to store for this block + /// - `block_height`: The height of the block these nullifiers belong to + /// - `block_time_ms`: The block time in milliseconds (used for expiration calculation) + /// - `transaction`: The database transaction + /// - `platform_version`: The platform version + /// + /// # Returns + /// - `Ok(())` on success + /// - `Err(Error)` if the operation fails + pub fn store_nullifiers_for_block( + &self, + nullifiers: &[[u8; 32]], + block_height: u64, + block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result<(), Error> { + match platform_version + .drive + .methods + .saved_block_transactions + .store_nullifiers + { + 0 => self.store_nullifiers_for_block_v0( + nullifiers, + block_height, + block_time_ms, + transaction, + platform_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "store_nullifiers_for_block".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/drive/saved_block_transactions/store_nullifiers/v0/mod.rs b/packages/rs-drive/src/drive/saved_block_transactions/store_nullifiers/v0/mod.rs new file mode 100644 index 00000000000..15bcad2a6ef --- /dev/null +++ b/packages/rs-drive/src/drive/saved_block_transactions/store_nullifiers/v0/mod.rs @@ -0,0 +1,154 @@ +use crate::drive::saved_block_transactions::NULLIFIERS_KEY_U8; +use crate::drive::Drive; +use crate::error::Error; +use crate::util::grove_operations::DirectQueryType; +use dpp::ProtocolError; +use grovedb::Element; +use grovedb::TransactionArg; +use 
grovedb_path::SubtreePath; +use platform_version::version::PlatformVersion; + +impl Drive { + /// Version 0 implementation of storing nullifiers for a block. + /// + /// Serializes the nullifier list using bincode and stores it in the + /// SavedBlockTransactions/Nullifiers count sum tree keyed by block height. + /// Each entry is an ItemWithSumItem where: + /// - The item contains the serialized nullifiers + /// - The sum value is the number of nullifiers + /// + /// Before storing, checks if compaction thresholds are exceeded and triggers + /// compaction if necessary. If compaction occurs, the current block's nullifiers + /// are included in the compaction rather than stored separately. + pub(super) fn store_nullifiers_for_block_v0( + &self, + nullifiers: &[[u8; 32]], + block_height: u64, + block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result<(), Error> { + // Early return if there are no nullifiers to store + if nullifiers.is_empty() { + return Ok(()); + } + + // Check if compaction is needed - if so, include current nullifiers in compaction + let compacted = self.check_and_compact_nullifiers_if_needed( + nullifiers, + block_height, + block_time_ms, + transaction, + platform_version, + )?; + + // If we compacted, the current nullifiers are already included - don't store separately + if compacted { + return Ok(()); + } + + // Serialize the nullifiers using bincode + let config = bincode::config::standard() + .with_big_endian() + .with_no_limit(); + + let serialized = bincode::encode_to_vec(nullifiers, config).map_err(|e| { + Error::Protocol(Box::new(ProtocolError::CorruptedSerialization(format!( + "cannot encode nullifiers: {}", + e + )))) + })?; + + // The sum value is the number of nullifiers + let entry_count = nullifiers.len() as i64; + + // Store in the SavedBlockTransactions/Nullifiers count sum tree with block height as key + let path: [&[u8]; 2] = Drive::saved_block_transactions_nullifiers_path(); + + // 
Use block height as the key (big-endian for proper ordering) + let key = block_height.to_be_bytes(); + + // Insert as ItemWithSumItem where: + // - item data = serialized nullifiers + // - sum value = number of nullifiers + let mut drive_operations = vec![]; + self.grove_insert( + path.as_ref().into(), + &key, + Element::new_item_with_sum_item(serialized, entry_count), + transaction, + None, + &mut drive_operations, + &platform_version.drive, + )?; + + // Apply any operations that were generated + self.apply_batch_low_level_drive_operations( + None, + transaction, + drive_operations, + &mut vec![], + &platform_version.drive, + )?; + + Ok(()) + } + + /// Checks if compaction thresholds are exceeded and triggers compaction if needed. + /// If compaction occurs, the provided nullifiers are included in the compaction. + /// + /// Returns true if compaction was performed (meaning the nullifiers were included). + fn check_and_compact_nullifiers_if_needed( + &self, + nullifiers: &[[u8; 32]], + block_height: u64, + block_time_ms: u64, + transaction: TransactionArg, + platform_version: &PlatformVersion, + ) -> Result { + let saved_block_tx_path = Self::saved_block_transactions_path(); + + // Get the count sum tree element to check current count and sum + let mut drive_operations = vec![]; + let tree_element = self.grove_get_raw( + SubtreePath::from(saved_block_tx_path.as_slice()), + &[NULLIFIERS_KEY_U8], + DirectQueryType::StatefulDirectQuery, + transaction, + &mut drive_operations, + &platform_version.drive, + )?; + + if let Some(Element::CountSumTree(_, count, sum, _)) = tree_element { + let max_blocks = platform_version + .drive + .methods + .saved_block_transactions + .max_blocks_before_nullifier_compaction as u64; + let max_nullifiers = platform_version + .drive + .methods + .saved_block_transactions + .max_nullifiers_before_compaction as i64; + + // Check if either threshold would be exceeded after adding the current block + // count + 1 for the new block, sum + 
current nullifiers count + let new_count = count + 1; + let new_sum = sum + nullifiers.len() as i64; + + if new_count >= max_blocks || new_sum >= max_nullifiers { + // Trigger compaction, including the current block's nullifiers + self.compact_nullifiers_with_current_block( + nullifiers, + block_height, + block_time_ms, + transaction, + platform_version, + )?; + return Ok(true); + } + } + + Ok(false) + } +} diff --git a/packages/rs-drive/src/util/grove_operations/batch_insert_auto_incremented_items_in_count_tree/mod.rs b/packages/rs-drive/src/util/grove_operations/batch_insert_auto_incremented_items_in_count_tree/mod.rs new file mode 100644 index 00000000000..588876aeb4c --- /dev/null +++ b/packages/rs-drive/src/util/grove_operations/batch_insert_auto_incremented_items_in_count_tree/mod.rs @@ -0,0 +1,44 @@ +mod v0; + +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use dpp::version::drive_versions::DriveVersion; +use grovedb::{Element, TransactionArg}; + +impl Drive { + /// Inserts multiple items into a count tree with auto-incremented u64 big-endian keys. + /// + /// Reads the current count from the count tree, then generates sequential insert + /// operations starting from that count. 
+ pub fn batch_insert_auto_incremented_items_in_count_tree( + &self, + parent_path: Vec>, + count_tree_key: &[u8], + items: Vec, + transaction: TransactionArg, + drive_operations: &mut Vec, + drive_version: &DriveVersion, + ) -> Result<(), Error> { + match drive_version + .grove_methods + .batch + .batch_insert_auto_incremented_items_in_count_tree + { + 0 => self.batch_insert_auto_incremented_items_in_count_tree_v0( + parent_path, + count_tree_key, + items, + transaction, + drive_operations, + drive_version, + ), + version => Err(Error::Drive(DriveError::UnknownVersionMismatch { + method: "batch_insert_auto_incremented_items_in_count_tree".to_string(), + known_versions: vec![0], + received: version, + })), + } + } +} diff --git a/packages/rs-drive/src/util/grove_operations/batch_insert_auto_incremented_items_in_count_tree/v0/mod.rs b/packages/rs-drive/src/util/grove_operations/batch_insert_auto_incremented_items_in_count_tree/v0/mod.rs new file mode 100644 index 00000000000..d46442c815a --- /dev/null +++ b/packages/rs-drive/src/util/grove_operations/batch_insert_auto_incremented_items_in_count_tree/v0/mod.rs @@ -0,0 +1,64 @@ +use crate::drive::Drive; +use crate::error::drive::DriveError; +use crate::error::Error; +use crate::fees::op::LowLevelDriveOperation; +use crate::util::grove_operations::DirectQueryType; +use dpp::version::drive_versions::DriveVersion; +use grovedb::{Element, TransactionArg}; + +impl Drive { + /// Inserts multiple items into a count tree with auto-incremented u64 big-endian keys. + /// + /// Reads the current count from the count tree, then generates sequential insert + /// operations starting from that count. 
+ /// + /// # Parameters + /// * `parent_path` - Path to the parent tree containing the count tree + /// * `count_tree_key` - Key of the count tree under the parent path + /// * `items` - Items (Elements) to insert with auto-incremented keys + /// * `transaction` - GroveDB transaction + /// * `drive_operations` - Accumulator for low-level drive operations + /// * `drive_version` - Drive version for method dispatch + pub(crate) fn batch_insert_auto_incremented_items_in_count_tree_v0( + &self, + parent_path: Vec>, + count_tree_key: &[u8], + items: Vec, + transaction: TransactionArg, + drive_operations: &mut Vec, + drive_version: &DriveVersion, + ) -> Result<(), Error> { + if items.is_empty() { + return Ok(()); + } + + // Read the current count from the count tree element + let current_count = self + .grove_get_raw_optional( + parent_path.as_slice().into(), + count_tree_key, + DirectQueryType::StatefulDirectQuery, + transaction, + drive_operations, + drive_version, + )? + .map(|element| element.count_value_or_default()) + .unwrap_or(0); + + // Build the full path to the count tree (parent_path + count_tree_key) + let mut insert_path = parent_path; + insert_path.push(count_tree_key.to_vec()); + + // Insert each item with a sequential key + for (i, element) in items.into_iter().enumerate() { + let index = current_count + i as u64; + drive_operations.push(LowLevelDriveOperation::insert_for_known_path_key_element( + insert_path.clone(), + index.to_be_bytes().to_vec(), + element, + )); + } + + Ok(()) + } +} diff --git a/packages/rs-platform-version/src/version/drive_versions/v7.rs b/packages/rs-platform-version/src/version/drive_versions/v7.rs index 50ec75a550d..574d7999825 100644 --- a/packages/rs-platform-version/src/version/drive_versions/v7.rs +++ b/packages/rs-platform-version/src/version/drive_versions/v7.rs @@ -28,7 +28,7 @@ pub const DRIVE_VERSION_V7: DriveVersion = DriveVersion { structure: DRIVE_STRUCTURE_V1, methods: DriveMethodVersions { initialization: 
DriveInitializationMethodVersions { - create_initial_state_structure: 3, // changed: adds shielded pool trees (commitment tree, nullifiers, anchors) + create_initial_state_structure: 3, }, credit_pools: CREDIT_POOL_METHOD_VERSIONS_V1, protocol_upgrade: DriveProtocolUpgradeVersions { @@ -50,9 +50,9 @@ pub const DRIVE_VERSION_V7: DriveVersion = DriveVersion { add_to_system_credits_operations: 0, remove_from_system_credits: 0, remove_from_system_credits_operations: 0, - calculate_total_credits_balance: 1, + calculate_total_credits_balance: 1, // Changed because we now add the address trees }, - document: DRIVE_DOCUMENT_METHOD_VERSIONS_V2, + document: DRIVE_DOCUMENT_METHOD_VERSIONS_V2, // Changed vote: DRIVE_VOTE_METHOD_VERSIONS_V2, contract: DRIVE_CONTRACT_METHOD_VERSIONS_V2, fees: DriveFeesMethodVersions { calculate_fee: 0 }, @@ -84,7 +84,7 @@ pub const DRIVE_VERSION_V7: DriveVersion = DriveVersion { apply_batch_low_level_drive_operations: 0, apply_batch_grovedb_operations: 0, }, - state_transitions: DRIVE_STATE_TRANSITION_METHOD_VERSIONS_V2, + state_transitions: DRIVE_STATE_TRANSITION_METHOD_VERSIONS_V2, //changed batch_operations: DriveBatchOperationsMethodVersion { convert_drive_operations_to_grove_operations: 0, apply_drive_operations: 0, diff --git a/packages/rs-platform-version/src/version/v12.rs b/packages/rs-platform-version/src/version/v12.rs index c14b80facb6..b7dd3bd9986 100644 --- a/packages/rs-platform-version/src/version/v12.rs +++ b/packages/rs-platform-version/src/version/v12.rs @@ -33,7 +33,7 @@ pub const PROTOCOL_VERSION_12: ProtocolVersion = 12; /// This version is for Platform release 3.1.0 pub const PLATFORM_V12: PlatformVersion = PlatformVersion { protocol_version: PROTOCOL_VERSION_12, - drive: DRIVE_VERSION_V7, // changed: shielded pool (commitment tree, nullifiers, anchors, address funds, sinsemilla hashing) + drive: DRIVE_VERSION_V7, drive_abci: DriveAbciVersion { structs: DRIVE_ABCI_STRUCTURE_VERSIONS_V1, methods: 
DRIVE_ABCI_METHOD_VERSIONS_V7, diff --git a/packages/rs-sdk-ffi/src/system/queries/path_elements.rs b/packages/rs-sdk-ffi/src/system/queries/path_elements.rs index 432c4353d08..942d2ac35de 100644 --- a/packages/rs-sdk-ffi/src/system/queries/path_elements.rs +++ b/packages/rs-sdk-ffi/src/system/queries/path_elements.rs @@ -160,7 +160,7 @@ fn get_path_elements( Element::MmrTree(_, _) => "mmr_tree".to_string(), Element::BulkAppendTree(_, _, _) => "bulk_append_tree".to_string(), Element::DenseAppendOnlyFixedSizeTree(_, _, _) => { - "dense_append_only_fixed_size_tree".to_string() + "dense_tree".to_string() } }; @@ -185,8 +185,7 @@ fn get_path_elements( Element::CommitmentTree(_, _, _) => "commitment_tree", Element::MmrTree(_, _) => "mmr_tree", Element::BulkAppendTree(_, _, _) => "bulk_append_tree", - Element::DenseAppendOnlyFixedSizeTree(_, _, _) => - "dense_append_only_fixed_size_tree", + Element::DenseAppendOnlyFixedSizeTree(_, _, _) => "dense_tree", } ) }) diff --git a/packages/rs-sdk/Cargo.toml b/packages/rs-sdk/Cargo.toml index aba3bda97eb..ea97932d737 100644 --- a/packages/rs-sdk/Cargo.toml +++ b/packages/rs-sdk/Cargo.toml @@ -17,6 +17,7 @@ drive = { path = "../rs-drive", default-features = false, features = [ ] } drive-proof-verifier = { path = "../rs-drive-proof-verifier", default-features = false } +grovedb-commitment-tree = { git = "https://github.com/dashpay/grovedb", rev = "7ecb8465fad750c7cddd5332adb6f97fcceb498b", features = ["client", "sqlite"], optional = true } dash-context-provider = { path = "../rs-context-provider", default-features = false } dash-platform-macros = { path = "../rs-dash-platform-macros" } platform-encryption = { path = "../rs-platform-encryption" } @@ -149,6 +150,7 @@ core_key_wallet_manager = ["dpp/core_key_wallet_manager"] core_key_wallet_bip38 = ["dpp/core_key_wallet_bip_38"] core_spv = ["dpp/core_spv"] core_rpc_client = ["dpp/core_rpc_client"] +shielded = ["dep:grovedb-commitment-tree", "dpp/shielded-client"] # Platform wallet 
support wallet = ["core_key_wallet_manager", "core_key_wallet",] diff --git a/packages/rs-sdk/src/lib.rs b/packages/rs-sdk/src/lib.rs index 2518453e7ef..868cfddc649 100644 --- a/packages/rs-sdk/src/lib.rs +++ b/packages/rs-sdk/src/lib.rs @@ -98,6 +98,12 @@ pub use dpp::dashcore_rpc; pub use drive; pub use drive_proof_verifier::types as query_types; pub use drive_proof_verifier::Error as ProofVerifierError; +#[cfg(feature = "shielded")] +pub mod shielded; +#[cfg(feature = "shielded")] +pub use grovedb_commitment_tree; +#[cfg(feature = "platform-wallet")] +pub use platform_wallet; pub use rs_dapi_client as dapi_client; pub mod sync; diff --git a/packages/rs-sdk/src/mock/requests.rs b/packages/rs-sdk/src/mock/requests.rs index 7236315e504..e50d752ccd8 100644 --- a/packages/rs-sdk/src/mock/requests.rs +++ b/packages/rs-sdk/src/mock/requests.rs @@ -35,9 +35,12 @@ use drive_proof_verifier::types::token_status::TokenStatuses; use drive::grovedb::GroveTrunkQueryResult; use drive_proof_verifier::types::{ AddressInfo, Contenders, ContestedResources, CurrentQuorumsInfo, ElementFetchRequestItem, - IdentityBalanceAndRevision, IndexMap, MasternodeProtocolVote, PlatformAddressTrunkState, - PrefundedSpecializedBalance, ProposerBlockCounts, RecentAddressBalanceChanges, - RecentCompactedAddressBalanceChanges, RetrievedValues, TotalCreditsInPlatform, + IdentityBalanceAndRevision, IndexMap, MasternodeProtocolVote, NullifiersTrunkState, + PlatformAddressTrunkState, PrefundedSpecializedBalance, ProposerBlockCounts, + RecentAddressBalanceChanges, RecentCompactedAddressBalanceChanges, + RecentCompactedNullifierChanges, RecentNullifierChanges, RetrievedValues, ShieldedAnchors, + ShieldedEncryptedNote, ShieldedEncryptedNotes, ShieldedNullifierStatus, + ShieldedNullifierStatuses, ShieldedPoolState, TotalCreditsInPlatform, VotePollsGroupedByTimestamp, Voters, }; use std::{collections::BTreeMap, hash::Hash}; @@ -507,6 +510,14 @@ impl_mock_response!(PlatformAddress); 
impl_mock_response!(AddressInfo); impl_mock_response!(RecentAddressBalanceChanges); impl_mock_response!(RecentCompactedAddressBalanceChanges); +impl_mock_response!(ShieldedPoolState); +impl_mock_response!(ShieldedAnchors); +impl_mock_response!(ShieldedEncryptedNotes); +impl_mock_response!(ShieldedEncryptedNote); +impl_mock_response!(ShieldedNullifierStatuses); +impl_mock_response!(ShieldedNullifierStatus); +impl_mock_response!(RecentNullifierChanges); +impl_mock_response!(RecentCompactedNullifierChanges); /// MockResponse for GroveTrunkQueryResult - panics when called because the Tree type /// doesn't support serialization. Address sync operations should not be mocked. @@ -537,3 +548,18 @@ impl MockResponse for PlatformAddressTrunkState { unimplemented!("PlatformAddressTrunkState does not support mock deserialization - the Tree type is not serializable") } } + +/// MockResponse for NullifiersTrunkState - panics when called because the underlying +/// Tree type doesn't support serialization. Nullifier sync operations should not be mocked. 
+impl MockResponse for NullifiersTrunkState { + fn mock_serialize(&self, _sdk: &MockDashPlatformSdk) -> Vec { + unimplemented!("NullifiersTrunkState does not support mock serialization - the Tree type is not serializable") + } + + fn mock_deserialize(_sdk: &MockDashPlatformSdk, _buf: &[u8]) -> Self + where + Self: Sized, + { + unimplemented!("NullifiersTrunkState does not support mock deserialization - the Tree type is not serializable") + } +} diff --git a/packages/rs-sdk/src/platform.rs b/packages/rs-sdk/src/platform.rs index 162ca5eeae9..fca09fb17a0 100644 --- a/packages/rs-sdk/src/platform.rs +++ b/packages/rs-sdk/src/platform.rs @@ -17,6 +17,7 @@ mod fetch_many; mod fetch_unproved; pub mod group_actions; pub mod identities_contract_keys_query; +pub mod nullifier_sync; pub mod query; pub mod tokens; pub mod transition; diff --git a/packages/rs-sdk/src/platform/fetch.rs b/packages/rs-sdk/src/platform/fetch.rs index 1a22c531398..00ba0474cc4 100644 --- a/packages/rs-sdk/src/platform/fetch.rs +++ b/packages/rs-sdk/src/platform/fetch.rs @@ -11,7 +11,9 @@ use crate::mock::MockResponse; use crate::sync::retry; use crate::{error::Error, platform::query::Query, Sdk}; +use dash_context_provider::ContextProvider; use dapi_grpc::platform::v0::{self as platform_proto, Proof, ResponseMetadata}; +use dpp::data_contract::accessors::v0::DataContractV0Getters; use dpp::data_contract::associated_token::token_perpetual_distribution::reward_distribution_moment::RewardDistributionMoment; use dpp::identity::identities_contract_keys::IdentitiesContractKeys; use dpp::voting::votes::Vote; @@ -158,47 +160,8 @@ where query: Q, settings: Option, ) -> Result<(Option, ResponseMetadata, Proof), Error> { - let request: &::Request = &query.query(sdk.prove())?; - - let fut = |settings: RequestSettings| async move { - let ExecutionResponse { - address, - retries, - inner: response, - } = request - .clone() - .execute(sdk, settings) - .await - .map_err(|execution_error| 
execution_error.inner_into())?; - - let object_type = std::any::type_name::().to_string(); - tracing::trace!(request = ?request, response = ?response, ?address, retries, object_type, "fetched object from platform"); - - let (object, response_metadata, proof): (Option, ResponseMetadata, Proof) = sdk - .parse_proof_with_metadata_and_proof(request.clone(), response) - .await - .map_err(|e| ExecutionError { - inner: e, - address: Some(address.clone()), - retries, - })?; - - match object { - Some(item) => Ok((item.into(), response_metadata, proof)), - None => Ok((None, response_metadata, proof)), - } - .map(|x| ExecutionResponse { - inner: x, - address, - retries, - }) - }; - - let settings = sdk - .dapi_client_settings - .override_by(settings.unwrap_or_default()); - - retry(sdk.address_list(), settings, fut).await.into_inner() + let request = query.query(sdk.prove())?; + fetch_request(sdk, &request, settings).await } /// Fetch single object from Platform. @@ -261,8 +224,85 @@ impl Fetch for (dpp::prelude::DataContract, Vec) { type Request = platform_proto::GetDataContractRequest; } +#[async_trait::async_trait] impl Fetch for Document { type Request = DocumentQuery; + + async fn fetch_with_metadata_and_proof::Request>>( + sdk: &Sdk, + query: Q, + settings: Option, + ) -> Result<(Option, ResponseMetadata, Proof), Error> { + let document_query: DocumentQuery = query.query(sdk.prove())?; + + // First attempt with current (possibly cached) contract + match fetch_request(sdk, &document_query, settings).await { + Ok(result) => Ok(result), + Err(e) if is_document_deserialization_error(&e) => { + let fresh_query = refetch_contract_for_query(sdk, &document_query).await?; + fetch_request(sdk, &fresh_query, settings).await + } + Err(e) => Err(e), + } + } +} + +/// Execute a fetch request with node-level retry logic. +/// +/// Shared implementation used by both the default [Fetch::fetch_with_metadata_and_proof] +/// and the [Document]-specific override. 
+async fn fetch_request( + sdk: &Sdk, + request: &R, + settings: Option, +) -> Result<(Option, ResponseMetadata, Proof), Error> +where + O: Sized + + Send + + Debug + + MockResponse + + FromProof::Response>, + R: TransportRequest + Into<>::Request> + Clone + Debug, +{ + let fut = |settings: RequestSettings| async move { + let ExecutionResponse { + address, + retries, + inner: response, + } = request + .clone() + .execute(sdk, settings) + .await + .map_err(|execution_error| execution_error.inner_into())?; + + let object_type = std::any::type_name::().to_string(); + tracing::trace!(request = ?request, response = ?response, ?address, retries, object_type, "fetched object from platform"); + + let (object, response_metadata, proof): (Option, ResponseMetadata, Proof) = sdk + .parse_proof_with_metadata_and_proof(request.clone(), response) + .await + .map_err(|e| ExecutionError { + inner: e, + address: Some(address.clone()), + retries, + })?; + + match object { + Some(item) => Ok((item.into(), response_metadata, proof)), + None => Ok((None, response_metadata, proof)), + } + .map(|x| ExecutionResponse { + inner: x, + address, + retries, + }) + }; + + let settings = sdk + .dapi_client_settings + .override_by(settings.unwrap_or_default()); + + retry(sdk.address_list(), settings, fut).await.into_inner() } impl Fetch for drive_proof_verifier::types::IdentityBalance { @@ -329,3 +369,118 @@ impl Fetch for drive_proof_verifier::types::RecentCompactedAddressBalanceChanges impl Fetch for drive_proof_verifier::types::PlatformAddressTrunkState { type Request = platform_proto::GetAddressesTrunkStateRequest; } + +impl Fetch for drive_proof_verifier::types::ShieldedPoolState { + type Request = platform_proto::GetShieldedPoolStateRequest; +} + +impl Fetch for drive_proof_verifier::types::ShieldedAnchors { + type Request = platform_proto::GetShieldedAnchorsRequest; +} + +impl Fetch for drive_proof_verifier::types::ShieldedEncryptedNotes { + type Request = 
platform_proto::GetShieldedEncryptedNotesRequest; +} + +impl Fetch for drive_proof_verifier::types::ShieldedNullifierStatuses { + type Request = platform_proto::GetShieldedNullifiersRequest; +} + +impl Fetch for drive_proof_verifier::types::NullifiersTrunkState { + type Request = platform_proto::GetNullifiersTrunkStateRequest; +} + +impl Fetch for drive_proof_verifier::types::RecentNullifierChanges { + type Request = platform_proto::GetRecentNullifierChangesRequest; +} + +impl Fetch for drive_proof_verifier::types::RecentCompactedNullifierChanges { + type Request = platform_proto::GetRecentCompactedNullifierChangesRequest; +} + +/// Refetch the data contract from the network, update the context provider +/// cache, and return a new [DocumentQuery] with the fresh contract. +/// +/// Used by document fetch retry logic when a deserialization error indicates +/// a stale cached contract. +pub(super) async fn refetch_contract_for_query( + sdk: &Sdk, + document_query: &DocumentQuery, +) -> Result { + tracing::debug!( + contract_id = ?document_query.data_contract.id(), + "refetching contract for document query after deserialization failure" + ); + + let fresh_contract = dpp::prelude::DataContract::fetch(sdk, document_query.data_contract.id()) + .await? + .ok_or(Error::MissingDependency( + "DataContract".to_string(), + format!( + "data contract {} not found during refetch", + document_query.data_contract.id() + ), + ))?; + + let fresh_contract = std::sync::Arc::new(fresh_contract); + + // Update the cached contract in the context provider + if let Some(context_provider) = sdk.context_provider() { + context_provider.update_data_contract(fresh_contract.clone()); + } + + Ok(document_query.clone_with_contract(fresh_contract)) +} + +/// Returns true if the error indicates a document deserialization failure +/// that could be caused by a stale/outdated data contract schema. 
+pub(super) fn is_document_deserialization_error(error: &Error) -> bool { + use dpp::data_contract::errors::DataContractError; + + matches!( + error, + Error::Proof(drive_proof_verifier::Error::ProtocolError( + dpp::ProtocolError::DataContractError(DataContractError::CorruptedSerialization(_)) + )) + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use dpp::data_contract::errors::DataContractError; + + #[test] + fn test_corrupted_serialization_is_detected() { + let error = Error::Proof(drive_proof_verifier::Error::ProtocolError( + dpp::ProtocolError::DataContractError(DataContractError::CorruptedSerialization( + "test error".to_string(), + )), + )); + + assert!(is_document_deserialization_error(&error)); + } + + #[test] + fn test_other_protocol_error_is_not_detected() { + let error = Error::Proof(drive_proof_verifier::Error::ProtocolError( + dpp::ProtocolError::DecodingError("some decoding error".to_string()), + )); + + assert!(!is_document_deserialization_error(&error)); + } + + #[test] + fn test_other_proof_error_is_not_detected() { + let error = Error::Proof(drive_proof_verifier::Error::EmptyVersion); + + assert!(!is_document_deserialization_error(&error)); + } + + #[test] + fn test_non_proof_error_is_not_detected() { + let error = Error::Generic("some error".to_string()); + + assert!(!is_document_deserialization_error(&error)); + } +} diff --git a/packages/rs-sdk/src/platform/nullifier_sync/mod.rs b/packages/rs-sdk/src/platform/nullifier_sync/mod.rs new file mode 100644 index 00000000000..7eda799fbf1 --- /dev/null +++ b/packages/rs-sdk/src/platform/nullifier_sync/mod.rs @@ -0,0 +1,811 @@ +//! Nullifier synchronization using trunk/branch chunk queries with incremental catch-up. +//! +//! This module provides privacy-preserving nullifier status checking for wallets. +//! It combines two strategies: +//! +//! 1. **Tree scan** (trunk/branch): Privacy-preserving bulk query of the nullifier +//! Merkle tree. Used for initial sync or when the last sync is stale. 
+//! +//! 2. **Incremental catch-up** (compacted + recent blocks): Fetches nullifier +//! changes block-by-block from a known height to chain tip. Fast for frequent +//! re-syncs. +//! +//! # Sync Modes +//! +//! The behavior depends on the `last_sync_timestamp` parameter passed to +//! [`sync_nullifiers`]: +//! +//! - **`None`** — Full tree scan, then incremental catch-up from the tree +//! snapshot to chain tip. +//! - **`Some(timestamp)`** — Incremental-only from `last_sync_height` +//! (unless the elapsed time exceeds +//! [`NullifierSyncConfig::full_rescan_after_time_s`], in which case a full +//! scan runs). +//! +//! # Example +//! +//! ```rust,ignore +//! use dash_sdk::Sdk; +//! +//! let nullifiers: Vec<[u8; 32]> = vec![/* ... */]; +//! +//! // First sync — full tree scan + catch-up +//! let result = sdk.sync_nullifiers(&nullifiers, None, None, None).await?; +//! let saved_height = result.new_sync_height; // store for next call +//! let saved_timestamp = result.new_sync_timestamp; // store for next call +//! +//! // Subsequent sync — incremental only (unless too old per full_rescan_after_time_s) +//! let result = sdk.sync_nullifiers(&nullifiers, None, Some(saved_height), Some(saved_timestamp)).await?; +//! let saved_height = result.new_sync_height; +//! let saved_timestamp = result.new_sync_timestamp; +//! 
``` + +mod provider; +mod types; + +pub use provider::NullifierProvider; +pub use types::{NullifierKey, NullifierSyncConfig, NullifierSyncMetrics, NullifierSyncResult}; + +use crate::error::Error; +use crate::platform::address_sync::tracker::KeyLeafTracker; +use crate::platform::Fetch; +use crate::sync::retry; +use crate::Sdk; +use dapi_grpc::platform::v0::{ + get_nullifiers_branch_state_request, get_nullifiers_branch_state_response, + get_recent_compacted_nullifier_changes_request, get_recent_nullifier_changes_request, + GetNullifiersBranchStateRequest, GetRecentCompactedNullifierChangesRequest, + GetRecentNullifierChangesRequest, +}; +use drive::drive::Drive; +use drive::grovedb::{ + calculate_max_tree_depth_from_count, GroveBranchQueryResult, GroveTrunkQueryResult, LeafInfo, +}; +use drive_proof_verifier::types::{ + NullifiersTrunkQuery, NullifiersTrunkState, RecentCompactedNullifierChanges, + RecentNullifierChanges, +}; +use futures::stream::{FuturesUnordered, StreamExt}; +use rs_dapi_client::{ + DapiRequest, ExecutionError, ExecutionResponse, InnerInto, IntoInner, RequestSettings, +}; +use std::collections::{BTreeSet, HashSet}; +use tracing::{debug, trace, warn}; + +use dpp::version::PlatformVersion; + +type LeafBoundaryKey = Vec; + +/// Server limit for compacted nullifier changes per request. +const COMPACTED_BATCH_LIMIT: usize = 25; +/// Server limit for recent nullifier changes per request. +const RECENT_BATCH_LIMIT: usize = 100; + +/// Synchronize nullifier statuses using trunk/branch chunk queries with +/// incremental block-based catch-up. +/// +/// See [module docs](self) for full description of sync modes. +/// +/// # Arguments +/// - `sdk`: The SDK instance for making network requests. +/// - `provider`: An implementation of [`NullifierProvider`] that supplies nullifier keys. +/// - `config`: Optional configuration; uses defaults if `None`. 
+/// - `last_sync_height`: Optional block height from the previous sync's +/// [`NullifierSyncResult::new_sync_height`]. Used as the starting point for +/// incremental-only catch-up. +/// - `last_sync_timestamp`: Optional block time (Unix seconds) from the previous +/// sync's [`NullifierSyncResult::new_sync_timestamp`]. When provided together +/// with a non-zero [`NullifierSyncConfig::full_rescan_after_time_s`], the +/// function compares `now - last_sync_timestamp` to decide whether a full tree +/// rescan is needed or incremental-only catch-up suffices. +/// Pass `None` to always perform a full tree scan. +/// +/// # Returns +/// - `Ok(NullifierSyncResult)`: Contains found (spent) and absent (unspent) +/// nullifiers, plus `new_sync_height` and `new_sync_timestamp` to persist +/// for the next call. +/// - `Err(Error)`: If the sync fails after exhausting retries. +pub async fn sync_nullifiers( + sdk: &Sdk, + provider: &P, + config: Option, + last_sync_height: Option, + last_sync_timestamp: Option, +) -> Result { + let config = config.unwrap_or_default(); + let platform_version = sdk.version(); + + let nullifiers = provider.nullifiers_to_check(); + + let mut result = NullifierSyncResult::new(); + + if nullifiers.is_empty() { + return Ok(result); + } + + // Decide whether to do a full tree scan or incremental-only. + // + // Incremental-only is chosen when ALL of these are true: + // 1. last_sync_timestamp is provided + // 2. full_rescan_after_time_s > 0 + // 3. 
elapsed time since last sync < full_rescan_after_time_s + let needs_full_scan = match last_sync_timestamp { + Some(last_ts) if config.full_rescan_after_time_s > 0 => { + let now_secs = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + let elapsed = now_secs.saturating_sub(last_ts); + if elapsed >= config.full_rescan_after_time_s { + debug!( + "Nullifier sync: full rescan needed (elapsed {}s >= threshold {}s)", + elapsed, config.full_rescan_after_time_s + ); + true + } else { + false + } + } + _ => true, + }; + + let catch_up_from = if !needs_full_scan { + // Incremental-only mode — skip the tree scan + let start_height = last_sync_height.unwrap_or(0); + debug!( + "Nullifier sync: incremental-only from height {}", + start_height + ); + start_height + } else { + // Full tree scan + let (scan_height, block_time_ms) = + full_tree_scan(sdk, &config, &nullifiers, &mut result, platform_version).await?; + result.new_sync_timestamp = block_time_ms / 1000; + scan_height + }; + + // Incremental catch-up from catch_up_from to chain tip + let nullifier_set: HashSet = nullifiers.iter().copied().collect(); + incremental_catch_up( + sdk, + &nullifier_set, + catch_up_from, + &mut result, + config.request_settings, + ) + .await?; + + Ok(result) +} + +/// Perform the full trunk/branch tree scan. +/// +/// Returns `(checkpoint_height, block_time_ms)` from the trunk query. 
+async fn full_tree_scan( + sdk: &Sdk, + config: &NullifierSyncConfig, + nullifiers: &[NullifierKey], + result: &mut NullifierSyncResult, + platform_version: &PlatformVersion, +) -> Result<(u64, u64), Error> { + // Step 1: Execute trunk query + let (trunk_result, checkpoint_height, block_time_ms) = + execute_trunk_query(sdk, config, config.request_settings, &mut result.metrics).await?; + result.checkpoint_height = checkpoint_height; + + trace!( + "Nullifier trunk query returned {} elements, {} leaf_keys", + trunk_result.elements.len(), + trunk_result.leaf_keys.len() + ); + + // Step 2: Process trunk result + let mut tracker = KeyLeafTracker::new(); + process_trunk_result(&trunk_result, nullifiers, result, &mut tracker); + + // Step 3: Iterative branch queries + let min_query_depth = platform_version + .drive + .methods + .shielded + .nullifiers_query_min_depth; + let max_query_depth = platform_version + .drive + .methods + .shielded + .nullifiers_query_max_depth; + + let mut iterations = 0; + while !tracker.is_empty() && iterations < config.max_iterations { + iterations += 1; + result.metrics.iterations = iterations; + + let leaves_to_query = get_privacy_adjusted_leaves( + &tracker, + &trunk_result, + config.min_privacy_count, + min_query_depth, + max_query_depth, + ); + + if leaves_to_query.is_empty() { + break; + } + + debug!( + "Iteration {}: querying {} leaves for {} remaining nullifiers", + iterations, + leaves_to_query.len(), + tracker.remaining_count() + ); + + let branch_results = execute_branch_queries( + sdk, + config, + &leaves_to_query, + checkpoint_height, + &mut result.metrics, + config.max_concurrent_requests, + config.request_settings, + platform_version, + ) + .await?; + + for (leaf_key, branch_result) in branch_results { + process_branch_result(&branch_result, &leaf_key, result, &mut tracker); + } + } + + if iterations >= config.max_iterations { + warn!( + "Nullifier sync reached max iterations ({}) with {} keys remaining", + 
config.max_iterations, + tracker.remaining_count() + ); + } + + Ok((checkpoint_height, block_time_ms)) +} + +/// Perform incremental block-based catch-up using compacted + recent nullifier +/// changes RPCs. +/// +/// Updates `result.new_sync_height` and `result.new_sync_timestamp`. +async fn incremental_catch_up( + sdk: &Sdk, + nullifier_set: &HashSet, + start_height: u64, + result: &mut NullifierSyncResult, + settings: RequestSettings, +) -> Result<(), Error> { + let mut current_height = start_height; + let mut had_successful_query = false; + + // Phase 1 — Compacted (historical) catch-up + loop { + let request = GetRecentCompactedNullifierChangesRequest { + version: Some( + get_recent_compacted_nullifier_changes_request::Version::V0( + get_recent_compacted_nullifier_changes_request::GetRecentCompactedNullifierChangesRequestV0 { + start_block_height: current_height, + prove: true, + }, + ), + ), + }; + + let (changes, metadata): (Option, _) = + match RecentCompactedNullifierChanges::fetch_with_metadata(sdk, request, Some(settings)) + .await + { + Ok(result) => result, + Err(e) if !had_successful_query => { + debug!( + "Compacted nullifier changes query failed (non-fatal): {}", + e + ); + break; + } + Err(e) => return Err(e), + }; + + let entries = match changes { + Some(c) => c.into_inner(), + None => break, + }; + + result.new_sync_timestamp = metadata.time_ms / 1000; + + if entries.is_empty() { + break; + } + + let entry_count = entries.len(); + result.metrics.compacted_queries += 1; + had_successful_query = true; + + for entry in &entries { + for nf_bytes in &entry.nullifiers { + if nullifier_set.contains(nf_bytes) { + result.found.insert(*nf_bytes); + } + } + if entry.end_block_height.saturating_add(1) > current_height { + current_height = entry.end_block_height.saturating_add(1); + } + } + + if entry_count < COMPACTED_BATCH_LIMIT { + break; + } + } + + // Phase 2 — Recent (per-block) changes + loop { + let request = GetRecentNullifierChangesRequest { + 
version: Some(get_recent_nullifier_changes_request::Version::V0( + get_recent_nullifier_changes_request::GetRecentNullifierChangesRequestV0 { + start_height: current_height, + prove: true, + }, + )), + }; + + let (changes, metadata): (Option, _) = + match RecentNullifierChanges::fetch_with_metadata(sdk, request, Some(settings)).await { + Ok(result) => result, + Err(e) if !had_successful_query => { + debug!("Recent nullifier changes query failed (non-fatal): {}", e); + break; + } + Err(e) => return Err(e), + }; + + let entries = match changes { + Some(c) => c.into_inner(), + None => break, + }; + + result.new_sync_timestamp = metadata.time_ms / 1000; + + if entries.is_empty() { + break; + } + + let entry_count = entries.len(); + result.metrics.recent_queries += 1; + had_successful_query = true; + + for entry in &entries { + for nf_bytes in &entry.nullifiers { + if nullifier_set.contains(nf_bytes) { + result.found.insert(*nf_bytes); + } + } + if entry.block_height.saturating_add(1) > current_height { + current_height = entry.block_height.saturating_add(1); + } + } + + if entry_count < RECENT_BATCH_LIMIT { + break; + } + } + + result.new_sync_height = current_height; + Ok(()) +} + +// ── Tree scan helpers ──────────────────────────────────────────────── + +/// Execute the trunk query and return the verified result. +/// +/// Returns `(trunk_result, checkpoint_height, block_time_ms)`. 
+async fn execute_trunk_query( + sdk: &Sdk, + config: &NullifierSyncConfig, + settings: RequestSettings, + metrics: &mut NullifierSyncMetrics, +) -> Result<(GroveTrunkQueryResult, u64, u64), Error> { + let trunk_query = NullifiersTrunkQuery { + pool_type: config.pool_type, + pool_identifier: config.pool_identifier.clone(), + }; + + let (trunk_state, metadata) = + NullifiersTrunkState::fetch_with_metadata(sdk, trunk_query, Some(settings)).await?; + + metrics.trunk_queries += 1; + + let trunk_state = trunk_state.ok_or_else(|| { + Error::InvalidProvedResponse("Nullifier trunk query returned no state".to_string()) + })?; + + metrics.total_elements_seen += trunk_state.elements.len(); + + Ok((trunk_state.into_inner(), metadata.height, metadata.time_ms)) +} + +/// Process the trunk query result. +fn process_trunk_result( + trunk_result: &GroveTrunkQueryResult, + nullifiers: &[NullifierKey], + result: &mut NullifierSyncResult, + tracker: &mut KeyLeafTracker, +) { + for key in nullifiers { + let key_vec = key.to_vec(); + + if trunk_result.elements.contains_key(&key_vec) { + // Nullifier found in tree — the note is spent + result.found.insert(*key); + } else if let Some((leaf_key, info)) = trunk_result.trace_key_to_leaf(&key_vec) { + // Not in trunk elements, but traces to a leaf subtree + tracker.add_key(key_vec, leaf_key, info); + } else { + // Proven absent — the note is unspent + result.absent.insert(*key); + } + } +} + +/// Get privacy-adjusted leaves to query. +/// +/// For leaves with count below min_privacy_count, find an ancestor with sufficient count. 
+fn get_privacy_adjusted_leaves( + tracker: &KeyLeafTracker, + trunk_result: &GroveTrunkQueryResult, + min_privacy_count: u64, + min_query_depth: u8, + max_query_depth: u8, +) -> Vec<(LeafBoundaryKey, LeafInfo, u8)> { + let active_leaves = tracker.active_leaves(); + let mut result = Vec::new(); + let mut seen_ancestors: BTreeSet = BTreeSet::new(); + + for (leaf_key, info) in active_leaves { + let count = info.count.unwrap_or(0); + let tree_depth = calculate_max_tree_depth_from_count(count); + let clamped_depth = tree_depth.clamp(min_query_depth, max_query_depth); + + if count >= min_privacy_count { + if seen_ancestors.insert(leaf_key.clone()) { + result.push((leaf_key, info, clamped_depth)); + } + } else if let Some((levels_up, ancestor_count, ancestor_key, ancestor_hash)) = + trunk_result.get_ancestor(&leaf_key, min_privacy_count) + { + if seen_ancestors.insert(ancestor_key.clone()) { + let ancestor_info = LeafInfo { + hash: ancestor_hash, + count: Some(ancestor_count), + }; + let depth = tree_depth + .saturating_sub(levels_up) + .clamp(min_query_depth, max_query_depth); + result.push((ancestor_key, ancestor_info, depth)); + } + } else { + // No suitable ancestor found, use the leaf anyway + if seen_ancestors.insert(leaf_key.clone()) { + result.push((leaf_key, info, clamped_depth)); + } + } + } + + result +} + +/// Execute branch queries in parallel. 
+async fn execute_branch_queries( + sdk: &Sdk, + config: &NullifierSyncConfig, + leaves: &[(LeafBoundaryKey, LeafInfo, u8)], + checkpoint_height: u64, + metrics: &mut NullifierSyncMetrics, + max_concurrent: usize, + settings: RequestSettings, + platform_version: &PlatformVersion, +) -> Result, Error> { + let mut futures = FuturesUnordered::new(); + let mut results = Vec::new(); + + for (leaf_key, info, depth) in leaves.iter().cloned() { + let sdk = sdk.clone(); + let expected_hash = info.hash; + let depth_u32 = depth as u32; + let pool_type = config.pool_type; + let pool_identifier = config.pool_identifier.clone(); + + futures.push(async move { + execute_single_branch_query( + &sdk, + pool_type, + pool_identifier.as_deref(), + leaf_key.clone(), + depth_u32, + expected_hash, + checkpoint_height, + settings, + platform_version, + ) + .await + .map(|result| (leaf_key, result)) + }); + + // Limit concurrency + if futures.len() >= max_concurrent { + if let Some(result) = futures.next().await { + match result { + Ok((key, branch_result)) => { + metrics.branch_queries += 1; + results.push((key, branch_result)); + } + Err(e) => { + warn!("Nullifier branch query failed: {:?}", e); + } + } + } + } + } + + // Collect remaining futures + while let Some(result) = futures.next().await { + match result { + Ok((key, branch_result)) => { + metrics.branch_queries += 1; + results.push((key, branch_result)); + } + Err(e) => { + warn!("Nullifier branch query failed: {:?}", e); + } + } + } + + Ok(results) +} + +/// Execute a single branch query with retry logic. 
+async fn execute_single_branch_query( + sdk: &Sdk, + pool_type: u32, + pool_identifier: Option<&[u8]>, + key: LeafBoundaryKey, + depth: u32, + expected_hash: [u8; 32], + checkpoint_height: u64, + settings: RequestSettings, + platform_version: &PlatformVersion, +) -> Result { + let pool_id_owned = pool_identifier.map(|p| p.to_vec()); + + let request = GetNullifiersBranchStateRequest { + version: Some(get_nullifiers_branch_state_request::Version::V0( + get_nullifiers_branch_state_request::GetNullifiersBranchStateRequestV0 { + pool_type, + pool_identifier: pool_id_owned.clone().unwrap_or_default(), + key: key.clone(), + depth, + checkpoint_height, + }, + )), + }; + + let fut = |settings: RequestSettings| { + let request = request.clone(); + let key = key.clone(); + let pool_id_owned = pool_id_owned.clone(); + async move { + let ExecutionResponse { + address, + retries, + inner: response, + } = request + .execute(sdk, settings) + .await + .map_err(|execution_error| execution_error.inner_into())?; + + // Extract merk proof + let proof_bytes = match response.version { + Some(get_nullifiers_branch_state_response::Version::V0(v0)) => v0.merk_proof, + None => { + return Err(ExecutionError { + inner: Error::Proof(drive_proof_verifier::Error::EmptyVersion), + address: Some(address), + retries, + }); + } + }; + + // Verify the proof + let branch_result = Drive::verify_nullifiers_branch_query( + &proof_bytes, + pool_type, + pool_id_owned.as_deref(), + key, + depth as u8, + expected_hash, + platform_version, + ) + .map_err(|e| ExecutionError { + inner: e.into(), + address: Some(address.clone()), + retries, + })?; + + Ok(ExecutionResponse { + inner: branch_result, + address, + retries, + }) + } + }; + + let settings = sdk.dapi_client_settings.override_by(settings); + + retry(sdk.address_list(), settings, fut).await.into_inner() +} + +/// Process a branch query result for nullifier presence. 
+fn process_branch_result( + branch_result: &GroveBranchQueryResult, + queried_leaf_key: &[u8], + result: &mut NullifierSyncResult, + tracker: &mut KeyLeafTracker, +) { + let target_keys = tracker.keys_for_leaf(queried_leaf_key); + + for target_key in target_keys { + if branch_result.elements.contains_key(&target_key) { + // Nullifier found — note is spent + if let Ok(nf) = <[u8; 32]>::try_from(target_key.as_slice()) { + result.found.insert(nf); + } + tracker.key_found(&target_key); + } else if let Some((new_leaf_key, info)) = branch_result.trace_key_to_leaf(&target_key) { + // Traces to a deeper leaf — need another iteration + tracker.update_leaf(&target_key, new_leaf_key, info); + } else { + // Proven absent — note is unspent + if let Ok(nf) = <[u8; 32]>::try_from(target_key.as_slice()) { + result.absent.insert(nf); + } + tracker.key_found(&target_key); // Remove from tracking + } + } + + result.metrics.total_elements_seen += branch_result.elements.len(); +} + +// ── SDK integration ────────────────────────────────────────────────── + +impl Sdk { + /// Synchronize nullifier statuses with incremental catch-up support. + /// + /// This is the main entry point for nullifier synchronization. It handles + /// both full tree scans and incremental block-based catch-up, depending on + /// the parameters. + /// + /// On subsequent calls, pass [`NullifierSyncResult::new_sync_height`] as + /// `last_sync_height` and [`NullifierSyncResult::new_sync_timestamp`] as + /// `last_sync_timestamp` so the function can decide whether a full tree + /// rescan is needed or incremental-only catch-up suffices. + /// + /// # Arguments + /// - `provider`: An implementation of [`NullifierProvider`] that supplies nullifier keys. + /// - `config`: Optional configuration; uses defaults if `None`. + /// - `last_sync_height`: Optional block height from the previous sync's + /// [`NullifierSyncResult::new_sync_height`]. Used as the starting point + /// for incremental-only catch-up. 
+ /// - `last_sync_timestamp`: Optional block time (Unix seconds) from the + /// previous sync's [`NullifierSyncResult::new_sync_timestamp`]. + /// Pass `None` to always perform a full tree scan. + /// + /// # Returns + /// - `Ok(NullifierSyncResult)`: Contains found (spent) and absent (unspent) + /// nullifiers, `new_sync_height` and `new_sync_timestamp` to store for + /// the next call. + /// - `Err(Error)`: If the sync fails after exhausting retries. + /// + /// # Example + /// + /// ```rust,ignore + /// use dash_sdk::Sdk; + /// + /// let sdk = Sdk::new(/* ... */); + /// let nullifiers: Vec<[u8; 32]> = vec![/* known nullifiers */]; + /// + /// // First call — full scan + /// let result = sdk.sync_nullifiers(&nullifiers, None, None, None).await?; + /// let height = result.new_sync_height; // → last_sync_height param + /// let timestamp = result.new_sync_timestamp; // → last_sync_timestamp param + /// + /// // Next call — incremental only if within threshold + /// let result = sdk.sync_nullifiers(&nullifiers, None, Some(height), Some(timestamp)).await?; + /// ``` + pub async fn sync_nullifiers( + &self, + provider: &P, + config: Option, + last_sync_height: Option, + last_sync_timestamp: Option, + ) -> Result { + sync_nullifiers( + self, + provider, + config, + last_sync_height, + last_sync_timestamp, + ) + .await + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_vec_provider() { + let nullifiers: Vec = vec![[0u8; 32], [1u8; 32]]; + let result = nullifiers.nullifiers_to_check(); + assert_eq!(result.len(), 2); + } + + #[test] + fn test_btreeset_provider() { + let mut set = BTreeSet::new(); + set.insert([0u8; 32]); + set.insert([1u8; 32]); + let result = set.nullifiers_to_check(); + assert_eq!(result.len(), 2); + } + + #[test] + fn test_slice_provider() { + let nullifiers = [[0u8; 32], [1u8; 32]]; + let slice: &[NullifierKey] = &nullifiers; + let result = slice.nullifiers_to_check(); + assert_eq!(result.len(), 2); + } + + #[test] + fn 
test_empty_provider_returns_empty() { + let nullifiers: Vec = vec![]; + let result = nullifiers.nullifiers_to_check(); + assert!(result.is_empty()); + } + + #[test] + fn test_default_config() { + let config = NullifierSyncConfig::default(); + assert_eq!(config.min_privacy_count, 32); + assert_eq!(config.max_concurrent_requests, 10); + assert_eq!(config.max_iterations, 50); + assert_eq!(config.pool_type, 0); + assert!(config.pool_identifier.is_none()); + assert_eq!(config.full_rescan_after_time_s, 7 * 24 * 60 * 60); + } + + #[test] + fn test_result_default() { + let result = NullifierSyncResult::new(); + assert!(result.found.is_empty()); + assert!(result.absent.is_empty()); + assert_eq!(result.checkpoint_height, 0); + assert_eq!(result.new_sync_height, 0); + assert_eq!(result.new_sync_timestamp, 0); + assert_eq!(result.metrics.total_queries(), 0); + } + + #[test] + fn test_metrics_total_queries() { + let mut metrics = NullifierSyncMetrics::default(); + metrics.trunk_queries = 1; + metrics.branch_queries = 3; + metrics.compacted_queries = 2; + metrics.recent_queries = 1; + assert_eq!(metrics.total_queries(), 7); + } +} diff --git a/packages/rs-sdk/src/platform/nullifier_sync/provider.rs b/packages/rs-sdk/src/platform/nullifier_sync/provider.rs new file mode 100644 index 00000000000..659bea58cf0 --- /dev/null +++ b/packages/rs-sdk/src/platform/nullifier_sync/provider.rs @@ -0,0 +1,42 @@ +//! Nullifier provider trait for nullifier synchronization. + +use super::types::NullifierKey; +use std::collections::BTreeSet; + +/// Trait for providing nullifier keys to check against the nullifier tree. +/// +/// Unlike [`AddressProvider`](crate::platform::address_sync::AddressProvider), +/// this trait is simpler — no gap limit, no index, no callbacks that extend the set. +/// It just provides a fixed set of nullifier keys to check. 
+/// +/// # Example +/// +/// ```rust,ignore +/// use dash_sdk::platform::nullifier_sync::NullifierProvider; +/// +/// let nullifiers: Vec<[u8; 32]> = vec![[0u8; 32], [1u8; 32]]; +/// // Vec<[u8; 32]> implements NullifierProvider directly +/// let result = sdk.sync_nullifiers(&nullifiers, None, None, None).await?; +/// ``` +pub trait NullifierProvider: Send { + /// Get the set of nullifier keys to check. + fn nullifiers_to_check(&self) -> Vec; +} + +impl NullifierProvider for Vec { + fn nullifiers_to_check(&self) -> Vec { + self.clone() + } +} + +impl NullifierProvider for BTreeSet { + fn nullifiers_to_check(&self) -> Vec { + self.iter().copied().collect() + } +} + +impl NullifierProvider for &[NullifierKey] { + fn nullifiers_to_check(&self) -> Vec { + self.to_vec() + } +} diff --git a/packages/rs-sdk/src/platform/nullifier_sync/types.rs b/packages/rs-sdk/src/platform/nullifier_sync/types.rs new file mode 100644 index 00000000000..d1a72db6238 --- /dev/null +++ b/packages/rs-sdk/src/platform/nullifier_sync/types.rs @@ -0,0 +1,155 @@ +//! Types for nullifier synchronization. + +use rs_dapi_client::RequestSettings; +use std::collections::BTreeSet; + +/// A 32-byte nullifier key. +pub type NullifierKey = [u8; 32]; + +/// Configuration for nullifier synchronization. +#[derive(Debug, Clone)] +pub struct NullifierSyncConfig { + /// Minimum privacy count - subtrees smaller than this will be expanded + /// to include ancestor subtrees for better privacy. + /// + /// Default: 32 + pub min_privacy_count: u64, + + /// Maximum concurrent branch queries. + /// + /// Default: 10 + pub max_concurrent_requests: usize, + + /// Maximum number of iterations (safety limit). + /// + /// Default: 50 + pub max_iterations: usize, + + /// The shielded pool type (0 = credit, 1 = main token, 2 = individual token). + /// + /// Default: 0 (credit pool) + pub pool_type: u32, + + /// Optional 32-byte identifier for individual token pools. 
+ /// + /// Default: None + pub pool_identifier: Option>, + + /// Maximum age in seconds before a full tree rescan is forced. + /// + /// When `last_sync_timestamp` is passed to [`sync_nullifiers`](super::sync_nullifiers), + /// the function compares `now - last_sync_timestamp` against this threshold. + /// If the elapsed time exceeds this value, a full tree rescan is performed + /// instead of incremental-only catch-up. + /// + /// Set to `0` to always do a full tree scan regardless of the timestamp. + /// + /// Default: 604800 (7 days) + pub full_rescan_after_time_s: u64, + + /// Request settings for nullifier sync queries. + pub request_settings: RequestSettings, +} + +impl Default for NullifierSyncConfig { + fn default() -> Self { + Self { + min_privacy_count: 32, + max_concurrent_requests: 10, + max_iterations: 50, + pool_type: 0, + pool_identifier: None, + full_rescan_after_time_s: 7 * 24 * 60 * 60, + request_settings: RequestSettings::default(), + } + } +} + +/// Result of nullifier synchronization. +#[derive(Debug)] +pub struct NullifierSyncResult { + /// Nullifiers found in the tree (spent). + pub found: BTreeSet, + + /// Nullifiers proven absent from the tree (unspent). + pub absent: BTreeSet, + + /// Metrics about the sync process. + pub metrics: NullifierSyncMetrics, + + /// The checkpoint height from the trunk/branch scan. + /// + /// This is the block height at which the tree snapshot was taken. + /// Only meaningful when a full tree scan was performed. + pub checkpoint_height: u64, + + /// The highest block height seen from the incremental phase + /// (or the checkpoint height if no incremental phase ran). + /// + /// After each sync the caller should persist two values: + /// 1. This `new_sync_height` — pass it back as `last_sync_height` on the + /// next call to [`sync_nullifiers`](super::sync_nullifiers). + /// 2. 
[`new_sync_timestamp`](Self::new_sync_timestamp) — pass it as the + /// `last_sync_timestamp` parameter of [`sync_nullifiers`](super::sync_nullifiers). + pub new_sync_height: u64, + + /// Platform block time (Unix seconds) at the point of the latest response. + /// + /// Store this value and pass it back as `last_sync_timestamp` on the next + /// call to [`sync_nullifiers`](super::sync_nullifiers). The function compares + /// it against the current wall-clock time to decide whether a full tree + /// rescan is needed. + pub new_sync_timestamp: u64, +} + +impl NullifierSyncResult { + /// Create a new empty result. + pub fn new() -> Self { + Self { + found: BTreeSet::new(), + absent: BTreeSet::new(), + metrics: NullifierSyncMetrics::default(), + checkpoint_height: 0, + new_sync_height: 0, + new_sync_timestamp: 0, + } + } +} + +impl Default for NullifierSyncResult { + fn default() -> Self { + Self::new() + } +} + +/// Metrics about the nullifier synchronization process. +#[derive(Debug, Default, Clone)] +pub struct NullifierSyncMetrics { + /// Number of trunk queries (0 for incremental-only, 1 for full scan). + pub trunk_queries: usize, + + /// Number of branch queries. + pub branch_queries: usize, + + /// Total elements seen across all proofs. + pub total_elements_seen: usize, + + /// Total proof bytes received. + pub total_proof_bytes: usize, + + /// Number of branch iterations (0 = trunk only, 1+ = trunk plus branch rounds). + pub iterations: usize, + + /// Number of compacted incremental queries. + pub compacted_queries: usize, + + /// Number of recent incremental queries. + pub recent_queries: usize, +} + +impl NullifierSyncMetrics { + /// Get total number of queries (trunk + branch + incremental). 
+ pub fn total_queries(&self) -> usize { + self.trunk_queries + self.branch_queries + self.compacted_queries + self.recent_queries + } +} diff --git a/packages/rs-sdk/src/platform/query.rs b/packages/rs-sdk/src/platform/query.rs index b239841e931..a5e806bac86 100644 --- a/packages/rs-sdk/src/platform/query.rs +++ b/packages/rs-sdk/src/platform/query.rs @@ -28,8 +28,12 @@ use dapi_grpc::platform::v0::{ GetTotalCreditsInPlatformRequest, KeyRequestType, }; use dapi_grpc::platform::v0::{ - get_status_request, GetContestedResourceIdentityVotesRequest, - GetPrefundedSpecializedBalanceRequest, GetStatusRequest, GetTokenDirectPurchasePricesRequest, + get_nullifiers_trunk_state_request, get_shielded_anchors_request, + get_shielded_encrypted_notes_request, get_shielded_nullifiers_request, + get_shielded_pool_state_request, get_status_request, GetContestedResourceIdentityVotesRequest, + GetNullifiersTrunkStateRequest, GetPrefundedSpecializedBalanceRequest, + GetShieldedAnchorsRequest, GetShieldedEncryptedNotesRequest, GetShieldedNullifiersRequest, + GetShieldedPoolStateRequest, GetStatusRequest, GetTokenDirectPurchasePricesRequest, GetTokenPerpetualDistributionLastClaimRequest, GetVotePollsByEndDateRequest, SpecificKeys, }; use dpp::address_funds::PlatformAddress; @@ -43,7 +47,10 @@ use drive::query::vote_poll_vote_state_query::ContestedDocumentVotePollDriveQuer use drive::query::vote_polls_by_document_type_query::VotePollsByDocumentTypeQuery; use drive::query::{DriveDocumentQuery, VotePollsByEndDateDriveQuery}; use drive_proof_verifier::from_request::TryFromRequest; -use drive_proof_verifier::types::{KeysInPath, NoParamQuery}; +use drive_proof_verifier::types::{ + KeysInPath, NoParamQuery, NullifiersTrunkQuery, ShieldedEncryptedNotesQuery, + ShieldedNullifiersQuery, +}; use rs_dapi_client::transport::TransportRequest; use std::collections::BTreeSet; use std::fmt::Debug; @@ -982,3 +989,85 @@ impl Query }) } } + +// --- Shielded Pool Queries --- + +impl Query for NoParamQuery 
{ + fn query(self, prove: bool) -> Result { + if !prove { + unimplemented!("queries without proofs are not supported yet"); + } + + Ok(GetShieldedPoolStateRequest { + version: Some(get_shielded_pool_state_request::Version::V0( + get_shielded_pool_state_request::GetShieldedPoolStateRequestV0 { prove }, + )), + }) + } +} + +impl Query for NoParamQuery { + fn query(self, prove: bool) -> Result { + if !prove { + unimplemented!("queries without proofs are not supported yet"); + } + + Ok(GetShieldedAnchorsRequest { + version: Some(get_shielded_anchors_request::Version::V0( + get_shielded_anchors_request::GetShieldedAnchorsRequestV0 { prove }, + )), + }) + } +} + +impl Query for ShieldedEncryptedNotesQuery { + fn query(self, prove: bool) -> Result { + if !prove { + unimplemented!("queries without proofs are not supported yet"); + } + + Ok(GetShieldedEncryptedNotesRequest { + version: Some(get_shielded_encrypted_notes_request::Version::V0( + get_shielded_encrypted_notes_request::GetShieldedEncryptedNotesRequestV0 { + start_index: self.start_index, + count: self.count, + prove, + }, + )), + }) + } +} + +impl Query for ShieldedNullifiersQuery { + fn query(self, prove: bool) -> Result { + if !prove { + unimplemented!("queries without proofs are not supported yet"); + } + + Ok(GetShieldedNullifiersRequest { + version: Some(get_shielded_nullifiers_request::Version::V0( + get_shielded_nullifiers_request::GetShieldedNullifiersRequestV0 { + nullifiers: self.0, + prove, + }, + )), + }) + } +} + +impl Query for NullifiersTrunkQuery { + fn query(self, prove: bool) -> Result { + if !prove { + unimplemented!("queries without proofs are not supported yet"); + } + + Ok(GetNullifiersTrunkStateRequest { + version: Some(get_nullifiers_trunk_state_request::Version::V0( + get_nullifiers_trunk_state_request::GetNullifiersTrunkStateRequestV0 { + pool_type: self.pool_type, + pool_identifier: self.pool_identifier.unwrap_or_default(), + }, + )), + }) + } +} diff --git 
a/packages/rs-sdk/src/platform/transition.rs b/packages/rs-sdk/src/platform/transition.rs index e477750f5eb..e7e8388fae5 100644 --- a/packages/rs-sdk/src/platform/transition.rs +++ b/packages/rs-sdk/src/platform/transition.rs @@ -9,6 +9,10 @@ pub mod put_contract; pub mod put_document; pub mod put_identity; pub mod put_settings; +pub mod shield; +pub mod shield_from_asset_lock; +pub mod shielded_transfer; +pub mod shielded_withdrawal; pub mod top_up_address; pub mod top_up_identity; pub mod top_up_identity_from_addresses; @@ -17,6 +21,7 @@ pub mod transfer_address_funds; pub mod transfer_document; pub mod transfer_to_addresses; mod txid; +pub mod unshield; pub mod update_price_of_document; pub(crate) mod validation; pub mod vote; diff --git a/packages/rs-sdk/src/platform/transition/shield.rs b/packages/rs-sdk/src/platform/transition/shield.rs new file mode 100644 index 00000000000..634fe7b4567 --- /dev/null +++ b/packages/rs-sdk/src/platform/transition/shield.rs @@ -0,0 +1,104 @@ +use std::collections::BTreeMap; + +use super::address_inputs::{fetch_inputs_with_nonce, nonce_inc}; +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use super::validation::ensure_valid_state_transition_structure; +use crate::{Error, Sdk}; +use dpp::address_funds::{AddressFundsFeeStrategy, PlatformAddress}; +use dpp::fee::Credits; +use dpp::identity::signer::Signer; +use dpp::prelude::AddressNonce; +use dpp::shielded::OrchardBundleParams; +use dpp::state_transition::shield_transition::methods::ShieldTransitionMethodsV0; +use dpp::state_transition::shield_transition::ShieldTransition; + +/// Helper trait to shield platform credits into the shielded pool. +#[async_trait::async_trait] +pub trait ShieldFunds> { + /// Shield funds from platform addresses into the shielded pool. + /// Address nonces are fetched automatically. 
+ async fn shield_funds( + &self, + inputs: BTreeMap, + bundle: OrchardBundleParams, + amount: u64, + fee_strategy: AddressFundsFeeStrategy, + signer: &S, + settings: Option, + ) -> Result<(), Error>; + + /// Shield funds with explicitly provided address nonces. + async fn shield_funds_with_nonce( + &self, + inputs: BTreeMap, + bundle: OrchardBundleParams, + amount: u64, + fee_strategy: AddressFundsFeeStrategy, + signer: &S, + settings: Option, + ) -> Result<(), Error>; +} + +#[async_trait::async_trait] +impl> ShieldFunds for Sdk { + async fn shield_funds( + &self, + inputs: BTreeMap, + bundle: OrchardBundleParams, + amount: u64, + fee_strategy: AddressFundsFeeStrategy, + signer: &S, + settings: Option, + ) -> Result<(), Error> { + let inputs_with_nonce = nonce_inc(fetch_inputs_with_nonce(self, &inputs).await?); + self.shield_funds_with_nonce( + inputs_with_nonce, + bundle, + amount, + fee_strategy, + signer, + settings, + ) + .await + } + + async fn shield_funds_with_nonce( + &self, + inputs: BTreeMap, + bundle: OrchardBundleParams, + amount: u64, + fee_strategy: AddressFundsFeeStrategy, + signer: &S, + settings: Option, + ) -> Result<(), Error> { + let user_fee_increase = settings + .as_ref() + .and_then(|s| s.user_fee_increase) + .unwrap_or_default(); + + let OrchardBundleParams { + actions, + anchor, + proof, + binding_signature, + } = bundle; + + let state_transition = ShieldTransition::try_from_bundle_with_signer( + inputs, + actions, + amount, + anchor, + proof, + binding_signature, + fee_strategy, + signer, + user_fee_increase, + self.version(), + )?; + ensure_valid_state_transition_structure(&state_transition, self.version())?; + + state_transition.broadcast(self, settings).await?; + Ok(()) + } +} diff --git a/packages/rs-sdk/src/platform/transition/shield_from_asset_lock.rs b/packages/rs-sdk/src/platform/transition/shield_from_asset_lock.rs new file mode 100644 index 00000000000..7e3f56e302e --- /dev/null +++ 
b/packages/rs-sdk/src/platform/transition/shield_from_asset_lock.rs @@ -0,0 +1,58 @@ +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use super::validation::ensure_valid_state_transition_structure; +use crate::{Error, Sdk}; +use dpp::prelude::AssetLockProof; +use dpp::shielded::OrchardBundleParams; +use dpp::state_transition::shield_from_asset_lock_transition::methods::ShieldFromAssetLockTransitionMethodsV0; +use dpp::state_transition::shield_from_asset_lock_transition::ShieldFromAssetLockTransition; + +/// Helper trait to shield funds from an L1 asset lock into the shielded pool. +#[async_trait::async_trait] +pub trait ShieldFromAssetLock { + /// Shield funds from an L1 asset lock into the shielded pool. + /// The asset lock proof proves ownership of L1 funds, and the ECDSA signature + /// binds those funds to this specific Orchard bundle. + async fn shield_from_asset_lock( + &self, + asset_lock_proof: AssetLockProof, + asset_lock_proof_private_key: &[u8], + bundle: OrchardBundleParams, + value_balance: u64, + settings: Option, + ) -> Result<(), Error>; +} + +#[async_trait::async_trait] +impl ShieldFromAssetLock for Sdk { + async fn shield_from_asset_lock( + &self, + asset_lock_proof: AssetLockProof, + asset_lock_proof_private_key: &[u8], + bundle: OrchardBundleParams, + value_balance: u64, + settings: Option, + ) -> Result<(), Error> { + let OrchardBundleParams { + actions, + anchor, + proof, + binding_signature, + } = bundle; + + let state_transition = ShieldFromAssetLockTransition::try_from_asset_lock_with_bundle( + asset_lock_proof, + asset_lock_proof_private_key, + actions, + value_balance, + anchor, + proof, + binding_signature, + self.version(), + )?; + ensure_valid_state_transition_structure(&state_transition, self.version())?; + + state_transition.broadcast(self, settings).await?; + Ok(()) + } +} diff --git a/packages/rs-sdk/src/platform/transition/shielded_transfer.rs 
b/packages/rs-sdk/src/platform/transition/shielded_transfer.rs new file mode 100644 index 00000000000..a132862210c --- /dev/null +++ b/packages/rs-sdk/src/platform/transition/shielded_transfer.rs @@ -0,0 +1,50 @@ +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use super::validation::ensure_valid_state_transition_structure; +use crate::{Error, Sdk}; +use dpp::shielded::OrchardBundleParams; +use dpp::state_transition::shielded_transfer_transition::methods::ShieldedTransferTransitionMethodsV0; +use dpp::state_transition::shielded_transfer_transition::ShieldedTransferTransition; + +/// Helper trait to transfer funds within the shielded pool. +#[async_trait::async_trait] +pub trait TransferShielded { + /// Transfer funds within the shielded pool. + /// Authentication is via Orchard spend authorization signatures in the bundle actions. + async fn transfer_shielded( + &self, + bundle: OrchardBundleParams, + value_balance: u64, + settings: Option, + ) -> Result<(), Error>; +} + +#[async_trait::async_trait] +impl TransferShielded for Sdk { + async fn transfer_shielded( + &self, + bundle: OrchardBundleParams, + value_balance: u64, + settings: Option, + ) -> Result<(), Error> { + let OrchardBundleParams { + actions, + anchor, + proof, + binding_signature, + } = bundle; + + let state_transition = ShieldedTransferTransition::try_from_bundle( + actions, + value_balance, + anchor, + proof, + binding_signature, + self.version(), + )?; + ensure_valid_state_transition_structure(&state_transition, self.version())?; + + state_transition.broadcast(self, settings).await?; + Ok(()) + } +} diff --git a/packages/rs-sdk/src/platform/transition/shielded_withdrawal.rs b/packages/rs-sdk/src/platform/transition/shielded_withdrawal.rs new file mode 100644 index 00000000000..44ffe18c5bf --- /dev/null +++ b/packages/rs-sdk/src/platform/transition/shielded_withdrawal.rs @@ -0,0 +1,63 @@ +use super::broadcast::BroadcastStateTransition; +use 
super::put_settings::PutSettings; +use super::validation::ensure_valid_state_transition_structure; +use crate::{Error, Sdk}; +use dpp::identity::core_script::CoreScript; +use dpp::shielded::OrchardBundleParams; +use dpp::state_transition::shielded_withdrawal_transition::methods::ShieldedWithdrawalTransitionMethodsV0; +use dpp::state_transition::shielded_withdrawal_transition::ShieldedWithdrawalTransition; +use dpp::withdrawal::Pooling; + +/// Helper trait to withdraw funds from the shielded pool to L1. +#[async_trait::async_trait] +pub trait WithdrawShielded { + /// Withdraw funds from the shielded pool to a Core address. + /// Authentication is via Orchard spend authorization signatures in the bundle actions. + #[allow(clippy::too_many_arguments)] + async fn withdraw_shielded( + &self, + unshielding_amount: u64, + bundle: OrchardBundleParams, + core_fee_per_byte: u32, + pooling: Pooling, + output_script: CoreScript, + settings: Option, + ) -> Result<(), Error>; +} + +#[async_trait::async_trait] +impl WithdrawShielded for Sdk { + #[allow(clippy::too_many_arguments)] + async fn withdraw_shielded( + &self, + unshielding_amount: u64, + bundle: OrchardBundleParams, + core_fee_per_byte: u32, + pooling: Pooling, + output_script: CoreScript, + settings: Option, + ) -> Result<(), Error> { + let OrchardBundleParams { + actions, + anchor, + proof, + binding_signature, + } = bundle; + + let state_transition = ShieldedWithdrawalTransition::try_from_bundle( + actions, + unshielding_amount, + anchor, + proof, + binding_signature, + core_fee_per_byte, + pooling, + output_script, + self.version(), + )?; + ensure_valid_state_transition_structure(&state_transition, self.version())?; + + state_transition.broadcast(self, settings).await?; + Ok(()) + } +} diff --git a/packages/rs-sdk/src/platform/transition/unshield.rs b/packages/rs-sdk/src/platform/transition/unshield.rs new file mode 100644 index 00000000000..e8e14794f02 --- /dev/null +++ 
b/packages/rs-sdk/src/platform/transition/unshield.rs @@ -0,0 +1,54 @@ +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use super::validation::ensure_valid_state_transition_structure; +use crate::{Error, Sdk}; +use dpp::address_funds::PlatformAddress; +use dpp::shielded::OrchardBundleParams; +use dpp::state_transition::unshield_transition::methods::UnshieldTransitionMethodsV0; +use dpp::state_transition::unshield_transition::UnshieldTransition; + +/// Helper trait to unshield funds from the shielded pool to a platform address. +#[async_trait::async_trait] +pub trait UnshieldFunds { + /// Unshield funds from the shielded pool to a platform address. + /// Authentication is via Orchard spend authorization signatures in the bundle actions. + async fn unshield_funds( + &self, + output_address: PlatformAddress, + unshielding_amount: u64, + bundle: OrchardBundleParams, + settings: Option, + ) -> Result<(), Error>; +} + +#[async_trait::async_trait] +impl UnshieldFunds for Sdk { + async fn unshield_funds( + &self, + output_address: PlatformAddress, + unshielding_amount: u64, + bundle: OrchardBundleParams, + settings: Option, + ) -> Result<(), Error> { + let OrchardBundleParams { + actions, + anchor, + proof, + binding_signature, + } = bundle; + + let state_transition = UnshieldTransition::try_from_bundle( + output_address, + actions, + unshielding_amount, + anchor, + proof, + binding_signature, + self.version(), + )?; + ensure_valid_state_transition_structure(&state_transition, self.version())?; + + state_transition.broadcast(self, settings).await?; + Ok(()) + } +} diff --git a/packages/rs-sdk/src/platform/types.rs b/packages/rs-sdk/src/platform/types.rs index 5eb63d5185f..b77f8fa3c5b 100644 --- a/packages/rs-sdk/src/platform/types.rs +++ b/packages/rs-sdk/src/platform/types.rs @@ -4,5 +4,6 @@ pub mod evonode; pub mod finalized_epoch; pub mod identity; pub mod proposed_blocks; +mod shielded; mod total_credits_in_platform; pub mod 
version_votes; diff --git a/packages/rs-sdk/src/platform/types/shielded.rs b/packages/rs-sdk/src/platform/types/shielded.rs new file mode 100644 index 00000000000..38b41f0ab4b --- /dev/null +++ b/packages/rs-sdk/src/platform/types/shielded.rs @@ -0,0 +1,62 @@ +//! Shielded pool query types and helpers +use crate::platform::fetch_current_no_parameters::FetchCurrent; +use crate::{platform::Fetch, Error, Sdk}; +use async_trait::async_trait; +use dapi_grpc::platform::v0::{Proof, ResponseMetadata}; +use drive_proof_verifier::types::{NoParamQuery, ShieldedAnchors, ShieldedPoolState}; + +#[async_trait] +impl FetchCurrent for ShieldedPoolState { + async fn fetch_current(sdk: &Sdk) -> Result { + let (state, _) = Self::fetch_current_with_metadata(sdk).await?; + Ok(state) + } + + async fn fetch_current_with_metadata(sdk: &Sdk) -> Result<(Self, ResponseMetadata), Error> { + let (state, metadata) = Self::fetch_with_metadata(sdk, NoParamQuery {}, None).await?; + Ok(( + state.ok_or(Error::Generic("shielded pool state not found".to_string()))?, + metadata, + )) + } + + async fn fetch_current_with_metadata_and_proof( + sdk: &Sdk, + ) -> Result<(Self, ResponseMetadata, Proof), Error> { + let (state, metadata, proof) = + Self::fetch_with_metadata_and_proof(sdk, NoParamQuery {}, None).await?; + Ok(( + state.ok_or(Error::Generic("shielded pool state not found".to_string()))?, + metadata, + proof, + )) + } +} + +#[async_trait] +impl FetchCurrent for ShieldedAnchors { + async fn fetch_current(sdk: &Sdk) -> Result { + let (anchors, _) = Self::fetch_current_with_metadata(sdk).await?; + Ok(anchors) + } + + async fn fetch_current_with_metadata(sdk: &Sdk) -> Result<(Self, ResponseMetadata), Error> { + let (anchors, metadata) = Self::fetch_with_metadata(sdk, NoParamQuery {}, None).await?; + Ok(( + anchors.ok_or(Error::Generic("shielded anchors not found".to_string()))?, + metadata, + )) + } + + async fn fetch_current_with_metadata_and_proof( + sdk: &Sdk, + ) -> Result<(Self, ResponseMetadata, 
Proof), Error> { + let (anchors, metadata, proof) = + Self::fetch_with_metadata_and_proof(sdk, NoParamQuery {}, None).await?; + Ok(( + anchors.ok_or(Error::Generic("shielded anchors not found".to_string()))?, + metadata, + proof, + )) + } +} diff --git a/packages/rs-sdk/src/sdk.rs b/packages/rs-sdk/src/sdk.rs index 74d8efa9fd9..0d1dae9aae7 100644 --- a/packages/rs-sdk/src/sdk.rs +++ b/packages/rs-sdk/src/sdk.rs @@ -215,7 +215,10 @@ impl Sdk { /// respective tolerance will be None regardless of method, to allow disabling staleness checks globally. fn freshness_criteria(&self, method_name: &str) -> (Option, Option) { match method_name { - "get_addresses_trunk_state" | "get_addresses_branch_state" => ( + "get_addresses_trunk_state" + | "get_addresses_branch_state" + | "get_nullifiers_trunk_state" + | "get_nullifiers_branch_state" => ( None, self.metadata_time_tolerance_ms .and(Some(ADDRESS_STATE_TIME_TOLERANCE_MS)), diff --git a/packages/rs-sdk/src/shielded.rs b/packages/rs-sdk/src/shielded.rs new file mode 100644 index 00000000000..4bfbd5f01cc --- /dev/null +++ b/packages/rs-sdk/src/shielded.rs @@ -0,0 +1,312 @@ +//! Shielded note sync and trial decryption utilities. +//! +//! This module provides: +//! - [`try_decrypt_note`]: compact trial decryption on a single encrypted note +//! - [`sync_shielded_notes`]: end-to-end sync that fetches encrypted notes from +//! 
the network in parallel and performs trial decryption + +use crate::error::Error; +use crate::platform::Fetch; +use crate::Sdk; +use drive_proof_verifier::types::ShieldedEncryptedNotesQuery; +use drive_proof_verifier::types::{ShieldedEncryptedNote, ShieldedEncryptedNotes}; +use futures::stream::{FuturesUnordered, StreamExt}; +use grovedb_commitment_tree::{ + try_compact_note_decryption, CompactAction, DashMemo, EphemeralKeyBytes, + ExtractedNoteCommitment, Note, Nullifier, OrchardDomain, PaymentAddress, + PreparedIncomingViewingKey, COMPACT_NOTE_SIZE, +}; +use rs_dapi_client::RequestSettings; +use std::collections::BTreeMap; +use std::future::Future; +use std::pin::Pin; +use tracing::debug; + +/// Minimum length of the `encrypted_note` field for compact trial decryption. +/// +/// The `encrypted_note` field layout is: +/// `epk(32) || enc_ciphertext(104) || out_ciphertext(80)` = 216 bytes +/// +/// For compact decryption we need at least `epk(32) + COMPACT_NOTE_SIZE` bytes +/// of the enc_ciphertext. +const MIN_ENCRYPTED_NOTE_LEN: usize = 32 + COMPACT_NOTE_SIZE; + +/// Default maximum number of chunk queries in flight at once. +const DEFAULT_MAX_CONCURRENT: usize = 4; + +// --------------------------------------------------------------------------- +// Trial decryption +// --------------------------------------------------------------------------- + +/// Attempt compact trial decryption on a [`ShieldedEncryptedNote`]. +/// +/// The [`ShieldedEncryptedNote`] struct (from proof verification) has three +/// separate fields: +/// - `cmx`: note commitment (32 bytes) +/// - `nullifier`: nullifier (32 bytes) — used for Rho derivation +/// - `encrypted_note`: `epk(32) || enc_ciphertext(104) || out_ciphertext(80)` +/// +/// Returns `Some((note, address))` if the note decrypts successfully under the +/// given incoming viewing key, or `None` if it does not belong to the viewer +/// (including dummy/padding notes). 
+pub fn try_decrypt_note( + ivk: &PreparedIncomingViewingKey, + encrypted_note: &ShieldedEncryptedNote, +) -> Option<(Note, PaymentAddress)> { + let data = &encrypted_note.encrypted_note; + if data.len() < MIN_ENCRYPTED_NOTE_LEN { + return None; + } + + // Parse nullifier from the dedicated field (32 bytes) + let nf_bytes: [u8; 32] = encrypted_note.nullifier.as_slice().try_into().ok()?; + let nf = Nullifier::from_bytes(&nf_bytes).into_option()?; + + // Parse cmx from the dedicated field (32 bytes) + let cmx_bytes: [u8; 32] = encrypted_note.cmx.as_slice().try_into().ok()?; + let cmx = ExtractedNoteCommitment::from_bytes(&cmx_bytes).into_option()?; + + // Parse ephemeral public key (first 32 bytes of encrypted_note) + let epk_bytes: [u8; 32] = data[0..32].try_into().ok()?; + + // Parse compact ciphertext (first COMPACT_NOTE_SIZE bytes of enc_ciphertext, + // starting at byte 32) + let enc_compact: [u8; COMPACT_NOTE_SIZE] = data[32..32 + COMPACT_NOTE_SIZE].try_into().ok()?; + + // Build CompactAction and OrchardDomain for trial decryption + let compact = CompactAction::from_parts(nf, cmx, EphemeralKeyBytes(epk_bytes), enc_compact); + let domain = OrchardDomain::::for_compact_action(&compact); + + try_compact_note_decryption(&domain, ivk, &compact) +} + +// --------------------------------------------------------------------------- +// Shielded note sync +// --------------------------------------------------------------------------- + +/// Configuration for [`sync_shielded_notes`]. +pub struct ShieldedSyncConfig { + /// Maximum number of chunk queries in flight at once (default: 4). + pub max_concurrent: usize, + /// Request settings forwarded to each individual fetch call. + pub request_settings: RequestSettings, +} + +impl Default for ShieldedSyncConfig { + fn default() -> Self { + Self { + max_concurrent: DEFAULT_MAX_CONCURRENT, + request_settings: RequestSettings::default(), + } + } +} + +/// A note that was successfully decrypted (belongs to the viewer). 
+pub struct DecryptedNote { + /// Global position of this note in the commitment tree. + pub position: u64, + /// The decrypted Orchard note (contains value, rseed, rho). + pub note: Note, + /// The recipient payment address. + pub address: PaymentAddress, + /// The nullifier (32 bytes). + pub nullifier: [u8; 32], + /// The note commitment (32 bytes). + pub cmx: [u8; 32], +} + +/// Result of [`sync_shielded_notes`]. +pub struct ShieldedSyncResult { + /// Notes that successfully decrypted (belong to the viewer). + pub decrypted_notes: Vec, + /// All raw encrypted notes fetched, in tree order. + /// Useful for updating a local commitment tree. + pub all_notes: Vec, + /// Next chunk-aligned index to resume syncing from. + pub next_start_index: u64, + /// Total number of notes scanned in this sync. + pub total_notes_scanned: u64, + /// Platform block height at the time of the most recent chunk response. + pub block_height: u64, +} + +/// Fetch all shielded encrypted notes starting from `start_index`, query +/// multiple nodes in parallel, and perform trial decryption. +/// +/// This is the main entry point for wallet sync. It handles: +/// 1. Chunk-aligned pagination (each query covers one BulkAppendTree chunk) +/// 2. Parallel dispatch of chunk queries across network nodes +/// 3. Proof verification on every response +/// 4. Trial decryption with the provided incoming viewing key +/// +/// # Arguments +/// +/// - `sdk` — SDK instance connected to the network +/// - `ivk` — prepared incoming viewing key for trial decryption +/// - `start_index` — first note position to fetch (must be a multiple of +/// the chunk size, typically 2048) +/// - `config` — optional tuning; `None` uses sensible defaults +/// +/// # Returns +/// +/// [`ShieldedSyncResult`] containing decrypted notes that belong to the +/// viewer, all raw notes for commitment tree updates, and the next index +/// to resume from. 
+pub async fn sync_shielded_notes( + sdk: &Sdk, + ivk: &PreparedIncomingViewingKey, + start_index: u64, + config: Option, +) -> Result { + let config = config.unwrap_or_default(); + + let chunk_size = sdk + .version() + .drive_abci + .query + .shielded_queries + .max_encrypted_notes_per_query as u64; + + // Validate alignment + if chunk_size > 0 && start_index % chunk_size != 0 { + return Err(Error::Generic(format!( + "start_index {} is not chunk-aligned; must be a multiple of {}", + start_index, chunk_size + ))); + } + + let max_concurrent = config.max_concurrent.max(1); + let settings = config.request_settings; + + type ChunkFuture = + Pin, u64), Error>> + Send>>; + + // Sliding-window parallel fetch using FuturesUnordered. + // Each future fetches one chunk and returns (chunk_start_index, notes, block_height). + let mut futures: FuturesUnordered = FuturesUnordered::new(); + let mut next_chunk_index = start_index; + let mut reached_end = false; + + // Seed initial batch of chunk queries + for _ in 0..max_concurrent { + let chunk_idx = next_chunk_index; + next_chunk_index += chunk_size; + let sdk = sdk.clone(); + futures.push(Box::pin(async move { + fetch_chunk(&sdk, chunk_idx, chunk_size, settings).await + })); + } + + // Collect results keyed by chunk start_index for ordered reassembly + let mut chunk_results: BTreeMap> = BTreeMap::new(); + let mut max_block_height: u64 = 0; + + while let Some(result) = futures.next().await { + let (chunk_idx, notes, block_height) = result?; + let is_partial = (notes.len() as u64) < chunk_size; + chunk_results.insert(chunk_idx, notes); + max_block_height = max_block_height.max(block_height); + + if is_partial { + reached_end = true; + } + + // Queue the next chunk if we haven't reached the end + if !reached_end { + let chunk_idx = next_chunk_index; + next_chunk_index += chunk_size; + let sdk = sdk.clone(); + futures.push(Box::pin(async move { + fetch_chunk(&sdk, chunk_idx, chunk_size, settings).await + })); + } + } + + // 
Flatten in tree order and perform trial decryption + let mut all_notes = Vec::new(); + let mut decrypted_notes = Vec::new(); + + for (&chunk_start, notes) in &chunk_results { + for (i, note) in notes.iter().enumerate() { + let position = chunk_start + i as u64; + + if let Some((decrypted, address)) = try_decrypt_note(ivk, note) { + let nf: [u8; 32] = note.nullifier.as_slice().try_into().unwrap_or([0u8; 32]); + let cmx: [u8; 32] = note.cmx.as_slice().try_into().unwrap_or([0u8; 32]); + + decrypted_notes.push(DecryptedNote { + position, + note: decrypted, + address, + nullifier: nf, + cmx, + }); + } + } + } + + let total_notes_scanned: u64 = chunk_results.values().map(|v| v.len() as u64).sum(); + + // Move notes out of the BTreeMap in order + for (_, notes) in chunk_results { + all_notes.extend(notes); + } + + // Next start index: round up to next chunk boundary + let raw_next = start_index + total_notes_scanned; + let next_start_index = if chunk_size > 0 { + ((raw_next + chunk_size - 1) / chunk_size) * chunk_size + } else { + raw_next + }; + + debug!( + total_notes_scanned, + decrypted_count = decrypted_notes.len(), + next_start_index, + "shielded note sync complete" + ); + + Ok(ShieldedSyncResult { + decrypted_notes, + all_notes, + next_start_index, + total_notes_scanned, + block_height: max_block_height, + }) +} + +/// Fetch a single chunk of encrypted notes from the network. +/// +/// Returns `(chunk_start_index, notes, block_height)`. An empty vec means no +/// notes exist at this position (past end of tree). 
+async fn fetch_chunk( + sdk: &Sdk, + chunk_start: u64, + chunk_size: u64, + settings: RequestSettings, +) -> Result<(u64, Vec, u64), Error> { + let query = ShieldedEncryptedNotesQuery { + start_index: chunk_start, + count: chunk_size as u32, + }; + + debug!(chunk_start, chunk_size, "fetching shielded notes chunk"); + + let (result, metadata) = + ShieldedEncryptedNotes::fetch_with_metadata(sdk, query, Some(settings)).await?; + + let notes = match result { + Some(ShieldedEncryptedNotes(notes)) => notes, + None => Vec::new(), + }; + + debug!( + chunk_start, + notes_returned = notes.len(), + block_height = metadata.height, + "shielded notes chunk fetched" + ); + + Ok((chunk_start, notes, metadata.height)) +} diff --git a/packages/strategy-tests/src/lib.rs b/packages/strategy-tests/src/lib.rs index 3a66119b895..e760d8cd9cb 100644 --- a/packages/strategy-tests/src/lib.rs +++ b/packages/strategy-tests/src/lib.rs @@ -2615,6 +2615,21 @@ impl Strategy { } } + OperationType::Shield(_amount_range) + | OperationType::ShieldedTransfer(_amount_range) + | OperationType::Unshield(_amount_range) + | OperationType::ShieldFromAssetLock(_amount_range) + | OperationType::ShieldedWithdrawal(_amount_range) => { + // Shielded transitions require client-side Orchard bundle + // building which is not yet available in strategy tests: + // - Add orchard / grovedb-commitment-tree dependencies + // - Cache ProvingKey via OnceLock (~30s first build) + // - Track CommitmentTree across blocks for spend witnesses + // - Build bundles with Builder, create_proof, apply_signatures + // Individual validation tests in drive-abci provide full + // coverage of all 5 shielded transition types (84+ tests). 
+ } + _ => {} } } diff --git a/packages/strategy-tests/src/operations.rs b/packages/strategy-tests/src/operations.rs index b4b89bfd872..688dcfb6137 100644 --- a/packages/strategy-tests/src/operations.rs +++ b/packages/strategy-tests/src/operations.rs @@ -852,6 +852,21 @@ pub enum OperationType { KeyCount, ExtraKeys, ), + + /// Shield funds into the shielded pool (requires Orchard bundle). + Shield(AmountRange), + + /// Transfer within the shielded pool (requires Orchard bundle). + ShieldedTransfer(AmountRange), + + /// Unshield from the shielded pool to a platform address (requires Orchard bundle). + Unshield(AmountRange), + + /// Shield funds from a core asset lock into the shielded pool (requires Orchard bundle). + ShieldFromAssetLock(AmountRange), + + /// Withdraw from the shielded pool to a core (L1) address (requires Orchard bundle). + ShieldedWithdrawal(AmountRange), } #[allow(clippy::large_enum_variant)] @@ -892,6 +907,11 @@ enum OperationTypeInSerializationFormat { KeyCount, ExtraKeys, ), + Shield(AmountRange), + ShieldedTransfer(AmountRange), + Unshield(AmountRange), + ShieldFromAssetLock(AmountRange), + ShieldedWithdrawal(AmountRange), } impl PlatformSerializableWithPlatformVersion for OperationType { @@ -997,6 +1017,17 @@ impl PlatformSerializableWithPlatformVersion for OperationType { key_count, extra_keys, ), + OperationType::Shield(range) => OperationTypeInSerializationFormat::Shield(range), + OperationType::ShieldedTransfer(range) => { + OperationTypeInSerializationFormat::ShieldedTransfer(range) + } + OperationType::Unshield(range) => OperationTypeInSerializationFormat::Unshield(range), + OperationType::ShieldFromAssetLock(range) => { + OperationTypeInSerializationFormat::ShieldFromAssetLock(range) + } + OperationType::ShieldedWithdrawal(range) => { + OperationTypeInSerializationFormat::ShieldedWithdrawal(range) + } }; let config = bincode::config::standard() .with_big_endian() @@ -1115,6 +1146,17 @@ impl 
PlatformDeserializableWithPotentialValidationFromVersionedStructure for Ope key_count, extra_keys, ), + OperationTypeInSerializationFormat::Shield(range) => OperationType::Shield(range), + OperationTypeInSerializationFormat::ShieldedTransfer(range) => { + OperationType::ShieldedTransfer(range) + } + OperationTypeInSerializationFormat::Unshield(range) => OperationType::Unshield(range), + OperationTypeInSerializationFormat::ShieldFromAssetLock(range) => { + OperationType::ShieldFromAssetLock(range) + } + OperationTypeInSerializationFormat::ShieldedWithdrawal(range) => { + OperationType::ShieldedWithdrawal(range) + } }) } } diff --git a/packages/swift-sdk/MIGRATION_PLAN.md b/packages/swift-sdk/MIGRATION_PLAN.md new file mode 100644 index 00000000000..b13ce3ae493 --- /dev/null +++ b/packages/swift-sdk/MIGRATION_PLAN.md @@ -0,0 +1,397 @@ +# Swift SDK Migration Plan + +## Problem Statement + +The SwiftExampleApp contains approximately **8,000+ lines of SDK code** that should be in the `SwiftDashSDK` library. This includes: +- Platform query implementations +- State transition builders +- Service abstractions +- Domain models +- FFI wrapper extensions + +The example app should only contain UI code and app-specific configuration. 
+ +--- + +## Current Structure Analysis + +### SDK Sources (`Sources/SwiftDashSDK/`) - **34 files, ~4,500 LOC** + +``` +SwiftDashSDK/ +├── SDK.swift # Core SDK class (572 lines) +├── SwiftDashSDK.swift # Re-exports +├── Identity.swift # Identity types +├── DataContract.swift # Contract types +├── IdentityTypes.swift # Identity enums/structs +├── ConcurrencyCompat.swift # Concurrency helpers +├── KeyWallet/ # Wallet functionality +│ ├── Wallet.swift +│ ├── KeyWallet.swift +│ ├── ManagedWallet.swift +│ ├── Account.swift +│ ├── ManagedAccount.swift +│ ├── AccountCollection.swift +│ ├── ManagedAccountCollection.swift +│ ├── KeyDerivation.swift +│ ├── Mnemonic.swift +│ ├── BIP38.swift +│ ├── Address.swift +│ ├── AddressPool.swift +│ ├── KeyWalletTypes.swift +│ ├── BLSAccount.swift +│ ├── EdDSAAccount.swift +│ └── Transaction.swift +├── PlatformWallet/ # Platform wallet +│ ├── PlatformWallet.swift +│ ├── PlatformWalletFFI.swift +│ ├── IdentityManager.swift +│ ├── ManagedIdentity.swift +│ ├── ContactRequest.swift +│ ├── EstablishedContact.swift +│ └── PlatformWalletTypes.swift +├── SPV/ +│ └── SPVClient.swift # SPV client wrapper +├── Tx/ +│ ├── TransactionBuilder.swift +│ └── TransactionTypes.swift +└── Utils/ + └── KeyValidation.swift +``` + +### SwiftExampleApp - Files That Should Move + +#### CRITICAL PRIORITY - SDK Extensions (~4,200 LOC) + +| File | Lines | Description | Target Location | +|------|-------|-------------|-----------------| +| `SDK/StateTransitionExtensions.swift` | ~2,782 | State transition building, document/identity/contract operations | `Sources/SwiftDashSDK/StateTransition/` | +| `SDK/PlatformQueryExtensions.swift` | ~1,370 | Platform queries (identity, contract, document, DPNS) | `Sources/SwiftDashSDK/Queries/` | +| `SDK/SDKExtensions.swift` | ~23 | Minor SDK extensions | Merge into `SDK.swift` | + +#### HIGH PRIORITY - Services (~800 LOC) + +| File | Lines | Description | Target Location | +|------|-------|-------------|-----------------| +| 
`Services/DashPayService.swift` | ~292 | DashPay contact management | `Sources/SwiftDashSDK/PlatformWallet/DashPayService.swift` | +| `Services/KeychainManager.swift` | ~300 | Secure key storage | `Sources/SwiftDashSDK/Security/KeychainManager.swift` | +| `Core/Services/WalletService.swift` | ~400+ | SPV wallet service | `Sources/SwiftDashSDK/SPV/WalletService.swift` | + +#### HIGH PRIORITY - Core Wallet (~600 LOC) + +| File | Lines | Description | Target Location | +|------|-------|-------------|-----------------| +| `Core/Wallet/HDWallet.swift` | ~200 | HD wallet implementation | Review - may duplicate `KeyWallet/` | +| `Core/Wallet/TransactionService.swift` | ~150 | Transaction broadcasting | `Sources/SwiftDashSDK/Tx/TransactionService.swift` | +| `Core/Wallet/TransactionErrors.swift` | ~50 | Transaction error types | `Sources/SwiftDashSDK/Tx/TransactionErrors.swift` | +| `Core/Wallet/HDTransaction.swift` | ~100 | Transaction models | Review - may duplicate `Tx/` | +| `Core/Wallet/WalletManager.swift` | ~100 | Wallet lifecycle | Review - may duplicate `KeyWallet/WalletManager.swift` | + +#### MEDIUM PRIORITY - Models (~1,000 LOC) + +| File | Lines | Description | Target Location | +|------|-------|-------------|-----------------| +| `Core/Models/UTXO.swift` | ~60 | UTXO model | `Sources/SwiftDashSDK/SPV/UTXO.swift` | +| `Core/Models/Balance.swift` | ~40 | Balance model | `Sources/SwiftDashSDK/Wallet/Balance.swift` | +| `Core/Models/Transaction.swift` | ~80 | Transaction display model | `Sources/SwiftDashSDK/Tx/TransactionModel.swift` | +| `Core/Models/HDWalletModels.swift` | ~150 | Wallet state models | `Sources/SwiftDashSDK/Wallet/WalletModels.swift` | +| `Core/Utils/DataContractParser.swift` | ~300 | Contract parsing | `Sources/SwiftDashSDK/DataContract/Parser.swift` | +| `Models/Network.swift` | ~50 | Network enum | `Sources/SwiftDashSDK/Network.swift` | +| `Models/TestnetNodes.swift` | ~100 | Node configuration | 
`Sources/SwiftDashSDK/Config/TestnetNodes.swift` | + +#### MEDIUM PRIORITY - DPP Types (~400 LOC) + +| File | Lines | Description | Target Location | +|------|-------|-------------|-----------------| +| `Models/DPP/DPPCoreTypes.swift` | ~100 | Core DPP types | Review - may duplicate `Identity.swift`/`DataContract.swift` | +| `Models/DPP/Identity.swift` | ~100 | Identity types | Review - may duplicate SDK types | +| `Models/DPP/DataContract.swift` | ~100 | Contract types | Review - may duplicate SDK types | +| `Models/DPP/Document.swift` | ~50 | Document types | `Sources/SwiftDashSDK/Document.swift` | +| `Models/DPP/StateTransition.swift` | ~50 | State transition types | `Sources/SwiftDashSDK/StateTransition/Types.swift` | + +#### LOW PRIORITY - Helpers (~200 LOC) + +| File | Lines | Description | Target Location | +|------|-------|-------------|-----------------| +| `Helpers/WIFParser.swift` | ~50 | WIF key parsing | `Sources/SwiftDashSDK/Utils/WIFParser.swift` | +| `Utils/TestKeyGenerator.swift` | ~50 | Key generation for tests | `Sources/SwiftDashSDK/Utils/TestKeyGenerator.swift` | +| `SDK/TestSigner.swift` | ~51 | Test signing | `Sources/SwiftDashSDK/Testing/TestSigner.swift` | + +--- + +## Files That Should STAY in SwiftExampleApp + +### UI Code (~35+ view files) +- All files in `Views/` directory +- All files in `Core/Views/` directory +- `ContentView.swift` +- `SwiftExampleAppApp.swift` +- `Version.swift` + +### App-Specific State +- `AppState.swift` - Needs refactoring (split SDK manager from UI state) +- `UnifiedAppState.swift` - App-specific coordination + +### SwiftData Persistence (App-Specific) +- All files in `Models/SwiftData/` - These are tied to iOS app persistence +- `Services/DataManager.swift` - SwiftData operations +- `Core/Utils/ModelContainerHelper.swift` + +### App-Specific Services +- `Core/Services/FilterMatchService.swift` - Filter matching UI logic +- `Core/Models/FilterMatch.swift` - App-specific model +- `Core/Models/CoreTypes.swift` - 
App display types +- `Core/Wallet/WalletStorage.swift` - SwiftData wallet persistence +- `Core/Wallet/WalletViewModel.swift` - UI view model + +### App Configuration +- `Utils/EnvLoader.swift` - Environment loading + +--- + +## Migration Phases + +### Phase 1: Critical SDK Extensions (Highest Impact) + +**Goal**: Move 4,000+ lines of SDK functionality + +1. **Create new SDK directories**: + ``` + Sources/SwiftDashSDK/ + ├── StateTransition/ + │ ├── StateTransitionBuilder.swift + │ ├── IdentityTransitions.swift + │ ├── ContractTransitions.swift + │ ├── DocumentTransitions.swift + │ └── TokenTransitions.swift + ├── Queries/ + │ ├── IdentityQueries.swift + │ ├── ContractQueries.swift + │ ├── DocumentQueries.swift + │ └── DPNSQueries.swift + └── Document.swift + ``` + +2. **Move `StateTransitionExtensions.swift`**: + - Split into logical files by domain + - Keep as SDK extensions + - Update access control (`public`) + - Remove app-specific dependencies + +3. **Move `PlatformQueryExtensions.swift`**: + - Split into logical files by entity type + - Keep as SDK extensions + - Ensure all helper methods are available + +4. **Update SwiftExampleApp**: + - Remove moved files + - Update imports + +**Estimated Effort**: Large - these files have many dependencies + +### Phase 2: Services Migration + +**Goal**: Move reusable services to SDK + +1. **Move `KeychainManager.swift`**: + - Pure utility, no app dependencies + - Add to `Sources/SwiftDashSDK/Security/` + - Make public + +2. **Move `DashPayService.swift`**: + - Already uses SDK PlatformWallet types + - Add to `Sources/SwiftDashSDK/PlatformWallet/` + - Extract UI-specific parts (`DashPayContact`, `DashPayContactRequest`) - keep in app + +3. **Move `WalletService.swift`** (if not duplicating): + - Review overlap with existing SDK wallet code + - May need significant refactoring + +**Estimated Effort**: Medium + +### Phase 3: Models and Types + +**Goal**: Consolidate domain models + +1. 
**Review DPP types for duplication**: + - Compare `Models/DPP/*` with SDK's `Identity.swift`, `DataContract.swift` + - Consolidate into single source of truth + +2. **Move core models**: + - `UTXO.swift` → SDK + - `Balance.swift` → SDK + - `Transaction.swift` (display model) → SDK or keep in app + - `DataContractParser.swift` → SDK + +3. **Move configuration**: + - `Network.swift` → SDK + - `TestnetNodes.swift` → SDK config + +**Estimated Effort**: Medium - requires careful deduplication + +### Phase 4: Wallet Code Cleanup + +**Goal**: Eliminate duplication + +1. **Audit `Core/Wallet/` vs `KeyWallet/`**: + - `HDWallet.swift` - likely duplicates `KeyWallet.swift` + - `WalletManager.swift` - likely duplicates `KeyWallet/WalletManager.swift` + - `HDTransaction.swift` - likely duplicates `KeyWallet/Transaction.swift` + +2. **Consolidate or remove duplicates** + +3. **Update app to use SDK wallet code** + +**Estimated Effort**: Medium - requires understanding both implementations + +### Phase 5: AppState Refactoring + +**Goal**: Separate SDK manager from UI state + +1. **Create `SDKManager` in SDK**: + - SDK lifecycle (init, network switch) + - Contract loading + - Status monitoring + - Move from `AppState.swift` + +2. 
**Reduce `AppState.swift` to UI concerns**: + - View navigation state + - Selected items + - UI-specific preferences + - Use `SDKManager` for SDK operations + +**Estimated Effort**: Large - `AppState.swift` is 25,000+ bytes with many dependencies + +--- + +## Proposed SDK Structure After Migration + +``` +Sources/SwiftDashSDK/ +├── SDK.swift # Core SDK class +├── SwiftDashSDK.swift # Re-exports +├── SDKManager.swift # NEW: High-level SDK lifecycle +├── Network.swift # NEW: Network enum +│ +├── Identity/ # Consolidated identity +│ ├── Identity.swift +│ ├── IdentityTypes.swift +│ └── IdentityQueries.swift # NEW: From PlatformQueryExtensions +│ +├── DataContract/ # Consolidated contracts +│ ├── DataContract.swift +│ ├── ContractQueries.swift # NEW: From PlatformQueryExtensions +│ └── Parser.swift # NEW: From DataContractParser +│ +├── Document/ # NEW: Document module +│ ├── Document.swift +│ └── DocumentQueries.swift # NEW: From PlatformQueryExtensions +│ +├── StateTransition/ # NEW: State transitions +│ ├── Builder.swift # From StateTransitionExtensions +│ ├── IdentityTransitions.swift +│ ├── ContractTransitions.swift +│ ├── DocumentTransitions.swift +│ └── TokenTransitions.swift +│ +├── DPNS/ # NEW: DPNS module +│ └── DPNSQueries.swift # From PlatformQueryExtensions +│ +├── KeyWallet/ # Existing - keep +│ └── ... +│ +├── PlatformWallet/ # Existing - expand +│ ├── ... 
+│ └── DashPayService.swift # NEW: From Services +│ +├── SPV/ # Existing - expand +│ ├── SPVClient.swift +│ ├── WalletService.swift # NEW: Core service logic +│ └── UTXO.swift # NEW: From Core/Models +│ +├── Tx/ # Existing - expand +│ ├── TransactionBuilder.swift +│ ├── TransactionTypes.swift +│ ├── TransactionService.swift # NEW: From Core/Wallet +│ └── TransactionErrors.swift # NEW: From Core/Wallet +│ +├── Wallet/ # NEW: Common wallet types +│ ├── Balance.swift # From Core/Models +│ └── WalletModels.swift # From Core/Models +│ +├── Security/ # NEW: Security utilities +│ └── KeychainManager.swift # From Services +│ +├── Config/ # NEW: Configuration +│ └── TestnetNodes.swift # From Models +│ +├── Utils/ # Existing - expand +│ ├── KeyValidation.swift +│ ├── WIFParser.swift # NEW: From Helpers +│ └── TestKeyGenerator.swift # NEW: From Utils +│ +└── Testing/ # NEW: Test utilities + └── TestSigner.swift # From SDK +``` + +--- + +## Risk Assessment + +### High Risk +- **StateTransitionExtensions.swift**: 2,782 lines, many FFI calls, complex dependencies +- **AppState.swift**: 25,000+ bytes, central to the app, many consumers +- **Wallet code duplication**: Two parallel implementations may have subtle differences + +### Medium Risk +- **PlatformQueryExtensions.swift**: 1,370 lines, cleaner structure +- **Services**: Well-encapsulated, fewer dependencies +- **DPP types**: May have evolved differently in app vs SDK + +### Low Risk +- **Utility files**: Self-contained, minimal dependencies +- **Configuration files**: Simple data structures +- **Helper functions**: Isolated functionality + +--- + +## Recommended Order of Execution + +1. **KeychainManager.swift** - Low risk, self-contained +2. **Network.swift** & **TestnetNodes.swift** - Simple data +3. **UTXO.swift** & **Balance.swift** - Simple models +4. **DataContractParser.swift** - Useful for SDK +5. **DashPayService.swift** - Already uses SDK types +6. **PlatformQueryExtensions.swift** - Split into modules +7. 
**StateTransitionExtensions.swift** - Split into modules +8. **WalletService.swift** - After audit for duplication +9. **Wallet code cleanup** - Eliminate duplicates +10. **AppState refactoring** - Create SDKManager + +--- + +## Validation Steps + +After each phase: +1. Build SwiftDashSDK library +2. Build SwiftExampleApp +3. Run app on simulator +4. Test affected functionality +5. Run any existing tests + +--- + +## Files Summary + +### To Move: ~40 files, ~8,000+ LOC +### To Stay: ~50 files (UI, SwiftData, app-specific) +### To Review for Duplication: ~10 files + +--- + +## Notes + +- All moved code needs `public` access modifiers for external use +- Consider backwards compatibility - existing app code should work after migration +- Some files may need splitting (e.g., StateTransitionExtensions by domain) +- DPP types need careful review to avoid breaking existing serialization diff --git a/packages/wasm-dpp/Cargo.toml b/packages/wasm-dpp/Cargo.toml index c9ca419abbd..c150ebd588d 100644 --- a/packages/wasm-dpp/Cargo.toml +++ b/packages/wasm-dpp/Cargo.toml @@ -22,7 +22,7 @@ wasm-bindgen = { version = "=0.2.108" } js-sys = "0.3.64" web-sys = { version = "0.3.64", features = ["console"] } thiserror = { version = "2.0.17" } -serde-wasm-bindgen = { git = "https://github.com/QuantumExplorer/serde-wasm-bindgen", branch = "feat/not_human_readable" } +serde-wasm-bindgen = { git = "https://github.com/dashpay/serde-wasm-bindgen", rev = "0d3e1a8ff058b400bab3a8ececd2fb9581e8c287" } dpp = { path = "../rs-dpp", default-features = false, features = [ "json-conversion", "state-transitions", diff --git a/packages/wasm-dpp/src/errors/consensus/consensus_error.rs b/packages/wasm-dpp/src/errors/consensus/consensus_error.rs index d32586c219f..8d0bcc63a37 100644 --- a/packages/wasm-dpp/src/errors/consensus/consensus_error.rs +++ b/packages/wasm-dpp/src/errors/consensus/consensus_error.rs @@ -88,12 +88,8 @@ use dpp::consensus::state::prefunded_specialized_balances::prefunded_specialized 
use dpp::consensus::state::prefunded_specialized_balances::prefunded_specialized_balance_not_found_error::PrefundedSpecializedBalanceNotFoundError; use dpp::consensus::state::token::{IdentityDoesNotHaveEnoughTokenBalanceError, IdentityTokenAccountNotFrozenError, IdentityTokenAccountFrozenError, TokenIsPausedError, IdentityTokenAccountAlreadyFrozenError, UnauthorizedTokenActionError, TokenSettingMaxSupplyToLessThanCurrentSupplyError, TokenMintPastMaxSupplyError, NewTokensDestinationIdentityDoesNotExistError, NewAuthorizedActionTakerIdentityDoesNotExistError, NewAuthorizedActionTakerGroupDoesNotExistError, NewAuthorizedActionTakerMainGroupNotSetError, InvalidGroupPositionError, TokenAlreadyPausedError, TokenNotPausedError, InvalidTokenClaimPropertyMismatch, InvalidTokenClaimNoCurrentRewards, InvalidTokenClaimWrongClaimant, TokenTransferRecipientIdentityNotExistError, PreProgrammedDistributionTimestampInPastError, IdentityHasNotAgreedToPayRequiredTokenAmountError, RequiredTokenPaymentInfoNotSetError, IdentityTryingToPayWithWrongTokenError, TokenDirectPurchaseUserPriceTooLow, TokenAmountUnderMinimumSaleAmount, TokenNotForDirectSale, InvalidTokenPositionStateError}; use dpp::consensus::state::address_funds::{AddressDoesNotExistError, AddressInvalidNonceError, AddressNotEnoughFundsError, AddressesNotEnoughFundsError}; -use dpp::consensus::state::shielded::insufficient_pool_notes_error::InsufficientPoolNotesError; -use dpp::consensus::state::shielded::insufficient_shielded_fee_error::InsufficientShieldedFeeError; -use dpp::consensus::state::shielded::invalid_anchor_error::InvalidAnchorError; -use dpp::consensus::state::shielded::invalid_shielded_proof_error::InvalidShieldedProofError; -use dpp::consensus::state::shielded::nullifier_already_spent_error::NullifierAlreadySpentError; -use dpp::consensus::basic::state_transition::{StateTransitionNotActiveError, TransitionOverMaxInputsError, TransitionOverMaxOutputsError, InputWitnessCountMismatchError, TransitionNoInputsError, 
TransitionNoOutputsError, FeeStrategyEmptyError, FeeStrategyDuplicateError, FeeStrategyIndexOutOfBoundsError, FeeStrategyTooManyStepsError, InputBelowMinimumError, OutputBelowMinimumError, InputOutputBalanceMismatchError, OutputsNotGreaterThanInputsError, WithdrawalBalanceMismatchError, InsufficientFundingAmountError, InputsNotLessThanOutputsError, OutputAddressAlsoInputError, InvalidRemainderOutputCountError, WithdrawalBelowMinAmountError, ShieldedNoActionsError, ShieldedTooManyActionsError, ShieldedEmptyProofError, ShieldedZeroAnchorError, ShieldedInvalidValueBalanceError}; +use dpp::consensus::state::shielded::{InsufficientPoolNotesError, InsufficientShieldedFeeError, InvalidAnchorError, InvalidShieldedProofError, NullifierAlreadySpentError}; +use dpp::consensus::basic::state_transition::{StateTransitionNotActiveError, TransitionOverMaxInputsError, TransitionOverMaxOutputsError, InputWitnessCountMismatchError, TransitionNoInputsError, TransitionNoOutputsError, FeeStrategyEmptyError, FeeStrategyDuplicateError, FeeStrategyIndexOutOfBoundsError, FeeStrategyTooManyStepsError, InputBelowMinimumError, OutputBelowMinimumError, InputOutputBalanceMismatchError, OutputsNotGreaterThanInputsError, WithdrawalBalanceMismatchError, InsufficientFundingAmountError, InputsNotLessThanOutputsError, OutputAddressAlsoInputError, InvalidRemainderOutputCountError, WithdrawalBelowMinAmountError, ShieldedNoActionsError, ShieldedTooManyActionsError, ShieldedEmptyProofError, ShieldedZeroAnchorError, ShieldedInvalidValueBalanceError, UnshieldAmountZeroError, UnshieldValueBalanceBelowAmountError}; use dpp::consensus::state::voting::masternode_incorrect_voter_identity_id_error::MasternodeIncorrectVoterIdentityIdError; use dpp::consensus::state::voting::masternode_incorrect_voting_address_error::MasternodeIncorrectVotingAddressError; use dpp::consensus::state::voting::masternode_not_found_error::MasternodeNotFoundError; @@ -956,6 +952,12 @@ fn from_basic_error(basic_error: &BasicError) -> 
JsValue { BasicError::ShieldedInvalidValueBalanceError(e) => { generic_consensus_error!(ShieldedInvalidValueBalanceError, e).into() } + BasicError::UnshieldAmountZeroError(e) => { + generic_consensus_error!(UnshieldAmountZeroError, e).into() + } + BasicError::UnshieldValueBalanceBelowAmountError(e) => { + generic_consensus_error!(UnshieldValueBalanceBelowAmountError, e).into() + } } } diff --git a/packages/wasm-dpp/src/identity/state_transition/transition_types.rs b/packages/wasm-dpp/src/identity/state_transition/transition_types.rs index 29d2274dffe..0e30aacee89 100644 --- a/packages/wasm-dpp/src/identity/state_transition/transition_types.rs +++ b/packages/wasm-dpp/src/identity/state_transition/transition_types.rs @@ -19,6 +19,11 @@ pub enum StateTransitionTypeWasm { AddressFundsTransfer = 12, AddressFundingFromAssetLock = 13, AddressCreditWithdrawal = 14, + Shield = 15, + ShieldedTransfer = 16, + Unshield = 17, + ShieldFromAssetLock = 18, + ShieldedWithdrawal = 19, } impl From for StateTransitionTypeWasm { @@ -55,7 +60,13 @@ impl From for StateTransitionTypeWasm { StateTransitionType::AddressCreditWithdrawal => { StateTransitionTypeWasm::AddressCreditWithdrawal } - _ => todo!("shielded state transition types not yet implemented in wasm"), + StateTransitionType::Shield => StateTransitionTypeWasm::Shield, + StateTransitionType::ShieldedTransfer => StateTransitionTypeWasm::ShieldedTransfer, + StateTransitionType::Unshield => StateTransitionTypeWasm::Unshield, + StateTransitionType::ShieldFromAssetLock => { + StateTransitionTypeWasm::ShieldFromAssetLock + } + StateTransitionType::ShieldedWithdrawal => StateTransitionTypeWasm::ShieldedWithdrawal, } } } diff --git a/packages/wasm-dpp/src/lib.rs b/packages/wasm-dpp/src/lib.rs index 87fd30c55bd..40e275dc6c2 100644 --- a/packages/wasm-dpp/src/lib.rs +++ b/packages/wasm-dpp/src/lib.rs @@ -19,6 +19,8 @@ pub mod identifier; pub mod identity; mod metadata; // mod state_repository; +/// Shielded state transitions +pub mod 
shielded; /// State transitions pub mod state_transition; // mod version; diff --git a/packages/wasm-dpp/src/shielded/mod.rs b/packages/wasm-dpp/src/shielded/mod.rs new file mode 100644 index 00000000000..e04539129ce --- /dev/null +++ b/packages/wasm-dpp/src/shielded/mod.rs @@ -0,0 +1,42 @@ +use wasm_bindgen::prelude::*; + +use crate::buffer::Buffer; + +pub mod shield_from_asset_lock_transition; +pub mod shield_transition; +pub mod shielded_transfer_transition; +pub mod shielded_withdrawal_transition; +pub mod unshield_transition; + +pub use shield_from_asset_lock_transition::ShieldFromAssetLockTransitionWasm; +pub use shield_transition::ShieldTransitionWasm; +pub use shielded_transfer_transition::ShieldedTransferTransitionWasm; +pub use shielded_withdrawal_transition::ShieldedWithdrawalTransitionWasm; +pub use unshield_transition::UnshieldTransitionWasm; + +/// Compute the platform sighash from an Orchard bundle commitment and extra data. +/// +/// `sighash = SHA-256("DashPlatformSighash" || bundleCommitment || extraData)` +/// +/// - For shield and shielded_transfer transitions, `extraData` should be empty. +/// - For unshield transitions, `extraData` = `outputAddress || amount (LE u64)`. +/// - For shielded withdrawal transitions, `extraData` = `outputScript || amount (LE u64)`. 
+/// +/// @param {Buffer} bundleCommitment - 32-byte Orchard bundle commitment (BLAKE2b-256 per ZIP-244) +/// @param {Buffer} extraData - Transparent field binding (empty for shielded-only transitions) +/// @returns {Buffer} 32-byte SHA-256 sighash +#[wasm_bindgen(js_name = computePlatformSighash)] +pub fn compute_platform_sighash_wasm( + bundle_commitment: &[u8], + extra_data: &[u8], +) -> Result { + if bundle_commitment.len() != 32 { + return Err(JsValue::from_str(&format!( + "bundleCommitment must be exactly 32 bytes, got {}", + bundle_commitment.len() + ))); + } + let commitment: &[u8; 32] = bundle_commitment.try_into().unwrap(); + let result = dpp::shielded::compute_platform_sighash(commitment, extra_data); + Ok(Buffer::from_bytes(&result)) +} diff --git a/packages/wasm-dpp/src/shielded/shield_from_asset_lock_transition.rs b/packages/wasm-dpp/src/shielded/shield_from_asset_lock_transition.rs new file mode 100644 index 00000000000..b32a3657f53 --- /dev/null +++ b/packages/wasm-dpp/src/shielded/shield_from_asset_lock_transition.rs @@ -0,0 +1,156 @@ +use wasm_bindgen::prelude::*; + +use crate::buffer::Buffer; +use crate::utils::WithJsError; + +use dpp::serialization::{PlatformDeserializable, PlatformSerializable}; +use dpp::state_transition::shield_from_asset_lock_transition::ShieldFromAssetLockTransition; +use dpp::state_transition::{StateTransition, StateTransitionLike}; + +#[wasm_bindgen(js_name = ShieldFromAssetLockTransition)] +#[derive(Clone)] +pub struct ShieldFromAssetLockTransitionWasm(ShieldFromAssetLockTransition); + +impl From for ShieldFromAssetLockTransitionWasm { + fn from(v: ShieldFromAssetLockTransition) -> Self { + ShieldFromAssetLockTransitionWasm(v) + } +} + +impl From for ShieldFromAssetLockTransition { + fn from(v: ShieldFromAssetLockTransitionWasm) -> Self { + v.0 + } +} + +#[wasm_bindgen(js_class = ShieldFromAssetLockTransition)] +impl ShieldFromAssetLockTransitionWasm { + #[wasm_bindgen(js_name = getType)] + pub fn get_type(&self) -> u8 { 
+ self.0.state_transition_type() as u8 + } + + /// Returns the asset lock proof as a JS value. + #[wasm_bindgen(js_name = getAssetLockProof)] + pub fn get_asset_lock_proof(&self) -> Result { + let proof = match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => &v0.asset_lock_proof, + }; + serde_wasm_bindgen::to_value(proof).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the serialized Orchard actions as a JS array. + #[wasm_bindgen(js_name = getActions)] + pub fn get_actions(&self) -> Result { + let inner = match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => &v0.actions, + }; + serde_wasm_bindgen::to_value(inner).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the net value balance. + #[wasm_bindgen(js_name = getValueBalance)] + pub fn get_value_balance(&self) -> u64 { + match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => v0.value_balance, + } + } + + /// Returns the anchor (32-byte Merkle root) as a Buffer. + #[wasm_bindgen(js_name = getAnchor)] + pub fn get_anchor(&self) -> Buffer { + let anchor = match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => &v0.anchor, + }; + Buffer::from_bytes(anchor) + } + + /// Returns the Halo2 proof bytes as a Buffer. + #[wasm_bindgen(js_name = getProof)] + pub fn get_proof(&self) -> Buffer { + let proof = match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => &v0.proof, + }; + Buffer::from_bytes(proof) + } + + /// Returns the RedPallas binding signature (64 bytes) as a Buffer. + #[wasm_bindgen(js_name = getBindingSignature)] + pub fn get_binding_signature(&self) -> Buffer { + let sig = match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => &v0.binding_signature, + }; + Buffer::from_bytes(sig) + } + + /// Returns the ECDSA signature as a Buffer. 
+ #[wasm_bindgen(js_name = getSignature)] + pub fn get_signature(&self) -> Buffer { + let sig = match &self.0 { + ShieldFromAssetLockTransition::V0(v0) => &v0.signature, + }; + Buffer::from_bytes(sig.as_slice()) + } + + #[wasm_bindgen(js_name = toObject)] + pub fn to_object(&self) -> Result { + serde_wasm_bindgen::to_value(&self.0).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = toBuffer)] + pub fn to_buffer(&self) -> Result { + let bytes = PlatformSerializable::serialize_to_bytes( + &StateTransition::ShieldFromAssetLock(self.0.clone()), + ) + .with_js_error()?; + Ok(Buffer::from_bytes(&bytes)) + } + + #[wasm_bindgen(js_name = fromBuffer)] + pub fn from_buffer(buffer: Vec) -> Result { + let state_transition: StateTransition = + PlatformDeserializable::deserialize_from_bytes(&buffer).with_js_error()?; + match state_transition { + StateTransition::ShieldFromAssetLock(st) => Ok(st.into()), + _ => Err(JsValue::from_str("Invalid state transition type")), + } + } + + #[wasm_bindgen(js_name = toJSON)] + pub fn to_json(&self) -> Result { + let json = serde_json::to_value(&self.0).map_err(|e| JsValue::from(e.to_string()))?; + serde_wasm_bindgen::to_value(&json).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = getModifiedDataIds)] + pub fn modified_data_ids(&self) -> Vec { + self.0 + .modified_data_ids() + .into_iter() + .map(|id| { + let wrapper = crate::identifier::IdentifierWrapper::from(id); + wrapper.into() + }) + .collect() + } + + #[wasm_bindgen(js_name = isDataContractStateTransition)] + pub fn is_data_contract_state_transition(&self) -> bool { + self.0.is_data_contract_state_transition() + } + + #[wasm_bindgen(js_name = isDocumentStateTransition)] + pub fn is_document_state_transition(&self) -> bool { + self.0.is_document_state_transition() + } + + #[wasm_bindgen(js_name = isIdentityStateTransition)] + pub fn is_identity_state_transition(&self) -> bool { + self.0.is_identity_state_transition() + } + + 
#[wasm_bindgen(js_name = isVotingStateTransition)] + pub fn is_voting_state_transition(&self) -> bool { + self.0.is_voting_state_transition() + } +} diff --git a/packages/wasm-dpp/src/shielded/shield_transition.rs b/packages/wasm-dpp/src/shielded/shield_transition.rs new file mode 100644 index 00000000000..ae392123c64 --- /dev/null +++ b/packages/wasm-dpp/src/shielded/shield_transition.rs @@ -0,0 +1,173 @@ +use wasm_bindgen::prelude::*; + +use crate::buffer::Buffer; +use crate::utils::WithJsError; + +use dpp::serialization::{PlatformDeserializable, PlatformSerializable}; +use dpp::state_transition::shield_transition::ShieldTransition; +use dpp::state_transition::{StateTransition, StateTransitionLike}; + +#[wasm_bindgen(js_name = ShieldTransition)] +#[derive(Clone)] +pub struct ShieldTransitionWasm(ShieldTransition); + +impl From for ShieldTransitionWasm { + fn from(v: ShieldTransition) -> Self { + ShieldTransitionWasm(v) + } +} + +impl From for ShieldTransition { + fn from(v: ShieldTransitionWasm) -> Self { + v.0 + } +} + +#[wasm_bindgen(js_class = ShieldTransition)] +impl ShieldTransitionWasm { + #[wasm_bindgen(js_name = getType)] + pub fn get_type(&self) -> u8 { + self.0.state_transition_type() as u8 + } + + /// Returns the inputs map as a JS object. + /// Keys are platform address strings, values are [nonce, credits] tuples. + #[wasm_bindgen(js_name = getInputs)] + pub fn get_inputs(&self) -> Result { + let inner = match &self.0 { + ShieldTransition::V0(v0) => &v0.inputs, + }; + serde_wasm_bindgen::to_value(inner).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the serialized Orchard actions as a JS array. + #[wasm_bindgen(js_name = getActions)] + pub fn get_actions(&self) -> Result { + let inner = match &self.0 { + ShieldTransition::V0(v0) => &v0.actions, + }; + serde_wasm_bindgen::to_value(inner).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the shield amount (credits entering the pool). 
+ #[wasm_bindgen(js_name = getAmount)] + pub fn get_amount(&self) -> u64 { + match &self.0 { + ShieldTransition::V0(v0) => v0.amount, + } + } + + /// Returns the anchor (32-byte Merkle root) as a Buffer. + #[wasm_bindgen(js_name = getAnchor)] + pub fn get_anchor(&self) -> Buffer { + let anchor = match &self.0 { + ShieldTransition::V0(v0) => &v0.anchor, + }; + Buffer::from_bytes(anchor) + } + + /// Returns the Halo2 proof bytes as a Buffer. + #[wasm_bindgen(js_name = getProof)] + pub fn get_proof(&self) -> Buffer { + let proof = match &self.0 { + ShieldTransition::V0(v0) => &v0.proof, + }; + Buffer::from_bytes(proof) + } + + /// Returns the RedPallas binding signature (64 bytes) as a Buffer. + #[wasm_bindgen(js_name = getBindingSignature)] + pub fn get_binding_signature(&self) -> Buffer { + let sig = match &self.0 { + ShieldTransition::V0(v0) => &v0.binding_signature, + }; + Buffer::from_bytes(sig) + } + + /// Returns the fee strategy as a JS value. + #[wasm_bindgen(js_name = getFeeStrategy)] + pub fn get_fee_strategy(&self) -> Result { + let strategy = match &self.0 { + ShieldTransition::V0(v0) => &v0.fee_strategy, + }; + serde_wasm_bindgen::to_value(strategy).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the user fee increase multiplier. + #[wasm_bindgen(js_name = getUserFeeIncrease)] + pub fn get_user_fee_increase(&self) -> u16 { + match &self.0 { + ShieldTransition::V0(v0) => v0.user_fee_increase, + } + } + + /// Returns the input witnesses as a JS value. 
+ #[wasm_bindgen(js_name = getInputWitnesses)] + pub fn get_input_witnesses(&self) -> Result { + let witnesses = match &self.0 { + ShieldTransition::V0(v0) => &v0.input_witnesses, + }; + serde_wasm_bindgen::to_value(witnesses).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = toObject)] + pub fn to_object(&self) -> Result { + serde_wasm_bindgen::to_value(&self.0).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = toBuffer)] + pub fn to_buffer(&self) -> Result { + let bytes = + PlatformSerializable::serialize_to_bytes(&StateTransition::Shield(self.0.clone())) + .with_js_error()?; + Ok(Buffer::from_bytes(&bytes)) + } + + #[wasm_bindgen(js_name = fromBuffer)] + pub fn from_buffer(buffer: Vec) -> Result { + let state_transition: StateTransition = + PlatformDeserializable::deserialize_from_bytes(&buffer).with_js_error()?; + match state_transition { + StateTransition::Shield(st) => Ok(st.into()), + _ => Err(JsValue::from_str("Invalid state transition type")), + } + } + + #[wasm_bindgen(js_name = toJSON)] + pub fn to_json(&self) -> Result { + let json = serde_json::to_value(&self.0).map_err(|e| JsValue::from(e.to_string()))?; + serde_wasm_bindgen::to_value(&json).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = getModifiedDataIds)] + pub fn modified_data_ids(&self) -> Vec { + self.0 + .modified_data_ids() + .into_iter() + .map(|id| { + let wrapper = crate::identifier::IdentifierWrapper::from(id); + wrapper.into() + }) + .collect() + } + + #[wasm_bindgen(js_name = isDataContractStateTransition)] + pub fn is_data_contract_state_transition(&self) -> bool { + self.0.is_data_contract_state_transition() + } + + #[wasm_bindgen(js_name = isDocumentStateTransition)] + pub fn is_document_state_transition(&self) -> bool { + self.0.is_document_state_transition() + } + + #[wasm_bindgen(js_name = isIdentityStateTransition)] + pub fn is_identity_state_transition(&self) -> bool { + 
self.0.is_identity_state_transition() + } + + #[wasm_bindgen(js_name = isVotingStateTransition)] + pub fn is_voting_state_transition(&self) -> bool { + self.0.is_voting_state_transition() + } +} diff --git a/packages/wasm-dpp/src/shielded/shielded_transfer_transition.rs b/packages/wasm-dpp/src/shielded/shielded_transfer_transition.rs new file mode 100644 index 00000000000..47988065743 --- /dev/null +++ b/packages/wasm-dpp/src/shielded/shielded_transfer_transition.rs @@ -0,0 +1,144 @@ +use wasm_bindgen::prelude::*; + +use crate::buffer::Buffer; +use crate::utils::WithJsError; + +use dpp::serialization::{PlatformDeserializable, PlatformSerializable}; +use dpp::state_transition::shielded_transfer_transition::ShieldedTransferTransition; +use dpp::state_transition::{StateTransition, StateTransitionLike}; + +#[wasm_bindgen(js_name = ShieldedTransferTransition)] +#[derive(Clone)] +pub struct ShieldedTransferTransitionWasm(ShieldedTransferTransition); + +impl From for ShieldedTransferTransitionWasm { + fn from(v: ShieldedTransferTransition) -> Self { + ShieldedTransferTransitionWasm(v) + } +} + +impl From for ShieldedTransferTransition { + fn from(v: ShieldedTransferTransitionWasm) -> Self { + v.0 + } +} + +#[wasm_bindgen(js_class = ShieldedTransferTransition)] +impl ShieldedTransferTransitionWasm { + #[wasm_bindgen(js_name = getType)] + pub fn get_type(&self) -> u8 { + self.0.state_transition_type() as u8 + } + + /// Returns the serialized Orchard actions as a JS array. + #[wasm_bindgen(js_name = getActions)] + pub fn get_actions(&self) -> Result { + let inner = match &self.0 { + ShieldedTransferTransition::V0(v0) => &v0.actions, + }; + serde_wasm_bindgen::to_value(inner).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the value balance (fee amount leaving the pool). 
+ #[wasm_bindgen(js_name = getValueBalance)] + pub fn get_value_balance(&self) -> u64 { + match &self.0 { + ShieldedTransferTransition::V0(v0) => v0.value_balance, + } + } + + /// Returns the anchor (32-byte Merkle root) as a Buffer. + #[wasm_bindgen(js_name = getAnchor)] + pub fn get_anchor(&self) -> Buffer { + let anchor = match &self.0 { + ShieldedTransferTransition::V0(v0) => &v0.anchor, + }; + Buffer::from_bytes(anchor) + } + + /// Returns the Halo2 proof bytes as a Buffer. + #[wasm_bindgen(js_name = getProof)] + pub fn get_proof(&self) -> Buffer { + let proof = match &self.0 { + ShieldedTransferTransition::V0(v0) => &v0.proof, + }; + Buffer::from_bytes(proof) + } + + /// Returns the RedPallas binding signature (64 bytes) as a Buffer. + #[wasm_bindgen(js_name = getBindingSignature)] + pub fn get_binding_signature(&self) -> Buffer { + let sig = match &self.0 { + ShieldedTransferTransition::V0(v0) => &v0.binding_signature, + }; + Buffer::from_bytes(sig) + } + + /// Always returns 0 — the fee is cryptographically locked by the Orchard binding signature. 
+ #[wasm_bindgen(js_name = getUserFeeIncrease)] + pub fn get_user_fee_increase(&self) -> u16 { + 0 + } + + #[wasm_bindgen(js_name = toObject)] + pub fn to_object(&self) -> Result { + serde_wasm_bindgen::to_value(&self.0).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = toBuffer)] + pub fn to_buffer(&self) -> Result { + let bytes = PlatformSerializable::serialize_to_bytes(&StateTransition::ShieldedTransfer( + self.0.clone(), + )) + .with_js_error()?; + Ok(Buffer::from_bytes(&bytes)) + } + + #[wasm_bindgen(js_name = fromBuffer)] + pub fn from_buffer(buffer: Vec) -> Result { + let state_transition: StateTransition = + PlatformDeserializable::deserialize_from_bytes(&buffer).with_js_error()?; + match state_transition { + StateTransition::ShieldedTransfer(st) => Ok(st.into()), + _ => Err(JsValue::from_str("Invalid state transition type")), + } + } + + #[wasm_bindgen(js_name = toJSON)] + pub fn to_json(&self) -> Result { + let json = serde_json::to_value(&self.0).map_err(|e| JsValue::from(e.to_string()))?; + serde_wasm_bindgen::to_value(&json).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = getModifiedDataIds)] + pub fn modified_data_ids(&self) -> Vec { + self.0 + .modified_data_ids() + .into_iter() + .map(|id| { + let wrapper = crate::identifier::IdentifierWrapper::from(id); + wrapper.into() + }) + .collect() + } + + #[wasm_bindgen(js_name = isDataContractStateTransition)] + pub fn is_data_contract_state_transition(&self) -> bool { + self.0.is_data_contract_state_transition() + } + + #[wasm_bindgen(js_name = isDocumentStateTransition)] + pub fn is_document_state_transition(&self) -> bool { + self.0.is_document_state_transition() + } + + #[wasm_bindgen(js_name = isIdentityStateTransition)] + pub fn is_identity_state_transition(&self) -> bool { + self.0.is_identity_state_transition() + } + + #[wasm_bindgen(js_name = isVotingStateTransition)] + pub fn is_voting_state_transition(&self) -> bool { + 
self.0.is_voting_state_transition() + } +} diff --git a/packages/wasm-dpp/src/shielded/shielded_withdrawal_transition.rs b/packages/wasm-dpp/src/shielded/shielded_withdrawal_transition.rs new file mode 100644 index 00000000000..999a6c81d9f --- /dev/null +++ b/packages/wasm-dpp/src/shielded/shielded_withdrawal_transition.rs @@ -0,0 +1,169 @@ +use wasm_bindgen::prelude::*; + +use crate::buffer::Buffer; +use crate::utils::WithJsError; + +use dpp::serialization::{PlatformDeserializable, PlatformSerializable}; +use dpp::state_transition::shielded_withdrawal_transition::ShieldedWithdrawalTransition; +use dpp::state_transition::{StateTransition, StateTransitionLike}; + +#[wasm_bindgen(js_name = ShieldedWithdrawalTransition)] +#[derive(Clone)] +pub struct ShieldedWithdrawalTransitionWasm(ShieldedWithdrawalTransition); + +impl From for ShieldedWithdrawalTransitionWasm { + fn from(v: ShieldedWithdrawalTransition) -> Self { + ShieldedWithdrawalTransitionWasm(v) + } +} + +impl From for ShieldedWithdrawalTransition { + fn from(v: ShieldedWithdrawalTransitionWasm) -> Self { + v.0 + } +} + +#[wasm_bindgen(js_class = ShieldedWithdrawalTransition)] +impl ShieldedWithdrawalTransitionWasm { + #[wasm_bindgen(js_name = getType)] + pub fn get_type(&self) -> u8 { + self.0.state_transition_type() as u8 + } + + /// Returns the serialized Orchard actions as a JS array. + #[wasm_bindgen(js_name = getActions)] + pub fn get_actions(&self) -> Result { + let inner = match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => &v0.actions, + }; + serde_wasm_bindgen::to_value(inner).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the net value balance. + #[wasm_bindgen(js_name = getUnshieldingAmount)] + pub fn get_unshielding_amount(&self) -> u64 { + match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => v0.unshielding_amount, + } + } + + /// Returns the anchor (32-byte Merkle root) as a Buffer. 
+ #[wasm_bindgen(js_name = getAnchor)] + pub fn get_anchor(&self) -> Buffer { + let anchor = match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => &v0.anchor, + }; + Buffer::from_bytes(anchor) + } + + /// Returns the Halo2 proof bytes as a Buffer. + #[wasm_bindgen(js_name = getProof)] + pub fn get_proof(&self) -> Buffer { + let proof = match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => &v0.proof, + }; + Buffer::from_bytes(proof) + } + + /// Returns the RedPallas binding signature (64 bytes) as a Buffer. + #[wasm_bindgen(js_name = getBindingSignature)] + pub fn get_binding_signature(&self) -> Buffer { + let sig = match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => &v0.binding_signature, + }; + Buffer::from_bytes(sig) + } + + /// Returns the core fee per byte. + #[wasm_bindgen(js_name = getCoreFeePerByte)] + pub fn get_core_fee_per_byte(&self) -> u32 { + match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => v0.core_fee_per_byte, + } + } + + /// Returns the pooling strategy as a u8. + #[wasm_bindgen(js_name = getPooling)] + pub fn get_pooling(&self) -> u8 { + match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => v0.pooling as u8, + } + } + + /// Returns the output script (core address) as a Buffer. + #[wasm_bindgen(js_name = getOutputScript)] + pub fn get_output_script(&self) -> Buffer { + let script = match &self.0 { + ShieldedWithdrawalTransition::V0(v0) => &v0.output_script, + }; + Buffer::from_bytes(script.as_bytes()) + } + + /// Always returns 0 — the fee is cryptographically locked by the Orchard binding signature. 
+ #[wasm_bindgen(js_name = getUserFeeIncrease)] + pub fn get_user_fee_increase(&self) -> u16 { + 0 + } + + #[wasm_bindgen(js_name = toObject)] + pub fn to_object(&self) -> Result { + serde_wasm_bindgen::to_value(&self.0).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = toBuffer)] + pub fn to_buffer(&self) -> Result { + let bytes = PlatformSerializable::serialize_to_bytes(&StateTransition::ShieldedWithdrawal( + self.0.clone(), + )) + .with_js_error()?; + Ok(Buffer::from_bytes(&bytes)) + } + + #[wasm_bindgen(js_name = fromBuffer)] + pub fn from_buffer(buffer: Vec) -> Result { + let state_transition: StateTransition = + PlatformDeserializable::deserialize_from_bytes(&buffer).with_js_error()?; + match state_transition { + StateTransition::ShieldedWithdrawal(st) => Ok(st.into()), + _ => Err(JsValue::from_str("Invalid state transition type")), + } + } + + #[wasm_bindgen(js_name = toJSON)] + pub fn to_json(&self) -> Result { + let json = serde_json::to_value(&self.0).map_err(|e| JsValue::from(e.to_string()))?; + serde_wasm_bindgen::to_value(&json).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = getModifiedDataIds)] + pub fn modified_data_ids(&self) -> Vec { + self.0 + .modified_data_ids() + .into_iter() + .map(|id| { + let wrapper = crate::identifier::IdentifierWrapper::from(id); + wrapper.into() + }) + .collect() + } + + #[wasm_bindgen(js_name = isDataContractStateTransition)] + pub fn is_data_contract_state_transition(&self) -> bool { + self.0.is_data_contract_state_transition() + } + + #[wasm_bindgen(js_name = isDocumentStateTransition)] + pub fn is_document_state_transition(&self) -> bool { + self.0.is_document_state_transition() + } + + #[wasm_bindgen(js_name = isIdentityStateTransition)] + pub fn is_identity_state_transition(&self) -> bool { + self.0.is_identity_state_transition() + } + + #[wasm_bindgen(js_name = isVotingStateTransition)] + pub fn is_voting_state_transition(&self) -> bool { + 
self.0.is_voting_state_transition() + } +} diff --git a/packages/wasm-dpp/src/shielded/unshield_transition.rs b/packages/wasm-dpp/src/shielded/unshield_transition.rs new file mode 100644 index 00000000000..0d12dd720b1 --- /dev/null +++ b/packages/wasm-dpp/src/shielded/unshield_transition.rs @@ -0,0 +1,153 @@ +use wasm_bindgen::prelude::*; + +use crate::buffer::Buffer; +use crate::utils::WithJsError; + +use dpp::serialization::{PlatformDeserializable, PlatformSerializable}; +use dpp::state_transition::unshield_transition::UnshieldTransition; +use dpp::state_transition::{StateTransition, StateTransitionLike}; + +#[wasm_bindgen(js_name = UnshieldTransition)] +#[derive(Clone)] +pub struct UnshieldTransitionWasm(UnshieldTransition); + +impl From for UnshieldTransitionWasm { + fn from(v: UnshieldTransition) -> Self { + UnshieldTransitionWasm(v) + } +} + +impl From for UnshieldTransition { + fn from(v: UnshieldTransitionWasm) -> Self { + v.0 + } +} + +#[wasm_bindgen(js_class = UnshieldTransition)] +impl UnshieldTransitionWasm { + #[wasm_bindgen(js_name = getType)] + pub fn get_type(&self) -> u8 { + self.0.state_transition_type() as u8 + } + + /// Returns the output address as a JS value (serialized PlatformAddress). + #[wasm_bindgen(js_name = getOutputAddress)] + pub fn get_output_address(&self) -> Result { + let addr = match &self.0 { + UnshieldTransition::V0(v0) => &v0.output_address, + }; + serde_wasm_bindgen::to_value(addr).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the serialized Orchard actions as a JS array. + #[wasm_bindgen(js_name = getActions)] + pub fn get_actions(&self) -> Result { + let inner = match &self.0 { + UnshieldTransition::V0(v0) => &v0.actions, + }; + serde_wasm_bindgen::to_value(inner).map_err(|e| JsValue::from(e.to_string())) + } + + /// Returns the net value balance. 
+ #[wasm_bindgen(js_name = getUnshieldingAmount)] + pub fn get_unshielding_amount(&self) -> u64 { + match &self.0 { + UnshieldTransition::V0(v0) => v0.unshielding_amount, + } + } + + /// Returns the anchor (32-byte Merkle root) as a Buffer. + #[wasm_bindgen(js_name = getAnchor)] + pub fn get_anchor(&self) -> Buffer { + let anchor = match &self.0 { + UnshieldTransition::V0(v0) => &v0.anchor, + }; + Buffer::from_bytes(anchor) + } + + /// Returns the Halo2 proof bytes as a Buffer. + #[wasm_bindgen(js_name = getProof)] + pub fn get_proof(&self) -> Buffer { + let proof = match &self.0 { + UnshieldTransition::V0(v0) => &v0.proof, + }; + Buffer::from_bytes(proof) + } + + /// Returns the RedPallas binding signature (64 bytes) as a Buffer. + #[wasm_bindgen(js_name = getBindingSignature)] + pub fn get_binding_signature(&self) -> Buffer { + let sig = match &self.0 { + UnshieldTransition::V0(v0) => &v0.binding_signature, + }; + Buffer::from_bytes(sig) + } + + /// Returns the user fee increase multiplier. + /// Always returns 0 — the fee is cryptographically locked by the Orchard binding signature. 
+ #[wasm_bindgen(js_name = getUserFeeIncrease)] + pub fn get_user_fee_increase(&self) -> u16 { + 0 + } + + #[wasm_bindgen(js_name = toObject)] + pub fn to_object(&self) -> Result { + serde_wasm_bindgen::to_value(&self.0).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = toBuffer)] + pub fn to_buffer(&self) -> Result { + let bytes = + PlatformSerializable::serialize_to_bytes(&StateTransition::Unshield(self.0.clone())) + .with_js_error()?; + Ok(Buffer::from_bytes(&bytes)) + } + + #[wasm_bindgen(js_name = fromBuffer)] + pub fn from_buffer(buffer: Vec) -> Result { + let state_transition: StateTransition = + PlatformDeserializable::deserialize_from_bytes(&buffer).with_js_error()?; + match state_transition { + StateTransition::Unshield(st) => Ok(st.into()), + _ => Err(JsValue::from_str("Invalid state transition type")), + } + } + + #[wasm_bindgen(js_name = toJSON)] + pub fn to_json(&self) -> Result { + let json = serde_json::to_value(&self.0).map_err(|e| JsValue::from(e.to_string()))?; + serde_wasm_bindgen::to_value(&json).map_err(|e| JsValue::from(e.to_string())) + } + + #[wasm_bindgen(js_name = getModifiedDataIds)] + pub fn modified_data_ids(&self) -> Vec { + self.0 + .modified_data_ids() + .into_iter() + .map(|id| { + let wrapper = crate::identifier::IdentifierWrapper::from(id); + wrapper.into() + }) + .collect() + } + + #[wasm_bindgen(js_name = isDataContractStateTransition)] + pub fn is_data_contract_state_transition(&self) -> bool { + self.0.is_data_contract_state_transition() + } + + #[wasm_bindgen(js_name = isDocumentStateTransition)] + pub fn is_document_state_transition(&self) -> bool { + self.0.is_document_state_transition() + } + + #[wasm_bindgen(js_name = isIdentityStateTransition)] + pub fn is_identity_state_transition(&self) -> bool { + self.0.is_identity_state_transition() + } + + #[wasm_bindgen(js_name = isVotingStateTransition)] + pub fn is_voting_state_transition(&self) -> bool { + self.0.is_voting_state_transition() + } +} 
diff --git a/packages/wasm-dpp/src/state_transition/state_transition_factory.rs b/packages/wasm-dpp/src/state_transition/state_transition_factory.rs index 2b929d17a15..fb196c27f1a 100644 --- a/packages/wasm-dpp/src/state_transition/state_transition_factory.rs +++ b/packages/wasm-dpp/src/state_transition/state_transition_factory.rs @@ -6,6 +6,10 @@ use crate::identity::state_transition::{ IdentityCreditWithdrawalTransitionWasm, IdentityTopUpTransitionWasm, IdentityUpdateTransitionWasm, }; +use crate::shielded::{ + ShieldFromAssetLockTransitionWasm, ShieldTransitionWasm, ShieldedTransferTransitionWasm, + ShieldedWithdrawalTransitionWasm, UnshieldTransitionWasm, +}; use crate::state_transition::errors::invalid_state_transition_error::InvalidStateTransitionErrorWasm; use crate::state_transition::errors::state_transition_is_not_active_error::StateTransitionIsNotActiveErrorWasm; use crate::voting::state_transition::masternode_vote_transition::MasternodeVoteTransitionWasm; @@ -79,12 +83,16 @@ impl StateTransitionFactoryWasm { StateTransition::AddressCreditWithdrawal(st) => { serde_wasm_bindgen::to_value(&st).map_err(|e| JsValue::from(e.to_string())) } - StateTransition::Shield(_) - | StateTransition::ShieldedTransfer(_) - | StateTransition::Unshield(_) - | StateTransition::ShieldFromAssetLock(_) - | StateTransition::ShieldedWithdrawal(_) => { - todo!("shielded transitions not yet implemented in state_transition_factory") + StateTransition::Shield(st) => Ok(ShieldTransitionWasm::from(st).into()), + StateTransition::ShieldedTransfer(st) => { + Ok(ShieldedTransferTransitionWasm::from(st).into()) + } + StateTransition::Unshield(st) => Ok(UnshieldTransitionWasm::from(st).into()), + StateTransition::ShieldFromAssetLock(st) => { + Ok(ShieldFromAssetLockTransitionWasm::from(st).into()) + } + StateTransition::ShieldedWithdrawal(st) => { + Ok(ShieldedWithdrawalTransitionWasm::from(st).into()) } }, Err(dpp::ProtocolError::StateTransitionError(e)) => match e { diff --git 
a/packages/wasm-dpp2/Cargo.toml b/packages/wasm-dpp2/Cargo.toml index 5aa1a33f213..54efddded53 100644 --- a/packages/wasm-dpp2/Cargo.toml +++ b/packages/wasm-dpp2/Cargo.toml @@ -22,7 +22,7 @@ dpp = { path = "../rs-dpp", default-features = false, features = [ "json-conversion", "state-transitions", ] } -serde-wasm-bindgen = { git = "https://github.com/dashpay/serde-wasm-bindgen", branch = "fix/uint8array-to-bytes" } +serde-wasm-bindgen = { git = "https://github.com/dashpay/serde-wasm-bindgen", rev = "0d3e1a8ff058b400bab3a8ececd2fb9581e8c287" } serde = { version = "1.0.197", features = ["derive"] } serde_json = { version = "1.0", features = ["preserve_order"] } js-sys = "0.3.77" diff --git a/packages/wasm-dpp2/src/state_transitions/base/state_transition.rs b/packages/wasm-dpp2/src/state_transitions/base/state_transition.rs index 5c7625f979c..0a55f8b1323 100644 --- a/packages/wasm-dpp2/src/state_transitions/base/state_transition.rs +++ b/packages/wasm-dpp2/src/state_transitions/base/state_transition.rs @@ -396,12 +396,12 @@ impl StateTransitionWasm { | IdentityTopUpFromAddresses(_) | AddressFundsTransfer(_) | AddressFundingFromAssetLock(_) - | AddressCreditWithdrawal(_) => None, - Shield(_) + | AddressCreditWithdrawal(_) + | Shield(_) | ShieldedTransfer(_) | Unshield(_) | ShieldFromAssetLock(_) - | ShieldedWithdrawal(_) => todo!("shielded transitions not yet implemented"), + | ShieldedWithdrawal(_) => None, } } @@ -423,12 +423,12 @@ impl StateTransitionWasm { IdentityTopUpFromAddresses(_) => None, AddressFundsTransfer(_) | AddressFundingFromAssetLock(_) - | AddressCreditWithdrawal(_) => None, - Shield(_) + | AddressCreditWithdrawal(_) + | Shield(_) | ShieldedTransfer(_) | Unshield(_) | ShieldFromAssetLock(_) - | ShieldedWithdrawal(_) => todo!("shielded transitions not yet implemented"), + | ShieldedWithdrawal(_) => None, } } @@ -571,7 +571,9 @@ impl StateTransitionWasm { | Unshield(_) | ShieldFromAssetLock(_) | ShieldedWithdrawal(_) => { - todo!("shielded transitions 
not yet implemented") + return Err(WasmDppError::invalid_argument( + "Cannot set owner for shielded transition", + )); } }; @@ -636,17 +638,15 @@ impl StateTransitionWasm { | IdentityTopUpFromAddresses(_) | AddressFundsTransfer(_) | AddressFundingFromAssetLock(_) - | AddressCreditWithdrawal(_) => { - return Err(WasmDppError::invalid_argument( - "Cannot set identity contract nonce for address-related transition types", - )); - } - Shield(_) + | AddressCreditWithdrawal(_) + | Shield(_) | ShieldedTransfer(_) | Unshield(_) | ShieldFromAssetLock(_) | ShieldedWithdrawal(_) => { - todo!("shielded transitions not yet implemented") + return Err(WasmDppError::invalid_argument( + "Cannot set identity contract nonce for this transition type", + )); } }; @@ -731,17 +731,15 @@ impl StateTransitionWasm { } AddressFundsTransfer(_) | AddressFundingFromAssetLock(_) - | AddressCreditWithdrawal(_) => { - return Err(WasmDppError::invalid_argument( - "Cannot set identity nonce for address-related transition types", - )); - } - Shield(_) + | AddressCreditWithdrawal(_) + | Shield(_) | ShieldedTransfer(_) | Unshield(_) | ShieldFromAssetLock(_) | ShieldedWithdrawal(_) => { - todo!("shielded transitions not yet implemented") + return Err(WasmDppError::invalid_argument( + "Cannot set identity nonce for this transition type", + )); } }; diff --git a/packages/wasm-sdk/Cargo.toml b/packages/wasm-sdk/Cargo.toml index 7fe681e9166..b631363d206 100644 --- a/packages/wasm-sdk/Cargo.toml +++ b/packages/wasm-sdk/Cargo.toml @@ -75,7 +75,7 @@ tracing-subscriber = { version = "0.3.22", default-features = false, features = tracing-wasm = { version = "0.2.1" } platform-value = { path = "../rs-platform-value", features = ["json"] } serde = { version = "1.0", features = ["derive"] } -serde-wasm-bindgen = { git = "https://github.com/dashpay/serde-wasm-bindgen", branch = "fix/uint8array-to-bytes" } +serde-wasm-bindgen = { git = "https://github.com/dashpay/serde-wasm-bindgen", rev = 
"0d3e1a8ff058b400bab3a8ececd2fb9581e8c287" } serde_json = "1.0" hex = "0.4" base64 = "0.22" diff --git a/scripts/grovedb_version_switcher.py b/scripts/grovedb_version_switcher.py index f2bf1f7258e..e1347de7e33 100644 --- a/scripts/grovedb_version_switcher.py +++ b/scripts/grovedb_version_switcher.py @@ -30,13 +30,15 @@ GROVEDB_DEPS = { "grovedb": "../../../grovedb/grovedb", - "grovedb-costs": "../../../grovedb/grovedb-costs", - "grovedb-merk": "../../../grovedb/grovedb-merk", - "grovedb-path": "../../../grovedb/grovedb-path", - "grovedb-storage": "../../../grovedb/grovedb-storage", + "grovedb-costs": "../../../grovedb/costs", + "grovedb-merk": "../../../grovedb/merk", + "grovedb-path": "../../../grovedb/path", + "grovedb-storage": "../../../grovedb/storage", "grovedb-version": "../../../grovedb/grovedb-version", - "grovedb-visualize": "../../../grovedb/grovedb-visualize", + "grovedb-visualize": "../../../grovedb/visualize", "grovedb-epoch-based-storage-flags": "../../../grovedb/grovedb-epoch-based-storage-flags", + "grovedb-commitment-tree": "../../../grovedb/grovedb-commitment-tree", + "grovedb-element": "../../../grovedb/grovedb-element", } diff --git a/scripts/setup-ai-agent-environment.sh b/scripts/setup-ai-agent-environment.sh index 069cf901882..aa60764fe6c 100755 --- a/scripts/setup-ai-agent-environment.sh +++ b/scripts/setup-ai-agent-environment.sh @@ -199,7 +199,7 @@ install_protoc() { install_protoc install_cargo_tools() { - local wasm_bindgen_version="0.2.103" + local wasm_bindgen_version="0.2.108" if ! command -v wasm-bindgen >/dev/null 2>&1 || [[ "$(wasm-bindgen --version | awk '{print $2}')" != "${wasm_bindgen_version}" ]]; then log "Installing wasm-bindgen-cli ${wasm_bindgen_version}"