diff --git a/.gitmodules b/.gitmodules index 2a6d948d..b12ebf1b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -22,3 +22,4 @@ [submodule "lib/FlowALP"] path = lib/FlowALP url = git@github.com:onflow/FlowALP.git + branch = v0 diff --git a/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc b/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc index 8748dd8e..c9325872 100644 --- a/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc +++ b/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc @@ -29,6 +29,16 @@ access(all) contract FlowYieldVaultsAutoBalancers { /// The path prefix used for StoragePath & PublicPath derivations access(all) let pathPrefix: String + /// Storage path for the shared execution callback resource that reports to the registry (one per account) + access(self) let registryReportCallbackStoragePath: StoragePath + + /// Callback resource invoked by each AutoBalancer after execution; calls Registry.reportExecution with its id + access(all) resource RegistryReportCallback: DeFiActions.AutoBalancerExecutionCallback { + access(all) fun onExecuted(balancerUUID: UInt64) { + FlowYieldVaultsSchedulerRegistry.reportExecution(yieldVaultID: balancerUUID) + } + } + /* --- PUBLIC METHODS --- */ /// Returns the path (StoragePath or PublicPath) at which an AutoBalancer is stored with the associated @@ -69,7 +79,7 @@ access(all) contract FlowYieldVaultsAutoBalancers { if autoBalancer == nil { return false } - + let txnIDs = autoBalancer!.getScheduledTransactionIDs() for txnID in txnIDs { if autoBalancer!.borrowScheduledTransaction(id: txnID)?.status() == FlowTransactionScheduler.Status.Scheduled { @@ -93,24 +103,24 @@ access(all) contract FlowYieldVaultsAutoBalancers { if autoBalancer == nil { return false } - + // Check if yield vault has recurring config (should be executing periodically) let config = autoBalancer!.getRecurringConfig() if config == nil { return false // Not configured for recurring, can't be "stuck" } - + // Check if there's an active schedule if 
self.hasActiveSchedule(id: id) { return false // Has active schedule, not stuck } - + // Check if yield vault is overdue let nextExpected = autoBalancer!.calculateNextExecutionTimestampAsConfigured() if nextExpected == nil { return true // Can't calculate next time, likely stuck } - + // If next expected time has passed and no active schedule, yield vault is stuck return nextExpected! < getCurrentBlock().timestamp } @@ -150,6 +160,20 @@ access(all) contract FlowYieldVaultsAutoBalancers { assert(!publishedCap, message: "Published Capability collision found when publishing AutoBalancer for UniqueIdentifier.id \(uniqueID.id) at path \(publicPath)") + let registryReportCallbackCapabilityStoragePath = + StoragePath(identifier: "FlowYieldVaultsRegistryReportCallbackCapability")! + if self.account.storage.type(at: registryReportCallbackCapabilityStoragePath) == nil { + let sharedReportCap = self.account.capabilities.storage.issue<&{DeFiActions.AutoBalancerExecutionCallback}>( + self.registryReportCallbackStoragePath + ) + self.account.storage.save(sharedReportCap, to: registryReportCallbackCapabilityStoragePath) + } + let reportCap = self.account.storage.copy<Capability<&{DeFiActions.AutoBalancerExecutionCallback}>>( + from: registryReportCallbackCapabilityStoragePath + ) ?? panic( + "Missing shared registry report callback capability at \(registryReportCallbackCapabilityStoragePath)" + ) + // create & save AutoBalancer with optional recurring config let autoBalancer <- DeFiActions.createAutoBalancer( oracle: oracle, @@ -161,6 +185,7 @@ access(all) contract FlowYieldVaultsAutoBalancers { recurringConfig: recurringConfig, uniqueID: uniqueID ) + autoBalancer.setExecutionCallback(reportCap) self.account.storage.save(<-autoBalancer, to: storagePath) let autoBalancerRef = self._borrowAutoBalancer(uniqueID.id) @@ -224,7 +249,7 @@ access(all) contract FlowYieldVaultsAutoBalancers { let publicPath = self.deriveAutoBalancerPath(id: id, storage: false) as! 
PublicPath // unpublish the public AutoBalancer Capability let _ = self.account.capabilities.unpublish(publicPath) - + // Collect controller IDs first (can't modify during iteration) var controllersToDelete: [UInt64] = [] self.account.capabilities.storage.forEachController(forPath: storagePath, fun(_ controller: &StorageCapabilityController): Bool { @@ -237,13 +262,23 @@ access(all) contract FlowYieldVaultsAutoBalancers { controller.delete() } } - + // load & burn the AutoBalancer (this also handles any pending scheduled transactions via burnCallback) let autoBalancer <-self.account.storage.load<@DeFiActions.AutoBalancer>(from: storagePath) Burner.burn(<-autoBalancer) } + access(self) fun createRegistryReportCallbackImpl(): @RegistryReportCallback { + return <-create RegistryReportCallback() + } + init() { self.pathPrefix = "FlowYieldVaultsAutoBalancer_" + self.registryReportCallbackStoragePath = StoragePath(identifier: "FlowYieldVaultsRegistryReportCallback")! + + // Ensure shared execution callback exists (reports this account's executions to Registry) + if self.account.storage.type(at: self.registryReportCallbackStoragePath) == nil { + self.account.storage.save(<-self.createRegistryReportCallbackImpl(), to: self.registryReportCallbackStoragePath) + } } } diff --git a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc index 645d7e0a..a8bdbe97 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc @@ -13,6 +13,27 @@ import "DeFiActions" /// access(all) contract FlowYieldVaultsSchedulerRegistry { + /* --- TYPES --- */ + + /// Node in the simulated doubly-linked list used for O(1) stuck-scan ordering. + /// `prev` points toward the head (most recently executed); `next` points toward the tail (oldest/least recently executed). + access(all) struct ListNode { + access(all) var prev: UInt64? + access(all) var next: UInt64? 
+ init(prev: UInt64?, next: UInt64?) { + self.prev = prev + self.next = next + } + + access(all) fun setPrev(prev: UInt64?) { + self.prev = prev + } + + access(all) fun setNext(next: UInt64?) { + self.next = next + } + } + /* --- EVENTS --- */ /// Emitted when a yield vault is registered with its handler capability @@ -58,6 +79,60 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { /// Stored as a dictionary for O(1) add/remove; iteration gives the pending set access(self) var pendingQueue: {UInt64: Bool} + /// Simulated doubly-linked list for O(1) stuck-scan ordering. + /// listHead = most recently executed vault ID (or nil if empty). + /// listTail = least recently executed vault ID — getStuckScanCandidates walks from here. + /// On reportExecution a vault is snipped from its current position and moved to head in O(1). + access(self) var listNodes: {UInt64: ListNode} + access(self) var listHead: UInt64? + access(self) var listTail: UInt64? + + /* --- PRIVATE LIST HELPERS --- */ + + /// Insert `id` at the head of the list (most-recently-executed end). + /// Caller must ensure `id` is not already in the list. + access(self) fun _listInsertAtHead(id: UInt64) { + let node = ListNode(prev: nil, next: self.listHead) + if let oldHeadID = self.listHead { + var oldHead = self.listNodes[oldHeadID]! + oldHead.setPrev(prev: id) + self.listNodes[oldHeadID] = oldHead + } else { + // List was empty — id is also the tail + self.listTail = id + } + self.listNodes[id] = node + self.listHead = id + } + + /// Remove `id` from wherever it sits in the list in O(1). + /// Returns false if the id is not currently linked. + access(self) fun _listRemove(id: UInt64): Bool { + let node = self.listNodes.remove(key: id) + if node == nil { + return false + } + + if let prevID = node!.prev { + var prevNode = self.listNodes[prevID]! 
+ prevNode.setNext(next: node!.next) + self.listNodes[prevID] = prevNode + } else { + // id was the head + self.listHead = node!.next + } + + if let nextID = node!.next { + var nextNode = self.listNodes[nextID]! + nextNode.setPrev(prev: node!.prev) + self.listNodes[nextID] = nextNode + } else { + // id was the tail + self.listTail = node!.prev + } + return true + } + /* --- ACCOUNT-LEVEL FUNCTIONS --- */ /// Register a YieldVault and store its handler and schedule capabilities (idempotent) @@ -73,9 +148,26 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.yieldVaultRegistry[yieldVaultID] = true self.handlerCaps[yieldVaultID] = handlerCap self.scheduleCaps[yieldVaultID] = scheduleCap + // New vaults go to the head; they haven't executed yet but are freshly registered. + // If already in the list (idempotent re-register), remove first to avoid duplicates. + if self.listNodes[yieldVaultID] != nil { + self._listRemove(id: yieldVaultID) + } + self._listInsertAtHead(id: yieldVaultID) emit YieldVaultRegistered(yieldVaultID: yieldVaultID) } + /// Called on every execution. Moves yieldVaultID to the head (most recently executed) + /// so the Supervisor scans from the tail (least recently executed) for stuck detection — O(1). + /// If the list entry is unexpectedly missing, reinsert it to restore the ordering structure. + access(account) fun reportExecution(yieldVaultID: UInt64) { + if !(self.yieldVaultRegistry[yieldVaultID] ?? 
false) { + return + } + let _ = self._listRemove(id: yieldVaultID) + self._listInsertAtHead(id: yieldVaultID) + } + /// Adds a yield vault to the pending queue for seeding by the Supervisor access(account) fun enqueuePending(yieldVaultID: UInt64) { if self.yieldVaultRegistry[yieldVaultID] == true { @@ -92,12 +184,13 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { } } - /// Unregister a YieldVault (idempotent) - removes from registry, capabilities, and pending queue + /// Unregister a YieldVault (idempotent) - removes from registry, capabilities, pending queue, and linked list access(account) fun unregister(yieldVaultID: UInt64) { - self.yieldVaultRegistry.remove(key: yieldVaultID) - self.handlerCaps.remove(key: yieldVaultID) - self.scheduleCaps.remove(key: yieldVaultID) + let _r = self.yieldVaultRegistry.remove(key: yieldVaultID) + let _h = self.handlerCaps.remove(key: yieldVaultID) + let _s = self.scheduleCaps.remove(key: yieldVaultID) let pending = self.pendingQueue.remove(key: yieldVaultID) + let _ = self._listRemove(id: yieldVaultID) emit YieldVaultUnregistered(yieldVaultID: yieldVaultID, wasInPendingQueue: pending != nil) } @@ -155,20 +248,20 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { /// Get paginated pending yield vault IDs /// @param page: The page number (0-indexed) - /// @param size: The page size (defaults to MAX_BATCH_SIZE if nil) - access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: Int?): [UInt64] { - let pageSize = size ?? self.MAX_BATCH_SIZE + /// @param size: The page size (defaults to MAX_BATCH_SIZE if 0) + access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt): [UInt64] { + let pageSize = size == 0 ? self.MAX_BATCH_SIZE : Int(size) let allPending = self.pendingQueue.keys let startIndex = page * pageSize - + if startIndex >= allPending.length { return [] } - - let endIndex = startIndex + pageSize > allPending.length - ? 
allPending.length + + let endIndex = startIndex + pageSize > allPending.length + ? allPending.length : startIndex + pageSize - + return allPending.slice(from: startIndex, upTo: endIndex) } @@ -177,6 +270,25 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { return self.pendingQueue.length } + /// Returns up to `limit` vault IDs starting from the tail (least recently executed). + /// Supervisor should only scan these for stuck detection instead of all registered vaults. + /// @param limit: Maximum number of IDs to return (caller typically passes MAX_BATCH_SIZE) + access(all) fun getStuckScanCandidates(limit: UInt): [UInt64] { + var result: [UInt64] = [] + var current = self.listTail + var count: UInt = 0 + while count < limit { + if let id = current { + result.append(id) + current = self.listNodes[id]!.prev + count = count + 1 + } else { + break + } + } + return result + } + /// Get global Supervisor capability, if set /// NOTE: Access restricted - only used internally by the scheduler access(account) @@ -193,7 +305,8 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.handlerCaps = {} self.scheduleCaps = {} self.pendingQueue = {} + self.listNodes = {} + self.listHead = nil + self.listTail = nil } } - - diff --git a/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc b/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc index be81a875..27aceb70 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc @@ -172,7 +172,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 { /// "priority": UInt8 (0=High,1=Medium,2=Low) - for Supervisor self-rescheduling /// "executionEffort": UInt64 - for Supervisor self-rescheduling /// "recurringInterval": UFix64 (for Supervisor self-rescheduling) - /// "scanForStuck": Bool (default true - scan all registered yield vaults for stuck ones) + /// "scanForStuck": Bool (default true - scan up to MAX_BATCH_SIZE least-recently-executed vaults for stuck ones) /// }
access(FlowTransactionScheduler.Execute) fun executeTransaction(id: UInt64, data: AnyStruct?) { let cfg = data as? {String: AnyStruct} ?? {} @@ -186,24 +186,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 { // STEP 1: State-based detection - scan for stuck yield vaults if scanForStuck { - // TODO: add pagination - this will inevitably fails and at minimum creates inconsistent execution - // effort between runs - let registeredYieldVaults = FlowYieldVaultsSchedulerRegistry.getRegisteredYieldVaultIDs() - var scanned = 0 - for yieldVaultID in registeredYieldVaults { - if scanned >= FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE { - break - } - scanned = scanned + 1 - - // Skip if already in pending queue - // TODO: This is extremely inefficient - accessing from mapping is preferrable to iterating over - // an array - if FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDs().contains(yieldVaultID) { - continue - } - - // Check if yield vault is stuck (has recurring config, no active schedule, overdue) + let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates(limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)) + for yieldVaultID in candidates { if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) { FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID) emit StuckYieldVaultDetected(yieldVaultID: yieldVaultID) @@ -212,8 +196,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 { } // STEP 2: Process pending yield vaults - recover them via Schedule capability - let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: nil) - + let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)) + for yieldVaultID in pendingYieldVaults { // Get Schedule capability for this yield vault let scheduleCap = FlowYieldVaultsSchedulerRegistry.getScheduleCap(yieldVaultID: 
yieldVaultID) @@ -457,7 +441,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 { // Initialize paths self.SupervisorStoragePath = /storage/FlowYieldVaultsSupervisor - + // Configure Supervisor at deploy time self.ensureSupervisorConfigured() } diff --git a/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc b/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc index 7ee81669..99b6ea34 100644 --- a/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc +++ b/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc @@ -857,7 +857,7 @@ access(all) contract FlowYieldVaultsStrategiesV2 { } access(all) view fun getSupportedComposers(): {Type: Bool} { - return { + return { Type<@MorphoERC4626StrategyComposer>(): true } } @@ -994,7 +994,7 @@ access(all) contract FlowYieldVaultsStrategiesV2 { fun _createRecurringConfig(withID: DeFiActions.UniqueIdentifier?): DeFiActions.AutoBalancerRecurringConfig { // Create txnFunder that can provide/accept FLOW for scheduling fees let txnFunder = self._createTxnFunder(withID: withID) - + return DeFiActions.AutoBalancerRecurringConfig( interval: 60 * 10, // Rebalance every 10 minutes priority: FlowTransactionScheduler.Priority.Medium, diff --git a/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc b/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc index d9be5b50..c84abc43 100644 --- a/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc +++ b/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc @@ -3,8 +3,7 @@ import "FlowYieldVaultsSchedulerRegistry" /// Returns a paginated list of yield vault IDs in the pending queue. /// @param page: The page number (0-indexed) /// @param size: The number of yield vaults per page (defaults to MAX_BATCH_SIZE if 0) -access(all) fun main(page: Int, size: Int): [UInt64] { - let pageSize: Int? = size > 0 ? 
size : nil - return FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: page, size: pageSize) +access(all) fun main(page: Int, size: UInt): [UInt64] { + return FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: page, size: size) } diff --git a/cadence/tests/scheduled_supervisor_test.cdc b/cadence/tests/scheduled_supervisor_test.cdc index eada6f15..32438057 100644 --- a/cadence/tests/scheduled_supervisor_test.cdc +++ b/cadence/tests/scheduled_supervisor_test.cdc @@ -279,7 +279,9 @@ fun testMultiYieldVaultIndependentExecution() { /// access(all) fun testPaginationStress() { - Test.reset(to: snapshot) + if snapshot != getCurrentBlockHeight() { + Test.reset(to: snapshot) + } // Calculate number of yield vaults: 3 * MAX_BATCH_SIZE + partial batch // MAX_BATCH_SIZE is 5 in FlowYieldVaultsSchedulerRegistry let maxBatchSize = 5 @@ -333,7 +335,8 @@ fun testPaginationStress() { // Test paginated access - request each page up to MAX_BATCH_SIZE var page = 0 while page <= fullBatches { - let pageRes = executeScript("../scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc", [page, maxBatchSize]) + let pageRes = executeScript("../scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc", [page, UInt(maxBatchSize)]) + Test.expect(pageRes, Test.beSucceeded()) let pageData = pageRes.returnValue! as! [UInt64] log("Page ".concat(page.toString()).concat(" of pending queue: ").concat(pageData.length.toString()).concat(" yield vaults")) page = page + 1 @@ -410,6 +413,9 @@ fun testPaginationStress() { /// access(all) fun testSupervisorDoesNotDisruptHealthyYieldVaults() { + // Start from the post-setup snapshot so global event history and stuck state from other + // tests cannot make this "healthy/no-op Supervisor" assertion pass accidentally. 
+ Test.reset(to: snapshot) log("\n Testing Supervisor with healthy yield vaults (nothing to recover)...") let user = Test.createAccount() @@ -454,6 +460,12 @@ fun testSupervisorDoesNotDisruptHealthyYieldVaults() { log("Pending queue size: ".concat(pendingCount.toString())) Test.assertEqual(0, pendingCount) + // Capture event baselines after verifying this test is in a clean healthy state. + // The assertions below check that this Supervisor run emits no *new* recovery or + // stuck-detection events, instead of tolerating unrelated events from prior tests. + let recoveredEventsBefore = Test.eventsOfType(Type()).length + let stuckDetectedEventsBefore = Test.eventsOfType(Type()).length + // Supervisor is automatically configured when FlowYieldVaultsSchedulerV1 is deployed (in init) Test.commitBlock() @@ -474,11 +486,20 @@ fun testSupervisorDoesNotDisruptHealthyYieldVaults() { // 7. Verify Supervisor ran but found nothing to recover (healthy yield vault) let recoveredEvents = Test.eventsOfType(Type()) + let stuckDetectedEvents = Test.eventsOfType(Type()) log("YieldVaultRecovered events: ".concat(recoveredEvents.length.toString())) - - // Healthy yield vaults don't need recovery - // Note: recoveredEvents might be > 0 if there were stuck yield vaults from previous tests - // The key verification is that our yield vault continues to execute + log("StuckYieldVaultDetected events: ".concat(stuckDetectedEvents.length.toString())) + // A healthy vault should not cause the Supervisor to enqueue recovery work or emit + // recovery events. These checks make the test prove "Supervisor was a no-op" rather + // than only proving the vault kept executing afterward. + Test.assert( + recoveredEvents.length == recoveredEventsBefore, + message: "Supervisor should not emit recovery events for a healthy yield vault. 
Before: \(recoveredEventsBefore.toString()), After: \(recoveredEvents.length.toString())" + ) + Test.assert( + stuckDetectedEvents.length == stuckDetectedEventsBefore, + message: "Supervisor should not detect stuck yield vaults in a clean healthy test. Before: \(stuckDetectedEventsBefore.toString()), After: \(stuckDetectedEvents.length.toString())" + ) // 8. Verify yield vault continues executing log("Step 7: Verifying yield vault continues executing...") @@ -913,3 +934,130 @@ fun testInsufficientFundsAndRecovery() { log("- All ".concat(activeScheduleCount.toString()).concat(" yield vaults have active schedules")) log("========================================") } + +/// Supervisor batch recovery: 200 stuck vaults, no capacity-probe loop. +/// +/// Flow: create 200 yield vaults, run 2 scheduling rounds, drain FLOW so executions fail, +/// wait for vaults to be marked stuck, refund FLOW, schedule the supervisor, then advance +/// time for ceil(200/MAX_BATCH_SIZE)+10 supervisor ticks. Asserts all 200 vaults are +/// recovered (YieldVaultRecovered events), none still stuck, and all have active schedules. +/// The +10 extra ticks are a buffer so every vault is processed despite scheduler timing. +access(all) +fun testSupervisorHandlesManyStuckVaults() { + let n = 200 + let maxBatchSize = FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE + + if snapshot != getCurrentBlockHeight() { + Test.reset(to: snapshot) + } + + // 1. Setup: user, FLOW, and grant + let user = Test.createAccount() + mintFlow(to: user, amount: 100000.0) + grantBeta(flowYieldVaultsAccount, user) + mintFlow(to: flowYieldVaultsAccount, amount: 10000.0) + + // 2. 
Create n yield vaults in batch (Test.executeTransactions) + var i = 0 + let tx = Test.Transaction( + code: Test.readFile("../transactions/flow-yield-vaults/create_yield_vault.cdc"), + authorizers: [user.address], + signers: [user], + arguments: [strategyIdentifier, flowTokenIdentifier, 5.0] + ) + let txs: [Test.Transaction] = [] + while i < n { + txs.append(tx) + i = i + 1 + } + let results = Test.executeTransactions(txs) + for result in results { + Test.expect(result, Test.beSucceeded()) + } + log("testSupervisorHandlesManyStuckVaults: created \(n.toString()) yield vaults") + + let yieldVaultIDs = getYieldVaultIDs(address: user.address)! + Test.assert(yieldVaultIDs.length == n, message: "expected \(n.toString()) vaults, got \(yieldVaultIDs.length.toString())") + + // 3. Two scheduling rounds so vaults run once + setMockOraclePrice(signer: flowYieldVaultsAccount, forTokenIdentifier: flowTokenIdentifier, price: 1.5) + setMockOraclePrice(signer: flowYieldVaultsAccount, forTokenIdentifier: yieldTokenIdentifier, price: 1.2) + Test.moveTime(by: 60.0 * 10.0 + 10.0) + Test.commitBlock() + Test.moveTime(by: 60.0 * 10.0 + 10.0) + Test.commitBlock() + + // 4. Drain FLOW so subsequent executions fail and vaults become stuck + let balanceBeforeDrain = (executeScript( + "../scripts/flow-yield-vaults/get_flow_balance.cdc", + [flowYieldVaultsAccount.address] + ).returnValue! as! UFix64) + if balanceBeforeDrain > 0.01 { + let drainRes = _executeTransaction( + "../transactions/flow-yield-vaults/drain_flow.cdc", + [balanceBeforeDrain - 0.001], + flowYieldVaultsAccount + ) + Test.expect(drainRes, Test.beSucceeded()) + } + log("testSupervisorHandlesManyStuckVaults: drained FLOW, waiting for vaults to be marked stuck") + + // 5. Wait rounds until vaults are marked stuck + var waitRound = 0 + while waitRound < 6 { + Test.moveTime(by: 60.0 * 10.0 + 10.0) + Test.commitBlock() + waitRound = waitRound + 1 + } + + // 6. 
Refund FLOW and schedule supervisor + mintFlow(to: flowYieldVaultsAccount, amount: 500.0) + Test.commitBlock() + Test.moveTime(by: 1.0) + Test.commitBlock() + + let interval = 60.0 * 10.0 + let schedSupRes = _executeTransaction( + "../transactions/flow-yield-vaults/admin/schedule_supervisor.cdc", + [interval, UInt8(1), UInt64(5000), true], + flowYieldVaultsAccount + ) + Test.expect(schedSupRes, Test.beSucceeded()) + + // 7. Advance time for supervisor ticks (ceil(n/MAX_BATCH_SIZE)+10); each tick processes a batch + let supervisorRunsNeeded = (UInt(n) + UInt(maxBatchSize) - 1) / UInt(maxBatchSize) + var run = 0 as UInt + while run < supervisorRunsNeeded + 10 { + Test.moveTime(by: 60.0 * 10.0 + 10.0) + Test.commitBlock() + run = run + 1 + } + log("testSupervisorHandlesManyStuckVaults: ran \((supervisorRunsNeeded + 10).toString()) supervisor ticks") + + let recoveredEvents = Test.eventsOfType(Type()) + Test.assert(recoveredEvents.length >= n, message: "expected at least \(n.toString()) recovered, got \(recoveredEvents.length.toString())") + log("testSupervisorHandlesManyStuckVaults: recovered \(recoveredEvents.length.toString()) vaults") + + // 8. Health check: none stuck, all have active schedules + var stillStuck = 0 + var activeCount = 0 + for yieldVaultID in yieldVaultIDs { + let isStuckRes = executeScript( + "../scripts/flow-yield-vaults/is_stuck_yield_vault.cdc", + [yieldVaultID] + ) + if isStuckRes.returnValue != nil && (isStuckRes.returnValue! as! Bool) { + stillStuck = stillStuck + 1 + } + let hasActiveRes = executeScript( + "../scripts/flow-yield-vaults/has_active_schedule.cdc", + [yieldVaultID] + ) + if hasActiveRes.returnValue != nil && (hasActiveRes.returnValue! as! 
Bool) { + activeCount = activeCount + 1 + } + } + Test.assert(stillStuck == 0, message: "expected 0 stuck, got \(stillStuck.toString())") + Test.assert(activeCount == n, message: "expected \(n.toString()) active, got \(activeCount.toString())") + log("testSupervisorHandlesManyStuckVaults: all \(n.toString()) vaults healthy, active schedules: \(activeCount.toString())") +} diff --git a/docs/IMPLEMENTATION_SUMMARY.md b/docs/IMPLEMENTATION_SUMMARY.md index fa3e30e8..b6a73b31 100644 --- a/docs/IMPLEMENTATION_SUMMARY.md +++ b/docs/IMPLEMENTATION_SUMMARY.md @@ -2,174 +2,174 @@ ## Overview -Autonomous scheduled rebalancing for FlowYieldVaults YieldVaults using Flow's native transaction scheduler (FLIP 330). +Scheduled rebalancing for FlowYieldVaults is built on Flow's native transaction scheduler. +AutoBalancers schedule themselves for normal recurring execution, while +`FlowYieldVaultsSchedulerV1.Supervisor` exists only to detect and recover stuck vaults. -## Branch Information +## Status -**Branch**: `scheduled-rebalancing` -**Last Updated**: November 26, 2025 +This document reflects the current scheduler architecture in this repository. + +- Last Updated: March 10, 2026 +- Current batch size: `FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE = 5` +- Current scheduler contract: `FlowYieldVaultsSchedulerV1.cdc` ## Architecture ### Key Design Principles -1. **Atomic Initial Scheduling**: YieldVault creation atomically registers and schedules first execution -2. **No Wrapper**: Direct capability to AutoBalancer (RebalancingHandler removed) -3. **Self-Scheduling AutoBalancers**: AutoBalancers chain their own subsequent executions -4. **Recovery-Only Supervisor**: Processes bounded pending queue, not all yield vaults +1. Atomic registration and first scheduling at YieldVault creation. +2. Direct AutoBalancer capabilities, with no scheduling wrapper layer. +3. Native self-scheduling for healthy recurring AutoBalancers. +4. 
Recovery-only Supervisor with bounded scanning and bounded pending-queue processing. +5. LRU stuck-scan ordering, so the longest-idle vaults are checked first. -### Component Design +### Main Components -``` -FlowYieldVaults Contract Account +```text +FlowYieldVaults Account | - +-- FlowYieldVaultsScheduler - | +-- SchedulerManager (tracks scheduled transactions) - | +-- Supervisor (recovery handler for failed schedules) + +-- FlowYieldVaultsAutoBalancers + | +-- Stores account-hosted AutoBalancers + | +-- Issues handler/schedule capabilities + | +-- Sets shared execution callback + | +-- Starts first native schedule | +-- FlowYieldVaultsSchedulerRegistry | +-- yieldVaultRegistry: {UInt64: Bool} - | +-- handlerCaps: {UInt64: Capability} - | +-- pendingQueue: {UInt64: Bool} (bounded by MAX_BATCH_SIZE=50) + | +-- handlerCaps + | +-- scheduleCaps + | +-- pendingQueue + | +-- listNodes / listHead / listTail (LRU stuck-scan order) | +-- supervisorCap | - +-- FlowYieldVaultsAutoBalancers - +-- AutoBalancer (per YieldVault) implements TransactionHandler + +-- FlowYieldVaultsSchedulerV1 + +-- Supervisor resource + +-- scheduling cost/config helpers ``` -### Execution Flow - -1. **YieldVault Creation** (atomic): - - User creates YieldVault via `create_yield_vault.cdc` - - Strategy creates AutoBalancer in `_initNewAutoBalancer()` - - `registerYieldVault()` atomically: - - Issues capability directly to AutoBalancer - - Registers in FlowYieldVaultsSchedulerRegistry - - Schedules first execution - - If any step fails, entire transaction reverts - -2. **Scheduled Execution**: - - FlowTransactionScheduler triggers at scheduled time - - Calls `AutoBalancer.executeTransaction()` - - AutoBalancer.rebalance() executes - - AutoBalancer self-schedules next execution (if configured with recurringConfig) - -3. 
**Recovery** (Supervisor): - - Processes `getPendingYieldVaultIDs()` (MAX 50 per run) - - Schedules yield vaults that failed to self-schedule - - Self-reschedules if pending work remains - -## Files - -### Core Contracts -- **`FlowYieldVaultsScheduler.cdc`** (~730 lines) - - SchedulerManager resource - - Supervisor resource (recovery handler) - - Atomic registration with initial scheduling - -- **`FlowYieldVaultsSchedulerRegistry.cdc`** (~155 lines) - - Registry storage (separate contract) - - Pending queue with MAX_BATCH_SIZE pagination - - Events: YieldVaultRegistered, YieldVaultUnregistered, YieldVaultEnqueuedPending, YieldVaultDequeuedPending - -### Transactions -- `schedule_rebalancing.cdc` - Manual schedule (after canceling auto-schedule) -- `cancel_scheduled_rebalancing.cdc` - Cancel and get refund -- `setup_scheduler_manager.cdc` - Initialize SchedulerManager -- `setup_supervisor.cdc` - Initialize Supervisor -- `schedule_supervisor.cdc` - Schedule Supervisor for recovery -- `enqueue_pending_yield_vault.cdc` - Manually enqueue for recovery - -### Scripts -- `get_scheduled_rebalancing.cdc` - Query specific yield vault's schedule -- `get_all_scheduled_rebalancing.cdc` - List all scheduled rebalancing -- `get_registered_yield_vault_ids.cdc` - Get registered yield vault IDs -- `get_pending_count.cdc` - Check pending queue size -- `estimate_rebalancing_cost.cdc` - Estimate fees -- `has_wrapper_cap_for_yield_vault.cdc` - Check if handler cap exists (renamed from wrapper) - -### Tests -- `scheduled_supervisor_test.cdc` - Supervisor and multi-yield-vault tests -- `scheduled_rebalance_integration_test.cdc` - Integration tests -- `scheduled_rebalance_scenario_test.cdc` - Scenario-based tests -- `scheduler_edge_cases_test.cdc` - Edge case tests - -## Key Features - -### Automatic Scheduling at YieldVault Creation -- No manual setup required -- First rebalancing scheduled atomically with yield vault creation -- Fails safely - reverts entire transaction if scheduling 
fails - -### Self-Scheduling AutoBalancers -- AutoBalancers with `recurringConfig` chain their own executions -- No central coordinator needed for normal operation -- Each AutoBalancer manages its own schedule independently - -### Paginated Recovery (Supervisor) -- MAX_BATCH_SIZE = 50 yield vaults per Supervisor run -- Only processes pending queue (not all registered yield vaults) -- Self-reschedules if more work remains - -### Events +## Execution Flow + +### YieldVault Creation + +1. `create_yield_vault.cdc` creates a strategy. +2. The strategy calls `FlowYieldVaultsAutoBalancers._initNewAutoBalancer(...)`. +3. `_initNewAutoBalancer(...)`: + - stores the AutoBalancer + - issues handler and schedule capabilities + - registers the vault in `FlowYieldVaultsSchedulerRegistry` + - sets a shared `RegistryReportCallback` + - schedules the first rebalance when `recurringConfig != nil` +4. If any required step fails, the transaction reverts. + +### Normal Operation + +1. `FlowTransactionScheduler` executes the AutoBalancer. +2. The AutoBalancer rebalances. +3. If recurring scheduling is configured, the AutoBalancer schedules its next run. +4. The shared execution callback reports success to the registry. +5. `reportExecution()` moves that vault to the most-recently-executed end of the LRU list. + +### Recovery Operation + +Each Supervisor run has two bounded steps: + +1. Stuck detection: + - reads up to `MAX_BATCH_SIZE` least-recently-executed vault IDs from `getStuckScanCandidates(...)` + - checks whether each candidate is overdue and lacks an active schedule + - enqueues stuck vaults into `pendingQueue` + +2. 
Pending recovery: + - reads up to `MAX_BATCH_SIZE` vault IDs from `getPendingYieldVaultIDsPaginated(page: 0, size: UInt(MAX_BATCH_SIZE))` + - borrows each vault's `Schedule` capability + - calls `scheduleNextRebalance(whileExecuting: nil)` directly + - dequeues successfully recovered vaults + +If the Supervisor itself is configured with a recurring interval, it self-reschedules after the run. + +## Core Contracts + +- `FlowYieldVaultsAutoBalancers.cdc` + - account-hosted AutoBalancer creation and cleanup + - handler/schedule capability issuance + - shared execution callback wiring + +- `FlowYieldVaultsSchedulerRegistry.cdc` + - registered vault tracking + - pending queue + - handler/schedule capability storage + - LRU stuck-scan ordering + +- `FlowYieldVaultsSchedulerV1.cdc` + - Supervisor recovery handler + - Supervisor configuration and cost estimation helpers + - Supervisor recovery and self-reschedule events + +## Scheduler-Related Transactions + +- `cadence/transactions/flow-yield-vaults/admin/schedule_supervisor.cdc` +- `cadence/transactions/flow-yield-vaults/admin/destroy_supervisor.cdc` +- `cadence/transactions/flow-yield-vaults/admin/destroy_and_reset_supervisor.cdc` +- `cadence/transactions/flow-yield-vaults/enqueue_pending_yield_vault.cdc` +- `cadence/transactions/flow-yield-vaults/admin/set_default_recurring_interval.cdc` +- `cadence/transactions/flow-yield-vaults/admin/set_default_exec_effort.cdc` +- `cadence/transactions/flow-yield-vaults/admin/set_default_min_fee_fallback.cdc` +- `cadence/transactions/flow-yield-vaults/admin/set_default_fee_margin_multiplier.cdc` +- `cadence/transactions/flow-yield-vaults/admin/set_default_priority.cdc` + +## Scheduler-Related Scripts + +- `cadence/scripts/flow-yield-vaults/get_registered_yield_vault_ids.cdc` +- `cadence/scripts/flow-yield-vaults/get_registered_yield_vault_count.cdc` +- `cadence/scripts/flow-yield-vaults/get_pending_count.cdc` +- 
`cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc` +- `cadence/scripts/flow-yield-vaults/get_scheduler_config.cdc` +- `cadence/scripts/flow-yield-vaults/estimate_rebalancing_cost.cdc` +- `cadence/scripts/flow-yield-vaults/has_active_schedule.cdc` +- `cadence/scripts/flow-yield-vaults/is_stuck_yield_vault.cdc` +- `cadence/scripts/flow-yield-vaults/has_wrapper_cap_for_yield_vault.cdc` + - legacy script name; it checks for the direct handler capability stored in the registry + +## Current Events + ```cadence -// FlowYieldVaultsScheduler -event RebalancingScheduled(yieldVaultID, scheduledTransactionID, timestamp, priority, isRecurring, ...) -event RebalancingCanceled(yieldVaultID, scheduledTransactionID, feesReturned) -event SupervisorSeededYieldVault(yieldVaultID, scheduledTransactionID, timestamp) +// FlowYieldVaultsSchedulerV1 +event YieldVaultRecovered(yieldVaultID: UInt64) +event YieldVaultRecoveryFailed(yieldVaultID: UInt64, error: String) +event StuckYieldVaultDetected(yieldVaultID: UInt64) +event SupervisorRescheduled(scheduledTransactionID: UInt64, timestamp: UFix64) +event SupervisorRescheduleFailed( + timestamp: UFix64, + requiredFee: UFix64?, + availableBalance: UFix64?, + error: String +) // FlowYieldVaultsSchedulerRegistry -event YieldVaultRegistered(yieldVaultID, handlerCapValid) -event YieldVaultUnregistered(yieldVaultID, wasInPendingQueue) -event YieldVaultEnqueuedPending(yieldVaultID, pendingQueueSize) -event YieldVaultDequeuedPending(yieldVaultID, pendingQueueSize) +event YieldVaultRegistered(yieldVaultID: UInt64) +event YieldVaultUnregistered(yieldVaultID: UInt64, wasInPendingQueue: Bool) +event YieldVaultEnqueuedPending(yieldVaultID: UInt64, pendingQueueSize: Int) +event YieldVaultDequeuedPending(yieldVaultID: UInt64, pendingQueueSize: Int) ``` ## Test Coverage -| Test | Description | -|------|-------------| -| `testAutoRegisterAndSupervisor` | YieldVault creation auto-registers and schedules | -| 
`testMultiYieldVaultNativeScheduling` | 3 yield vaults all self-schedule natively | -| `testMultiYieldVaultIndependentExecution` | Multiple yield vaults execute independently 3+ times | -| `testPaginationStress` | 18 yield vaults (>MAX_BATCH_SIZE) all registered and execute | -| `testSupervisorDoesNotDisruptHealthyYieldVaults` | Healthy yield vaults continue executing with Supervisor running | -| `testStuckYieldVaultDetectionLogic` | Detection logic correctly identifies healthy vs stuck yield vaults | -| `testInsufficientFundsAndRecovery` | Complete failure and recovery cycle with insufficient funds | -| `testYieldVaultHasNativeScheduleAfterCreation` | Yield vault has active schedule immediately after creation | - -## Security - -1. **Access Control**: - - `getSupervisorCap()` - `access(account)` - - `getHandlerCap()` - `access(account)` - - `enqueuePending()` - `access(account)` - - Registration/unregistration only from FlowYieldVaultsAutoBalancers - -2. **Atomic Operations**: - - YieldVault creation + registration + scheduling is atomic - - Failure at any step reverts the entire transaction - -3. 
**Bounded Operations**: - - Supervisor processes MAX 50 yield vaults per execution - - Prevents compute limit exhaustion - -## Changelog - -### Version 2.0.0 (November 26, 2025) -- Removed RebalancingHandler wrapper -- Atomic initial scheduling at yield vault registration -- Paginated Supervisor with pending queue -- Self-scheduling AutoBalancers -- Moved registration to FlowYieldVaultsAutoBalancers -- Added comprehensive events - -### Version 1.0.0 (November 10, 2025) -- Initial implementation -- Central Supervisor scanning all yield vaults -- RebalancingHandler wrapper - ---- - -**Status**: Implementation complete, tests passing -**Last Updated**: November 26, 2025 +- `scheduled_supervisor_test.cdc` + - native scheduling, pagination, healthy-supervisor no-op behavior, stuck detection, recovery +- `scheduled_rebalance_integration_test.cdc` + - scheduler integration behavior +- `scheduled_rebalance_scenario_test.cdc` + - multi-round scheduling scenarios +- `scheduler_edge_cases_test.cdc` + - edge cases and invariants +- `yield_vault_lifecycle_test.cdc` + - vault lifecycle with scheduler wiring +- `atomic_registration_gc_test.cdc` + - atomic registration and cleanup behavior + +## Security and Operational Notes + +1. Registration, execution reporting, pending enqueue/dequeue, and unregister operations are account-restricted. +2. Supervisor processing is bounded to avoid unbounded compute growth. +3. Healthy recurring execution depends on the FlowYieldVaults account retaining sufficient FLOW for fees. +4. Recovery does not replace off-chain monitoring; it only restores schedules for vaults that are overdue and unscheduled. 
diff --git a/docs/SCHEDULED_REBALANCING_GUIDE.md b/docs/SCHEDULED_REBALANCING_GUIDE.md index 824f176e..687f176b 100644 --- a/docs/SCHEDULED_REBALANCING_GUIDE.md +++ b/docs/SCHEDULED_REBALANCING_GUIDE.md @@ -26,10 +26,10 @@ YieldVault Creation (Atomic) FlowYieldVaultsAutoBalancers._initNewAutoBalancer() | v -FlowYieldVaultsScheduler.registerYieldVault() - |-- Issues capability to AutoBalancer +FlowYieldVaultsSchedulerRegistry.register(...) + |-- Stores handler + schedule capabilities |-- Registers in FlowYieldVaultsSchedulerRegistry - +-- Schedules first execution + +-- AutoBalancer schedules first execution | v FlowTransactionScheduler executes at scheduled time @@ -42,10 +42,10 @@ AutoBalancer.executeTransaction() ### Components -1. **FlowYieldVaultsScheduler**: Manages registration and scheduling -2. **FlowYieldVaultsSchedulerRegistry**: Stores registry of yield vaults and pending queue -3. **AutoBalancer**: Implements `TransactionHandler`, executes rebalancing -4. **Supervisor**: Recovery handler for failed schedules (paginated) +1. **FlowYieldVaultsAutoBalancers**: Configures AutoBalancers, registers them, and starts the first schedule +2. **FlowYieldVaultsSchedulerRegistry**: Stores registered vault IDs, capabilities, pending queue, and stuck-scan order +3. **AutoBalancer**: Implements `TransactionHandler`, executes rebalancing, and self-schedules recurring runs +4. **FlowYieldVaultsSchedulerV1.Supervisor**: Recovery handler for failed schedules ### No Wrapper Needed @@ -161,10 +161,11 @@ flow scripts execute cadence/scripts/flow-yield-vaults/get_pending_count.cdc ### What It Does -The Supervisor handles yield vaults that failed to self-schedule: -- Processes bounded `pendingQueue` (MAX 50 yield vaults per run) -- Schedules failed yield vaults -- Self-reschedules if more work remains +The Supervisor handles two recovery scenarios per run: +1. 
**Stuck detection**: Scans up to `MAX_BATCH_SIZE` vault candidates using `getStuckScanCandidates()`, which returns vaults ordered least-recently-executed first (LRU). Stuck vaults are enqueued in `pendingQueue`. +2. **Pending processing**: Seeds vaults from `pendingQueue` (up to `MAX_BATCH_SIZE` per run). When scheduled with a recurring interval, the Supervisor keeps self-rescheduling even if a given run finds no work. + +Each AutoBalancer reports back to the registry after every execution via `RegistryReportCallback`, which calls `reportExecution()` to move the vault to the most-recently-executed end of the internal list. Because stuck scanning starts from the least-recently-executed tail, the Supervisor still prioritizes the longest-idle vaults first. ### When It's Needed @@ -187,36 +188,39 @@ The next Supervisor run will re-seed the yield vault. ## Events -### FlowYieldVaultsScheduler Events +### FlowYieldVaultsSchedulerV1 Events ```cadence -event RebalancingScheduled( - yieldVaultID: UInt64, - scheduledTransactionID: UInt64, - timestamp: UFix64, - priority: UInt8, - isRecurring: Bool, - recurringInterval: UFix64?, - force: Bool +event YieldVaultRecovered( + yieldVaultID: UInt64 ) -event RebalancingCanceled( +event YieldVaultRecoveryFailed( yieldVaultID: UInt64, - scheduledTransactionID: UInt64, - feesReturned: UFix64 + error: String ) -event SupervisorSeededYieldVault( - yieldVaultID: UInt64, +event StuckYieldVaultDetected( + yieldVaultID: UInt64 +) + +event SupervisorRescheduled( scheduledTransactionID: UInt64, timestamp: UFix64 ) + +event SupervisorRescheduleFailed( + timestamp: UFix64, + requiredFee: UFix64?, + availableBalance: UFix64?, + error: String +) ``` ### FlowYieldVaultsSchedulerRegistry Events ```cadence -event YieldVaultRegistered(yieldVaultID: UInt64, handlerCapValid: Bool) +event YieldVaultRegistered(yieldVaultID: UInt64) event YieldVaultUnregistered(yieldVaultID: UInt64, wasInPendingQueue: Bool) event YieldVaultEnqueuedPending(yieldVaultID: 
UInt64, pendingQueueSize: Int) event YieldVaultDequeuedPending(yieldVaultID: UInt64, pendingQueueSize: Int) @@ -239,9 +243,9 @@ flow transactions send cadence/transactions/flow-yield-vaults/cancel_scheduled_r When a yield vault is closed: 1. `_cleanupAutoBalancer()` is called -2. `unregisterYieldVault()` cancels pending schedules -3. Fees are refunded to the FlowYieldVaults account -4. YieldVault is removed from registry +2. The vault is removed from `FlowYieldVaultsSchedulerRegistry` +3. AutoBalancer capability controllers are deleted +4. Burning the AutoBalancer cleans up its internally managed scheduled transactions --- @@ -311,5 +315,5 @@ A: No, one schedule per yield vault. Cancel to reschedule. --- -**Last Updated**: November 26, 2025 -**Version**: 2.0.0 +**Last Updated**: March 9, 2026 +**Version**: 2.1.0 diff --git a/docs/autobalancer-restart-recurring-proposal.md b/docs/autobalancer-restart-recurring-proposal.md index 01643a78..6aa42dcb 100644 --- a/docs/autobalancer-restart-recurring-proposal.md +++ b/docs/autobalancer-restart-recurring-proposal.md @@ -1,5 +1,10 @@ # AutoBalancer Recovery via Schedule Capability +> Historical note: this proposal describes the recovery design that was later implemented. +> Current code names are `FlowYieldVaultsSchedulerV1` and `FlowYieldVaultsSchedulerRegistry`. +> Current stuck detection scans up to `MAX_BATCH_SIZE` least-recently-executed vaults from +> the registry's LRU ordering, not the full registered set. + ## Problem Statement When an `AutoBalancer` is configured for recurring rebalancing, its `executeTransaction` function contains an internal check: @@ -104,8 +109,8 @@ Instead of modifying DeFiActions to add a `restartRecurring` flag, we use the ex ┌────────────────────────────────────────────────────────────────┐ │ Supervisor Recovery │ ├────────────────────────────────────────────────────────────────┤ -│ 1. Supervisor scans registered yield vaults │ -│ 2. 
Detects stuck yield vaults via isStuckYieldVault() check: │ +│ 1. Supervisor scans up to MAX_BATCH_SIZE LRU candidates from Registry │ +│ 2. Detects stuck yield vaults via isStuckYieldVault() check: │ │ - Has recurringConfig │ │ - No active schedule │ │ - Next expected execution time is in the past │ @@ -126,7 +131,7 @@ The Supervisor emits these events during recovery: ## Fee Source Considerations -Both Supervisor and AutoBalancer use the same fund source (the FlowYieldVaultsStrategies contract account's FlowToken vault). This means: +Both Supervisor and AutoBalancer use the same fund source (the FlowYieldVaults account's shared FlowToken vault). This means: 1. If the account is drained, BOTH fail to schedule 2. If the account is refunded, BOTH can schedule again @@ -164,7 +169,7 @@ let scheduleCap = self.account.capabilities.storage .issue(storagePath) ``` -### FlowYieldVaultsScheduler +### FlowYieldVaultsSchedulerV1 Simplified Supervisor that directly calls `scheduleNextRebalance()`: diff --git a/docs/rebalancing_architecture.md b/docs/rebalancing_architecture.md index a0f5c9e8..5d2acd8d 100644 --- a/docs/rebalancing_architecture.md +++ b/docs/rebalancing_architecture.md @@ -19,9 +19,9 @@ - Storing AutoBalancer resources in the FlowYieldVaults account (per YieldVault/UniqueID) - Publishing public/private capabilities - Setting the AutoBalancer's **self capability** (for scheduling) - - **Registering/unregistering with FlowYieldVaultsScheduler** + - **Registering/unregistering with FlowYieldVaultsSchedulerRegistry** - On `_initNewAutoBalancer()`: registers yield vault and schedules first execution atomically -- On `_cleanupAutoBalancer()`: unregisters and cancels pending schedules +- On `_cleanupAutoBalancer()`: unregisters the vault, deletes AutoBalancer capability controllers, and burns the AutoBalancer ### DeFiActions.AutoBalancer (from FlowActions) - Holds a vault of some asset (here: `YieldToken`) @@ -40,17 +40,16 @@ - If undercollateralized and there is a 
`topUpSource`, pulls extra collateral - If overcollateralized and there is a `drawDownSink`, withdraws collateral -### FlowYieldVaultsScheduler + FlowYieldVaultsSchedulerRegistry +### FlowYieldVaultsSchedulerV1 + FlowYieldVaultsSchedulerRegistry - **FlowYieldVaultsSchedulerRegistry** stores: - `yieldVaultRegistry`: registered yield vault IDs - `handlerCaps`: direct capabilities to AutoBalancers (no wrapper) - - `pendingQueue`: yield vaults needing (re)seeding (bounded by MAX_BATCH_SIZE=50) + - `pendingQueue`: yield vaults needing (re)seeding; processing is bounded by `MAX_BATCH_SIZE = 5` per Supervisor run + - `stuckScanOrder`: LRU-ordered list of vault IDs for stuck detection; vaults call `reportExecution()` on each run to move themselves to the most-recently-executed end, so the Supervisor always scans the longest-idle vaults first - `supervisorCap`: capability for Supervisor self-scheduling -- **FlowYieldVaultsScheduler** provides: - - `registerYieldVault()`: atomic registration + initial scheduling - - `unregisterYieldVault()`: cleanup and fee refund - - `SchedulerManager`: tracks scheduled transactions +- **FlowYieldVaultsSchedulerV1** provides: - `Supervisor`: recovery handler for failed schedules + - Scheduling cost estimation and Supervisor configuration helpers --- @@ -63,7 +62,7 @@ Inside `MockStrategies.TracerStrategyComposer.createStrategy(...)`: - Oracle: `MockOracle.PriceOracle()` - Vault type: `YieldToken.Vault` - Thresholds: `lowerThreshold = 0.95`, `upperThreshold = 1.05` - - Recurring config: `nil` (scheduling handled by FlowYieldVaultsScheduler) + - Recurring config: non-`nil` (enables native AutoBalancer self-scheduling) - Saved via `FlowYieldVaultsAutoBalancers._initNewAutoBalancer(...)`, which: - Stores the AutoBalancer - Issues public capability @@ -110,7 +109,7 @@ Inside `MockStrategies.TracerStrategyComposer.createStrategy(...)`: The capability is issued directly to the AutoBalancer at its storage path: ```cadence -// In 
registerYieldVault(): +// In _initNewAutoBalancer(): let abPath = FlowYieldVaultsAutoBalancers.deriveAutoBalancerPath(id: yieldVaultID, storage: true) as! StoragePath let handlerCap = self.account.capabilities.storage .issue(abPath) @@ -121,16 +120,22 @@ let handlerCap = self.account.capabilities.storage When `_initNewAutoBalancer()` is called: ```cadence -// Register with scheduler and schedule first execution atomically +// Register with the registry and schedule first execution atomically // This panics if scheduling fails, reverting AutoBalancer creation -FlowYieldVaultsScheduler.registerYieldVault(yieldVaultID: uniqueID.id) +FlowYieldVaultsSchedulerRegistry.register( + yieldVaultID: uniqueID.id, + handlerCap: handlerCap, + scheduleCap: scheduleCap +) +autoBalancerRef.scheduleNextRebalance(whileExecuting: nil) ``` -`registerYieldVault()` atomically: -1. Issues capability to AutoBalancer -2. Registers in FlowYieldVaultsSchedulerRegistry -3. Schedules first execution via SchedulerManager -4. If any step fails, entire transaction reverts +`_initNewAutoBalancer()` atomically: +1. Issues capabilities to the AutoBalancer +2. Registers the vault in `FlowYieldVaultsSchedulerRegistry` +3. Sets the shared execution callback used to report successful runs +4. Schedules the first execution directly on the AutoBalancer +5. If any step fails, the entire transaction reverts ### Self-Scheduling AutoBalancers @@ -154,32 +159,44 @@ fun executeTransaction(id: UInt64, data: AnyStruct?) { ### Supervisor Recovery (Bounded) -The Supervisor handles failed schedules via a bounded pending queue: +The Supervisor runs two steps per execution: + +**Step 1 – Stuck detection** (when `scanForStuck == true`): +Fetches up to `MAX_BATCH_SIZE` candidates from `getStuckScanCandidates(limit:)`, which returns vault IDs starting from the least-recently-executed tail of `stuckScanOrder`. Vaults that are stuck (recurring config set, no active schedule, overdue) are enqueued into `pendingQueue`. 
+ +**Step 2 – Pending processing**: +Seeds vaults from `pendingQueue` (up to `MAX_BATCH_SIZE` per run via `getPendingYieldVaultIDsPaginated(page: 0, size: UInt(MAX_BATCH_SIZE))`). ```cadence access(FlowTransactionScheduler.Execute) fun executeTransaction(id: UInt64, data: AnyStruct?) { - // Process only pending yield vaults (MAX 50 per run) - let pendingYieldVaultIDs = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDs() - - for yieldVaultID in pendingYieldVaults { - if manager.hasScheduled(yieldVaultID: yieldVaultID) { - FlowYieldVaultsSchedulerRegistry.dequeuePending(yieldVaultID: yieldVaultID) - continue + // STEP 1: scan least-recently-executed vaults for stuck detection + let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates( + limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)) + for yieldVaultID in candidates { + if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) { + FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID) } - - // Schedule and dequeue - let handlerCap = FlowYieldVaultsSchedulerRegistry.getHandlerCap(yieldVaultID: yieldVaultID) - // ... estimate fees, schedule, dequeue ... } - - // Self-reschedule if more pending work - if FlowYieldVaultsSchedulerRegistry.getPendingCount() > 0 { + + // STEP 2: process pending queue (MAX_BATCH_SIZE per run) + let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated( + page: 0, + size: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE) + ) + for yieldVaultID in pendingYieldVaults { + // ... schedule via scheduleCap, dequeue ... + } + + // Self-reschedule if recurringInterval was provided + if recurringInterval != nil { // Schedule next Supervisor run - } - } + } +} ``` +Each AutoBalancer sets a shared `RegistryReportCallback` capability at creation time. 
On every execution it calls `FlowYieldVaultsSchedulerRegistry.reportExecution(yieldVaultID:)`, which moves the vault to the head of `stuckScanOrder` so the least-recently-executed tail remains the next stuck-scan priority. + --- ## 4. Behavior in Different Price Scenarios @@ -232,8 +249,8 @@ fun executeTransaction(id: UInt64, data: AnyStruct?) { | TracerStrategy | Wires AutoBalancer <-> FlowALP | | AutoBalancer | Manages Yield exposure, executes rebalance | | FlowALP Position | Manages collateral/debt health | -| FlowYieldVaultsScheduler | Registration, atomic initial scheduling | -| FlowYieldVaultsSchedulerRegistry | Stores registry, pending queue | -| Supervisor | Recovery for failed schedules (bounded) | +| FlowYieldVaultsSchedulerV1 | Supervisor recovery, fee estimation, configuration | +| FlowYieldVaultsSchedulerRegistry | Stores registry, pending queue, stuck-scan order | +| Supervisor | Stuck detection (LRU scan) + pending queue recovery (bounded) | -**Last Updated**: November 26, 2025 +**Last Updated**: March 9, 2026 diff --git a/docs/scheduled_rebalancing_comprehensive_analysis.md b/docs/scheduled_rebalancing_comprehensive_analysis.md index b59ace1f..f3e39da7 100644 --- a/docs/scheduled_rebalancing_comprehensive_analysis.md +++ b/docs/scheduled_rebalancing_comprehensive_analysis.md @@ -1,10 +1,24 @@ # Comprehensive Analysis: FlowYieldVaults Scheduled Rebalancing Branch +> Historical note: this document captures review context from November 2025 and includes +> analysis of earlier scheduler iterations. It is not the authoritative description of the +> current implementation. 
+> +> Current implementation summary: +> - scheduler contract: `FlowYieldVaultsSchedulerV1` +> - registry batch size: `MAX_BATCH_SIZE = 5` +> - stuck detection: bounded LRU scan via `getStuckScanCandidates(...)` +> - recovery: direct `Schedule` capability calls to `scheduleNextRebalance(...)` +> +> For current behavior and architecture, see: +> - `docs/SCHEDULED_REBALANCING_GUIDE.md` +> - `docs/rebalancing_architecture.md` + **Document Version:** 2.0 **Date:** November 26, 2025 **Source:** Synthesized from multiple independent code review analyses **Original Reviewer:** sisyphusSmiling (onflow/flow-defi) -**Status:** IMPLEMENTATION COMPLETE +**Status:** HISTORICAL REVIEW CONTEXT --- @@ -832,4 +846,3 @@ Strategy creation via StrategyComposer *This analysis synthesizes findings from four independent code review analyses of the scheduled-rebalancing branch, all derived from review comments by sisyphusSmiling on behalf of onflow/flow-defi.* *Implementation completed November 26, 2025.* -