Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,4 @@
[submodule "lib/FlowALP"]
path = lib/FlowALP
url = git@github.com:onflow/FlowALP.git
branch = v0
49 changes: 42 additions & 7 deletions cadence/contracts/FlowYieldVaultsAutoBalancers.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,16 @@ access(all) contract FlowYieldVaultsAutoBalancers {
/// The path prefix used for StoragePath & PublicPath derivations
access(all) let pathPrefix: String

/// Storage path for the shared execution callback resource that reports to the registry (one per account)
access(self) let registryReportCallbackStoragePath: StoragePath

/// Callback resource invoked by each AutoBalancer after execution; calls Registry.reportExecution with its id
/// One shared instance lives in this contract account's storage (see init); AutoBalancers hold a capability to it.
access(all) resource RegistryReportCallback: DeFiActions.AutoBalancerExecutionCallback {
/// Forwards the executed balancer's UUID to the scheduler registry so its
/// recency ordering can be updated.
/// NOTE(review): assumes the AutoBalancer's UUID is the same value used as the
/// registry's yieldVaultID — confirm against how vaults are registered.
access(all) fun onExecuted(balancerUUID: UInt64) {
FlowYieldVaultsSchedulerRegistry.reportExecution(yieldVaultID: balancerUUID)
}
}

/* --- PUBLIC METHODS --- */

/// Returns the path (StoragePath or PublicPath) at which an AutoBalancer is stored with the associated
Expand Down Expand Up @@ -69,7 +79,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
if autoBalancer == nil {
return false
}

let txnIDs = autoBalancer!.getScheduledTransactionIDs()
for txnID in txnIDs {
if autoBalancer!.borrowScheduledTransaction(id: txnID)?.status() == FlowTransactionScheduler.Status.Scheduled {
Expand All @@ -93,24 +103,24 @@ access(all) contract FlowYieldVaultsAutoBalancers {
if autoBalancer == nil {
return false
}

// Check if yield vault has recurring config (should be executing periodically)
let config = autoBalancer!.getRecurringConfig()
if config == nil {
return false // Not configured for recurring, can't be "stuck"
}

// Check if there's an active schedule
if self.hasActiveSchedule(id: id) {
return false // Has active schedule, not stuck
}

// Check if yield vault is overdue
let nextExpected = autoBalancer!.calculateNextExecutionTimestampAsConfigured()
if nextExpected == nil {
return true // Can't calculate next time, likely stuck
}

// If next expected time has passed and no active schedule, yield vault is stuck
return nextExpected! < getCurrentBlock().timestamp
}
Expand Down Expand Up @@ -150,6 +160,20 @@ access(all) contract FlowYieldVaultsAutoBalancers {
assert(!publishedCap,
message: "Published Capability collision found when publishing AutoBalancer for UniqueIdentifier.id \(uniqueID.id) at path \(publicPath)")

let registryReportCallbackCapabilityStoragePath =
StoragePath(identifier: "FlowYieldVaultsRegistryReportCallbackCapability")!
if self.account.storage.type(at: registryReportCallbackCapabilityStoragePath) == nil {
let sharedReportCap = self.account.capabilities.storage.issue<&{DeFiActions.AutoBalancerExecutionCallback}>(
self.registryReportCallbackStoragePath
)
self.account.storage.save(sharedReportCap, to: registryReportCallbackCapabilityStoragePath)
}
let reportCap = self.account.storage.copy<Capability<&{DeFiActions.AutoBalancerExecutionCallback}>>(
from: registryReportCallbackCapabilityStoragePath
) ?? panic(
"Missing shared registry report callback capability at \(registryReportCallbackCapabilityStoragePath)"
)

// create & save AutoBalancer with optional recurring config
let autoBalancer <- DeFiActions.createAutoBalancer(
oracle: oracle,
Expand All @@ -161,6 +185,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
recurringConfig: recurringConfig,
uniqueID: uniqueID
)
autoBalancer.setExecutionCallback(reportCap)
self.account.storage.save(<-autoBalancer, to: storagePath)
let autoBalancerRef = self._borrowAutoBalancer(uniqueID.id)

Expand Down Expand Up @@ -224,7 +249,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
let publicPath = self.deriveAutoBalancerPath(id: id, storage: false) as! PublicPath
// unpublish the public AutoBalancer Capability
let _ = self.account.capabilities.unpublish(publicPath)

// Collect controller IDs first (can't modify during iteration)
var controllersToDelete: [UInt64] = []
self.account.capabilities.storage.forEachController(forPath: storagePath, fun(_ controller: &StorageCapabilityController): Bool {
Expand All @@ -237,13 +262,23 @@ access(all) contract FlowYieldVaultsAutoBalancers {
controller.delete()
}
}

// load & burn the AutoBalancer (this also handles any pending scheduled transactions via burnCallback)
let autoBalancer <-self.account.storage.load<@DeFiActions.AutoBalancer>(from: storagePath)
Burner.burn(<-autoBalancer)
}

/// Factory for the shared registry-report callback resource.
/// Contract-internal: used by init() to seed this account's storage exactly once.
access(self) fun createRegistryReportCallbackImpl(): @RegistryReportCallback {
    let callback <- create RegistryReportCallback()
    return <-callback
}

init() {
// Prefix used when deriving per-AutoBalancer storage/public paths
self.pathPrefix = "FlowYieldVaultsAutoBalancer_"
// Fixed storage location for the single shared RegistryReportCallback resource
self.registryReportCallbackStoragePath = StoragePath(identifier: "FlowYieldVaultsRegistryReportCallback")!

// Ensure shared execution callback exists (reports this account's executions to Registry)
// Idempotent on re-deploy: only saves when nothing is stored at the path yet
if self.account.storage.type(at: self.registryReportCallbackStoragePath) == nil {
self.account.storage.save(<-self.createRegistryReportCallbackImpl(), to: self.registryReportCallbackStoragePath)
}
}
}
141 changes: 127 additions & 14 deletions cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,27 @@ import "DeFiActions"
///
access(all) contract FlowYieldVaultsSchedulerRegistry {

/* --- TYPES --- */

/// Doubly-linked-list node (simulated via a dictionary keyed by vault ID) backing
/// the O(1) least-recently-executed ordering.
/// `prev` points toward the head (most recently executed);
/// `next` points toward the tail (least recently executed).
access(all) struct ListNode {
    access(all) var prev: UInt64?
    access(all) var next: UInt64?

    init(prev: UInt64?, next: UInt64?) {
        self.next = next
        self.prev = prev
    }

    /// Repoint the head-side link
    access(all) fun setPrev(prev: UInt64?) {
        self.prev = prev
    }

    /// Repoint the tail-side link
    access(all) fun setNext(next: UInt64?) {
        self.next = next
    }
}

/* --- EVENTS --- */

/// Emitted when a yield vault is registered with its handler capability
Expand Down Expand Up @@ -58,6 +79,60 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
/// Stored as a dictionary for O(1) add/remove; iteration gives the pending set
access(self) var pendingQueue: {UInt64: Bool}

/// Simulated doubly-linked list for O(1) stuck-scan ordering.
/// listHead = most recently executed vault ID (or nil if empty).
/// listTail = least recently executed vault ID — getStuckScanCandidates walks from here.
/// On reportExecution a vault is snipped from its current position and moved to head in O(1).
access(self) var listNodes: {UInt64: ListNode}
access(self) var listHead: UInt64?
access(self) var listTail: UInt64?

/* --- PRIVATE LIST HELPERS --- */

/// Link `id` in at the head of the list (most-recently-executed end) in O(1).
/// Precondition (caller-enforced): `id` is not already linked.
access(self) fun _listInsertAtHead(id: UInt64) {
    let previousHead = self.listHead
    if previousHead == nil {
        // Empty list: the sole element is both head and tail
        self.listTail = id
    } else {
        // Point the displaced head back at the new head
        var headNode = self.listNodes[previousHead!]!
        headNode.setPrev(prev: id)
        self.listNodes[previousHead!] = headNode
    }
    self.listNodes[id] = ListNode(prev: nil, next: previousHead)
    self.listHead = id
}

/// Unlink `id` from wherever it sits in the list in O(1), patching both
/// neighbor pointers (or the head/tail markers when `id` is at an end).
/// Returns false — a no-op — when `id` is not currently linked.
access(self) fun _listRemove(id: UInt64): Bool {
    if let removed = self.listNodes.remove(key: id) {
        // Patch the head-side neighbor, or the head marker if `id` was the head
        if removed.prev == nil {
            self.listHead = removed.next
        } else {
            var headSide = self.listNodes[removed.prev!]!
            headSide.setNext(next: removed.next)
            self.listNodes[removed.prev!] = headSide
        }
        // Patch the tail-side neighbor, or the tail marker if `id` was the tail
        if removed.next == nil {
            self.listTail = removed.prev
        } else {
            var tailSide = self.listNodes[removed.next!]!
            tailSide.setPrev(prev: removed.prev)
            self.listNodes[removed.next!] = tailSide
        }
        return true
    }
    return false
}

/* --- ACCOUNT-LEVEL FUNCTIONS --- */

/// Register a YieldVault and store its handler and schedule capabilities (idempotent)
Expand All @@ -73,9 +148,26 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
self.yieldVaultRegistry[yieldVaultID] = true
self.handlerCaps[yieldVaultID] = handlerCap
self.scheduleCaps[yieldVaultID] = scheduleCap
// New vaults go to the head; they haven't executed yet but are freshly registered.
// If already in the list (idempotent re-register), remove first to avoid duplicates.
if self.listNodes[yieldVaultID] != nil {
self._listRemove(id: yieldVaultID)
}
self._listInsertAtHead(id: yieldVaultID)
emit YieldVaultRegistered(yieldVaultID: yieldVaultID)
}

/// Records an execution for `yieldVaultID` by moving it to the head of the
/// recency list in O(1); the Supervisor scans stuck candidates from the tail
/// (least recently executed). IDs not present in the registry are ignored.
/// A missing list entry is simply re-linked, restoring the ordering structure.
access(account) fun reportExecution(yieldVaultID: UInt64) {
    let isRegistered = self.yieldVaultRegistry[yieldVaultID] ?? false
    if isRegistered {
        // Snip from the current position (no-op if unlinked), then relink at head
        let _ = self._listRemove(id: yieldVaultID)
        self._listInsertAtHead(id: yieldVaultID)
    }
}

/// Adds a yield vault to the pending queue for seeding by the Supervisor
access(account) fun enqueuePending(yieldVaultID: UInt64) {
if self.yieldVaultRegistry[yieldVaultID] == true {
Expand All @@ -92,12 +184,13 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
}
}

/// Unregister a YieldVault (idempotent) - removes from registry, capabilities,
/// pending queue, and the recency linked list, then emits YieldVaultUnregistered.
/// @param yieldVaultID: The vault to remove; unknown IDs are a harmless no-op
access(account) fun unregister(yieldVaultID: UInt64) {
    // Results deliberately discarded — only the pending-queue removal is
    // observable (reported in the event below)
    let _ = self.yieldVaultRegistry.remove(key: yieldVaultID)
    let _ = self.handlerCaps.remove(key: yieldVaultID)
    let _ = self.scheduleCaps.remove(key: yieldVaultID)
    let pending = self.pendingQueue.remove(key: yieldVaultID)
    let _ = self._listRemove(id: yieldVaultID)
    emit YieldVaultUnregistered(yieldVaultID: yieldVaultID, wasInPendingQueue: pending != nil)
}

Expand Down Expand Up @@ -155,20 +248,20 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {

/// Get paginated pending yield vault IDs
/// @param page: The page number (0-indexed); negative pages return an empty array
/// @param size: The page size (defaults to MAX_BATCH_SIZE if 0)
access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt): [UInt64] {
    let pageSize = size == 0 ? self.MAX_BATCH_SIZE : Int(size)
    let allPending = self.pendingQueue.keys

    // Guard negative pages so slice() below never receives a negative index
    if page < 0 {
        return []
    }

    let startIndex = page * pageSize
    if startIndex >= allPending.length {
        return []
    }

    // Clamp the window end to the number of pending entries
    let endIndex = startIndex + pageSize > allPending.length
        ? allPending.length
        : startIndex + pageSize

    return allPending.slice(from: startIndex, upTo: endIndex)
}

Expand All @@ -177,6 +270,25 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
return self.pendingQueue.length
}

/// Returns up to `limit` vault IDs starting from the tail (least recently executed).
/// Supervisor should only scan these for stuck detection instead of all registered vaults.
/// @param limit: Maximum number of IDs to return (caller typically passes MAX_BATCH_SIZE)
access(all) fun getStuckScanCandidates(limit: UInt): [UInt64] {
    var candidates: [UInt64] = []
    var cursor = self.listTail
    var taken: UInt = 0
    while taken < limit && cursor != nil {
        let id = cursor!
        candidates.append(id)
        // Step toward the head (more recently executed); a missing node ends the walk
        if let node = self.listNodes[id] {
            cursor = node.prev
        } else {
            cursor = nil
        }
        taken = taken + 1
    }
    return candidates
}

/// Get global Supervisor capability, if set
/// NOTE: Access restricted - only used internally by the scheduler
access(account)
Expand All @@ -193,7 +305,8 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
self.handlerCaps = {}
self.scheduleCaps = {}
self.pendingQueue = {}
self.listNodes = {}
self.listHead = nil
self.listTail = nil
}
}


28 changes: 6 additions & 22 deletions cadence/contracts/FlowYieldVaultsSchedulerV1.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 {
/// "priority": UInt8 (0=High,1=Medium,2=Low) - for Supervisor self-rescheduling
/// "executionEffort": UInt64 - for Supervisor self-rescheduling
/// "recurringInterval": UFix64 (for Supervisor self-rescheduling)
/// "scanForStuck": Bool (default true - scan all registered yield vaults for stuck ones)
/// "scanForStuck": Bool (default true - scan up to MAX_BATCH_SIZE least-recently-executed vaults for stuck ones)
/// }
access(FlowTransactionScheduler.Execute) fun executeTransaction(id: UInt64, data: AnyStruct?) {
let cfg = data as? {String: AnyStruct} ?? {}
Expand All @@ -186,24 +186,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 {

// STEP 1: State-based detection - scan for stuck yield vaults
if scanForStuck {
// TODO: add pagination - this will inevitably fails and at minimum creates inconsistent execution
// effort between runs
let registeredYieldVaults = FlowYieldVaultsSchedulerRegistry.getRegisteredYieldVaultIDs()
var scanned = 0
for yieldVaultID in registeredYieldVaults {
if scanned >= FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE {
break
}
scanned = scanned + 1

// Skip if already in pending queue
// TODO: This is extremely inefficient - accessing from mapping is preferrable to iterating over
// an array
if FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDs().contains(yieldVaultID) {
continue
}

// Check if yield vault is stuck (has recurring config, no active schedule, overdue)
let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates(limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE))
for yieldVaultID in candidates {
if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) {
FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID)
emit StuckYieldVaultDetected(yieldVaultID: yieldVaultID)
Expand All @@ -212,8 +196,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 {
}

// STEP 2: Process pending yield vaults - recover them via Schedule capability
let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: nil)
let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE))

for yieldVaultID in pendingYieldVaults {
// Get Schedule capability for this yield vault
let scheduleCap = FlowYieldVaultsSchedulerRegistry.getScheduleCap(yieldVaultID: yieldVaultID)
Expand Down Expand Up @@ -457,7 +441,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 {

// Initialize paths
self.SupervisorStoragePath = /storage/FlowYieldVaultsSupervisor

// Configure Supervisor at deploy time
self.ensureSupervisorConfigured()
}
Expand Down
4 changes: 2 additions & 2 deletions cadence/contracts/FlowYieldVaultsStrategiesV2.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -857,7 +857,7 @@ access(all) contract FlowYieldVaultsStrategiesV2 {
}

access(all) view fun getSupportedComposers(): {Type: Bool} {
return {
return {
Type<@MorphoERC4626StrategyComposer>(): true
}
}
Expand Down Expand Up @@ -994,7 +994,7 @@ access(all) contract FlowYieldVaultsStrategiesV2 {
fun _createRecurringConfig(withID: DeFiActions.UniqueIdentifier?): DeFiActions.AutoBalancerRecurringConfig {
// Create txnFunder that can provide/accept FLOW for scheduling fees
let txnFunder = self._createTxnFunder(withID: withID)

return DeFiActions.AutoBalancerRecurringConfig(
interval: 60 * 10, // Rebalance every 10 minutes
priority: FlowTransactionScheduler.Priority.Medium,
Expand Down
Loading
Loading