From 453fcc9ff81e1e81e3b746ea4695b5f1976e960b Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:00:35 -0700 Subject: [PATCH 01/28] Revert "fix Proximal extension" This reverts commit 9729819b86f375e4663de1fe9ec9c38d4932f580. --- ext/SEMProximalOptExt/SEMProximalOptExt.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ext/SEMProximalOptExt/SEMProximalOptExt.jl b/ext/SEMProximalOptExt/SEMProximalOptExt.jl index 04be35cb7..bedf1920e 100644 --- a/ext/SEMProximalOptExt/SEMProximalOptExt.jl +++ b/ext/SEMProximalOptExt/SEMProximalOptExt.jl @@ -3,7 +3,8 @@ module SEMProximalOptExt using StructuralEquationModels using StructuralEquationModels: print_type_name, print_field_types using ProximalAlgorithms -import StructuralEquationModels: SemOptimizerProximal + +export SemOptimizerProximal SEM = StructuralEquationModels From abc2847899447bf6e10ea76f105d6913c88a2442 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:01:59 -0700 Subject: [PATCH 02/28] Revert "fix NLopt extension" This reverts commit 81a4bd9839df01e9f487b9aa13e3df107856114a. 
--- ext/SEMNLOptExt/NLopt.jl | 5 +++++ ext/SEMNLOptExt/SEMNLOptExt.jl | 3 ++- src/StructuralEquationModels.jl | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 27bc30039..ac282ad6d 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -1,3 +1,8 @@ +Base.@kwdef struct NLoptConstraint + f::Any + tol = 0.0 +end + Base.convert( ::Type{NLoptConstraint}, tuple::NamedTuple{(:f, :tol), Tuple{F, T}}, diff --git a/ext/SEMNLOptExt/SEMNLOptExt.jl b/ext/SEMNLOptExt/SEMNLOptExt.jl index bf905e3ac..a159f6dc8 100644 --- a/ext/SEMNLOptExt/SEMNLOptExt.jl +++ b/ext/SEMNLOptExt/SEMNLOptExt.jl @@ -1,10 +1,11 @@ module SEMNLOptExt using StructuralEquationModels, NLopt -import StructuralEquationModels: SemOptimizerNLopt, NLoptConstraint SEM = StructuralEquationModels +export SemOptimizerNLopt, NLoptConstraint + include("NLopt.jl") end diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index 46692bd5b..e0cd1e7a4 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -198,6 +198,5 @@ export AbstractSem, ↔, ⇔, SemOptimizerNLopt, - NLoptConstraint, SemOptimizerProximal end From 56cdef1f26c482828b1466bc120fad31e6ed8c18 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:03:35 -0700 Subject: [PATCH 03/28] Revert "fix exporting structs from package extensions" This reverts commit f0df6538f0220f964cbf51772698c317a0b4cf86. 
--- ext/SEMNLOptExt/NLopt.jl | 68 +++++++++++++++++++++ ext/SEMProximalOptExt/ProximalAlgorithms.jl | 32 ++++++++++ src/StructuralEquationModels.jl | 7 +-- src/package_extensions/SEMNLOptExt.jl | 65 -------------------- src/package_extensions/SEMProximalOptExt.jl | 27 -------- 5 files changed, 101 insertions(+), 98 deletions(-) delete mode 100644 src/package_extensions/SEMNLOptExt.jl delete mode 100644 src/package_extensions/SEMProximalOptExt.jl diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index ac282ad6d..694247cac 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -1,3 +1,71 @@ +############################################################################################ +### Types +############################################################################################ +""" +Connects to `NLopt.jl` as the optimization backend. +Only usable if `NLopt.jl` is loaded in the current Julia session! + +# Constructor + + SemOptimizerNLopt(; + algorithm = :LD_LBFGS, + options = Dict{Symbol, Any}(), + local_algorithm = nothing, + local_options = Dict{Symbol, Any}(), + equality_constraints = Vector{NLoptConstraint}(), + inequality_constraints = Vector{NLoptConstraint}(), + kwargs...) + +# Arguments +- `algorithm`: optimization algorithm. 
+- `options::Dict{Symbol, Any}`: options for the optimization algorithm +- `local_algorithm`: local optimization algorithm +- `local_options::Dict{Symbol, Any}`: options for the local optimization algorithm +- `equality_constraints::Vector{NLoptConstraint}`: vector of equality constraints +- `inequality_constraints::Vector{NLoptConstraint}`: vector of inequality constraints + +# Example +```julia +my_optimizer = SemOptimizerNLopt() + +# constrained optimization with augmented lagrangian +my_constrained_optimizer = SemOptimizerNLopt(; + algorithm = :AUGLAG, + local_algorithm = :LD_LBFGS, + local_options = Dict(:ftol_rel => 1e-6), + inequality_constraints = NLoptConstraint(;f = my_constraint, tol = 0.0), +) +``` + +# Usage +All algorithms and options from the NLopt library are available, for more information see +the NLopt.jl package and the NLopt online documentation. +For information on how to use inequality and equality constraints, +see [Constrained optimization](@ref) in our online documentation. + +# Extended help + +## Interfaces +- `algorithm(::SemOptimizerNLopt)` +- `local_algorithm(::SemOptimizerNLopt)` +- `options(::SemOptimizerNLopt)` +- `local_options(::SemOptimizerNLopt)` +- `equality_constraints(::SemOptimizerNLopt)` +- `inequality_constraints(::SemOptimizerNLopt)` + +## Implementation + +Subtype of `SemOptimizer`. 
+""" +struct SemOptimizerNLopt{A, A2, B, B2, C} <: SemOptimizer{:NLopt} + algorithm::A + local_algorithm::A2 + options::B + local_options::B2 + equality_constraints::C + inequality_constraints::C +end + Base.@kwdef struct NLoptConstraint f::Any tol = 0.0 diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index 0d4748e3a..aec61e57e 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -1,3 +1,35 @@ +############################################################################################ +### Types +############################################################################################ +""" +Connects to `ProximalAlgorithms.jl` as the optimization backend. + +Can be used for regularized SEM, for a tutorial see the online docs on [Regularization](@ref). + +# Constructor + + SemOptimizerProximal(; + algorithm = ProximalAlgorithms.PANOC(), + operator_g, + operator_h = nothing, + kwargs..., + +# Arguments +- `algorithm`: optimization algorithm. +- `operator_g`: proximal operator (e.g., regularization penalty) +- `operator_h`: optional second proximal operator + +# Usage +All algorithms and operators from `ProximalAlgorithms.jl` are available, +for more information see the online docs on [Regularization](@ref) and +the documentation of `ProximalAlgorithms.jl` / `ProximalOperators.jl`. +""" +mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} + algorithm::A + operator_g::B + operator_h::C +end + SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) 
SemOptimizerProximal(; diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index e0cd1e7a4..a306eccfa 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -86,9 +86,6 @@ include("frontend/fit/fitmeasures/fit_measures.jl") # standard errors include("frontend/fit/standard_errors/hessian.jl") include("frontend/fit/standard_errors/bootstrap.jl") -# extensions -include("package_extensions/SEMNLOptExt.jl") -include("package_extensions/SEMProximalOptExt.jl") export AbstractSem, AbstractSemSingle, @@ -196,7 +193,5 @@ export AbstractSem, →, ←, ↔, - ⇔, - SemOptimizerNLopt, - SemOptimizerProximal + ⇔ end diff --git a/src/package_extensions/SEMNLOptExt.jl b/src/package_extensions/SEMNLOptExt.jl deleted file mode 100644 index 64c4cff04..000000000 --- a/src/package_extensions/SEMNLOptExt.jl +++ /dev/null @@ -1,65 +0,0 @@ -""" -Connects to `NLopt.jl` as the optimization backend. -Only usable if `NLopt.jl` is loaded in the current Julia session! - -# Constructor - - SemOptimizerNLopt(; - algorithm = :LD_LBFGS, - options = Dict{Symbol, Any}(), - local_algorithm = nothing, - local_options = Dict{Symbol, Any}(), - equality_constraints = Vector{NLoptConstraint}(), - inequality_constraints = Vector{NLoptConstraint}(), - kwargs...) - -# Arguments -- `algorithm`: optimization algorithm. 
-- `options::Dict{Symbol, Any}`: options for the optimization algorithm -- `local_algorithm`: local optimization algorithm -- `local_options::Dict{Symbol, Any}`: options for the local optimization algorithm -- `equality_constraints::Vector{NLoptConstraint}`: vector of equality constraints -- `inequality_constraints::Vector{NLoptConstraint}`: vector of inequality constraints - -# Example -```julia -my_optimizer = SemOptimizerNLopt() - -# constrained optimization with augmented lagrangian -my_constrained_optimizer = SemOptimizerNLopt(; - algorithm = :AUGLAG, - local_algorithm = :LD_LBFGS, - local_options = Dict(:ftol_rel => 1e-6), - inequality_constraints = NLoptConstraint(;f = my_constraint, tol = 0.0), -) -``` - -# Usage -All algorithms and options from the NLopt library are available, for more information see -the NLopt.jl package and the NLopt online documentation. -For information on how to use inequality and equality constraints, -see [Constrained optimization](@ref) in our online documentation. - -# Extended help - -## Interfaces -- `algorithm(::SemOptimizerNLopt)` -- `local_algorithm(::SemOptimizerNLopt)` -- `options(::SemOptimizerNLopt)` -- `local_options(::SemOptimizerNLopt)` -- `equality_constraints(::SemOptimizerNLopt)` -- `inequality_constraints(::SemOptimizerNLopt)` -""" -struct SemOptimizerNLopt{A, A2, B, B2, C} <: SemOptimizer{:NLopt} - algorithm::A - local_algorithm::A2 - options::B - local_options::B2 - equality_constraints::C - inequality_constraints::C -end - -Base.@kwdef struct NLoptConstraint - f::Any - tol = 0.0 -end diff --git a/src/package_extensions/SEMProximalOptExt.jl b/src/package_extensions/SEMProximalOptExt.jl deleted file mode 100644 index ad4c2da2a..000000000 --- a/src/package_extensions/SEMProximalOptExt.jl +++ /dev/null @@ -1,27 +0,0 @@ -""" -Connects to `ProximalAlgorithms.jl` as the optimization backend. -Can be used for regularized SEM, for a tutorial see the online docs on [Regularization](@ref). 
- -# Constructor - - SemOptimizerProximal(; - algorithm = ProximalAlgorithms.PANOC(), - operator_g, - operator_h = nothing, - kwargs..., - -# Arguments -- `algorithm`: optimization algorithm. -- `operator_g`: proximal operator (e.g., regularization penalty) -- `operator_h`: optional second proximal operator - -# Usage -All algorithms and operators from `ProximalAlgorithms.jl` are available, -for more information see the online docs on [Regularization](@ref) and -the documentation of `ProximalAlgorithms.jl` / `ProximalOperators.jl`. -""" -mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} - algorithm::A - operator_g::B - operator_h::C -end From 421927ef79a466c399494bb6b83a764a37467b7d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:18:02 -0700 Subject: [PATCH 04/28] types.jl: move SemOptimizer API into abstract.jl --- src/optimizer/abstract.jl | 17 +++++++++++++++++ src/types.jl | 11 ----------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index c1ad72592..f00e50552 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,3 +1,20 @@ +engine(::Type{SemOptimizer{E}}) where {E} = E +engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) + +SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = + SemOptimizer{engine}(args...; kwargs...) + +# fallback optimizer constructor +function SemOptimizer{E}(args...; kwargs...) where {E} + if E == :NLOpt + error("$E optimizer requires \"using NLopt\".") + elseif E == :Proximal + error("$E optimizer requires \"using ProximalAlgorithms\".") + else + error("$E optimizer is not supported.") + end +end + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) 
diff --git a/src/types.jl b/src/types.jl index 0e279e5b2..73a650e6f 100644 --- a/src/types.jl +++ b/src/types.jl @@ -86,17 +86,6 @@ If you want to connect the SEM package to a new optimization backend, you should """ abstract type SemOptimizer{E} end -engine(::Type{SemOptimizer{E}}) where {E} = E -engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) - -SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = - SemOptimizer{engine}(args...; kwargs...) - -# fallback optimizer constructor -function SemOptimizer{E}(args...; kwargs...) where {E} - throw(ErrorException("$E optimizer is not supported.")) -end - """ Supertype of all objects that can serve as the observed field of a SEM. Pre-processes data and computes sufficient statistics for example. From 84bd7bdbb0ea9e30b519a9d1e8aaf372e8d3f5f8 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:25:57 -0700 Subject: [PATCH 05/28] NLoptResult should not be mutable --- ext/SEMNLOptExt/NLopt.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 694247cac..a51a3d065 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -124,7 +124,7 @@ local_options(optimizer::SemOptimizerNLopt) = optimizer.local_options equality_constraints(optimizer::SemOptimizerNLopt) = optimizer.equality_constraints inequality_constraints(optimizer::SemOptimizerNLopt) = optimizer.inequality_constraints -mutable struct NLoptResult +struct NLoptResult result::Any problem::Any end From 930e0e5f005546aab1afee605c027a3738b2dc4a Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:32:15 -0700 Subject: [PATCH 06/28] SemNLOpt: use f or f => tol pair for constraints It is a simple and intuitive syntax and avoids declaring new types. Also allow specifying default constraint tolerance as `constraint_tol`. 
--- docs/src/tutorials/constraints/constraints.md | 6 +- ext/SEMNLOptExt/NLopt.jl | 108 +++++++++--------- ext/SEMNLOptExt/SEMNLOptExt.jl | 2 +- .../political_democracy/constraints.jl | 4 +- 4 files changed, 61 insertions(+), 59 deletions(-) diff --git a/docs/src/tutorials/constraints/constraints.md b/docs/src/tutorials/constraints/constraints.md index c433240a9..938a2bb9e 100644 --- a/docs/src/tutorials/constraints/constraints.md +++ b/docs/src/tutorials/constraints/constraints.md @@ -1,6 +1,6 @@ # Constrained optimization -## Using the NLopt backend +## Using the NLopt engine ### Define an example model @@ -128,8 +128,8 @@ constrained_optimizer = SemOptimizerNLopt( algorithm = :AUGLAG, options = Dict(:upper_bounds => upper_bounds, :xtol_abs => 1e-4), local_algorithm = :LD_LBFGS, - equality_constraints = NLoptConstraint(;f = eq_constraint, tol = 1e-8), - inequality_constraints = NLoptConstraint(;f = ineq_constraint, tol = 1e-8), + equality_constraints = (eq_constraint => 1e-8), + inequality_constraints = (ineq_constraint => 1e-8), ) ``` diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index a51a3d065..fe052b135 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -1,6 +1,9 @@ ############################################################################################ ### Types ############################################################################################ + +const NLoptConstraint = Pair{Any, Number} + """ Connects to `NLopt.jl` as the optimization backend. Only usable if `NLopt.jl` is loaded in the current Julia session! @@ -12,8 +15,9 @@ Only usable if `NLopt.jl` is loaded in the current Julia session! options = Dict{Symbol, Any}(), local_algorithm = nothing, local_options = Dict{Symbol, Any}(), - equality_constraints = Vector{NLoptConstraint}(), - inequality_constraints = Vector{NLoptConstraint}(), + equality_constraints = nothing, + inequality_constraints = nothing, + constraint_tol::Number = 0.0, kwargs...) 
# Arguments @@ -21,19 +25,32 @@ Only usable if `NLopt.jl` is loaded in the current Julia session! - `options::Dict{Symbol, Any}`: options for the optimization algorithm - `local_algorithm`: local optimization algorithm - `local_options::Dict{Symbol, Any}`: options for the local optimization algorithm -- `equality_constraints::Vector{NLoptConstraint}`: vector of equality constraints -- `inequality_constraints::Vector{NLoptConstraint}`: vector of inequality constraints +- `equality_constraints: optional equality constraints +- `inequality_constraints:: optional inequality constraints +- `constraint_tol::Number`: default tolerance for constraints + +## Constraints specification + +Equality and inequality constraints arguments could be a single constraint or any +iterable constraints container (e.g. vector or tuple). +Each constraint could be a function or any other callable object that +takes the two input arguments: + - the vector of the model parameters; + - the array for the in-place calculation of the constraint gradient. +To override the default tolerance, the constraint could be specified +as a pair of the function and its tolerance: `constraint_func => tol`. # Example ```julia -my_optimizer = SemOptimizerNLopt() +my_optimizer = SemOptimizer(engine = :NLopt) # constrained optimization with augmented lagrangian -my_constrained_optimizer = SemOptimizerNLopt(; +my_constrained_optimizer = SemOptimizer(; + engine = :NLopt, algorithm = :AUGLAG, local_algorithm = :LD_LBFGS, local_options = Dict(:ftol_rel => 1e-6), - inequality_constraints = NLoptConstraint(;f = my_constraint, tol = 0.0), + inequality_constraints = (my_constraint => tol), ) ``` @@ -57,25 +74,15 @@ see [Constrained optimization](@ref) in our online documentation. Subtype of `SemOptimizer`. 
""" -struct SemOptimizerNLopt{A, A2, B, B2, C} <: SemOptimizer{:NLopt} - algorithm::A - local_algorithm::A2 - options::B - local_options::B2 - equality_constraints::C - inequality_constraints::C +struct SemOptimizerNLopt <: SemOptimizer{:NLopt} + algorithm::Symbol + local_algorithm::Union{Symbol, Nothing} + options::Dict{Symbol, Any} + local_options::Dict{Symbol, Any} + equality_constraints::Vector{NLoptConstraint} + inequality_constraints::Vector{NLoptConstraint} end -Base.@kwdef struct NLoptConstraint - f::Any - tol = 0.0 -end - -Base.convert( - ::Type{NLoptConstraint}, - tuple::NamedTuple{(:f, :tol), Tuple{F, T}}, -) where {F, T} = NLoptConstraint(tuple.f, tuple.tol) - ############################################################################################ ### Constructor ############################################################################################ @@ -85,22 +92,26 @@ function SemOptimizerNLopt(; local_algorithm = nothing, options = Dict{Symbol, Any}(), local_options = Dict{Symbol, Any}(), - equality_constraints = Vector{NLoptConstraint}(), - inequality_constraints = Vector{NLoptConstraint}(), - kwargs..., + equality_constraints = nothing, + inequality_constraints = nothing, + constraint_tol::Number = 0.0, + kwargs..., # FIXME remove the sink for unused kwargs ) - applicable(iterate, equality_constraints) && !isa(equality_constraints, NamedTuple) || - (equality_constraints = [equality_constraints]) - applicable(iterate, inequality_constraints) && - !isa(inequality_constraints, NamedTuple) || - (inequality_constraints = [inequality_constraints]) + constraint(f::Any) = f => constraint_tol + constraint(f_and_tol::Pair) = f_and_tol + + constraints(::Nothing) = Vector{NLoptConstraint}() + constraints(constraints) = + applicable(iterate, constraints) && !isa(constraints, Pair) ? 
+ [constraint(constr) for constr in constraints] : [constraint(constraints)] + return SemOptimizerNLopt( algorithm, local_algorithm, options, local_options, - convert.(NLoptConstraint, equality_constraints), - convert.(NLoptConstraint, inequality_constraints), + constraints(equality_constraints), + constraints(inequality_constraints), ) end @@ -151,10 +162,7 @@ function SEM.fit( start_params::AbstractVector; kwargs..., ) - - # construct the NLopt problem - opt = construct_NLopt_problem(optim.algorithm, optim.options, length(start_params)) - set_NLopt_constraints!(opt, optim) + opt = construct_NLopt(optim.algorithm, optim.options, nparams(model)) opt.min_objective = (par, G) -> SEM.evaluate!( zero(eltype(par)), @@ -163,13 +171,16 @@ function SEM.fit( model, par, ) + for (f, tol) in optim.inequality_constraints + inequality_constraint!(opt, f, tol) + end + for (f, tol) in optim.equality_constraints + equality_constraint!(opt, f, tol) + end if !isnothing(optim.local_algorithm) - opt_local = construct_NLopt_problem( - optim.local_algorithm, - optim.local_options, - length(start_params), - ) + opt_local = + construct_NLopt(optim.local_algorithm, optim.local_options, nparams(model)) opt.local_optimizer = opt_local end @@ -183,7 +194,7 @@ end ### additional functions ############################################################################################ -function construct_NLopt_problem(algorithm, options, npar) +function construct_NLopt(algorithm, options, npar) opt = Opt(algorithm, npar) for (key, val) in pairs(options) @@ -193,15 +204,6 @@ function construct_NLopt_problem(algorithm, options, npar) return opt end -function set_NLopt_constraints!(opt::Opt, optimizer::SemOptimizerNLopt) - for con in optimizer.inequality_constraints - inequality_constraint!(opt, con.f, con.tol) - end - for con in optimizer.equality_constraints - equality_constraint!(opt, con.f, con.tol) - end -end - 
############################################################################################ # pretty printing ############################################################################################ diff --git a/ext/SEMNLOptExt/SEMNLOptExt.jl b/ext/SEMNLOptExt/SEMNLOptExt.jl index a159f6dc8..61c41338b 100644 --- a/ext/SEMNLOptExt/SEMNLOptExt.jl +++ b/ext/SEMNLOptExt/SEMNLOptExt.jl @@ -4,7 +4,7 @@ using StructuralEquationModels, NLopt SEM = StructuralEquationModels -export SemOptimizerNLopt, NLoptConstraint +export SemOptimizerNLopt include("NLopt.jl") diff --git a/test/examples/political_democracy/constraints.jl b/test/examples/political_democracy/constraints.jl index cc1b0874d..7a6670fa3 100644 --- a/test/examples/political_democracy/constraints.jl +++ b/test/examples/political_democracy/constraints.jl @@ -26,8 +26,8 @@ constrained_optimizer = SemOptimizer(; algorithm = :AUGLAG, local_algorithm = :LD_LBFGS, options = Dict(:xtol_rel => 1e-4), - # equality_constraints = (f = eq_constraint, tol = 1e-14), - inequality_constraints = (f = ineq_constraint, tol = 0.0), + # equality_constraints = (eq_constraint => 1e-14), + inequality_constraints = (ineq_constraint => 0.0), ) @test constrained_optimizer isa SemOptimizer{:NLopt} From 230af39bdf54c78acb680d10a9b84ea6b76323c6 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 18:32:07 -0800 Subject: [PATCH 07/28] NLopt: cleanup docstring --- ext/SEMNLOptExt/NLopt.jl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index fe052b135..d18c35e23 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -5,12 +5,13 @@ const NLoptConstraint = Pair{Any, Number} """ -Connects to `NLopt.jl` as the optimization backend. -Only usable if `NLopt.jl` is loaded in the current Julia session! +Uses *NLopt.jl* as the optimization engine. +Only available if *NLopt.jl* is loaded in the current Julia session! 
# Constructor - SemOptimizerNLopt(; + SemOptimizer(; + engine = :NLopt, algorithm = :LD_LBFGS, options = Dict{Symbol, Any}(), local_algorithm = nothing, @@ -55,8 +56,9 @@ my_constrained_optimizer = SemOptimizer(; ``` # Usage -All algorithms and options from the NLopt library are available, for more information see -the NLopt.jl package and the NLopt online documentation. +All algorithms and options from the *NLopt* library are available, for more information see +the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the +[NLopt docs](https://nlopt.readthedocs.io/en/latest/). For information on how to use inequality and equality constraints, see [Constrained optimization](@ref) in our online documentation. From d1355a0562385b685e8f4dc6014a83f927255f80 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 18:33:05 -0800 Subject: [PATCH 08/28] NLopt: update/simplify docs use SemOptimizer(engine = :NLopt) instead of SemOptimizerNLopt() as this is a more universal scheme --- docs/src/tutorials/backends/nlopt.md | 39 +++++------- docs/src/tutorials/constraints/constraints.md | 60 +++++++++---------- 2 files changed, 44 insertions(+), 55 deletions(-) diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index feb5c8f48..3ad4bf497 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -1,31 +1,21 @@ # Using NLopt.jl -[`SemOptimizerNLopt`](@ref) implements the connection to `NLopt.jl`. -It is only available if the `NLopt` package is loaded alongside `StructuralEquationModels.jl` in the running Julia session. -It takes a bunch of arguments: +When [`NLopt.jl`](https://github.com/jump-dev/NLopt.jl) is loaded in the running Julia session, +it could be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` +(see [NLopt-specific options](@ref `SemOptimizerNLopt`)). 
+Among other things, `NLopt` enables constrained optimization of the SEM models, which is +explained in the [Constrained optimization](@ref) section. -```julia - • algorithm: optimization algorithm - - • options::Dict{Symbol, Any}: options for the optimization algorithm - - • local_algorithm: local optimization algorithm - - • local_options::Dict{Symbol, Any}: options for the local optimization algorithm - - • equality_constraints::Vector{NLoptConstraint}: vector of equality constraints - - • inequality_constraints::Vector{NLoptConstraint}: vector of inequality constraints -``` -Constraints are explained in the section on [Constrained optimization](@ref). - -The defaults are LBFGS as the optimization algorithm and the standard options from `NLopt.jl`. -We can choose something different: +We can override the default *NLopt* algorithm (LFBGS) and instead use +the *augmented lagrangian* method with LBFGS as the *local* optimization algorithm, +stop at a maximum of 200 evaluations and use a relative tolerance of +the objective value of `1e-6` as the stopping criterion for the local algorithm: ```julia using NLopt -my_optimizer = SemOptimizerNLopt(; +my_optimizer = SemOptimizer(; + engine = :NLopt, algorithm = :AUGLAG, options = Dict(:maxeval => 200), local_algorithm = :LD_LBFGS, @@ -33,15 +23,14 @@ my_optimizer = SemOptimizerNLopt(; ) ``` -This uses an augmented lagrangian method with LBFGS as the local optimization algorithm, stops at a maximum of 200 evaluations and uses a relative tolerance of the objective value of `1e-6` as the stopping criterion for the local algorithm. - To see how to use the optimizer to actually fit a model now, check out the [Model fitting](@ref) section. -In the NLopt docs, you can find explanations about the different [algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/) and a [tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/) that also explains the different options. 
+In the *NLopt* docs, you can find details about the [optimization algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/), +and the [tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/) that demonstrates how to tweak their behavior. To choose an algorithm, just pass its name without the 'NLOPT\_' prefix (for example, 'NLOPT\_LD\_SLSQP' can be used by passing `algorithm = :LD_SLSQP`). -The README of the [julia package](https://github.com/JuliaOpt/NLopt.jl) may also be helpful, and provides a list of options: +The README of the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) may also be helpful, and provides a list of options: - `algorithm` - `stopval` diff --git a/docs/src/tutorials/constraints/constraints.md b/docs/src/tutorials/constraints/constraints.md index 938a2bb9e..32bb6a529 100644 --- a/docs/src/tutorials/constraints/constraints.md +++ b/docs/src/tutorials/constraints/constraints.md @@ -1,10 +1,15 @@ # Constrained optimization +*SEM.jl* allows to fit models with additional constraints imposed on the parameters. + ## Using the NLopt engine +*NLopt.jl* is one of *SEM.jl* optimization engines that supports constrained optimization. +In the example below we show how to specify constraints for the *SEM* model when using *NLopt*. + ### Define an example model -Let's revisit our model from [A first model](@ref): +Let's revisit our model from [A first model](@ref) and fit it first without constraints: ```@example constraints using StructuralEquationModels @@ -57,39 +62,40 @@ details(partable) ### Define the constraints -Let's introduce some constraints: +Let's introduce some constraints (they are not based on any real properties of the underlying study and serve only as an example): 1. **Equality constraint**: The covariances `y3 ↔ y7` and `y8 ↔ y4` should sum up to `1`. 2. **Inequality constraint**: The difference between the loadings `dem60 → y2` and `dem60 → y3` should be smaller than `0.1` 3. 
**Bound constraint**: The directed effect from `ind60 → dem65` should be smaller than `0.5` -(Of course those constaints only serve an illustratory purpose.) - -We first need to get the indices of the respective parameters that are invoved in the constraints. -We can look up their labels in the output above, and retrieve their indices as +Since *NLopt* does not have access to the SEM parameter names, its constaints are defined on the vector of all SEM parameters. +We have to look up the indices of the parameters involved in the constraints to construct the respective functions. ```@example constraints parind = param_indices(model) parind[:y3y7] # 29 ``` -The bound constraint is easy to specify: Just give a vector of upper or lower bounds that contains the bound for each parameter. In our example, only the parameter labeled `:λₗ` has an upper bound, and the number of total parameters is `n_par(model) = 31`, so we define +The bound constraint is easy to specify: just give a vector of upper or lower bounds for each parameter. +In our example, only the parameter labeled `:λₗ` has an upper bound, and the number of total parameters is `n_par(model) = 31`, so ```@example constraints upper_bounds = fill(Inf, 31) upper_bounds[parind[:λₗ]] = 0.5 ``` -The equailty and inequality constraints have to be reformulated to be of the form `x = 0` or `x ≤ 0`: -1. `y3 ↔ y7 + y8 ↔ y4 - 1 = 0` -2. `dem60 → y2 - dem60 → y3 - 0.1 ≤ 0` +The equailty and inequality constraints have to be reformulated in the `f(θ) = 0` or `f(θ) ≤ 0` form, +where `θ` is the vector of SEM parameters: +1. `f(θ) = 0`, where `f(θ) = y3 ↔ y7 + y8 ↔ y4 - 1` +2. `g(θ) ≤ 0`, where `g(θ) = dem60 → y2 - dem60 → y3 - 0.1` -Now they can be defined as functions of the parameter vector: +If the optimization algorithm needs gradients, it will pass the `gradient` vector that is of the same size as the parameters, +and the constraint function has to calculate the gradient in-place. 
```@example constraints parind[:y3y7] # 29 parind[:y8y4] # 30 # θ[29] + θ[30] - 1 = 0.0 -function eq_constraint(θ, gradient) +function f(θ, gradient) if length(gradient) > 0 gradient .= 0.0 gradient[29] = 1.0 @@ -101,7 +107,7 @@ end parind[:λ₂] # 3 parind[:λ₃] # 4 # θ[3] - θ[4] - 0.1 ≤ 0 -function ineq_constraint(θ, gradient) +function g(θ, gradient) if length(gradient) > 0 gradient .= 0.0 gradient[3] = 1.0 @@ -111,29 +117,26 @@ function ineq_constraint(θ, gradient) end ``` -If the algorithm needs gradients at an iteration, it will pass the vector `gradient` that is of the same size as the parameters. -With `if length(gradient) > 0` we check if the algorithm needs gradients, and if it does, we fill the `gradient` vector with the gradients -of the constraint w.r.t. the parameters. - -In NLopt, vector-valued constraints are also possible, but we refer to the documentation for that. +In *NLopt*, vector-valued constraints are also possible, but we refer to the documentation for that. ### Fit the model -We now have everything together to specify and fit our model. First, we specify our optimizer backend as +Now we can construct the *SemOptimizer* that will use the *NLopt* engine for constrained optimization. ```@example constraints using NLopt -constrained_optimizer = SemOptimizerNLopt( +constrained_optimizer = SemOptimizer( + engine = :NLopt, algorithm = :AUGLAG, options = Dict(:upper_bounds => upper_bounds, :xtol_abs => 1e-4), local_algorithm = :LD_LBFGS, - equality_constraints = (eq_constraint => 1e-8), - inequality_constraints = (ineq_constraint => 1e-8), + equality_constraints = (f => 1e-8), + inequality_constraints = (g => 1e-8), ) ``` -As you see, the equality constraints and inequality constraints are passed as keyword arguments, and the bounds are passed as options for the (outer) optimization algorithm. 
+As you see, the equality and inequality constraints are passed as keyword arguments, and the bounds are passed as options for the (outer) optimization algorithm. Additionally, for equality and inequality constraints, a feasibility tolerance can be specified that controls if a solution can be accepted, even if it violates the constraints by a small amount. Especially for equality constraints, it is recommended to allow for a small positive tolerance. In this example, we set both tolerances to `1e-8`. @@ -141,19 +144,16 @@ In this example, we set both tolerances to `1e-8`. !!! warning "Convergence criteria" We have often observed that the default convergence criteria in NLopt lead to non-convergence flags. Indeed, this example does not convergence with default criteria. - As you see above, we used a realively liberal absolute tolerance in the optimization parameters of 1e-4. + As you see above, we used a relatively liberal absolute tolerance in the optimization parameters of 1e-4. This should not be a problem in most cases, as the sampling variance in (almost all) structural equation models should lead to uncertainty in the parameter estimates that are orders of magnitude larger. We nontheless recommend choosing a convergence criterion with care (i.e. w.r.t. the scale of your parameters), inspecting the solutions for plausibility, and comparing them to unconstrained solutions. 
-```@example constraints -model_constrained = Sem( - specification = partable, - data = data -) +We now have everything to fit our model under constraints: -model_fit_constrained = fit(constrained_optimizer, model_constrained) +```@example constraints +model_fit_constrained = fit(constrained_optimizer, model) ``` As you can see, the optimizer converged (`:XTOL_REACHED`) and investigating the solution yields From 6f3ccd537ef794e8df776a00ddcc157da94e3168 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:35:11 -0700 Subject: [PATCH 09/28] Optim.md: SemOptimizerOptim => SemOptimizer --- docs/src/tutorials/backends/optim.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/src/tutorials/backends/optim.md b/docs/src/tutorials/backends/optim.md index cf287e773..a16537ec4 100644 --- a/docs/src/tutorials/backends/optim.md +++ b/docs/src/tutorials/backends/optim.md @@ -1,23 +1,23 @@ # Using Optim.jl -[`SemOptimizerOptim`](@ref) implements the connection to `Optim.jl`. -It takes two arguments, `algorithm` and `options`. -The defaults are LBFGS as the optimization algorithm and the standard options from `Optim.jl`. -We can load the `Optim` and `LineSearches` packages to choose something different: +[Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) is the default optimization engine of *SEM.jl*, +see [`SemOptimizerOptim`](@ref) for a full list of its parameters. 
+It defaults to the LBFGS optimization, but we can load the `Optim` and `LineSearches` packages +and specify BFGS (!not L-BFGS) with a back-tracking linesearch and Hager-Zhang initial step length guess: ```julia using Optim, LineSearches -my_optimizer = SemOptimizerOptim( +my_optimizer = SemOptimizer( algorithm = BFGS( - linesearch = BackTracking(order=3), + linesearch = BackTracking(order=3), alphaguess = InitialHagerZhang() - ), - options = Optim.Options(show_trace = true) - ) + ), + options = Optim.Options(show_trace = true) +) ``` -This optimizer will use BFGS (!not L-BFGS) with a back tracking linesearch and a certain initial step length guess. Also, the trace of the optimization will be printed to the console. +Note that we used `options` to print the optimization progress to the console. To see how to use the optimizer to actually fit a model now, check out the [Model fitting](@ref) section. From 242c6021a8f2ed28f64154f154c4e65cae0c726d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:37:20 -0700 Subject: [PATCH 10/28] regulariz.md: SemOptimProx => SemOptimizer --- .../regularization/regularization.md | 67 ++++++++----------- 1 file changed, 29 insertions(+), 38 deletions(-) diff --git a/docs/src/tutorials/regularization/regularization.md b/docs/src/tutorials/regularization/regularization.md index 2b2c6df30..17add030a 100644 --- a/docs/src/tutorials/regularization/regularization.md +++ b/docs/src/tutorials/regularization/regularization.md @@ -5,7 +5,9 @@ For ridge regularization, you can simply use `SemRidge` as an additional loss function (for example, a model with the loss functions `SemML` and `SemRidge` corresponds to ridge-regularized maximum likelihood estimation). 
-For lasso, elastic net and (far) beyond, you can load the `ProximalAlgorithms.jl` and `ProximalOperators.jl` packages alongside `StructuralEquationModels`: +You can define lasso, elastic net and other forms of regularization using [`ProximalOperators.jl`](https://github.com/JuliaFirstOrder/ProximalOperators.jl) +and optimize the SEM model with [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) +that provides so-called *proximal optimization* algorithms. ```@setup reg using StructuralEquationModels, ProximalAlgorithms, ProximalOperators @@ -19,24 +21,22 @@ Pkg.add("ProximalOperators") using StructuralEquationModels, ProximalAlgorithms, ProximalOperators ``` -## `SemOptimizerProximal` +## Proximal optimization -To estimate regularized models, we provide a "building block" for the optimizer part, called `SemOptimizerProximal`. -It connects our package to the [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) optimization backend, providing so-called proximal optimization algorithms. -Those can handle, amongst other things, various forms of regularization. - -It can be used as +With *ProximalAlgorithms* package loaded, it is now possible to use `:Proximal` optimization engine +in `SemOptimizer` for estimating regularized models. ```julia -SemOptimizerProximal( +SemOptimizer(; + engine = :Proximal, algorithm = ProximalAlgorithms.PANOC(), operator_g, operator_h = nothing ) ``` -The proximal operator (aka the regularization function) can be passed as `operator_g`. -The available Algorithms are listed [here](https://juliafirstorder.github.io/ProximalAlgorithms.jl/stable/guide/implemented_algorithms/). +The *proximal operator* (aka the *regularization function*) is passed as `operator_g`, see [available operators](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/). 
+The `algorithm` is chosen from one of the [available algorithms](https://juliafirstorder.github.io/ProximalAlgorithms.jl/stable/guide/implemented_algorithms/). ## First example - lasso @@ -84,7 +84,7 @@ model = Sem( We labeled the covariances between the items because we want to regularize those: ```@example reg -ind = getindex.( +cov_inds = getindex.( Ref(param_indices(model)), [:cov_15, :cov_24, :cov_26, :cov_37, :cov_48, :cov_68]) ``` @@ -96,30 +96,24 @@ The lasso penalty is defined as \sum \lambda_i \lvert \theta_i \rvert ``` -From the previously linked [documentation](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/#ProximalOperators.NormL1), we find that lasso regularization is named `NormL1` in the `ProximalOperators` package, and that we can pass an array of hyperparameters (`λ`) to control the amount of regularization for each parameter. To regularize only the observed item covariances, we define `λ` as +In `ProximalOperators.jl`, lasso regularization is represented by the [`NormL1`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/#ProximalOperators.NormL1) operator. It allows controlling the amount of +regularization individually for each SEM model parameter via the vector of hyperparameters (`λ`). +To regularize only the observed item covariances, we define `λ` as ```@example reg -λ = zeros(31); λ[ind] .= 0.02 -``` - -and use `SemOptimizerProximal`. 
+λ = zeros(31); λ[cov_inds] .= 0.02 -```@example reg -optimizer_lasso = SemOptimizerProximal( +optimizer_lasso = SemOptimizer( + engine = :Proximal, operator_g = NormL1(λ) ) - -model_lasso = Sem( - specification = partable, - data = data -) ``` Let's fit the regularized model ```@example reg -fit_lasso = fit(optimizer_lasso, model_lasso) +fit_lasso = fit(optimizer_lasso, model) ``` and compare the solution to unregularizted estimates: @@ -134,34 +128,31 @@ update_partable!(partable, :estimate_lasso, fit_lasso, solution(fit_lasso)) details(partable) ``` -Instead of explicitely defining a `SemOptimizerProximal` object, you can also pass `engine = :Proximal` and additional keyword arguments to `fit`: +Instead of explicitly defining a `SemOptimizer` object, you can also pass `engine = :Proximal` +and additional keyword arguments directly to the `fit` function: ```@example reg -sem_fit = fit(model; engine = :Proximal, operator_g = NormL1(λ)) +fit_lasso2 = fit(model; engine = :Proximal, operator_g = NormL1(λ)) ``` ## Second example - mixed l1 and l0 regularization You can choose to penalize different parameters with different types of regularization functions. -Let's use the lasso again on the covariances, but additionally penalyze the error variances of the observed items via l0 regularization. +Let's use the *lasso* (*l1*) again on the covariances, but additionally penalize the error variances of the observed items via *l0* regularization. -The l0 penalty is defined as +The *l0* penalty is defined as ```math -\lambda \mathrm{nnz}(\theta) +l_0 = \lambda \mathrm{nnz}(\theta) ``` -To define a sup of separable proximal operators (i.e. 
no parameter is penalized twice), -we can use [`SlicedSeparableSum`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/calculus/#ProximalOperators.SlicedSeparableSum) from the `ProximalOperators` package: +Since we apply *l1* and *l0* to the disjoint sets of parameters, this regularization could be represented as +as sum of *separable proximal operators* (i.e. no parameter is penalized twice) +implemented by the [`SlicedSeparableSum`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/calculus/#ProximalOperators.SlicedSeparableSum) operator: ```@example reg -prox_operator = SlicedSeparableSum((NormL0(20.0), NormL1(0.02), NormL0(0.0)), ([ind], [9:11], [vcat(1:8, 12:25)])) - -model_mixed = Sem( - specification = partable, - data = data, -) +l0_and_l1_reg = SlicedSeparableSum((NormL0(20.0), NormL1(0.02), NormL0(0.0)), ([cov_inds], [9:11], [vcat(1:8, 12:25)])) -fit_mixed = fit(model_mixed; engine = :Proximal, operator_g = prox_operator) +fit_mixed = fit(model; engine = :Proximal, operator_g = l0_and_l1_reg) ``` Let's again compare the different results: From 3e5c9ac14af3c252c36fcb1ab6e7ce8a88e2c985 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:36:13 -0800 Subject: [PATCH 11/28] optimizer_engine(): rename and fix signature optimizer_engine(): add docstring --- src/optimizer/abstract.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index f00e50552..5bdf65b26 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,6 +1,3 @@ -engine(::Type{SemOptimizer{E}}) where {E} = E -engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) - SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = SemOptimizer{engine}(args...; kwargs...) @@ -15,6 +12,15 @@ function SemOptimizer{E}(args...; kwargs...) 
where {E} end end +""" + optimizer_engine(::Type{<:SemOptimizer}) + optimizer_engine(::SemOptimizer) + +Returns the engine name (`Symbol`) for a [`SemOptimizer`](@ref) instance or subtype. +""" +optimizer_engine(::Type{<:SemOptimizer{E}}) where {E} = E +optimizer_engine(optim::SemOptimizer) = optimizer_engine(typeof(optim)) + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) From 9471fbe91efdda573cc0f90025975f38cdded0df Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:39:33 -0800 Subject: [PATCH 12/28] optimizer_engines(): new method --- Project.toml | 2 ++ src/StructuralEquationModels.jl | 2 ++ src/optimizer/abstract.jl | 14 +++++++++++++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 376347083..0cc596b94 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" LazyArtifacts = "4af54fe1-eca0-43a8-85a7-787d91b784e3" LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -30,6 +31,7 @@ StenoGraphs = "0.2 - 0.3, 0.4.1 - 0.5" DataFrames = "1" Distributions = "0.25" FiniteDiff = "2" +InteractiveUtils = "1.11.0" LineSearches = "7" NLSolversBase = "7" NLopt = "0.6, 1" diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index a306eccfa..f537dafb3 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -18,6 +18,8 @@ using LinearAlgebra, import StatsAPI: params, coef, coefnames, dof, fit, nobs, coeftable +using InteractiveUtils: subtypes + export StenoGraphs, @StenoGraph, meld, SimpleNode const SEM = StructuralEquationModels diff --git a/src/optimizer/abstract.jl 
b/src/optimizer/abstract.jl index 5bdf65b26..d0da444fe 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -21,9 +21,21 @@ Returns the engine name (`Symbol`) for a [`SemOptimizer`](@ref) instance or subt optimizer_engine(::Type{<:SemOptimizer{E}}) where {E} = E optimizer_engine(optim::SemOptimizer) = optimizer_engine(typeof(optim)) +""" + optimizer_engines() + +Returns a vector of optimizer engines supported by the `engine` keyword argument of +the [`SemOptimizer`](@ref) constructor. + +The list of engines depends on the Julia packages loaded (with the `using` directive) +into the current session. +""" +optimizer_engines() = + Symbol[optimizer_engine(opt_type) for opt_type in subtypes(SemOptimizer)] + """ fit([optim::SemOptimizer], model::AbstractSem; - [engine::Symbol], start_val = start_val, kwargs...) + [engine::Symbol], start_val = start_val, kwargs...) Return the fitted `model`. From f4f92808bc757ba9cebde91db2df7fca8acf37df Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Mon, 2 Feb 2026 18:58:28 -0800 Subject: [PATCH 13/28] export optmizer_engine() --- src/StructuralEquationModels.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index f537dafb3..b2e5edd8d 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -125,6 +125,9 @@ export AbstractSem, SemOptimizerEmpty, SemOptimizerOptim, optimizer, + optimizer_engine, + optimizer_engine_doc, + optimizer_engines, n_iterations, convergence, SemObserved, From 41c130879edbcb4309ccb5f1d3430988800cb2c6 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Mon, 2 Feb 2026 19:04:05 -0800 Subject: [PATCH 14/28] sem_optimizer_subtype(engine) internal method returning the type that implements SemOptimizer{engine} --- docs/src/developer/optimizer.md | 4 +-- ext/SEMNLOptExt/NLopt.jl | 4 +-- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 4 +-- src/optimizer/Empty.jl | 6 +---- src/optimizer/abstract.jl 
| 29 ++++++++++++++++----- src/optimizer/optim.jl | 4 +-- 6 files changed, 31 insertions(+), 20 deletions(-) diff --git a/docs/src/developer/optimizer.md b/docs/src/developer/optimizer.md index 9e01ac87c..7e2b27b51 100644 --- a/docs/src/developer/optimizer.md +++ b/docs/src/developer/optimizer.md @@ -1,6 +1,6 @@ # Custom optimizer types -The optimizer part of a model connects it to the optimization backend. +The optimizer part of a model connects it to the optimization backend. Let's say we want to implement a new optimizer as `SemOptimizerName`. The first part of the implementation is very similar to loss functions, so we just show the implementation of `SemOptimizerOptim` here as a reference: ```julia @@ -12,7 +12,7 @@ mutable struct SemOptimizerName{A, B} <: SemOptimizer{:Name} options::B end -SemOptimizer{:Name}(args...; kwargs...) = SemOptimizerName(args...; kwargs...) +SEM.sem_optimizer_subtype(::Val{:Name}) = SemOptimizerName SemOptimizerName(; algorithm = LBFGS(), diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index d18c35e23..29006b8d0 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -85,6 +85,8 @@ struct SemOptimizerNLopt <: SemOptimizer{:NLopt} inequality_constraints::Vector{NLoptConstraint} end +SEM.sem_optimizer_subtype(::Val{:NLopt}) = SemOptimizerNLopt + ############################################################################################ ### Constructor ############################################################################################ @@ -117,8 +119,6 @@ function SemOptimizerNLopt(; ) end -SEM.SemOptimizer{:NLopt}(args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) 
- ############################################################################################ ### Recommended methods ############################################################################################ diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index aec61e57e..cd7664537 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -30,8 +30,6 @@ mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} operator_h::C end -SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) - SemOptimizerProximal(; algorithm = ProximalAlgorithms.PANOC(), operator_g, @@ -39,6 +37,8 @@ SemOptimizerProximal(; kwargs..., ) = SemOptimizerProximal(algorithm, operator_g, operator_h) +SEM.sem_optimizer_subtype(::Val{:Proximal}) = SemOptimizerProximal + ############################################################################################ ### Recommended methods ############################################################################################ diff --git a/src/optimizer/Empty.jl b/src/optimizer/Empty.jl index 1bf0c30ac..51ab5f840 100644 --- a/src/optimizer/Empty.jl +++ b/src/optimizer/Empty.jl @@ -11,11 +11,7 @@ an optimizer part. 
""" struct SemOptimizerEmpty <: SemOptimizer{:Empty} end -############################################################################################ -### Constructor -############################################################################################ - -SemOptimizer{:Empty}() = SemOptimizerEmpty() +sem_optimizer_subtype(::Val{:Empty}) = SemOptimizerEmpty ############################################################################################ ### Recommended methods diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index d0da444fe..2abe95fa3 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,17 +1,32 @@ -SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = - SemOptimizer{engine}(args...; kwargs...) - -# fallback optimizer constructor -function SemOptimizer{E}(args...; kwargs...) where {E} - if E == :NLOpt +# throw unsupported engine error +function throw_engine_error(E) + if typeof(E) !== Symbol + throw(ArgumentError("engine argument must be a Symbol.")) + elseif E == :NLopt error("$E optimizer requires \"using NLopt\".") elseif E == :Proximal error("$E optimizer requires \"using ProximalAlgorithms\".") else - error("$E optimizer is not supported.") + error("$E optimizer engine is not supported.") end end +# return the type implementing SemOptimizer{engine} +# the method should be overridden in the extension +sem_optimizer_subtype(engine::Symbol) = sem_optimizer_subtype(Val(engine)) + +# fallback method for unsupported engines +sem_optimizer_subtype(::Val{E}) where {E} = throw_engine_error(E) + +# default constructor that dispatches to the engine-specific type +SemOptimizer(::Val{E}, args...; kwargs...) where {E} = + sem_optimizer_subtype(E)(args...; kwargs...) + +SemOptimizer{E}(args...; kwargs...) where {E} = SemOptimizer(Val(E), args...; kwargs...) + +SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = + SemOptimizer(Val(engine), args...; kwargs...) 
+ """ optimizer_engine(::Type{<:SemOptimizer}) optimizer_engine(::SemOptimizer) diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 2d782473a..9a5a5139f 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -63,14 +63,14 @@ mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} options::B end -SemOptimizer{:Optim}(args...; kwargs...) = SemOptimizerOptim(args...; kwargs...) - SemOptimizerOptim(; algorithm = LBFGS(), options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs..., ) = SemOptimizerOptim(algorithm, options) +sem_optimizer_subtype(::Val{:Optim}) = SemOptimizerOptim + ############################################################################################ ### Recommended methods ############################################################################################ From a1cf400abef948fe4ec67101032d3da38241412d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 17:21:33 -0800 Subject: [PATCH 15/28] streamline engine error throwing --- src/optimizer/abstract.jl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index 2abe95fa3..53b49909e 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,13 +1,16 @@ +const optimizer_engine_dependencies = + Dict(:NLopt => ["NLopt"], :Proximal => ["ProximalAlgorithms"]) + # throw unsupported engine error function throw_engine_error(E) if typeof(E) !== Symbol throw(ArgumentError("engine argument must be a Symbol.")) - elseif E == :NLopt - error("$E optimizer requires \"using NLopt\".") - elseif E == :Proximal - error("$E optimizer requires \"using ProximalAlgorithms\".") + elseif haskey(optimizer_engine_dependencies, E) + error( + "optimizer \":$E\" requires \"using $(join(optimizer_engine_dependencies[E], ", "))\".", + ) else - error("$E optimizer engine is not supported.") + error("optimizer engine \":$E\" is not supported.") end end From 
efd49110a23bb4d43420bce7d9f2c0d82907283d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 17:09:21 -0800 Subject: [PATCH 16/28] SemOptimizer{E}: remove docstring --- src/types.jl | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/types.jl b/src/types.jl index 73a650e6f..92ca2c284 100644 --- a/src/types.jl +++ b/src/types.jl @@ -79,11 +79,6 @@ end Base.:*(x::SemWeight{Nothing}, y) = y Base.:*(x::SemWeight, y) = x.w * y -""" -Supertype of all objects that can serve as the `optimizer` field of a SEM. -Connects the SEM to its optimization backend and controls options like the optimization algorithm. -If you want to connect the SEM package to a new optimization backend, you should implement a subtype of SemOptimizer. -""" abstract type SemOptimizer{E} end """ From 341085336f128cf152838be71febabfdcc011cea Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:48:44 -0800 Subject: [PATCH 17/28] SemOptimizer: cleanup docstrings --- ext/SEMNLOptExt/NLopt.jl | 56 ++++++++------------- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 31 +++++------- src/optimizer/Empty.jl | 8 ++- src/optimizer/abstract.jl | 24 +++++++++ src/optimizer/optim.jl | 48 +++++++----------- 5 files changed, 79 insertions(+), 88 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 29006b8d0..1c6b97120 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -4,12 +4,22 @@ const NLoptConstraint = Pair{Any, Number} -""" -Uses *NLopt.jl* as the optimization engine. -Only available if *NLopt.jl* is loaded in the current Julia session! 
+struct SemOptimizerNLopt <: SemOptimizer{:NLopt} + algorithm::Symbol + local_algorithm::Union{Symbol, Nothing} + options::Dict{Symbol, Any} + local_options::Dict{Symbol, Any} + equality_constraints::Vector{NLoptConstraint} + inequality_constraints::Vector{NLoptConstraint} +end + +SEM.sem_optimizer_subtype(::Val{:NLopt}) = SemOptimizerNLopt -# Constructor +############################################################################################ +### Constructor +############################################################################################ +""" SemOptimizer(; engine = :NLopt, algorithm = :LD_LBFGS, @@ -21,6 +31,10 @@ Only available if *NLopt.jl* is loaded in the current Julia session! constraint_tol::Number = 0.0, kwargs...) +Uses *NLopt.jl* as the optimization engine. For more information on the available algorithms +and options, see the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and +the [NLopt docs](https://nlopt.readthedocs.io/en/latest/). + # Arguments - `algorithm`: optimization algorithm. - `options::Dict{Symbol, Any}`: options for the optimization algorithm @@ -38,8 +52,10 @@ Each constraint could be a function or any other callable object that takes the two input arguments: - the vector of the model parameters; - the array for the in-place calculation of the constraint gradient. -To override the default tolerance, the constraint could be specified +To override the default tolerance, the constraint can be specified as a pair of the function and its tolerance: `constraint_func => tol`. +For information on how to use inequality and equality constraints, +see [Constrained optimization](@ref) in our online documentation. 
# Example ```julia @@ -55,42 +71,14 @@ my_constrained_optimizer = SemOptimizer(; ) ``` -# Usage -All algorithms and options from the *NLopt* library are available, for more information see -the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the -[NLopt docs](https://nlopt.readthedocs.io/en/latest/). -For information on how to use inequality and equality constraints, -see [Constrained optimization](@ref) in our online documentation. - -# Extended help - -## Interfaces +# Interfaces - `algorithm(::SemOptimizerNLopt)` - `local_algorithm(::SemOptimizerNLopt)` - `options(::SemOptimizerNLopt)` - `local_options(::SemOptimizerNLopt)` - `equality_constraints(::SemOptimizerNLopt)` - `inequality_constraints(::SemOptimizerNLopt)` - -## Implementation - -Subtype of `SemOptimizer`. """ -struct SemOptimizerNLopt <: SemOptimizer{:NLopt} - algorithm::Symbol - local_algorithm::Union{Symbol, Nothing} - options::Dict{Symbol, Any} - local_options::Dict{Symbol, Any} - equality_constraints::Vector{NLoptConstraint} - inequality_constraints::Vector{NLoptConstraint} -end - -SEM.sem_optimizer_subtype(::Val{:NLopt}) = SemOptimizerNLopt - -############################################################################################ -### Constructor -############################################################################################ - function SemOptimizerNLopt(; algorithm = :LD_LBFGS, local_algorithm = nothing, diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index cd7664537..e3bb98245 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -1,35 +1,30 @@ ############################################################################################ ### Types ############################################################################################ -""" -Connects to `ProximalAlgorithms.jl` as the optimization backend. 
- -Can be used for regularized SEM, for a tutorial see the online docs on [Regularization](@ref). - -# Constructor +mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} + algorithm::A + operator_g::B + operator_h::C +end +""" SemOptimizerProximal(; algorithm = ProximalAlgorithms.PANOC(), operator_g, operator_h = nothing, kwargs..., + ) + +Connects to `ProximalAlgorithms.jl` as the optimization backend. For more information on +the available algorithms and options, see the online docs on [Regularization](@ref) and +the documentation of [*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) / +[ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl). # Arguments -- `algorithm`: optimization algorithm. +- `algorithm`: proximal optimization algorithm. - `operator_g`: proximal operator (e.g., regularization penalty) - `operator_h`: optional second proximal operator - -# Usage -All algorithms and operators from `ProximalAlgorithms.jl` are available, -for more information see the online docs on [Regularization](@ref) and -the documentation of `ProximalAlgorithms.jl` / `ProximalOperators.jl`. """ -mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} - algorithm::A - operator_g::B - operator_h::C -end - SemOptimizerProximal(; algorithm = ProximalAlgorithms.PANOC(), operator_g, diff --git a/src/optimizer/Empty.jl b/src/optimizer/Empty.jl index 51ab5f840..f95c067ce 100644 --- a/src/optimizer/Empty.jl +++ b/src/optimizer/Empty.jl @@ -1,13 +1,11 @@ ############################################################################################ ### Types ############################################################################################ -""" -Empty placeholder for models that don't need -an optimizer part. -# Constructor +""" + SemOptimizer(engine = :Empty) - SemOptimizerEmpty() +Constructs a dummy placeholder optimizer for models that don't need it. 
"""
 struct SemOptimizerEmpty <: SemOptimizer{:Empty} end
 
diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl
index 53b49909e..c4eadc70a 100644
--- a/src/optimizer/abstract.jl
+++ b/src/optimizer/abstract.jl
@@ -21,6 +21,30 @@ sem_optimizer_subtype(engine::Symbol) = sem_optimizer_subtype(Val(engine))
 # fallback method for unsupported engines
 sem_optimizer_subtype(::Val{E}) where {E} = throw_engine_error(E)
 
+"""
+    SemOptimizer(args...; engine::Symbol = :Optim, kwargs...)
+
+Constructs a `SemOptimizer` object that can be passed to [`fit`](@ref) for specifying aspects
+of the numerical optimization involved in fitting a SEM.
+
+The keyword `engine` controls which Julia package is used, with `:Optim` being the default.
+- [`optimizer_engines()`](@ref optimizer_engines) returns a list of currently available engines.
+- [`optimizer_engine_doc(EngineName)`](@ref optimizer_engine_doc) shows documentation for a specific engine.
+
+More engines become available if specific packages are loaded, for example
+[*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) (also see [Constrained optimization](@ref)
+in the online documentation) or
+[*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl)
+(also see [Regularization](@ref) in the online documentation).
+
+The arguments `args...` and `kwargs...` are engine-specific and control further
+aspects of the optimization process, such as the algorithm, convergence criteria or constraints.
+Information on those can be accessed with [`optimizer_engine_doc`](@ref).
+
+[Custom optimizer types](@ref) shows how to connect the *SEM.jl* package to a completely new optimization engine.
+"""
+SemOptimizer
+
 # default constructor that dispatches to the engine-specific type
 SemOptimizer(::Val{E}, args...; kwargs...) where {E} =
     sem_optimizer_subtype(E)(args...; kwargs...)
diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 9a5a5139f..9ab477067 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -3,34 +3,36 @@ ############################################################################################ ### Types and Constructor ############################################################################################ -""" - SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} - -Connects to `Optim.jl` as the optimization backend. -# Constructor +# SemOptimizer for the Optim.jl +mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} + algorithm::A + options::B +end - SemOptimizerOptim(; +""" + SemOptimizer(; + engine = :Optim, algorithm = LBFGS(), options = Optim.Options(;f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs...) +Connects to *Optim.jl* as the optimization engine. + +For more information on the available algorithms and options, +see the [*Optim.jl* docs](https://julianlsolvers.github.io/Optim.jl/stable/). + # Arguments -- `algorithm`: optimization algorithm from `Optim.jl` +- `algorithm`: optimization algorithm from *Optim.jl* - `options::Optim.Options`: options for the optimization algorithm -# Usage -All algorithms and options from the Optim.jl library are available, for more information see -the Optim.jl online documentation. 
- # Examples ```julia -my_optimizer = SemOptimizerOptim() - # hessian based optimization with backtracking linesearch and modified initial step size using Optim, LineSearches -my_newton_optimizer = SemOptimizerOptim( +my_newton_optimizer = SemOptimizer( + engine = :Optim, algorithm = Newton( ;linesearch = BackTracking(order=3), alphaguess = InitialHagerZhang() @@ -38,10 +40,7 @@ my_newton_optimizer = SemOptimizerOptim( ) ``` -# Extended help - -## Constrained optimization - +# Constrained optimization When using the `Fminbox` or `SAMIN` constrained optimization algorithms, the vector or dictionary of lower and upper bounds for each model parameter can be specified via `lower_bounds` and `upper_bounds` keyword arguments. @@ -49,20 +48,7 @@ Alternatively, the `lower_bound` and `upper_bound` keyword arguments can be used the default bound for all non-variance model parameters, and the `variance_lower_bound` and `variance_upper_bound` keyword -- for the variance parameters (the diagonal of the *S* matrix). - -## Interfaces -- `algorithm(::SemOptimizerOptim)` -- `options(::SemOptimizerOptim)` - -## Implementation - -Subtype of `SemOptimizer`. """ -mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} - algorithm::A - options::B -end - SemOptimizerOptim(; algorithm = LBFGS(), options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), From 6ba91f476276ec08fd5f91226c6037b15e2651b8 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 17:01:24 -0800 Subject: [PATCH 18/28] optimizer_engine_doc() --- src/optimizer/abstract.jl | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index c4eadc70a..d9e52fdc8 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -75,6 +75,15 @@ into the current session. 
optimizer_engines() = Symbol[optimizer_engine(opt_type) for opt_type in subtypes(SemOptimizer)] +""" + optimizer_engine_doc(engine::Symbol) + +Shows documentation for the optimizer engine. + +For a list of available engines, call [`optimizer_engines`](@ref). +""" +optimizer_engine_doc(engine) = Base.Docs.doc(sem_optimizer_subtype(engine)) + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) From 2e5c9b3ccbbe2964e188e50a48dae1761317f834 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 10:39:13 +0100 Subject: [PATCH 19/28] fix proximal extension --- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 26 ++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index e3bb98245..bf0fd2a2a 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -48,14 +48,9 @@ SEM.update_observed(optimizer::SemOptimizerProximal, observed::SemObserved; kwar SEM.algorithm(optimizer::SemOptimizerProximal) = optimizer.algorithm ############################################################################ -### Pretty Printing +### Model fitting ############################################################################ -function Base.show(io::IO, struct_inst::SemOptimizerProximal) - print_type_name(io, struct_inst) - print_field_types(io, struct_inst) -end - ## connect to ProximalAlgorithms.jl function ProximalAlgorithms.value_and_gradient(model::AbstractSem, params) grad = similar(params) @@ -106,10 +101,29 @@ function SEM.fit( ) end +############################################################################################ +### additional methods +############################################################################################ + +SEM.algorithm_name(res::ProximalResult) = SEM.algorithm_name(res.optimizer.algorithm) +SEM.algorithm_name( + 
::ProximalAlgorithms.IterativeAlgorithm{I, H, S, D, K}, +) where {I, H, S, D, K} = nameof(I) + +SEM.convergence( + ::ProximalResult, +) = "No standard convergence criteria for proximal \n algorithms available." +SEM.n_iterations(res::ProximalResult) = res.n_iterations + ############################################################################################ # pretty printing ############################################################################################ +function Base.show(io::IO, struct_inst::SemOptimizerProximal) + print_type_name(io, struct_inst) + print_field_types(io, struct_inst) +end + function Base.show(io::IO, result::ProximalResult) print(io, "Minimum: $(round(result.result[:minimum]; digits = 2)) \n") print(io, "No. evaluations: $(result.result[:iterations]) \n") From e47a94f5bde2c2c8ed3312afe6fefd93dbab3514 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 17:02:09 -0800 Subject: [PATCH 20/28] don't export SemOptimizerOptim update docs to use SemOptimizer --- docs/src/performance/simulation.md | 2 +- docs/src/tutorials/construction/build_by_parts.md | 2 +- docs/src/tutorials/construction/outer_constructor.md | 2 +- docs/src/tutorials/fitting/fitting.md | 3 +-- src/StructuralEquationModels.jl | 2 -- test/examples/multigroup/multigroup.jl | 4 ++-- .../examples/recover_parameters/recover_parameters_twofact.jl | 2 +- 7 files changed, 7 insertions(+), 10 deletions(-) diff --git a/docs/src/performance/simulation.md b/docs/src/performance/simulation.md index d268853f6..85a0c0a0c 100644 --- a/docs/src/performance/simulation.md +++ b/docs/src/performance/simulation.md @@ -67,7 +67,7 @@ For example, new_observed = SemObservedData(;data = data_2, specification = partable) -my_optimizer = SemOptimizerOptim() +my_optimizer = SemOptimizer() new_optimizer = update_observed(my_optimizer, new_observed) ``` diff --git a/docs/src/tutorials/construction/build_by_parts.md b/docs/src/tutorials/construction/build_by_parts.md index 
680e28804..52e12f30b 100644 --- a/docs/src/tutorials/construction/build_by_parts.md +++ b/docs/src/tutorials/construction/build_by_parts.md @@ -59,7 +59,7 @@ ml = SemML(observed = observed) loss_ml = SemLoss(ml) # optimizer ---------------------------------------------------------------------------- -optimizer = SemOptimizerOptim() +optimizer = SemOptimizer() # model -------------------------------------------------------------------------------- diff --git a/docs/src/tutorials/construction/outer_constructor.md b/docs/src/tutorials/construction/outer_constructor.md index e27724307..e0c69ef3c 100644 --- a/docs/src/tutorials/construction/outer_constructor.md +++ b/docs/src/tutorials/construction/outer_constructor.md @@ -41,7 +41,7 @@ model = Sem( data = data, implied = RAMSymbolic, loss = SemWLS, - optimizer = SemOptimizerOptim + optimizer = SemOptimizer ) ``` diff --git a/docs/src/tutorials/fitting/fitting.md b/docs/src/tutorials/fitting/fitting.md index d7353c9f9..1af03ce8e 100644 --- a/docs/src/tutorials/fitting/fitting.md +++ b/docs/src/tutorials/fitting/fitting.md @@ -17,7 +17,6 @@ Structural Equation Model - Fields observed: SemObservedData implied: RAM - optimizer: SemOptimizerOptim ------------- Optimization result ------------- @@ -60,7 +59,7 @@ The available keyword arguments are listed in the sections [Using Optim.jl](@ref Alternative, you can also explicitely define a `SemOptimizer` and pass it as the first argument to `fit`: ```julia -my_optimizer = SemOptimizerOptim(algorithm = BFGS()) +my_optimizer = SemOptimizer(algorithm = BFGS()) fit(my_optimizer, model) ``` diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index b2e5edd8d..71cedf1c4 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -122,8 +122,6 @@ export AbstractSem, SemWLS, loss, SemOptimizer, - SemOptimizerEmpty, - SemOptimizerOptim, optimizer, optimizer_engine, optimizer_engine_doc, diff --git 
a/test/examples/multigroup/multigroup.jl b/test/examples/multigroup/multigroup.jl index 43de554ce..2d43c3d2a 100644 --- a/test/examples/multigroup/multigroup.jl +++ b/test/examples/multigroup/multigroup.jl @@ -86,7 +86,7 @@ start_test = [ fill(0.05, 3) fill(0.01, 3) ] -semoptimizer = SemOptimizerOptim() +semoptimizer = SemOptimizer() @testset "RAMMatrices | constructor | Optim" begin include("build_models.jl") @@ -169,7 +169,7 @@ start_test = [ 0.01 0.05 ] -semoptimizer = SemOptimizerOptim() +semoptimizer = SemOptimizer() @testset "Graph → Partable → RAMMatrices | constructor | Optim" begin include("build_models.jl") diff --git a/test/examples/recover_parameters/recover_parameters_twofact.jl b/test/examples/recover_parameters/recover_parameters_twofact.jl index ce7dc61ff..9f9503af8 100644 --- a/test/examples/recover_parameters/recover_parameters_twofact.jl +++ b/test/examples/recover_parameters/recover_parameters_twofact.jl @@ -68,7 +68,7 @@ loss_ml = SemLoss(SemML(; observed = semobserved, nparams = length(start))) model_ml = Sem(semobserved, implied_ml, loss_ml) objective!(model_ml, true_val) -optimizer = SemOptimizerOptim( +optimizer = SemOptimizer( BFGS(; linesearch = BackTracking(order = 3), alphaguess = InitialHagerZhang()),# m = 100), Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), ) From 309c5789c9aebf0094774265bb345176087403c5 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 17:07:04 -0800 Subject: [PATCH 21/28] SemFit: add opt_engine() to the output --- src/frontend/fit/SemFit.jl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/frontend/fit/SemFit.jl b/src/frontend/fit/SemFit.jl index 438da4da6..4074f3da3 100644 --- a/src/frontend/fit/SemFit.jl +++ b/src/frontend/fit/SemFit.jl @@ -39,6 +39,10 @@ function Base.show(io::IO, semfit::SemFit) #print(io, "Objective value: $(round(semfit.minimum, digits = 4)) \n") print(io, "------------- Optimization result ------------- \n") print(io, "\n") + print(io, "engine: ") + 
print(io, optimizer_engine(semfit)) + print(io, "\n") + print(io, "\n") print(io, semfit.optimization_result) end @@ -58,6 +62,7 @@ model(sem_fit::SemFit) = sem_fit.model optimization_result(sem_fit::SemFit) = sem_fit.optimization_result # optimizer properties +optimizer_engine(sem_fit::SemFit) = optimizer_engine(optimization_result(sem_fit)) optimizer(sem_fit::SemFit) = optimizer(optimization_result(sem_fit)) n_iterations(sem_fit::SemFit) = n_iterations(optimization_result(sem_fit)) convergence(sem_fit::SemFit) = convergence(optimization_result(sem_fit)) From 38939b709a942d3166ec5f5624a4115092738854 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 16:52:56 -0800 Subject: [PATCH 22/28] SemOptimizerResult: streamline optim results --- docs/src/developer/optimizer.md | 3 +- ext/SEMNLOptExt/NLopt.jl | 62 +++++++++------------ ext/SEMProximalOptExt/ProximalAlgorithms.jl | 48 +++++----------- src/frontend/fit/SemFit.jl | 4 +- src/frontend/fit/summary.jl | 3 +- src/optimizer/abstract.jl | 4 ++ src/optimizer/optim.jl | 31 +++++------ src/types.jl | 3 + test/examples/proximal/l0.jl | 4 +- test/examples/proximal/lasso.jl | 4 +- 10 files changed, 71 insertions(+), 95 deletions(-) diff --git a/docs/src/developer/optimizer.md b/docs/src/developer/optimizer.md index 7e2b27b51..3e7cadaf8 100644 --- a/docs/src/developer/optimizer.md +++ b/docs/src/developer/optimizer.md @@ -30,7 +30,6 @@ update_observed(optimizer::SemOptimizerName, observed::SemObserved; kwargs...) = ### additional methods ############################################################################################ -algorithm(optimizer::SemOptimizerName) = optimizer.algorithm options(optimizer::SemOptimizerName) = optimizer.options ``` @@ -68,7 +67,7 @@ The method has to return a `SemFit` object that consists of the minimum of the o In addition, you might want to provide methods to access properties of your optimization result: ```julia -optimizer(res::MyOptimizationResult) = ... 
+algorithm_name(res::MyOptimizationResult) = ... n_iterations(res::MyOptimizationResult) = ... convergence(res::MyOptimizationResult) = ... ``` \ No newline at end of file diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 1c6b97120..909dbbfc1 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -118,31 +118,32 @@ SEM.update_observed(optimizer::SemOptimizerNLopt, observed::SemObserved; kwargs. ### additional methods ############################################################################################ -SEM.algorithm(optimizer::SemOptimizerNLopt) = optimizer.algorithm local_algorithm(optimizer::SemOptimizerNLopt) = optimizer.local_algorithm SEM.options(optimizer::SemOptimizerNLopt) = optimizer.options local_options(optimizer::SemOptimizerNLopt) = optimizer.local_options equality_constraints(optimizer::SemOptimizerNLopt) = optimizer.equality_constraints inequality_constraints(optimizer::SemOptimizerNLopt) = optimizer.inequality_constraints -struct NLoptResult +# wrapper for the NLopt optimization result +struct NLoptResult <: SEM.SemOptimizerResult{SemOptimizerNLopt} + optimizer::SemOptimizerNLopt result::Any problem::Any end -SEM.optimizer(res::NLoptResult) = res.problem.algorithm +SEM.algorithm_name(res::NLoptResult) = res.problem.algorithm SEM.n_iterations(res::NLoptResult) = res.problem.numevals SEM.convergence(res::NLoptResult) = res.result[3] -# construct SemFit from fitted NLopt object -function SemFit_NLopt(optimization_result, model::AbstractSem, start_val, opt) - return SemFit( - optimization_result[1], - optimization_result[2], - start_val, - model, - NLoptResult(optimization_result, opt), - ) +# construct NLopt.jl problem +function NLopt_problem(algorithm, options, npar) + problem = Opt(algorithm, npar) + + for (key, val) in pairs(options) + setproperty!(problem, key, val) + end + + return problem end # fit method @@ -152,8 +153,8 @@ function SEM.fit( start_params::AbstractVector; kwargs..., ) - opt = 
construct_NLopt(optim.algorithm, optim.options, nparams(model)) - opt.min_objective = + problem = NLopt_problem(optim.algorithm, optim.options, nparams(model)) + problem.min_objective = (par, G) -> SEM.evaluate!( zero(eltype(par)), !isnothing(G) && !isempty(G) ? G : nothing, nothing, model, par, ) for (f, tol) in optim.inequality_constraints - inequality_constraint!(opt, f, tol) + inequality_constraint!(problem, f, tol) end for (f, tol) in optim.equality_constraints - equality_constraint!(opt, f, tol) + equality_constraint!(problem, f, tol) end if !isnothing(optim.local_algorithm) - opt_local = - construct_NLopt(optim.local_algorithm, optim.local_options, nparams(model)) - opt.local_optimizer = opt_local + problem.local_optimizer = + NLopt_problem(optim.local_algorithm, optim.local_options, nparams(model)) end # fit - result = NLopt.optimize(opt, start_params) + result = NLopt.optimize(problem, start_params) - return SemFit_NLopt(result, model, start_params, opt) -end - -############################################################################################ -### additional functions -############################################################################################ - -function construct_NLopt(algorithm, options, npar) - opt = Opt(algorithm, npar) - - for (key, val) in pairs(options) - setproperty!(opt, key, val) - end - - return opt + return SemFit( + result[1], # minimum + result[2], # optimal params + start_params, + model, + NLoptResult(optim, result, problem), + ) end ############################################################################################ diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index bf0fd2a2a..1d7f83632 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -41,12 +41,6 @@ SEM.sem_optimizer_subtype(::Val{:Proximal}) = SemOptimizerProximal 
SEM.update_observed(optimizer::SemOptimizerProximal, observed::SemObserved; kwargs...) = optimizer -############################################################################################ -### additional methods -############################################################################################ - -SEM.algorithm(optimizer::SemOptimizerProximal) = optimizer.algorithm - ############################################################################ ### Model fitting ############################################################################ @@ -58,8 +52,11 @@ function ProximalAlgorithms.value_and_gradient(model::AbstractSem, params) return obj, grad end -mutable struct ProximalResult - result::Any +# wrapper for the Proximal optimization result +struct ProximalResult{O <: SemOptimizer{:Proximal}} <: SEM.SemOptimizerResult{O} + optimizer::O + minimum::Float64 + n_iterations::Int end function SEM.fit( @@ -69,10 +66,10 @@ function SEM.fit( kwargs..., ) if isnothing(optim.operator_h) - solution, iterations = + solution, niterations = optim.algorithm(x0 = start_params, f = model, g = optim.operator_g) else - solution, iterations = optim.algorithm( + solution, niterations = optim.algorithm( x0 = start_params, f = model, g = optim.operator_g, @@ -80,25 +77,9 @@ function SEM.fit( ) end - minimum = objective!(model, solution) + optim_res = ProximalResult(optim, objective!(model, solution), niterations) - optimization_result = Dict( - :minimum => minimum, - :iterations => iterations, - :algorithm => optim.algorithm, - :operator_g => optim.operator_g, - ) - - isnothing(optim.operator_h) || - push!(optimization_result, :operator_h => optim.operator_h) - - return SemFit( - minimum, - solution, - start_params, - model, - ProximalResult(optimization_result), - ) + return SemFit(optim_res.minimum, solution, start_params, model, optim_res) end ############################################################################################ @@ -125,10 +106,9 @@ function 
Base.show(io::IO, struct_inst::SemOptimizerProximal) end function Base.show(io::IO, result::ProximalResult) - print(io, "Minimum: $(round(result.result[:minimum]; digits = 2)) \n") - print(io, "No. evaluations: $(result.result[:iterations]) \n") - print(io, "Operator: $(nameof(typeof(result.result[:operator_g]))) \n") - if haskey(result.result, :operator_h) - print(io, "Second Operator: $(nameof(typeof(result.result[:operator_h]))) \n") - end + print(io, "Minimum: $(round(result.minimum; digits = 2)) \n") + print(io, "No. evaluations: $(result.n_iterations) \n") + print(io, "Operator: $(nameof(typeof(result.optimizer.operator_g))) \n") + op_h = result.optimizer.operator_h + isnothing(op_h) || print(io, "Second Operator: $(nameof(typeof(op_h))) \n") end diff --git a/src/frontend/fit/SemFit.jl b/src/frontend/fit/SemFit.jl index 4074f3da3..84db81e3a 100644 --- a/src/frontend/fit/SemFit.jl +++ b/src/frontend/fit/SemFit.jl @@ -13,7 +13,7 @@ Fitted structural equation model. - `model(::SemFit)` - `optimization_result(::SemFit)` -- `optimizer(::SemFit)` -> optimization algorithm +- `algorithm_name(::SemFit)` -> optimization algorithm - `n_iterations(::SemFit)` -> number of iterations - `convergence(::SemFit)` -> convergence properties """ @@ -63,6 +63,6 @@ optimization_result(sem_fit::SemFit) = sem_fit.optimization_result # optimizer properties optimizer_engine(sem_fit::SemFit) = optimizer_engine(optimization_result(sem_fit)) -optimizer(sem_fit::SemFit) = optimizer(optimization_result(sem_fit)) +algorithm_name(sem_fit::SemFit) = algorithm_name(optimization_result(sem_fit)) n_iterations(sem_fit::SemFit) = n_iterations(optimization_result(sem_fit)) convergence(sem_fit::SemFit) = convergence(optimization_result(sem_fit)) diff --git a/src/frontend/fit/summary.jl b/src/frontend/fit/summary.jl index 3071d5653..435b17470 100644 --- a/src/frontend/fit/summary.jl +++ b/src/frontend/fit/summary.jl @@ -7,7 +7,8 @@ function details(sem_fit::SemFit; show_fitmeasures = false, color = 
:light_cyan, color = color, ) print("\n") - println("Optimization algorithm: $(optimizer(sem_fit))") + println("Optimization engine: $(optimizer_engine(sem_fit))") + println("Optimization algorithm: $(algorithm_name(sem_fit))") println("Convergence: $(convergence(sem_fit))") println("No. iterations/evaluations: $(n_iterations(sem_fit))") print("\n") diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index d9e52fdc8..e9a8c47ba 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -84,6 +84,10 @@ For a list of available engines, call [`optimizer_engines`](@ref). """ optimizer_engine_doc(engine) = Base.Docs.doc(sem_optimizer_subtype(engine)) +optimizer(result::SemOptimizerResult) = result.optimizer + +optimizer_engine(result::SemOptimizerResult) = optimizer_engine(result.optimizer) + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 9ab477067..52b27bc88 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -67,26 +67,17 @@ update_observed(optimizer::SemOptimizerOptim, observed::SemObserved; kwargs...) 
### additional methods ############################################################################################ -algorithm(optimizer::SemOptimizerOptim) = optimizer.algorithm options(optimizer::SemOptimizerOptim) = optimizer.options -function SemFit( - optimization_result::Optim.MultivariateOptimizationResults, - model::AbstractSem, - start_val, -) - return SemFit( - optimization_result.minimum, - optimization_result.minimizer, - start_val, - model, - optimization_result, - ) +# wrapper for the Optim.jl result +struct SemOptimResult{O <: SemOptimizerOptim} <: SemOptimizerResult{O} + optimizer::O + result::Optim.MultivariateOptimizationResults end -optimizer(res::Optim.MultivariateOptimizationResults) = Optim.summary(res) -n_iterations(res::Optim.MultivariateOptimizationResults) = Optim.iterations(res) -convergence(res::Optim.MultivariateOptimizationResults) = Optim.converged(res) +algorithm_name(res::SemOptimResult) = Optim.summary(res.result) +n_iterations(res::SemOptimResult) = Optim.iterations(res.result) +convergence(res::SemOptimResult) = Optim.converged(res.result) function fit( optim::SemOptimizerOptim, @@ -133,5 +124,11 @@ function fit( optim.options, ) end - return SemFit(result, model, start_params) + return SemFit( + result.minimum, + result.minimizer, + start_params, + model, + SemOptimResult(optim, result), + ) end diff --git a/src/types.jl b/src/types.jl index 92ca2c284..777165f37 100644 --- a/src/types.jl +++ b/src/types.jl @@ -81,6 +81,9 @@ Base.:*(x::SemWeight, y) = x.w * y abstract type SemOptimizer{E} end +# wrapper around optimization result +abstract type SemOptimizerResult{O <: SemOptimizer} end + """ Supertype of all objects that can serve as the observed field of a SEM. Pre-processes data and computes sufficient statistics for example. 
diff --git a/test/examples/proximal/l0.jl b/test/examples/proximal/l0.jl index f74dfb2d1..8542ac458 100644 --- a/test/examples/proximal/l0.jl +++ b/test/examples/proximal/l0.jl @@ -45,7 +45,7 @@ model_prox = Sem(specification = partable, data = dat, loss = SemML) fit_prox = fit(model_prox, engine = :Proximal, operator_g = prox_operator) @testset "l0 | solution_unregularized" begin - @test fit_prox.optimization_result.result[:iterations] < 1000 + @test n_iterations(fit_prox.optimization_result) < 1000 @test maximum(abs.(solution(sem_fit) - solution(fit_prox))) < 0.002 end @@ -57,7 +57,7 @@ model_prox = Sem(specification = partable, data = dat, loss = SemML) fit_prox = fit(model_prox, engine = :Proximal, operator_g = prox_operator) @testset "l0 | solution_regularized" begin - @test fit_prox.optimization_result.result[:iterations] < 1000 + @test n_iterations(fit_prox.optimization_result) < 1000 @test solution(fit_prox)[31] == 0.0 @test abs( StructuralEquationModels.minimum(fit_prox) - diff --git a/test/examples/proximal/lasso.jl b/test/examples/proximal/lasso.jl index 356ac6188..9138f6884 100644 --- a/test/examples/proximal/lasso.jl +++ b/test/examples/proximal/lasso.jl @@ -43,7 +43,7 @@ model_prox = Sem(specification = partable, data = dat, loss = SemML) fit_prox = fit(model_prox, engine = :Proximal, operator_g = NormL1(λ)) @testset "lasso | solution_unregularized" begin - @test fit_prox.optimization_result.result[:iterations] < 1000 + @test n_iterations(fit_prox.optimization_result) < 1000 @test maximum(abs.(solution(sem_fit) - solution(fit_prox))) < 0.002 end @@ -55,7 +55,7 @@ model_prox = Sem(specification = partable, data = dat, loss = SemML) fit_prox = fit(model_prox, engine = :Proximal, operator_g = NormL1(λ)) @testset "lasso | solution_regularized" begin - @test fit_prox.optimization_result.result[:iterations] < 1000 + @test n_iterations(fit_prox.optimization_result) < 1000 @test all(solution(fit_prox)[16:20] .< solution(sem_fit)[16:20]) @test 
StructuralEquationModels.minimum(fit_prox) - StructuralEquationModels.minimum(sem_fit) < 0.03 From ccbf55f508375e4f07ef0e33c1845e733cee92f5 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 14:03:37 -0800 Subject: [PATCH 23/28] docs: fix optimizer engine docs - enable docstrings from extensions - fix references to ext. docstrings --- docs/make.jl | 7 ++++++- docs/src/tutorials/backends/nlopt.md | 2 +- docs/src/tutorials/backends/optim.md | 4 ++-- docs/src/tutorials/concept.md | 25 ++++++++++++++----------- 4 files changed, 23 insertions(+), 15 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index 1bb68c4da..042b83587 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,7 +1,12 @@ -using Documenter, StructuralEquationModels +using Documenter, StructuralEquationModels, NLopt, ProximalAlgorithms, ProximalOperators + +SEM = StructuralEquationModels +SEMNLOptExt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt) +SEMProximalOptExt = Base.get_extension(StructuralEquationModels, :SEMProximalOptExt) makedocs( sitename = "StructuralEquationModels.jl", + modules = [SEM, SEMNLOptExt, SEMProximalOptExt], pages = [ "index.md", "Tutorials" => [ diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index 3ad4bf497..692628fd1 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -2,7 +2,7 @@ When [`NLopt.jl`](https://github.com/jump-dev/NLopt.jl) is loaded in the running Julia session, it could be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` -(see [NLopt-specific options](@ref `SemOptimizerNLopt`)). +(see [NLopt-specific options](@ref SEMNLOptExt.SemOptimizerNLopt)). Among other things, `NLopt` enables constrained optimization of the SEM models, which is explained in the [Constrained optimization](@ref) section. 
diff --git a/docs/src/tutorials/backends/optim.md b/docs/src/tutorials/backends/optim.md index a16537ec4..b1b07885e 100644 --- a/docs/src/tutorials/backends/optim.md +++ b/docs/src/tutorials/backends/optim.md @@ -1,7 +1,7 @@ # Using Optim.jl -[Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) is the default optimization engine of *SEM.jl*, -see [`SemOptimizerOptim`](@ref) for a full list of its parameters. +[*Optim.jl*](https://github.com/JuliaNLSolvers/Optim.jl) is the default optimization engine of *SEM.jl*, +see [`SEM.SemOptimizerOptim`](@ref) for a full list of its parameters. It defaults to the LBFGS optimization, but we can load the `Optim` and `LineSearches` packages and specify BFGS (!not L-BFGS) with a back-tracking linesearch and Hager-Zhang initial step length guess: diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 2b453925a..49f0d404f 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -21,13 +21,13 @@ So everything that can be used as the 'observed' part has to be of type `SemObse Here is an overview on the available building blocks: -|[`SemObserved`](@ref) | [`SemImplied`](@ref) | [`SemLossFunction`](@ref) | [`SemOptimizer`](@ref) | -|---------------------------------|-----------------------|---------------------------|-------------------------------| -| [`SemObservedData`](@ref) | [`RAM`](@ref) | [`SemML`](@ref) | [`SemOptimizerOptim`](@ref) | -| [`SemObservedCovariance`](@ref) | [`RAMSymbolic`](@ref) | [`SemWLS`](@ref) | [`SemOptimizerNLopt`](@ref) | -| [`SemObservedMissing`](@ref) | [`ImpliedEmpty`](@ref)| [`SemFIML`](@ref) | | -| | | [`SemRidge`](@ref) | | -| | | [`SemConstant`](@ref) | | +|[`SemObserved`](@ref) | [`SemImplied`](@ref) | [`SemLossFunction`](@ref) | [`SemOptimizer`](@ref) | +|---------------------------------|-----------------------|---------------------------|----------------------------| +| [`SemObservedData`](@ref) | [`RAM`](@ref) | [`SemML`](@ref) | [:Optim](@ref 
StructuralEquationModels.SemOptimizerOptim) | +| [`SemObservedCovariance`](@ref) | [`RAMSymbolic`](@ref) | [`SemWLS`](@ref) | [:NLopt](@ref SEMNLOptExt.SemOptimizerNLopt) | +| [`SemObservedMissing`](@ref) | [`ImpliedEmpty`](@ref)| [`SemFIML`](@ref) | [:Proximal](@ref SEMProximalOptExt.SemOptimizerProximal) | +| | | [`SemRidge`](@ref) | | +| | | [`SemConstant`](@ref) | | The rest of this page explains the building blocks for each part. First, we explain every part and give an overview on the different options that are available. After that, the [API - model parts](@ref) section serves as a reference for detailed explanations about the different options. (How to stick them together to a final model is explained in the section on [Model Construction](@ref).) @@ -52,7 +52,7 @@ Available loss functions are ## The optimizer part aka `SemOptimizer` The optimizer part of a model connects to the numerical optimization backend used to fit the model. It can be used to control options like the optimization algorithm, linesearch, stopping criteria, etc. -There are currently three available backends, [`SemOptimizerOptim`](@ref) connecting to the [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) backend, [`SemOptimizerNLopt`](@ref) connecting to the [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) backend and [`SemOptimizerProximal`](@ref) connecting to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). +There are currently three available engines (i.e., backends used to carry out the numerical optimization), [`:Optim`](@ref StructuralEquationModels.SemOptimizerOptim) connecting to the [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) backend, [`:NLopt`](@ref SEMNLOptExt.SemOptimizerNLopt) connecting to the [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) backend and [`:Proximal`](@ref SEMProximalOptExt.SemOptimizerProximal) connecting to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). 
For more information about the available options see also the tutorials about [Using Optim.jl](@ref) and [Using NLopt.jl](@ref), as well as [Constrained optimization](@ref) and [Regularization](@ref) . # What to do next @@ -101,8 +101,11 @@ SemConstant ## optimizer ```@docs +optimizer_engines +optimizer_engine +optimizer_engine_doc SemOptimizer -SemOptimizerOptim -SemOptimizerNLopt -SemOptimizerProximal +SEM.SemOptimizerOptim +SEMNLOptExt.SemOptimizerNLopt +SEMProximalOptExt.SemOptimizerProximal ``` \ No newline at end of file From 13272362d4cf6cb3389cb2759aec3dffe4257b02 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Wed, 4 Feb 2026 13:14:44 -0800 Subject: [PATCH 24/28] docs/make.jl: disable doctest temporary until the links are fixed Co-authored-by: Maximilian Ernst <34346372+Maximilian-Stefan-Ernst@users.noreply.github.com> --- docs/make.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/make.jl b/docs/make.jl index 042b83587..f3824dd79 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -65,6 +65,7 @@ makedocs( collapselevel = 1, ), doctest = false, + checkdocs = :none, ) # doctest(StructuralEquationModels, fix=true) From 2f2293c5205026178639c3646cacd49f57d3c38d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Mon, 2 Feb 2026 19:09:30 -0800 Subject: [PATCH 25/28] optimizer.md: rename to SemOptimizerMyopt --- docs/src/developer/optimizer.md | 43 ++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/docs/src/developer/optimizer.md b/docs/src/developer/optimizer.md index 3e7cadaf8..164fa4f42 100644 --- a/docs/src/developer/optimizer.md +++ b/docs/src/developer/optimizer.md @@ -1,47 +1,56 @@ # Custom optimizer types The optimizer part of a model connects it to the optimization backend. -Let's say we want to implement a new optimizer as `SemOptimizerName`. 
The first part of the implementation is very similar to loss functions, so we just show the implementation of `SemOptimizerOptim` here as a reference: +Let's say we want to implement a new optimizer as `SemOptimizerMyopt`. +The first part of the implementation is very similar to loss functions, +so we just show the implementation of `SemOptimizerOptim` here as a reference: ```julia ############################################################################################ ### Types and Constructor ############################################################################################ -mutable struct SemOptimizerName{A, B} <: SemOptimizer{:Name} +struct SemOptimizerMyopt{A, B} <: SemOptimizer{:Myopt} algorithm::A options::B end -SEM.sem_optimizer_subtype(::Val{:Name}) = SemOptimizerName +SEM.sem_optimizer_subtype(::Val{:Myopt}) = SemOptimizerMyopt -SemOptimizerName(; +SemOptimizerMyopt(; algorithm = LBFGS(), options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs..., -) = SemOptimizerName(algorithm, options) +) = SemOptimizerMyopt(algorithm, options) + +struct MyOptResult{O <: SemOptimizerMyopt} <: SEM.SemOptimizerResult{O} + optimizer::O + ... +end ############################################################################################ ### Recommended methods ############################################################################################ -update_observed(optimizer::SemOptimizerName, observed::SemObserved; kwargs...) = optimizer +update_observed(optimizer::SemOptimizerMyopt, observed::SemObserved; kwargs...) 
= optimizer ############################################################################################ ### additional methods ############################################################################################ -options(optimizer::SemOptimizerName) = optimizer.options +options(optimizer::SemOptimizerMyopt) = optimizer.options ``` -Note that your optimizer is a subtype of `SemOptimizer{:Name}`, where you can choose a `:Name` that can later be used as a keyword argument to `fit(engine = :Name)`. -Similarly, `SemOptimizer{:Name}(args...; kwargs...) = SemOptimizerName(args...; kwargs...)` should be defined as well as a constructor that uses only keyword arguments: +Note that your optimizer is a subtype of `SemOptimizer{:Myopt}`, +where you can choose a `:Myopt` that can later be used as a keyword argument to `fit(engine = :Myopt)`. +Similarly, `SemOptimizer{:Myopt}(args...; kwargs...) = SemOptimizerMyopt(args...; kwargs...)` +should be defined as well as a constructor that uses only keyword arguments: ```julia -SemOptimizerName(; +SemOptimizerMyopt(; algorithm = LBFGS(), options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs..., -) = SemOptimizerName(algorithm, options) +) = SemOptimizerMyopt(algorithm, options) ``` A method for `update_observed` and additional methods might be usefull, but are not necessary. @@ -49,15 +58,15 @@ Now comes the substantive part: We need to provide a method for `fit`: ```julia function fit( - optim::SemOptimizerName, + optim::SemOptimizerMyopt, model::AbstractSem, start_params::AbstractVector; kwargs..., ) - optimization_result = ... - ... + optimization_result = MyoptResult(optim, ...) 
+ return SemFit(minimum, minimizer, start_params, model, optimization_result) end ``` @@ -67,7 +76,7 @@ The method has to return a `SemFit` object that consists of the minimum of the o In addition, you might want to provide methods to access properties of your optimization result: ```julia -algorithm_name(res::MyOptimizationResult) = ... -n_iterations(res::MyOptimizationResult) = ... -convergence(res::MyOptimizationResult) = ... +algorithm_name(res::MyOptResult) = ... +n_iterations(res::MyOptResult) = ... +convergence(res::MyOptResult) = ... ``` \ No newline at end of file From 5cdcbb1204fc531c64d3a8c596ea9d102fcc7e38 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Mon, 2 Feb 2026 19:23:58 -0800 Subject: [PATCH 26/28] docs: apply suggestions --- docs/src/tutorials/backends/nlopt.md | 8 ++++---- .../src/tutorials/regularization/regularization.md | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index 692628fd1..8e6b83bee 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -1,9 +1,9 @@ # Using NLopt.jl When [`NLopt.jl`](https://github.com/jump-dev/NLopt.jl) is loaded in the running Julia session, -it could be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` +it can be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` (see [NLopt-specific options](@ref SEMNLOptExt.SemOptimizerNLopt)). -Among other things, `NLopt` enables constrained optimization of the SEM models, which is +Among other things, `NLopt` enables constrained optimization of SEMs, which is explained in the [Constrained optimization](@ref) section. 
We can override the default *NLopt* algorithm (LFBGS) and instead use @@ -28,9 +28,9 @@ To see how to use the optimizer to actually fit a model now, check out the [Mode In the *NLopt* docs, you can find details about the [optimization algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/), and the [tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/) that demonstrates how to tweak their behavior. -To choose an algorithm, just pass its name without the 'NLOPT\_' prefix (for example, 'NLOPT\_LD\_SLSQP' can be used by passing `algorithm = :LD_SLSQP`). +To choose an algorithm, just pass its name without the `NLOPT_` prefix (for example, `NLOPT_LD_SLSQP` can be used by passing `algorithm = :LD_SLSQP`). -The README of the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) may also be helpful, and provides a list of options: +The *README* of [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) may also be helpful, and provides a list of options: - `algorithm` - `stopval` diff --git a/docs/src/tutorials/regularization/regularization.md b/docs/src/tutorials/regularization/regularization.md index 17add030a..79e301c21 100644 --- a/docs/src/tutorials/regularization/regularization.md +++ b/docs/src/tutorials/regularization/regularization.md @@ -6,8 +6,7 @@ For ridge regularization, you can simply use `SemRidge` as an additional loss fu (for example, a model with the loss functions `SemML` and `SemRidge` corresponds to ridge-regularized maximum likelihood estimation). You can define lasso, elastic net and other forms of regularization using [`ProximalOperators.jl`](https://github.com/JuliaFirstOrder/ProximalOperators.jl) -and optimize the SEM model with [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) -that provides so-called *proximal optimization* algorithms. 
+and optimize the SEM with so-called *proximal optimization* algorithms from [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). ```@setup reg using StructuralEquationModels, ProximalAlgorithms, ProximalOperators @@ -23,8 +22,8 @@ using StructuralEquationModels, ProximalAlgorithms, ProximalOperators ## Proximal optimization -With *ProximalAlgorithms* package loaded, it is now possible to use `:Proximal` optimization engine -in `SemOptimizer` for estimating regularized models. +With the *ProximalAlgorithms* package loaded, it is now possible to use the `:Proximal` +optimization engine in `SemOptimizer` for estimating regularized models. ```julia SemOptimizer(; @@ -138,15 +137,16 @@ fit_lasso2 = fit(model; engine = :Proximal, operator_g = NormL1(λ)) ## Second example - mixed l1 and l0 regularization You can choose to penalize different parameters with different types of regularization functions. -Let's use the *lasso* (*l1*) again on the covariances, but additionally penalize the error variances of the observed items via *l0* regularization. +Let's use the *lasso* (*l1*) again on the covariances, but additionally penalize the error variances of +the observed items via *l0* regularization. The *l0* penalty is defined as ```math l_0 = \lambda \mathrm{nnz}(\theta) ``` -Since we apply *l1* and *l0* to the disjoint sets of parameters, this regularization could be represented as -as sum of *separable proximal operators* (i.e. no parameter is penalized twice) +Since we apply *l1* and *l0* to the disjoint sets of parameters, this regularization can be +represented as a sum of *separable proximal operators* (i.e. 
no parameter is penalized twice) implemented by the [`SlicedSeparableSum`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/calculus/#ProximalOperators.SlicedSeparableSum) operator: ```@example reg From f1f453cf5ce8f7175ea8a045cf750e3716514509 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Thu, 5 Feb 2026 15:53:22 -0800 Subject: [PATCH 27/28] docs/optimizer.md: more updates for the new API --- docs/src/developer/optimizer.md | 55 +++++++++++++++++---------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/docs/src/developer/optimizer.md b/docs/src/developer/optimizer.md index 164fa4f42..b5c9a6e09 100644 --- a/docs/src/developer/optimizer.md +++ b/docs/src/developer/optimizer.md @@ -1,9 +1,7 @@ # Custom optimizer types -The optimizer part of a model connects it to the optimization backend. +The optimizer part of a model connects it to the optimization engine. Let's say we want to implement a new optimizer as `SemOptimizerMyopt`. -The first part of the implementation is very similar to loss functions, -so we just show the implementation of `SemOptimizerOptim` here as a reference: ```julia ############################################################################################ ### Types and Constructor ############################################################################################ @@ -17,12 +15,12 @@ end SEM.sem_optimizer_subtype(::Val{:Myopt}) = SemOptimizerMyopt SemOptimizerMyopt(; - algorithm = LBFGS(), - options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), + algorithm = ..., + options = ..., kwargs..., ) = SemOptimizerMyopt(algorithm, options) -struct MyOptResult{O <: SemOptimizerMyopt} <: SEM.SemOptimizerResult{O} +struct MyoptResult{O <: SemOptimizerMyopt} <: SEM.SemOptimizerResult{O} optimizer::O ... end @@ -40,21 +38,15 @@ update_observed(optimizer::SemOptimizerMyopt, observed::SemObserved; kwargs...) 
options(optimizer::SemOptimizerMyopt) = optimizer.options ``` -Note that your optimizer is a subtype of `SemOptimizer{:Myopt}`, -where you can choose a `:Myopt` that can later be used as a keyword argument to `fit(engine = :Myopt)`. -Similarly, `SemOptimizer{:Myopt}(args...; kwargs...) = SemOptimizerMyopt(args...; kwargs...)` -should be defined as well as a constructor that uses only keyword arguments: +Note that `SemOptimizerMyopt` is defined as a subtype of [`SemOptimizer{:Myopt}`](@ref SEM.SemOptimizer)`, +and `SEM.sem_optimizer_subtype(::Val{:Myopt})` returns `SemOptimizerMyopt`. +This instructs *SEM.jl* to use `SemOptimizerMyopt` when `:Myopt` is specified as the engine for +model fitting: `fit(..., engine = :Myopt)`. -```julia -SemOptimizerMyopt(; - algorithm = LBFGS(), - options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), - kwargs..., -) = SemOptimizerMyopt(algorithm, options) -``` A method for `update_observed` and additional methods might be usefull, but are not necessary. -Now comes the substantive part: We need to provide a method for `fit`: +Now comes the essential part: we need to provide the [`fit`](@ref) method with `SemOptimizerMyopt` +as the first positional argument. ```julia function fit( @@ -63,20 +55,29 @@ function fit( start_params::AbstractVector; kwargs..., ) - ... + # ... prepare the Myopt optimization problem - optimization_result = MyoptResult(optim, ...) + myopt_res = ... # fit the problem with the Myopt engine + minimum = ... # extract the minimum from myopt_res + minimizer = ... # extract the solution (parameter estimates) + optim_result = MyoptResult(optim, myopt_res, ...) 
# store the original Myopt result and params - return SemFit(minimum, minimizer, start_params, model, optimization_result) + return SemFit(minimum, minimizer, start_params, model, optim_result) end ``` -The method has to return a `SemFit` object that consists of the minimum of the objective at the solution, the minimizer (aka parameter estimates), the starting values, the model and the optimization result (which may be anything you desire for your specific backend). +This method is responsible for converting the SEM into the format required by your optimization engine, +running the optimization, extracting the solution and returning the `SemFit` object, which should package: +* the minimum of the objective at the solution +* the minimizer (the vector of the SEM parameter estimates) +* the starting values +* the SEM model +* `MyoptResult` object with any relevant engine-specific details you want to preserve -In addition, you might want to provide methods to access properties of your optimization result: +In addition, you might want to provide methods to access engine-specific properties stored in `MyoptResult`: ```julia -algorithm_name(res::MyOptResult) = ... -n_iterations(res::MyOptResult) = ... -convergence(res::MyOptResult) = ... -``` \ No newline at end of file +algorithm_name(res::MyoptResult) = ... +n_iterations(res::MyoptResult) = ... +convergence(res::MyoptResult) = ... 
+``` From 1f8d2a9a91fa31143c68054f34d047307dbf4695 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Thu, 5 Feb 2026 16:41:08 -0800 Subject: [PATCH 28/28] .gh/FormatCheck: run on pull_request Fixes the format checking, because when run in pull_request_target, it does not check out the correct commit --- .github/workflows/FormatCheck.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml index d005ca942..5612cd8d3 100644 --- a/.github/workflows/FormatCheck.yml +++ b/.github/workflows/FormatCheck.yml @@ -1,8 +1,7 @@ name: Format suggestions on: - pull_request_target: - # this argument is not required if you don't use the `suggestion-label` input - types: [ opened, reopened, synchronize, labeled, unlabeled ] + push: # Runs on all pushes to any branch + pull_request: # Runs on all PR events (open, sync, reopen) jobs: code-style: runs-on: ubuntu-latest