From 453fcc9ff81e1e81e3b746ea4695b5f1976e960b Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:00:35 -0700 Subject: [PATCH 01/37] Revert "fix Proximal extension" This reverts commit 9729819b86f375e4663de1fe9ec9c38d4932f580. --- ext/SEMProximalOptExt/SEMProximalOptExt.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ext/SEMProximalOptExt/SEMProximalOptExt.jl b/ext/SEMProximalOptExt/SEMProximalOptExt.jl index 04be35cb..bedf1920 100644 --- a/ext/SEMProximalOptExt/SEMProximalOptExt.jl +++ b/ext/SEMProximalOptExt/SEMProximalOptExt.jl @@ -3,7 +3,8 @@ module SEMProximalOptExt using StructuralEquationModels using StructuralEquationModels: print_type_name, print_field_types using ProximalAlgorithms -import StructuralEquationModels: SemOptimizerProximal + +export SemOptimizerProximal SEM = StructuralEquationModels From abc2847899447bf6e10ea76f105d6913c88a2442 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:01:59 -0700 Subject: [PATCH 02/37] Revert "fix NLopt extension" This reverts commit 81a4bd9839df01e9f487b9aa13e3df107856114a. --- ext/SEMNLOptExt/NLopt.jl | 5 +++++ ext/SEMNLOptExt/SEMNLOptExt.jl | 3 ++- src/StructuralEquationModels.jl | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 27bc3003..ac282ad6 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -1,3 +1,8 @@ +Base.@kwdef struct NLoptConstraint + f::Any + tol = 0.0 +end + Base.convert( ::Type{NLoptConstraint}, tuple::NamedTuple{(:f, :tol), Tuple{F, T}}, diff --git a/ext/SEMNLOptExt/SEMNLOptExt.jl b/ext/SEMNLOptExt/SEMNLOptExt.jl index bf905e3a..a159f6dc 100644 --- a/ext/SEMNLOptExt/SEMNLOptExt.jl +++ b/ext/SEMNLOptExt/SEMNLOptExt.jl @@ -1,10 +1,11 @@ module SEMNLOptExt using StructuralEquationModels, NLopt -import StructuralEquationModels: SemOptimizerNLopt, NLoptConstraint SEM = StructuralEquationModels +export SemOptimizerNLopt, NLoptConstraint + include("NLopt.jl") end diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index 46692bd5..e0cd1e7a 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -198,6 +198,5 @@ export AbstractSem, ↔, ⇔, SemOptimizerNLopt, - NLoptConstraint, SemOptimizerProximal end From 56cdef1f26c482828b1466bc120fad31e6ed8c18 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:03:35 -0700 Subject: [PATCH 03/37] Revert "fix exporting structs from package extensions" This reverts commit f0df6538f0220f964cbf51772698c317a0b4cf86. --- ext/SEMNLOptExt/NLopt.jl | 68 +++++++++++++++++++++ ext/SEMProximalOptExt/ProximalAlgorithms.jl | 32 ++++++++++ src/StructuralEquationModels.jl | 7 +-- src/package_extensions/SEMNLOptExt.jl | 65 -------------------- src/package_extensions/SEMProximalOptExt.jl | 27 -------- 5 files changed, 101 insertions(+), 98 deletions(-) delete mode 100644 src/package_extensions/SEMNLOptExt.jl delete mode 100644 src/package_extensions/SEMProximalOptExt.jl diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index ac282ad6..694247ca 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -1,3 +1,71 @@ +############################################################################################ +### Types +############################################################################################ +""" +Connects to `NLopt.jl` as the optimization backend. +Only usable if `NLopt.jl` is loaded in the current Julia session! 
+ +# Constructor + + SemOptimizerNLopt(; + algorithm = :LD_LBFGS, + options = Dict{Symbol, Any}(), + local_algorithm = nothing, + local_options = Dict{Symbol, Any}(), + equality_constraints = Vector{NLoptConstraint}(), + inequality_constraints = Vector{NLoptConstraint}(), + kwargs...) + +# Arguments +- `algorithm`: optimization algorithm. +- `options::Dict{Symbol, Any}`: options for the optimization algorithm +- `local_algorithm`: local optimization algorithm +- `local_options::Dict{Symbol, Any}`: options for the local optimization algorithm +- `equality_constraints::Vector{NLoptConstraint}`: vector of equality constraints +- `inequality_constraints::Vector{NLoptConstraint}`: vector of inequality constraints + +# Example +```julia +my_optimizer = SemOptimizerNLopt() + +# constrained optimization with augmented lagrangian +my_constrained_optimizer = SemOptimizerNLopt(; + algorithm = :AUGLAG, + local_algorithm = :LD_LBFGS, + local_options = Dict(:ftol_rel => 1e-6), + inequality_constraints = NLoptConstraint(;f = my_constraint, tol = 0.0), +) +``` + +# Usage +All algorithms and options from the NLopt library are available, for more information see +the NLopt.jl package and the NLopt online documentation. +For information on how to use inequality and equality constraints, +see [Constrained optimization](@ref) in our online documentation. + +# Extended help + +## Interfaces +- `algorithm(::SemOptimizerNLopt)` +- `local_algorithm(::SemOptimizerNLopt)` +- `options(::SemOptimizerNLopt)` +- `local_options(::SemOptimizerNLopt)` +- `equality_constraints(::SemOptimizerNLopt)` +- `inequality_constraints(::SemOptimizerNLopt)` + +## Implementation + +Subtype of `SemOptimizer`. +""" +struct SemOptimizerNLopt{A, A2, B, B2, C} <: SemOptimizer{:NLopt} + algorithm::A + local_algorithm::A2 + options::B + local_options::B2 + equality_constraints::C + inequality_constraints::C +end + Base.@kwdef struct NLoptConstraint f::Any tol = 0.0 diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index 0d4748e3..aec61e57 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -1,3 +1,35 @@ +############################################################################################ +### Types +############################################################################################ +""" +Connects to `ProximalAlgorithms.jl` as the optimization backend. + +Can be used for regularized SEM, for a tutorial see the online docs on [Regularization](@ref). + +# Constructor + + SemOptimizerProximal(; + algorithm = ProximalAlgorithms.PANOC(), + operator_g, + operator_h = nothing, + kwargs..., + +# Arguments +- `algorithm`: optimization algorithm. +- `operator_g`: proximal operator (e.g., regularization penalty) +- `operator_h`: optional second proximal operator + +# Usage +All algorithms and operators from `ProximalAlgorithms.jl` are available, +for more information see the online docs on [Regularization](@ref) and +the documentation of `ProximalAlgorithms.jl` / `ProximalOperators.jl`. +""" +mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} + algorithm::A + operator_g::B + operator_h::C +end + SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) 
SemOptimizerProximal(; diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index e0cd1e7a..a306eccf 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -86,9 +86,6 @@ include("frontend/fit/fitmeasures/fit_measures.jl") # standard errors include("frontend/fit/standard_errors/hessian.jl") include("frontend/fit/standard_errors/bootstrap.jl") -# extensions -include("package_extensions/SEMNLOptExt.jl") -include("package_extensions/SEMProximalOptExt.jl") export AbstractSem, AbstractSemSingle, @@ -196,7 +193,5 @@ export AbstractSem, →, ←, ↔, - ⇔, - SemOptimizerNLopt, - SemOptimizerProximal + ⇔ end diff --git a/src/package_extensions/SEMNLOptExt.jl b/src/package_extensions/SEMNLOptExt.jl deleted file mode 100644 index 64c4cff0..00000000 --- a/src/package_extensions/SEMNLOptExt.jl +++ /dev/null @@ -1,65 +0,0 @@ -""" -Connects to `NLopt.jl` as the optimization backend. -Only usable if `NLopt.jl` is loaded in the current Julia session! - -# Constructor - - SemOptimizerNLopt(; - algorithm = :LD_LBFGS, - options = Dict{Symbol, Any}(), - local_algorithm = nothing, - local_options = Dict{Symbol, Any}(), - equality_constraints = Vector{NLoptConstraint}(), - inequality_constraints = Vector{NLoptConstraint}(), - kwargs...) - -# Arguments -- `algorithm`: optimization algorithm. -- `options::Dict{Symbol, Any}`: options for the optimization algorithm -- `local_algorithm`: local optimization algorithm -- `local_options::Dict{Symbol, Any}`: options for the local optimization algorithm -- `equality_constraints::Vector{NLoptConstraint}`: vector of equality constraints -- `inequality_constraints::Vector{NLoptConstraint}`: vector of inequality constraints - -# Example -```julia -my_optimizer = SemOptimizerNLopt() - -# constrained optimization with augmented lagrangian -my_constrained_optimizer = SemOptimizerNLopt(; - algorithm = :AUGLAG, - local_algorithm = :LD_LBFGS, - local_options = Dict(:ftol_rel => 1e-6), - inequality_constraints = NLoptConstraint(;f = my_constraint, tol = 0.0), -) -``` - -# Usage -All algorithms and options from the NLopt library are available, for more information see -the NLopt.jl package and the NLopt online documentation. -For information on how to use inequality and equality constraints, -see [Constrained optimization](@ref) in our online documentation. - -# Extended help - -## Interfaces -- `algorithm(::SemOptimizerNLopt)` -- `local_algorithm(::SemOptimizerNLopt)` -- `options(::SemOptimizerNLopt)` -- `local_options(::SemOptimizerNLopt)` -- `equality_constraints(::SemOptimizerNLopt)` -- `inequality_constraints(::SemOptimizerNLopt)` -""" -struct SemOptimizerNLopt{A, A2, B, B2, C} <: SemOptimizer{:NLopt} - algorithm::A - local_algorithm::A2 - options::B - local_options::B2 - equality_constraints::C - inequality_constraints::C -end - -Base.@kwdef struct NLoptConstraint - f::Any - tol = 0.0 -end diff --git a/src/package_extensions/SEMProximalOptExt.jl b/src/package_extensions/SEMProximalOptExt.jl deleted file mode 100644 index ad4c2da2..00000000 --- a/src/package_extensions/SEMProximalOptExt.jl +++ /dev/null @@ -1,27 +0,0 @@ -""" -Connects to `ProximalAlgorithms.jl` as the optimization backend. -Can be used for regularized SEM, for a tutorial see the online docs on [Regularization](@ref). - -# Constructor - - SemOptimizerProximal(; - algorithm = ProximalAlgorithms.PANOC(), - operator_g, - operator_h = nothing, - kwargs..., - -# Arguments -- `algorithm`: optimization algorithm. 
-- `operator_g`: proximal operator (e.g., regularization penalty) -- `operator_h`: optional second proximal operator - -# Usage -All algorithms and operators from `ProximalAlgorithms.jl` are available, -for more information see the online docs on [Regularization](@ref) and -the documentation of `ProximalAlgorithms.jl` / `ProximalOperators.jl`. -""" -mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} - algorithm::A - operator_g::B - operator_h::C -end From 421927ef79a466c399494bb6b83a764a37467b7d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 21:18:02 -0700 Subject: [PATCH 04/37] types.jl: move SemOptimizer API into abstract.jl --- src/optimizer/abstract.jl | 17 +++++++++++++++++ src/types.jl | 11 ----------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index c1ad7259..f00e5055 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,3 +1,20 @@ +engine(::Type{SemOptimizer{E}}) where {E} = E +engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) + +SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = + SemOptimizer{engine}(args...; kwargs...) + +# fallback optimizer constructor +function SemOptimizer{E}(args...; kwargs...) where {E} + if E == :NLOpt + error("$E optimizer requires \"using NLopt\".") + elseif E == :Proximal + error("$E optimizer requires \"using ProximalAlgorithms\".") + else + error("$E optimizer is not supported.") + end +end + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) diff --git a/src/types.jl b/src/types.jl index 0e279e5b..73a650e6 100644 --- a/src/types.jl +++ b/src/types.jl @@ -86,17 +86,6 @@ If you want to connect the SEM package to a new optimization backend, you should """ abstract type SemOptimizer{E} end -engine(::Type{SemOptimizer{E}}) where {E} = E -engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) - -SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = - SemOptimizer{engine}(args...; kwargs...) - -# fallback optimizer constructor -function SemOptimizer{E}(args...; kwargs...) where {E} - throw(ErrorException("$E optimizer is not supported.")) -end - """ Supertype of all objects that can serve as the observed field of a SEM. Pre-processes data and computes sufficient statistics for example. From 84bd7bdbb0ea9e30b519a9d1e8aaf372e8d3f5f8 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:25:57 -0700 Subject: [PATCH 05/37] NLoptResult should not be mutable --- ext/SEMNLOptExt/NLopt.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 694247ca..a51a3d06 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -124,7 +124,7 @@ local_options(optimizer::SemOptimizerNLopt) = optimizer.local_options equality_constraints(optimizer::SemOptimizerNLopt) = optimizer.equality_constraints inequality_constraints(optimizer::SemOptimizerNLopt) = optimizer.inequality_constraints -mutable struct NLoptResult +struct NLoptResult result::Any problem::Any end From 930e0e5f005546aab1afee605c027a3738b2dc4a Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:32:15 -0700 Subject: [PATCH 06/37] SemNLOpt: use f or f => tol pair for constraints It is a simple and intuitive syntax and avoids declaring new types. Also allow specifying default constraint tolerance as `constraint_tol`. 
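For illustration, a sketch of the resulting constructor syntax (`eq_con` and `ineq_con` are placeholder user-defined constraint functions, not part of the package):

```julia
using StructuralEquationModels, NLopt

# placeholder constraint functions of (θ, gradient); gradient handling omitted
# for brevity, gradient-based algorithms expect it to be filled in-place
eq_con(θ, gradient) = θ[1] + θ[2] - 1.0
ineq_con(θ, gradient) = θ[3] - θ[4]

SemOptimizerNLopt(
    algorithm = :AUGLAG,
    local_algorithm = :LD_LBFGS,
    # plain function: the default `constraint_tol` is used
    equality_constraints = eq_con,
    # `f => tol` pair overrides the tolerance for this constraint
    inequality_constraints = [ineq_con => 1e-8],
    constraint_tol = 1e-10,
)
```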
--- docs/src/tutorials/constraints/constraints.md | 6 +- ext/SEMNLOptExt/NLopt.jl | 108 +++++++++--------- ext/SEMNLOptExt/SEMNLOptExt.jl | 2 +- .../political_democracy/constraints.jl | 4 +- 4 files changed, 61 insertions(+), 59 deletions(-) diff --git a/docs/src/tutorials/constraints/constraints.md b/docs/src/tutorials/constraints/constraints.md index c433240a..938a2bb9 100644 --- a/docs/src/tutorials/constraints/constraints.md +++ b/docs/src/tutorials/constraints/constraints.md @@ -1,6 +1,6 @@ # Constrained optimization -## Using the NLopt backend +## Using the NLopt engine ### Define an example model @@ -128,8 +128,8 @@ constrained_optimizer = SemOptimizerNLopt( algorithm = :AUGLAG, options = Dict(:upper_bounds => upper_bounds, :xtol_abs => 1e-4), local_algorithm = :LD_LBFGS, - equality_constraints = NLoptConstraint(;f = eq_constraint, tol = 1e-8), - inequality_constraints = NLoptConstraint(;f = ineq_constraint, tol = 1e-8), + equality_constraints = (eq_constraint => 1e-8), + inequality_constraints = (ineq_constraint => 1e-8), ) ``` diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index a51a3d06..fe052b13 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -1,6 +1,9 @@ ############################################################################################ ### Types ############################################################################################ + +const NLoptConstraint = Pair{Any, Number} + """ Connects to `NLopt.jl` as the optimization backend. Only usable if `NLopt.jl` is loaded in the current Julia session! @@ -12,8 +15,9 @@ Only usable if `NLopt.jl` is loaded in the current Julia session! options = Dict{Symbol, Any}(), local_algorithm = nothing, local_options = Dict{Symbol, Any}(), - equality_constraints = Vector{NLoptConstraint}(), - inequality_constraints = Vector{NLoptConstraint}(), + equality_constraints = nothing, + inequality_constraints = nothing, + constraint_tol::Number = 0.0, kwargs...) # Arguments @@ -21,19 +25,32 @@ Only usable if `NLopt.jl` is loaded in the current Julia session! - `options::Dict{Symbol, Any}`: options for the optimization algorithm - `local_algorithm`: local optimization algorithm - `local_options::Dict{Symbol, Any}`: options for the local optimization algorithm -- `equality_constraints::Vector{NLoptConstraint}`: vector of equality constraints -- `inequality_constraints::Vector{NLoptConstraint}`: vector of inequality constraints +- `equality_constraints: optional equality constraints +- `inequality_constraints:: optional inequality constraints +- `constraint_tol::Number`: default tolerance for constraints + +## Constraints specification + +Equality and inequality constraints arguments could be a single constraint or any +iterable constraints container (e.g. vector or tuple). +Each constraint could be a function or any other callable object that +takes the two input arguments: + - the vector of the model parameters; + - the array for the in-place calculation of the constraint gradient. +To override the default tolerance, the constraint could be specified +as a pair of the function and its tolerance: `constraint_func => tol`. 
# Example ```julia -my_optimizer = SemOptimizerNLopt() +my_optimizer = SemOptimizer(engine = :NLopt) # constrained optimization with augmented lagrangian -my_constrained_optimizer = SemOptimizerNLopt(; +my_constrained_optimizer = SemOptimizer(; + engine = :NLopt, algorithm = :AUGLAG, local_algorithm = :LD_LBFGS, local_options = Dict(:ftol_rel => 1e-6), - inequality_constraints = NLoptConstraint(;f = my_constraint, tol = 0.0), + inequality_constraints = (my_constraint => tol), ) ``` @@ -57,25 +74,15 @@ see [Constrained optimization](@ref) in our online documentation. Subtype of `SemOptimizer`. """ -struct SemOptimizerNLopt{A, A2, B, B2, C} <: SemOptimizer{:NLopt} - algorithm::A - local_algorithm::A2 - options::B - local_options::B2 - equality_constraints::C - inequality_constraints::C +struct SemOptimizerNLopt <: SemOptimizer{:NLopt} + algorithm::Symbol + local_algorithm::Union{Symbol, Nothing} + options::Dict{Symbol, Any} + local_options::Dict{Symbol, Any} + equality_constraints::Vector{NLoptConstraint} + inequality_constraints::Vector{NLoptConstraint} end -Base.@kwdef struct NLoptConstraint - f::Any - tol = 0.0 -end - -Base.convert( - ::Type{NLoptConstraint}, - tuple::NamedTuple{(:f, :tol), Tuple{F, T}}, -) where {F, T} = NLoptConstraint(tuple.f, tuple.tol) - ############################################################################################ ### Constructor ############################################################################################ @@ -85,22 +92,26 @@ function SemOptimizerNLopt(; local_algorithm = nothing, options = Dict{Symbol, Any}(), local_options = Dict{Symbol, Any}(), - equality_constraints = Vector{NLoptConstraint}(), - inequality_constraints = Vector{NLoptConstraint}(), - kwargs..., + equality_constraints = nothing, + inequality_constraints = nothing, + constraint_tol::Number = 0.0, + kwargs..., # FIXME remove the sink for unused kwargs ) - applicable(iterate, equality_constraints) && !isa(equality_constraints, NamedTuple) || - (equality_constraints = [equality_constraints]) - applicable(iterate, inequality_constraints) && - !isa(inequality_constraints, NamedTuple) || - (inequality_constraints = [inequality_constraints]) + constraint(f::Any) = f => constraint_tol + constraint(f_and_tol::Pair) = f_and_tol + + constraints(::Nothing) = Vector{NLoptConstraint}() + constraints(constraints) = + applicable(iterate, constraints) && !isa(constraints, Pair) ? 
+ [constraint(constr) for constr in constraints] : [constraint(constraints)] + return SemOptimizerNLopt( algorithm, local_algorithm, options, local_options, - convert.(NLoptConstraint, equality_constraints), - convert.(NLoptConstraint, inequality_constraints), + constraints(equality_constraints), + constraints(inequality_constraints), ) end @@ -151,10 +162,7 @@ function SEM.fit( start_params::AbstractVector; kwargs..., ) - - # construct the NLopt problem - opt = construct_NLopt_problem(optim.algorithm, optim.options, length(start_params)) - set_NLopt_constraints!(opt, optim) + opt = construct_NLopt(optim.algorithm, optim.options, nparams(model)) opt.min_objective = (par, G) -> SEM.evaluate!( zero(eltype(par)), @@ -163,13 +171,16 @@ function SEM.fit( model, par, ) + for (f, tol) in optim.inequality_constraints + inequality_constraint!(opt, f, tol) + end + for (f, tol) in optim.equality_constraints + equality_constraint!(opt, f, tol) + end if !isnothing(optim.local_algorithm) - opt_local = construct_NLopt_problem( - optim.local_algorithm, - optim.local_options, - length(start_params), - ) + opt_local = + construct_NLopt(optim.local_algorithm, optim.local_options, nparams(model)) opt.local_optimizer = opt_local end @@ -183,7 +194,7 @@ end ### additional functions ############################################################################################ -function construct_NLopt_problem(algorithm, options, npar) +function construct_NLopt(algorithm, options, npar) opt = Opt(algorithm, npar) for (key, val) in pairs(options) @@ -193,15 +204,6 @@ function construct_NLopt_problem(algorithm, options, npar) return opt end -function set_NLopt_constraints!(opt::Opt, optimizer::SemOptimizerNLopt) - for con in optimizer.inequality_constraints - inequality_constraint!(opt, con.f, con.tol) - end - for con in optimizer.equality_constraints - equality_constraint!(opt, con.f, con.tol) - end -end - ############################################################################################ # pretty printing ############################################################################################ diff --git a/ext/SEMNLOptExt/SEMNLOptExt.jl b/ext/SEMNLOptExt/SEMNLOptExt.jl index a159f6dc..61c41338 100644 --- a/ext/SEMNLOptExt/SEMNLOptExt.jl +++ b/ext/SEMNLOptExt/SEMNLOptExt.jl @@ -4,7 +4,7 @@ using StructuralEquationModels, NLopt SEM = StructuralEquationModels -export SemOptimizerNLopt, NLoptConstraint +export SemOptimizerNLopt include("NLopt.jl") diff --git a/test/examples/political_democracy/constraints.jl b/test/examples/political_democracy/constraints.jl index cc1b0874..7a6670fa 100644 --- a/test/examples/political_democracy/constraints.jl +++ b/test/examples/political_democracy/constraints.jl @@ -26,8 +26,8 @@ constrained_optimizer = SemOptimizer(; algorithm = :AUGLAG, local_algorithm = :LD_LBFGS, options = Dict(:xtol_rel => 1e-4), - # equality_constraints = (f = eq_constraint, tol = 1e-14), - inequality_constraints = (f = ineq_constraint, tol = 0.0), + # equality_constraints = (eq_constraint => 1e-14), + inequality_constraints = (ineq_constraint => 0.0), ) @test constrained_optimizer isa SemOptimizer{:NLopt} From 7bd1007cdad66f031e25e706900e8a9c795a763c Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:34:22 -0700 Subject: [PATCH 07/37] NLopt: update/simplify docs use SemOptimizer(engine = :NLopt) instead of SemOptimizerNLopt() as this is a more universal scheme --- docs/src/tutorials/backends/nlopt.md | 34 ++++++------------- docs/src/tutorials/constraints/constraints.md | 23 
++++++------- 2 files changed, 22 insertions(+), 35 deletions(-) diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index feb5c8f4..840f3992 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -1,31 +1,21 @@ # Using NLopt.jl -[`SemOptimizerNLopt`](@ref) implements the connection to `NLopt.jl`. -It is only available if the `NLopt` package is loaded alongside `StructuralEquationModels.jl` in the running Julia session. -It takes a bunch of arguments: +When [`NLopt.jl`](https://github.com/jump-dev/NLopt.jl) is loaded in the running Julia session, +it could be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` +(see [NLopt-specific options](@ref `SemOptimizerNLopt`)). +Among other things, `NLopt` enables constrained optimization of the SEM models, which is +explained in the [Constrained optimization](@ref) section. -```julia - • algorithm: optimization algorithm - - • options::Dict{Symbol, Any}: options for the optimization algorithm - - • local_algorithm: local optimization algorithm - - • local_options::Dict{Symbol, Any}: options for the local optimization algorithm - - • equality_constraints::Vector{NLoptConstraint}: vector of equality constraints - - • inequality_constraints::Vector{NLoptConstraint}: vector of inequality constraints -``` -Constraints are explained in the section on [Constrained optimization](@ref). - -The defaults are LBFGS as the optimization algorithm and the standard options from `NLopt.jl`. -We can choose something different: +We can override the default *NLopt* algorithm (LFBGS) and instead use +the *augmented lagrangian* method with LBFGS as the *local* optimization algorithm, +stop at a maximum of 200 evaluations and use a relative tolerance of +the objective value of `1e-6` as the stopping criterion for the local algorithm: ```julia using NLopt -my_optimizer = SemOptimizerNLopt(; +my_optimizer = SemOptimizer(; + engine = :NLopt, algorithm = :AUGLAG, options = Dict(:maxeval => 200), local_algorithm = :LD_LBFGS, @@ -33,8 +23,6 @@ my_optimizer = SemOptimizerNLopt(; ) ``` -This uses an augmented lagrangian method with LBFGS as the local optimization algorithm, stops at a maximum of 200 evaluations and uses a relative tolerance of the objective value of `1e-6` as the stopping criterion for the local algorithm. - To see how to use the optimizer to actually fit a model now, check out the [Model fitting](@ref) section. In the NLopt docs, you can find explanations about the different [algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/) and a [tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/) that also explains the different options. diff --git a/docs/src/tutorials/constraints/constraints.md b/docs/src/tutorials/constraints/constraints.md index 938a2bb9..699c962b 100644 --- a/docs/src/tutorials/constraints/constraints.md +++ b/docs/src/tutorials/constraints/constraints.md @@ -64,7 +64,8 @@ Let's introduce some constraints: (Of course those constaints only serve an illustratory purpose.) -We first need to get the indices of the respective parameters that are invoved in the constraints. +To fit the SEM model with the functional constraints, we will use the *NLopt* optimization engine. +Since *NLopt* does not have access to the SEM parameter names, we have to lookup the indices of the respective parameters that are invoved in the constraints. 
We can look up their labels in the output above, and retrieve their indices as ```@example constraints @@ -115,16 +116,17 @@ If the algorithm needs gradients at an iteration, it will pass the vector `gradi With `if length(gradient) > 0` we check if the algorithm needs gradients, and if it does, we fill the `gradient` vector with the gradients of the constraint w.r.t. the parameters. -In NLopt, vector-valued constraints are also possible, but we refer to the documentation for that. +In *NLopt*, vector-valued constraints are also possible, but we refer to the documentation for that. ### Fit the model -We now have everything together to specify and fit our model. First, we specify our optimizer backend as +Now we can construct the *SemOptimizer* that will use the *NLopt* engine for constrained optimization. ```@example constraints using NLopt -constrained_optimizer = SemOptimizerNLopt( +constrained_optimizer = SemOptimizer( + engine = :NLopt, algorithm = :AUGLAG, options = Dict(:upper_bounds => upper_bounds, :xtol_abs => 1e-4), local_algorithm = :LD_LBFGS, @@ -133,7 +135,7 @@ constrained_optimizer = SemOptimizerNLopt( ) ``` -As you see, the equality constraints and inequality constraints are passed as keyword arguments, and the bounds are passed as options for the (outer) optimization algorithm. +As you see, the equality and inequality constraints are passed as keyword arguments, and the bounds are passed as options for the (outer) optimization algorithm. Additionally, for equality and inequality constraints, a feasibility tolerance can be specified that controls if a solution can be accepted, even if it violates the constraints by a small amount. Especially for equality constraints, it is recommended to allow for a small positive tolerance. In this example, we set both tolerances to `1e-8`. @@ -141,19 +143,16 @@ In this example, we set both tolerances to `1e-8`. !!! warning "Convergence criteria" We have often observed that the default convergence criteria in NLopt lead to non-convergence flags. Indeed, this example does not convergence with default criteria. - As you see above, we used a realively liberal absolute tolerance in the optimization parameters of 1e-4. + As you see above, we used a relatively liberal absolute tolerance in the optimization parameters of 1e-4. This should not be a problem in most cases, as the sampling variance in (almost all) structural equation models should lead to uncertainty in the parameter estimates that are orders of magnitude larger. We nontheless recommend choosing a convergence criterion with care (i.e. w.r.t. the scale of your parameters), inspecting the solutions for plausibility, and comparing them to unconstrained solutions. 
-```@example constraints -model_constrained = Sem( - specification = partable, - data = data -) +We now have everything to fit our model under constraints: -model_fit_constrained = fit(constrained_optimizer, model_constrained) +```@example constraints +model_fit_constrained = fit(constrained_optimizer, model) ``` As you can see, the optimizer converged (`:XTOL_REACHED`) and investigating the solution yields From 96f5f17461c3873e8f9098d139aa1578a6d89464 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:35:11 -0700 Subject: [PATCH 08/37] Optim.md: SemOptimizerOptim => SemOptimizer --- docs/src/tutorials/backends/optim.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/src/tutorials/backends/optim.md b/docs/src/tutorials/backends/optim.md index cf287e77..a16537ec 100644 --- a/docs/src/tutorials/backends/optim.md +++ b/docs/src/tutorials/backends/optim.md @@ -1,23 +1,23 @@ # Using Optim.jl -[`SemOptimizerOptim`](@ref) implements the connection to `Optim.jl`. -It takes two arguments, `algorithm` and `options`. -The defaults are LBFGS as the optimization algorithm and the standard options from `Optim.jl`. -We can load the `Optim` and `LineSearches` packages to choose something different: +[Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) is the default optimization engine of *SEM.jl*, +see [`SemOptimizerOptim`](@ref) for a full list of its parameters. +It defaults to the LBFGS optimization, but we can load the `Optim` and `LineSearches` packages +and specify BFGS (!not L-BFGS) with a back-tracking linesearch and Hager-Zhang initial step length guess: ```julia using Optim, LineSearches -my_optimizer = SemOptimizerOptim( +my_optimizer = SemOptimizer( algorithm = BFGS( - linesearch = BackTracking(order=3), + linesearch = BackTracking(order=3), alphaguess = InitialHagerZhang() - ), - options = Optim.Options(show_trace = true) - ) + ), + options = Optim.Options(show_trace = true) +) ``` -This optimizer will use BFGS (!not L-BFGS) with a back tracking linesearch and a certain initial step length guess. Also, the trace of the optimization will be printed to the console. +Note that we used `options` to print the optimization progress to the console. To see how to use the optimizer to actually fit a model now, check out the [Model fitting](@ref) section. From 869429b7ad497e5cdc63734adb1c5933daa03b1e Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Fri, 21 Mar 2025 23:37:20 -0700 Subject: [PATCH 09/37] regulariz.md: SemOptimProx => SemOptimizer --- .../regularization/regularization.md | 49 +++++++------------ 1 file changed, 19 insertions(+), 30 deletions(-) diff --git a/docs/src/tutorials/regularization/regularization.md b/docs/src/tutorials/regularization/regularization.md index 2b2c6df3..e7f083d1 100644 --- a/docs/src/tutorials/regularization/regularization.md +++ b/docs/src/tutorials/regularization/regularization.md @@ -5,7 +5,10 @@ For ridge regularization, you can simply use `SemRidge` as an additional loss function (for example, a model with the loss functions `SemML` and `SemRidge` corresponds to ridge-regularized maximum likelihood estimation). 
-For lasso, elastic net and (far) beyond, you can load the `ProximalAlgorithms.jl` and `ProximalOperators.jl` packages alongside `StructuralEquationModels`: +For lasso, elastic net and (far) beyond, you can use the [`ProximalOperators.jl`](https://github.com/JuliaFirstOrder/ProximalOperators.jl) +and optimize the model with [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) +that provides so-called *proximal optimization* algorithms. +It can handle, amongst other things, various forms of regularization. ```@setup reg using StructuralEquationModels, ProximalAlgorithms, ProximalOperators @@ -19,24 +22,22 @@ Pkg.add("ProximalOperators") using StructuralEquationModels, ProximalAlgorithms, ProximalOperators ``` -## `SemOptimizerProximal` +## Proximal optimization -To estimate regularized models, we provide a "building block" for the optimizer part, called `SemOptimizerProximal`. -It connects our package to the [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) optimization backend, providing so-called proximal optimization algorithms. -Those can handle, amongst other things, various forms of regularization. - -It can be used as +With *ProximalAlgorithms* package loaded, it is now possible to use `:Proximal` optimization engine +in `SemOptimizer` for estimating regularized models. ```julia -SemOptimizerProximal( +SemOptimizer(; + engine = :Proximal, algorithm = ProximalAlgorithms.PANOC(), operator_g, operator_h = nothing ) ``` -The proximal operator (aka the regularization function) can be passed as `operator_g`. -The available Algorithms are listed [here](https://juliafirstorder.github.io/ProximalAlgorithms.jl/stable/guide/implemented_algorithms/). +The proximal operator (aka the regularization function) can be passed as `operator_g`, available options are listed [here](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/). +The available algorithms are listed [here](https://juliafirstorder.github.io/ProximalAlgorithms.jl/stable/guide/implemented_algorithms/). ## First example - lasso @@ -100,26 +101,18 @@ From the previously linked [documentation](https://juliafirstorder.github.io/Pro ```@example reg λ = zeros(31); λ[ind] .= 0.02 -``` - -and use `SemOptimizerProximal`. -```@example reg -optimizer_lasso = SemOptimizerProximal( +optimizer_lasso = SemOptimizer( + engine = :Proximal, operator_g = NormL1(λ) ) - -model_lasso = Sem( - specification = partable, - data = data -) ``` Let's fit the regularized model ```@example reg -fit_lasso = fit(optimizer_lasso, model_lasso) +fit_lasso = fit(optimizer_lasso, model) ``` and compare the solution to unregularizted estimates: @@ -134,7 +127,8 @@ update_partable!(partable, :estimate_lasso, fit_lasso, solution(fit_lasso)) details(partable) ``` -Instead of explicitely defining a `SemOptimizerProximal` object, you can also pass `engine = :Proximal` and additional keyword arguments to `fit`: +Instead of explicitly defining a `SemOptimizer` object, you can also pass `engine = :Proximal` +and additional keyword arguments directly to the `fit` function: ```@example reg sem_fit = fit(model; engine = :Proximal, operator_g = NormL1(λ)) @@ -143,25 +137,20 @@ sem_fit = fit(model; engine = :Proximal, operator_g = NormL1(λ)) ## Second example - mixed l1 and l0 regularization You can choose to penalize different parameters with different types of regularization functions. 
-Let's use the lasso again on the covariances, but additionally penalyze the error variances of the observed items via l0 regularization. +Let's use the lasso again on the covariances, but additionally penalize the error variances of the observed items via l0 regularization. The l0 penalty is defined as ```math \lambda \mathrm{nnz}(\theta) ``` -To define a sup of separable proximal operators (i.e. no parameter is penalized twice), +To define a sum of separable proximal operators (i.e. no parameter is penalized twice), we can use [`SlicedSeparableSum`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/calculus/#ProximalOperators.SlicedSeparableSum) from the `ProximalOperators` package: ```@example reg prox_operator = SlicedSeparableSum((NormL0(20.0), NormL1(0.02), NormL0(0.0)), ([ind], [9:11], [vcat(1:8, 12:25)])) -model_mixed = Sem( - specification = partable, - data = data, -) - -fit_mixed = fit(model_mixed; engine = :Proximal, operator_g = prox_operator) +fit_mixed = fit(model; engine = :Proximal, operator_g = prox_operator) ``` Let's again compare the different results: From 6d88590d5768bc6c8802c4479df49517b0e7b6af Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:36:13 -0800 Subject: [PATCH 10/37] engine(): fix signature --- src/optimizer/abstract.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index f00e5055..cce08f2d 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,4 +1,4 @@ -engine(::Type{SemOptimizer{E}}) where {E} = E +engine(::Type{<:SemOptimizer{E}}) where {E} = E engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = From 3e2de1db590381898d0912f814febf6b2f7b7661 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:39:33 -0800 Subject: [PATCH 11/37] optimizer_engines(): new method --- Project.toml | 2 ++ src/StructuralEquationModels.jl | 2 ++ src/optimizer/abstract.jl | 11 +++++++++++ 3 files changed, 15 insertions(+) diff --git a/Project.toml b/Project.toml index 37634708..0cc596b9 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" LazyArtifacts = "4af54fe1-eca0-43a8-85a7-787d91b784e3" LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -30,6 +31,7 @@ StenoGraphs = "0.2 - 0.3, 0.4.1 - 0.5" DataFrames = "1" Distributions = "0.25" FiniteDiff = "2" +InteractiveUtils = "1.11.0" LineSearches = "7" NLSolversBase = "7" NLopt = "0.6, 1" diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index a306eccf..f537dafb 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -18,6 +18,8 @@ using LinearAlgebra, import StatsAPI: params, coef, coefnames, dof, fit, nobs, coeftable +using InteractiveUtils: subtypes + export StenoGraphs, @StenoGraph, meld, SimpleNode const SEM = StructuralEquationModels diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index cce08f2d..6a053a48 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -15,6 +15,17 @@ function SemOptimizer{E}(args...; kwargs...) 
where {E} end end +""" + optimizer_engines() + +Returns a vector of optimizer engines supported by the `engine` keyword argument of +the [`SemOptimizer`](@ref) constructor. + +The list of engines depends on the Julia packages loaded (with the `using` directive) +into the current session. +""" +optimizer_engines() = Symbol[engine(opt_type) for opt_type in subtypes(SemOptimizer)] + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) From a560a01656f2d11cc2c1a9168f85ffc215193378 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:42:20 -0800 Subject: [PATCH 12/37] SemOptimizer() ctor switch to Val(E) dispatch It works around docstring errors during SemOptimizer{:E} parsing. --- ext/SEMNLOptExt/NLopt.jl | 2 +- src/optimizer/Empty.jl | 2 +- src/optimizer/abstract.jl | 12 ++++++++---- src/optimizer/optim.jl | 4 ++-- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index fe052b13..e8752769 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -115,7 +115,7 @@ function SemOptimizerNLopt(; ) end -SEM.SemOptimizer{:NLopt}(args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) +SEM.SemOptimizer(::Val{:NLopt}, args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) ############################################################################################ ### Recommended methods diff --git a/src/optimizer/Empty.jl b/src/optimizer/Empty.jl index 1bf0c30a..b0eaef25 100644 --- a/src/optimizer/Empty.jl +++ b/src/optimizer/Empty.jl @@ -15,7 +15,7 @@ struct SemOptimizerEmpty <: SemOptimizer{:Empty} end ### Constructor ############################################################################################ -SemOptimizer{:Empty}() = SemOptimizerEmpty() +SemOptimizer(::Val{:Empty}) = SemOptimizerEmpty() ############################################################################################ ### Recommended methods diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index 6a053a48..ac51d253 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -4,17 +4,21 @@ engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = SemOptimizer{engine}(args...; kwargs...) -# fallback optimizer constructor -function SemOptimizer{E}(args...; kwargs...) where {E} - if E == :NLOpt +# fallback optimizer constructor when the engine E is not supported +function SemOptimizer(::Val{E}, args...; kwargs...) where {E} + if typeof(E) !== Symbol + throw(ArgumentError("engine argument must be a Symbol.")) + elseif E == :NLOpt error("$E optimizer requires \"using NLopt\".") elseif E == :Proximal error("$E optimizer requires \"using ProximalAlgorithms\".") else - error("$E optimizer is not supported.") + error("$E optimizer engine is not supported.") end end +SemOptimizer{E}(args...; kwargs...) where {E} = SemOptimizer(Val(E), args...; kwargs...) + """ optimizer_engines() diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 2d782473..2408c782 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -63,14 +63,14 @@ mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} options::B end -SemOptimizer{:Optim}(args...; kwargs...) = SemOptimizerOptim(args...; kwargs...) 
- SemOptimizerOptim(; algorithm = LBFGS(), options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs..., ) = SemOptimizerOptim(algorithm, options) +SemOptimizer(::Val{:Optim}, args...; kwargs...) = SemOptimizerOptim(args...; kwargs...) + ############################################################################################ ### Recommended methods ############################################################################################ From 8dacd438c0b2c48221f1c1af7743ef8db458f425 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 09:48:44 -0800 Subject: [PATCH 13/37] SemOptimizer: reattach docstrings to ctor Instead of engine-specific subtype as it might be not available in the user session. --- ext/SEMNLOptExt/NLopt.jl | 35 ++++++++++++++++++++++------------- src/optimizer/Empty.jl | 13 ++++++------- src/optimizer/abstract.jl | 18 ++++++++++++++++++ src/optimizer/optim.jl | 38 ++++++++++++++++++++------------------ 4 files changed, 66 insertions(+), 38 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index e8752769..93621342 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -4,6 +4,19 @@ const NLoptConstraint = Pair{Any, Number} +struct SemOptimizerNLopt <: SemOptimizer{:NLopt} + algorithm::Symbol + local_algorithm::Union{Symbol, Nothing} + options::Dict{Symbol, Any} + local_options::Dict{Symbol, Any} + equality_constraints::Vector{NLoptConstraint} + inequality_constraints::Vector{NLoptConstraint} +end + +############################################################################################ +### Constructor +############################################################################################ + """ Connects to `NLopt.jl` as the optimization backend. Only usable if `NLopt.jl` is loaded in the current Julia session! @@ -74,19 +87,6 @@ see [Constrained optimization](@ref) in our online documentation. Subtype of `SemOptimizer`. """ -struct SemOptimizerNLopt <: SemOptimizer{:NLopt} - algorithm::Symbol - local_algorithm::Union{Symbol, Nothing} - options::Dict{Symbol, Any} - local_options::Dict{Symbol, Any} - equality_constraints::Vector{NLoptConstraint} - inequality_constraints::Vector{NLoptConstraint} -end - -############################################################################################ -### Constructor -############################################################################################ - function SemOptimizerNLopt(; algorithm = :LD_LBFGS, local_algorithm = nothing, @@ -115,6 +115,15 @@ function SemOptimizerNLopt(; ) end +""" + SemOptimizer(args...; engine = :NLopt, kwargs...) + +Creates SEM optimizer using [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl). + +# Extended help + +See [`SemOptimizerNLopt`](@ref) for a full reference. +""" SEM.SemOptimizer(::Val{:NLopt}, args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) ############################################################################################ diff --git a/src/optimizer/Empty.jl b/src/optimizer/Empty.jl index b0eaef25..9c745470 100644 --- a/src/optimizer/Empty.jl +++ b/src/optimizer/Empty.jl @@ -1,20 +1,19 @@ ############################################################################################ ### Types ############################################################################################ -""" -Empty placeholder for models that don't need -an optimizer part. 
- -# Constructor - SemOptimizerEmpty() -""" +# dummy SEM optimizer struct SemOptimizerEmpty <: SemOptimizer{:Empty} end ############################################################################################ ### Constructor ############################################################################################ +""" + SemOptimizer(engine = :Empty) + +Constructs a dummy optimizer for models that don't need it. +""" SemOptimizer(::Val{:Empty}) = SemOptimizerEmpty() ############################################################################################ diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index ac51d253..1ed7346c 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,6 +1,24 @@ engine(::Type{<:SemOptimizer{E}}) where {E} = E engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) +""" + SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) + +Constructs [`SemOptimizer`](@ref) for the specified optimization `engine`. + +A wrapper function that passes `args...` and `kwargs...` to the +engine-specific optimizer constructor. + +Uses `:Optim` as the default engine. +Throws an error if the specified `engine` is not supported. +Call [`optimizer_engines`](@ref) for the list of supported engines. + +For the information about using the engine `:EngineName`, use +```julia +?SemOptimizer(Val(:EngineName)) +``` + +""" SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = SemOptimizer{engine}(args...; kwargs...) diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 2408c782..f5a201e6 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -3,25 +3,29 @@ ############################################################################################ ### Types and Constructor ############################################################################################ -""" - SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} - -Connects to `Optim.jl` as the optimization backend. -# Constructor +# SemOptimizer for the Optim.jl +mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} + algorithm::A + options::B +end - SemOptimizerOptim(; +""" + SemOptimizer(; + engine = :Optim, algorithm = LBFGS(), options = Optim.Options(;f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs...) +Creates SEM optimizer using [*Optim.jl*](https://julianlsolvers.github.io/Optim.jl/stable/). + # Arguments -- `algorithm`: optimization algorithm from `Optim.jl` +- `algorithm`: optimization algorithm from *Optim.jl* - `options::Optim.Options`: options for the optimization algorithm # Usage -All algorithms and options from the Optim.jl library are available, for more information see -the Optim.jl online documentation. +All algorithms and options from the *Optim.jl* package are available, for more information see +the *Optim.jl* online documentation. # Examples ```julia @@ -53,22 +57,20 @@ for the variance parameters (the diagonal of the *S* matrix). ## Interfaces - `algorithm(::SemOptimizerOptim)` - `options(::SemOptimizerOptim)` - -## Implementation - -Subtype of `SemOptimizer`. """ -mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} - algorithm::A - options::B -end - SemOptimizerOptim(; algorithm = LBFGS(), options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs..., ) = SemOptimizerOptim(algorithm, options) +""" + SemOptimizer(args...; engine = :Optim, kwargs...) + +Creates SEM optimizer using [*Optim.jl*](https://julianlsolvers.github.io/Optim.jl/stable/). 
+
+See [`SemOptimizerOptim`](@ref) for the full reference.
+"""
 SemOptimizer(::Val{:Optim}, args...; kwargs...) = SemOptimizerOptim(args...; kwargs...)
 
 ############################################################################################

From 06a9a1334e8a93080aa966e0871e28ddcc0a5a0f Mon Sep 17 00:00:00 2001
From: Alexey Stukalov
Date: Tue, 27 Jan 2026 13:50:19 -0800
Subject: [PATCH 14/37] constraints.md: cleanups

---
 docs/src/tutorials/constraints/constraints.md | 41 ++++++++++---------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/docs/src/tutorials/constraints/constraints.md b/docs/src/tutorials/constraints/constraints.md
index 699c962b..32bb6a52 100644
--- a/docs/src/tutorials/constraints/constraints.md
+++ b/docs/src/tutorials/constraints/constraints.md
@@ -1,10 +1,15 @@
 # Constrained optimization
 
+*SEM.jl* allows fitting models with additional constraints imposed on the parameters.
+
 ## Using the NLopt engine
 
+*NLopt.jl* is one of the *SEM.jl* optimization engines that support constrained optimization.
+In the example below we show how to specify constraints for a *SEM* model when using *NLopt*.
+
 ### Define an example model
 
-Let's revisit our model from [A first model](@ref):
+Let's revisit our model from [A first model](@ref) and fit it first without constraints:
 
 ```@example constraints
 using StructuralEquationModels
@@ -57,40 +62,40 @@ details(partable)
 
 ### Define the constraints
 
-Let's introduce some constraints:
+Let's introduce some constraints (they are not based on any real properties of the underlying study and serve only as an example):
 
 1. **Equality constraint**: The covariances `y3 ↔ y7` and `y8 ↔ y4` should sum up to `1`.
 2. **Inequality constraint**: The difference between the loadings `dem60 → y2` and `dem60 → y3` should be smaller than `0.1`
 3. **Bound constraint**: The directed effect from `ind60 → dem65` should be smaller than `0.5`
 
-(Of course those constaints only serve an illustratory purpose.)
-
-To fit the SEM model with the functional constraints, we will use the *NLopt* optimization engine.
-Since *NLopt* does not have access to the SEM parameter names, we have to lookup the indices of the respective parameters that are invoved in the constraints.
-We can look up their labels in the output above, and retrieve their indices as
+Since *NLopt* does not have access to the SEM parameter names, its constraints are defined on the vector of all SEM parameters.
+We have to look up the indices of the parameters involved in the constraints to construct the respective functions.
 
 ```@example constraints
 parind = param_indices(model)
 parind[:y3y7] # 29
 ```
 
-The bound constraint is easy to specify: Just give a vector of upper or lower bounds that contains the bound for each parameter. In our example, only the parameter labeled `:λₗ` has an upper bound, and the number of total parameters is `n_par(model) = 31`, so we define
+The bound constraint is easy to specify: just give a vector of upper or lower bounds for each parameter.
+In our example, only the parameter labeled `:λₗ` has an upper bound, and the number of total parameters is `n_par(model) = 31`, so
 
 ```@example constraints
 upper_bounds = fill(Inf, 31)
 upper_bounds[parind[:λₗ]] = 0.5
 ```
 
-The equailty and inequality constraints have to be reformulated to be of the form `x = 0` or `x ≤ 0`:
-1. `y3 ↔ y7 + y8 ↔ y4 - 1 = 0`
-2. 
`dem60 → y2 - dem60 → y3 - 0.1 ≤ 0` +The equailty and inequality constraints have to be reformulated in the `f(θ) = 0` or `f(θ) ≤ 0` form, +where `θ` is the vector of SEM parameters: +1. `f(θ) = 0`, where `f(θ) = y3 ↔ y7 + y8 ↔ y4 - 1` +2. `g(θ) ≤ 0`, where `g(θ) = dem60 → y2 - dem60 → y3 - 0.1` -Now they can be defined as functions of the parameter vector: +If the optimization algorithm needs gradients, it will pass the `gradient` vector that is of the same size as the parameters, +and the constraint function has to calculate the gradient in-place. ```@example constraints parind[:y3y7] # 29 parind[:y8y4] # 30 # θ[29] + θ[30] - 1 = 0.0 -function eq_constraint(θ, gradient) +function f(θ, gradient) if length(gradient) > 0 gradient .= 0.0 gradient[29] = 1.0 @@ -102,7 +107,7 @@ end parind[:λ₂] # 3 parind[:λ₃] # 4 # θ[3] - θ[4] - 0.1 ≤ 0 -function ineq_constraint(θ, gradient) +function g(θ, gradient) if length(gradient) > 0 gradient .= 0.0 gradient[3] = 1.0 @@ -112,10 +117,6 @@ function ineq_constraint(θ, gradient) end ``` -If the algorithm needs gradients at an iteration, it will pass the vector `gradient` that is of the same size as the parameters. -With `if length(gradient) > 0` we check if the algorithm needs gradients, and if it does, we fill the `gradient` vector with the gradients -of the constraint w.r.t. the parameters. - In *NLopt*, vector-valued constraints are also possible, but we refer to the documentation for that. ### Fit the model @@ -130,8 +131,8 @@ constrained_optimizer = SemOptimizer( algorithm = :AUGLAG, options = Dict(:upper_bounds => upper_bounds, :xtol_abs => 1e-4), local_algorithm = :LD_LBFGS, - equality_constraints = (eq_constraint => 1e-8), - inequality_constraints = (ineq_constraint => 1e-8), + equality_constraints = (f => 1e-8), + inequality_constraints = (g => 1e-8), ) ``` From 519cff15c1e59ff2e341ca8b5e92d588f2ff9fca Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 13:54:22 -0800 Subject: [PATCH 15/37] reg.md: cleanup --- .../regularization/regularization.md | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/docs/src/tutorials/regularization/regularization.md b/docs/src/tutorials/regularization/regularization.md index e7f083d1..8254449f 100644 --- a/docs/src/tutorials/regularization/regularization.md +++ b/docs/src/tutorials/regularization/regularization.md @@ -5,10 +5,9 @@ For ridge regularization, you can simply use `SemRidge` as an additional loss function (for example, a model with the loss functions `SemML` and `SemRidge` corresponds to ridge-regularized maximum likelihood estimation). -For lasso, elastic net and (far) beyond, you can use the [`ProximalOperators.jl`](https://github.com/JuliaFirstOrder/ProximalOperators.jl) -and optimize the model with [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) +You can define lasso, elastic net and other forms of regularization using [`ProximalOperators.jl`](https://github.com/JuliaFirstOrder/ProximalOperators.jl) +and optimize the SEM model with [`ProximalAlgorithms.jl`](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) that provides so-called *proximal optimization* algorithms. -It can handle, amongst other things, various forms of regularization. 
```@setup reg using StructuralEquationModels, ProximalAlgorithms, ProximalOperators @@ -36,8 +35,8 @@ SemOptimizer(; ) ``` -The proximal operator (aka the regularization function) can be passed as `operator_g`, available options are listed [here](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/). -The available algorithms are listed [here](https://juliafirstorder.github.io/ProximalAlgorithms.jl/stable/guide/implemented_algorithms/). +The *proximal operator* (aka the *regularization function*) is passed as `operator_g`, see [available operators](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/). +The `algorithm` is chosen from one of the [available algorithms](https://juliafirstorder.github.io/ProximalAlgorithms.jl/stable/guide/implemented_algorithms/). ## First example - lasso @@ -85,7 +84,7 @@ model = Sem( We labeled the covariances between the items because we want to regularize those: ```@example reg -ind = getindex.( +cov_inds = getindex.( Ref(param_indices(model)), [:cov_15, :cov_24, :cov_26, :cov_37, :cov_48, :cov_68]) ``` @@ -97,10 +96,12 @@ The lasso penalty is defined as \sum \lambda_i \lvert \theta_i \rvert ``` -From the previously linked [documentation](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/#ProximalOperators.NormL1), we find that lasso regularization is named `NormL1` in the `ProximalOperators` package, and that we can pass an array of hyperparameters (`λ`) to control the amount of regularization for each parameter. To regularize only the observed item covariances, we define `λ` as +In `ProximalOperators.jl`, lasso regularization is represented by the [`NormL1`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/functions/#ProximalOperators.NormL1) operator. It allows controlling the amount of +regularization individually for each SEM model parameter via the vector of hyperparameters (`λ`). +To regularize only the observed item covariances, we define `λ` as ```@example reg -λ = zeros(31); λ[ind] .= 0.02 +λ = zeros(31); λ[cov_inds] .= 0.02 optimizer_lasso = SemOptimizer( engine = :Proximal, @@ -131,26 +132,27 @@ Instead of explicitly defining a `SemOptimizer` object, you can also pass `engin and additional keyword arguments directly to the `fit` function: ```@example reg -sem_fit = fit(model; engine = :Proximal, operator_g = NormL1(λ)) +fit_lasso2 = fit(model; engine = :Proximal, operator_g = NormL1(λ)) ``` ## Second example - mixed l1 and l0 regularization You can choose to penalize different parameters with different types of regularization functions. -Let's use the lasso again on the covariances, but additionally penalize the error variances of the observed items via l0 regularization. +Let's use the *lasso* (*l1*) again on the covariances, but additionally penalize the error variances of the observed items via *l0* regularization. -The l0 penalty is defined as +The *l0* penalty is defined as ```math -\lambda \mathrm{nnz}(\theta) +\lambda_0 = \mathrm{nnz}(\theta) ``` -To define a sum of separable proximal operators (i.e. no parameter is penalized twice), -we can use [`SlicedSeparableSum`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/calculus/#ProximalOperators.SlicedSeparableSum) from the `ProximalOperators` package: +Since we apply *l1* and *l0* to the disjoint sets of parameters, this regularization could be represented as +as sum of *separable proximal operators* (i.e. 
no parameter is penalized twice) +implemented by the [`SlicedSeparableSum`](https://juliafirstorder.github.io/ProximalOperators.jl/stable/calculus/#ProximalOperators.SlicedSeparableSum) operator: ```@example reg -prox_operator = SlicedSeparableSum((NormL0(20.0), NormL1(0.02), NormL0(0.0)), ([ind], [9:11], [vcat(1:8, 12:25)])) +l0_and_l1_reg = SlicedSeparableSum((NormL0(20.0), NormL1(0.02), NormL0(0.0)), ([cov_inds], [9:11], [vcat(1:8, 12:25)])) -fit_mixed = fit(model; engine = :Proximal, operator_g = prox_operator) +fit_mixed = fit(model; engine = :Proximal, operator_g = l0_and_l1_reg) ``` Let's again compare the different results: From ce87dd4736e87b7b29035c95d59c76db1d88e02d Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 13:54:47 -0800 Subject: [PATCH 16/37] NLopt.jl: fixup docstring since it moved from struct to ctor --- ext/SEMNLOptExt/NLopt.jl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 93621342..863d602f 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -18,12 +18,13 @@ end ############################################################################################ """ -Connects to `NLopt.jl` as the optimization backend. -Only usable if `NLopt.jl` is loaded in the current Julia session! +Uses *NLopt.jl* as the optimization engine. +Only available if *NLopt.jl* is loaded in the current Julia session! # Constructor - SemOptimizerNLopt(; + SemOptimizer(; + engine = :NLopt, algorithm = :LD_LBFGS, options = Dict{Symbol, Any}(), local_algorithm = nothing, @@ -68,8 +69,9 @@ my_constrained_optimizer = SemOptimizer(; ``` # Usage -All algorithms and options from the NLopt library are available, for more information see -the NLopt.jl package and the NLopt online documentation. +All algorithms and options from the *NLopt* library are available, for more information see +the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the +[NLopt docs](https://nlopt.readthedocs.io/en/latest/). For information on how to use inequality and equality constraints, see [Constrained optimization](@ref) in our online documentation. From 51121fd1a9575bbd54425503001b465ef1b2017e Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 14:03:37 -0800 Subject: [PATCH 17/37] docs: fixup docstring switch --- docs/src/tutorials/backends/nlopt.md | 2 +- docs/src/tutorials/backends/optim.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index 840f3992..9ff23336 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -2,7 +2,7 @@ When [`NLopt.jl`](https://github.com/jump-dev/NLopt.jl) is loaded in the running Julia session, it could be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` -(see [NLopt-specific options](@ref `SemOptimizerNLopt`)). +(see [NLopt-specific options](@ref `SemOptimizer(Val(:NLopt))`)). Among other things, `NLopt` enables constrained optimization of the SEM models, which is explained in the [Constrained optimization](@ref) section. 
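
As a quick sketch of the workflow described above — assuming a model `model::Sem` has already been specified and that *NLopt.jl* is loaded; the algorithm and option values here are purely illustrative — the `:NLopt` engine can be selected and used like this:

```julia
using StructuralEquationModels, NLopt

# Engine-specific optimizer: any NLopt algorithm can be selected via its symbol.
nlopt_optimizer = SemOptimizer(
    engine = :NLopt,
    algorithm = :LD_LBFGS,
    options = Dict(:ftol_rel => 1e-6),  # illustrative stopping criterion
)

# Fitting works the same way as with the default engine.
model_fit = fit(nlopt_optimizer, model)
```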
diff --git a/docs/src/tutorials/backends/optim.md b/docs/src/tutorials/backends/optim.md index a16537ec..545de805 100644 --- a/docs/src/tutorials/backends/optim.md +++ b/docs/src/tutorials/backends/optim.md @@ -1,7 +1,7 @@ # Using Optim.jl [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) is the default optimization engine of *SEM.jl*, -see [`SemOptimizerOptim`](@ref) for a full list of its parameters. +see [`SemOptimizer(Val(:Optim))`](@ref) for a full list of its parameters. It defaults to the LBFGS optimization, but we can load the `Optim` and `LineSearches` packages and specify BFGS (!not L-BFGS) with a back-tracking linesearch and Hager-Zhang initial step length guess: From de9d2e8f13f6b839acb555a122bc943c033bf8ae Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 14:04:14 -0800 Subject: [PATCH 18/37] tut/nlopt.md: cleanups --- docs/src/tutorials/backends/nlopt.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index 9ff23336..8bafe7c4 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -25,11 +25,12 @@ my_optimizer = SemOptimizer(; To see how to use the optimizer to actually fit a model now, check out the [Model fitting](@ref) section. -In the NLopt docs, you can find explanations about the different [algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/) and a [tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/) that also explains the different options. +In the *NLopt* docs, you can find details about the [optimization algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/), +and the [tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/) that demonstrates how to tweak their behavior. To choose an algorithm, just pass its name without the 'NLOPT\_' prefix (for example, 'NLOPT\_LD\_SLSQP' can be used by passing `algorithm = :LD_SLSQP`). 
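
For instance, a minimal sketch of selecting that algorithm (the option shown is an illustrative NLopt stopping option, not a required setting):

```julia
using StructuralEquationModels, NLopt

slsqp_optimizer = SemOptimizer(
    engine = :NLopt,
    algorithm = :LD_SLSQP,
    options = Dict(:maxeval => 200),  # illustrative: cap the number of objective evaluations
)
```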
-The README of the [julia package](https://github.com/JuliaOpt/NLopt.jl) may also be helpful, and provides a list of options: +The README of the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) may also be helpful, and provides a list of options: - `algorithm` - `stopval` From 2b42351c7e07e3e72a054f41d2e7828d3aff1e96 Mon Sep 17 00:00:00 2001 From: Alexey Stukalov Date: Tue, 27 Jan 2026 14:08:31 -0800 Subject: [PATCH 19/37] reg.md: fixup --- docs/src/tutorials/regularization/regularization.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tutorials/regularization/regularization.md b/docs/src/tutorials/regularization/regularization.md index 8254449f..17add030 100644 --- a/docs/src/tutorials/regularization/regularization.md +++ b/docs/src/tutorials/regularization/regularization.md @@ -142,7 +142,7 @@ Let's use the *lasso* (*l1*) again on the covariances, but additionally penalize The *l0* penalty is defined as ```math -\lambda_0 = \mathrm{nnz}(\theta) +l_0 = \lambda \mathrm{nnz}(\theta) ``` Since we apply *l1* and *l0* to the disjoint sets of parameters, this regularization could be represented as From 8f701463841d906bdc09463c5a06df2f888ab436 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Wed, 28 Jan 2026 13:22:56 +0100 Subject: [PATCH 20/37] streamline docstrings --- ext/SEMNLOptExt/NLopt.jl | 91 +++++++++------------ ext/SEMProximalOptExt/ProximalAlgorithms.jl | 24 +++--- src/optimizer/abstract.jl | 26 +++--- src/optimizer/optim.jl | 50 +++++------ 4 files changed, 81 insertions(+), 110 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 863d602f..af6fd7b6 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -17,9 +17,40 @@ end ### Constructor ############################################################################################ +function SemOptimizerNLopt(; + algorithm = :LD_LBFGS, + local_algorithm = nothing, + options = Dict{Symbol, Any}(), + local_options = Dict{Symbol, Any}(), + equality_constraints = nothing, + inequality_constraints = nothing, + constraint_tol::Number = 0.0, + kwargs..., # FIXME remove the sink for unused kwargs +) + constraint(f::Any) = f => constraint_tol + constraint(f_and_tol::Pair) = f_and_tol + + constraints(::Nothing) = Vector{NLoptConstraint}() + constraints(constraints) = + applicable(iterate, constraints) && !isa(constraints, Pair) ? + [constraint(constr) for constr in constraints] : [constraint(constraints)] + + return SemOptimizerNLopt( + algorithm, + local_algorithm, + options, + local_options, + constraints(equality_constraints), + constraints(inequality_constraints), + ) +end + """ -Uses *NLopt.jl* as the optimization engine. -Only available if *NLopt.jl* is loaded in the current Julia session! +# Extended help +*`engine = :NLopt`* + +Uses *NLopt.jl* as the optimization engine. For more information on the available algorithms +and options, see the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the [NLopt docs](https://nlopt.readthedocs.io/en/latest/). # Constructor @@ -51,8 +82,10 @@ Each constraint could be a function or any other callable object that takes the two input arguments: - the vector of the model parameters; - the array for the in-place calculation of the constraint gradient. -To override the default tolerance, the constraint could be specified +To override the default tolerance, the constraint can be specified as a pair of the function and its tolerance: `constraint_func => tol`. 
+For information on how to use inequality and equality constraints, +see [Constrained optimization](@ref) in our online documentation. # Example ```julia @@ -68,63 +101,13 @@ my_constrained_optimizer = SemOptimizer(; ) ``` -# Usage -All algorithms and options from the *NLopt* library are available, for more information see -the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the -[NLopt docs](https://nlopt.readthedocs.io/en/latest/). -For information on how to use inequality and equality constraints, -see [Constrained optimization](@ref) in our online documentation. - -# Extended help - -## Interfaces +# Interfaces - `algorithm(::SemOptimizerNLopt)` - `local_algorithm(::SemOptimizerNLopt)` - `options(::SemOptimizerNLopt)` - `local_options(::SemOptimizerNLopt)` - `equality_constraints(::SemOptimizerNLopt)` - `inequality_constraints(::SemOptimizerNLopt)` - -## Implementation - -Subtype of `SemOptimizer`. -""" -function SemOptimizerNLopt(; - algorithm = :LD_LBFGS, - local_algorithm = nothing, - options = Dict{Symbol, Any}(), - local_options = Dict{Symbol, Any}(), - equality_constraints = nothing, - inequality_constraints = nothing, - constraint_tol::Number = 0.0, - kwargs..., # FIXME remove the sink for unused kwargs -) - constraint(f::Any) = f => constraint_tol - constraint(f_and_tol::Pair) = f_and_tol - - constraints(::Nothing) = Vector{NLoptConstraint}() - constraints(constraints) = - applicable(iterate, constraints) && !isa(constraints, Pair) ? - [constraint(constr) for constr in constraints] : [constraint(constraints)] - - return SemOptimizerNLopt( - algorithm, - local_algorithm, - options, - local_options, - constraints(equality_constraints), - constraints(inequality_constraints), - ) -end - -""" - SemOptimizer(args...; engine = :NLopt, kwargs...) - -Creates SEM optimizer using [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl). - -# Extended help - -See [`SemOptimizerNLopt`](@ref) for a full reference. """ SEM.SemOptimizer(::Val{:NLopt}, args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index aec61e57..0c8bc305 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -1,10 +1,19 @@ ############################################################################################ ### Types ############################################################################################ +mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} + algorithm::A + operator_g::B + operator_h::C +end + """ -Connects to `ProximalAlgorithms.jl` as the optimization backend. +# Extended help +*`engine = :Proximal`* -Can be used for regularized SEM, for a tutorial see the online docs on [Regularization](@ref). +Connects to `ProximalAlgorithms.jl` as the optimization backend. For more information on +the available algorithms and options, see the online docs on [Regularization](@ref) and +the documentation of [*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) / [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl). # Constructor @@ -18,18 +27,7 @@ Can be used for regularized SEM, for a tutorial see the online docs on [Regulari - `algorithm`: optimization algorithm. 
- `operator_g`: proximal operator (e.g., regularization penalty) - `operator_h`: optional second proximal operator - -# Usage -All algorithms and operators from `ProximalAlgorithms.jl` are available, -for more information see the online docs on [Regularization](@ref) and -the documentation of `ProximalAlgorithms.jl` / `ProximalOperators.jl`. """ -mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} - algorithm::A - operator_g::B - operator_h::C -end - SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) SemOptimizerProximal(; diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index 1ed7346c..b9341072 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -4,20 +4,18 @@ engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) -Constructs [`SemOptimizer`](@ref) for the specified optimization `engine`. - -A wrapper function that passes `args...` and `kwargs...` to the -engine-specific optimizer constructor. - -Uses `:Optim` as the default engine. -Throws an error if the specified `engine` is not supported. -Call [`optimizer_engines`](@ref) for the list of supported engines. - -For the information about using the engine `:EngineName`, use -```julia -?SemOptimizer(Val(:EngineName)) -``` - +Constructs a `SemOptimizer` object that can be passed to `fit`(@ref) for specifying aspects +of the numerical optimization involved in fitting a SEM. + +The keyword `engine` controlls which Julia package is used, with `:Optim` being the default. +The additional arguments `args...` and `kwargs...` are engine-specific and control further +aspects of the optimization process, such as the algorithm, convergence criteria or constraints. + +More engines are available if specific packages are loaded, for example [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) +(also see [Constrained optimization](@ref) in the online documentation) or [*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) +(also see [Regularization](@ref) in the online documentation). +The documentation of available engines (with the packages loaded in the current Julia session) +is shown in the extended help. """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = SemOptimizer{engine}(args...; kwargs...) diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index f5a201e6..8e93eaa6 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -10,31 +10,39 @@ mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} options::B end + +SemOptimizerOptim(; + algorithm = LBFGS(), + options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), + kwargs..., +) = SemOptimizerOptim(algorithm, options) + """ +# Extended help +*`engine = :Optim`* + +Creates SEM optimizer using [*Optim.jl*](https://julianlsolvers.github.io/Optim.jl/stable/). +For more information on the available algorithms and options, see the *Optim.jl* docs. + +# Constructor + SemOptimizer(; engine = :Optim, algorithm = LBFGS(), options = Optim.Options(;f_reltol = 1e-10, x_abstol = 1.5e-8), kwargs...) -Creates SEM optimizer using [*Optim.jl*](https://julianlsolvers.github.io/Optim.jl/stable/). - # Arguments - `algorithm`: optimization algorithm from *Optim.jl* - `options::Optim.Options`: options for the optimization algorithm -# Usage -All algorithms and options from the *Optim.jl* package are available, for more information see -the *Optim.jl* online documentation. 
- # Examples ```julia -my_optimizer = SemOptimizerOptim() - # hessian based optimization with backtracking linesearch and modified initial step size using Optim, LineSearches -my_newton_optimizer = SemOptimizerOptim( +my_newton_optimizer = SemOptimizer( + engine = :Optim, algorithm = Newton( ;linesearch = BackTracking(order=3), alphaguess = InitialHagerZhang() @@ -42,10 +50,7 @@ my_newton_optimizer = SemOptimizerOptim( ) ``` -# Extended help - -## Constrained optimization - +# Constrained optimization When using the `Fminbox` or `SAMIN` constrained optimization algorithms, the vector or dictionary of lower and upper bounds for each model parameter can be specified via `lower_bounds` and `upper_bounds` keyword arguments. @@ -54,22 +59,9 @@ the default bound for all non-variance model parameters, and the `variance_lower_bound` and `variance_upper_bound` keyword -- for the variance parameters (the diagonal of the *S* matrix). -## Interfaces -- `algorithm(::SemOptimizerOptim)` -- `options(::SemOptimizerOptim)` -""" -SemOptimizerOptim(; - algorithm = LBFGS(), - options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), - kwargs..., -) = SemOptimizerOptim(algorithm, options) - -""" - SemOptimizer(args...; engine = :Optim, kwargs...) - -Creates SEM optimizer using [*Optim.jl*](https://julianlsolvers.github.io/Optim.jl/stable/). - -See [`SemOptimizerOptim`](@ref) for the full reference. +# Interfaces +- `algorithm(::SemOptimizer)` +- `options(::SemOptimizer)` """ SemOptimizer(::Val{:Optim}, args...; kwargs...) = SemOptimizerOptim(args...; kwargs...) From c2c4ab2f7f70a7318a041b8c36d30ac4a6920e72 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Wed, 28 Jan 2026 15:24:42 +0100 Subject: [PATCH 21/37] refactor docstring access with --- ext/SEMNLOptExt/NLopt.jl | 61 ++++++++++----------- ext/SEMNLOptExt/SEMNLOptExt.jl | 2 + ext/SEMProximalOptExt/ProximalAlgorithms.jl | 9 ++- ext/SEMProximalOptExt/SEMProximalOptExt.jl | 5 +- src/StructuralEquationModels.jl | 6 +- src/optimizer/Empty.jl | 10 ++-- src/optimizer/abstract.jl | 30 ++++++++-- src/optimizer/optim.jl | 22 ++++---- src/types.jl | 5 -- 9 files changed, 82 insertions(+), 68 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index af6fd7b6..da4d737b 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -17,38 +17,7 @@ end ### Constructor ############################################################################################ -function SemOptimizerNLopt(; - algorithm = :LD_LBFGS, - local_algorithm = nothing, - options = Dict{Symbol, Any}(), - local_options = Dict{Symbol, Any}(), - equality_constraints = nothing, - inequality_constraints = nothing, - constraint_tol::Number = 0.0, - kwargs..., # FIXME remove the sink for unused kwargs -) - constraint(f::Any) = f => constraint_tol - constraint(f_and_tol::Pair) = f_and_tol - - constraints(::Nothing) = Vector{NLoptConstraint}() - constraints(constraints) = - applicable(iterate, constraints) && !isa(constraints, Pair) ? - [constraint(constr) for constr in constraints] : [constraint(constraints)] - - return SemOptimizerNLopt( - algorithm, - local_algorithm, - options, - local_options, - constraints(equality_constraints), - constraints(inequality_constraints), - ) -end - """ -# Extended help -*`engine = :NLopt`* - Uses *NLopt.jl* as the optimization engine. For more information on the available algorithms and options, see the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the [NLopt docs](https://nlopt.readthedocs.io/en/latest/). 
@@ -109,8 +78,38 @@ my_constrained_optimizer = SemOptimizer(; - `equality_constraints(::SemOptimizerNLopt)` - `inequality_constraints(::SemOptimizerNLopt)` """ +function SemOptimizerNLopt(; + algorithm = :LD_LBFGS, + local_algorithm = nothing, + options = Dict{Symbol, Any}(), + local_options = Dict{Symbol, Any}(), + equality_constraints = nothing, + inequality_constraints = nothing, + constraint_tol::Number = 0.0, + kwargs..., # FIXME remove the sink for unused kwargs +) + constraint(f::Any) = f => constraint_tol + constraint(f_and_tol::Pair) = f_and_tol + + constraints(::Nothing) = Vector{NLoptConstraint}() + constraints(constraints) = + applicable(iterate, constraints) && !isa(constraints, Pair) ? + [constraint(constr) for constr in constraints] : [constraint(constraints)] + + return SemOptimizerNLopt( + algorithm, + local_algorithm, + options, + local_options, + constraints(equality_constraints), + constraints(inequality_constraints), + ) +end + SEM.SemOptimizer(::Val{:NLopt}, args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) +SEM.engine_info(engine::Val{:NLopt}) = doc(SemOptimizerNLopt) + ############################################################################################ ### Recommended methods ############################################################################################ diff --git a/ext/SEMNLOptExt/SEMNLOptExt.jl b/ext/SEMNLOptExt/SEMNLOptExt.jl index 61c41338..b0091dc7 100644 --- a/ext/SEMNLOptExt/SEMNLOptExt.jl +++ b/ext/SEMNLOptExt/SEMNLOptExt.jl @@ -2,6 +2,8 @@ module SEMNLOptExt using StructuralEquationModels, NLopt +import Base.Docs: doc + SEM = StructuralEquationModels export SemOptimizerNLopt diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index 0c8bc305..f9e5bd2d 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -7,10 +7,11 @@ mutable struct SemOptimizerProximal{A, B, C} <: SemOptimizer{:Proximal} operator_h::C end -""" -# Extended help -*`engine = :Proximal`* +SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) + +SEM.engine_info(engine::Val{:Proximal}) = doc(SemOptimizerProximal) +""" Connects to `ProximalAlgorithms.jl` as the optimization backend. For more information on the available algorithms and options, see the online docs on [Regularization](@ref) and the documentation of [*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) / [ProximalOperators.jl](https://github.com/JuliaFirstOrder/ProximalOperators.jl). @@ -28,8 +29,6 @@ the documentation of [*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrde - `operator_g`: proximal operator (e.g., regularization penalty) - `operator_h`: optional second proximal operator """ -SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) 
- SemOptimizerProximal(; algorithm = ProximalAlgorithms.PANOC(), operator_g, diff --git a/ext/SEMProximalOptExt/SEMProximalOptExt.jl b/ext/SEMProximalOptExt/SEMProximalOptExt.jl index bedf1920..2b35fb0c 100644 --- a/ext/SEMProximalOptExt/SEMProximalOptExt.jl +++ b/ext/SEMProximalOptExt/SEMProximalOptExt.jl @@ -1,8 +1,9 @@ module SEMProximalOptExt -using StructuralEquationModels +using StructuralEquationModels, ProximalAlgorithms using StructuralEquationModels: print_type_name, print_field_types -using ProximalAlgorithms + +import Base.Docs: doc export SemOptimizerProximal diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index f537dafb..286f1bdc 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -18,6 +18,8 @@ using LinearAlgebra, import StatsAPI: params, coef, coefnames, dof, fit, nobs, coeftable +import Base.Docs: doc + using InteractiveUtils: subtypes export StenoGraphs, @StenoGraph, meld, SimpleNode @@ -122,8 +124,8 @@ export AbstractSem, SemWLS, loss, SemOptimizer, - SemOptimizerEmpty, - SemOptimizerOptim, + engine_info, + optimizer_engines, optimizer, n_iterations, convergence, diff --git a/src/optimizer/Empty.jl b/src/optimizer/Empty.jl index 9c745470..a542d613 100644 --- a/src/optimizer/Empty.jl +++ b/src/optimizer/Empty.jl @@ -3,19 +3,19 @@ ############################################################################################ # dummy SEM optimizer +""" +Test. +""" struct SemOptimizerEmpty <: SemOptimizer{:Empty} end ############################################################################################ ### Constructor ############################################################################################ -""" - SemOptimizer(engine = :Empty) - -Constructs a dummy optimizer for models that don't need it. -""" SemOptimizer(::Val{:Empty}) = SemOptimizerEmpty() +engine_info(engine::Val{:Empty}) = doc(SemOptimizerEmpty) + ############################################################################################ ### Recommended methods ############################################################################################ diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index b9341072..b7a9de54 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -4,18 +4,25 @@ engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) -Constructs a `SemOptimizer` object that can be passed to `fit`(@ref) for specifying aspects +Constructs a `SemOptimizer` object that can be passed to [`fit`](@ref) for specifying aspects of the numerical optimization involved in fitting a SEM. The keyword `engine` controlls which Julia package is used, with `:Optim` being the default. +- `optimizer_engines()` prints a list of currently available engines. +- `engine_info(EngineName)` prints information on the usage of a specific engine. + +More engines become available if specific packages are loaded, for example +[*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) (also see [Constrained optimization](@ref) +in the online documentation) or +[*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) +(also see [Regularization](@ref) in the online documentation). + The additional arguments `args...` and `kwargs...` are engine-specific and control further aspects of the optimization process, such as the algorithm, convergence criteria or constraints. +Information on those can be accessed with `engine_info`. 
-More engines are available if specific packages are loaded, for example [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) -(also see [Constrained optimization](@ref) in the online documentation) or [*ProximalAlgorithms.jl*](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl) -(also see [Regularization](@ref) in the online documentation). -The documentation of available engines (with the packages loaded in the current Julia session) -is shown in the extended help. +To connect the SEM package to a completely new optimization backend, you can implement a new +subtype of SemOptimizer. """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = SemOptimizer{engine}(args...; kwargs...) @@ -46,6 +53,17 @@ into the current session. """ optimizer_engines() = Symbol[engine(opt_type) for opt_type in subtypes(SemOptimizer)] +""" + engine_info(engine::Symbol) + +Shows information on the optimizer engine. +For a list of available engines, call `optimizer_engines`. +""" +engine_info(engine) = engine_info(Val(engine)) + +engine_info(engine::Val) = + throw(ArgumentError("Unknown engine. Did you forget to load the necessary packages?")) + """ fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 8e93eaa6..e77f72cb 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -10,19 +10,9 @@ mutable struct SemOptimizerOptim{A, B} <: SemOptimizer{:Optim} options::B end - -SemOptimizerOptim(; - algorithm = LBFGS(), - options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), - kwargs..., -) = SemOptimizerOptim(algorithm, options) - """ -# Extended help -*`engine = :Optim`* - -Creates SEM optimizer using [*Optim.jl*](https://julianlsolvers.github.io/Optim.jl/stable/). -For more information on the available algorithms and options, see the *Optim.jl* docs. +Connects to *Optim.jl* as the optimization engine. +For more information on the available algorithms and options, see the [*Optim.jl* docs](https://julianlsolvers.github.io/Optim.jl/stable/). # Constructor @@ -63,8 +53,16 @@ for the variance parameters (the diagonal of the *S* matrix). - `algorithm(::SemOptimizer)` - `options(::SemOptimizer)` """ +SemOptimizerOptim(; + algorithm = LBFGS(), + options = Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), + kwargs..., +) = SemOptimizerOptim(algorithm, options) + SemOptimizer(::Val{:Optim}, args...; kwargs...) = SemOptimizerOptim(args...; kwargs...) +SEM.engine_info(engine::Val{:Optim}) = doc(SemOptimizerOptim) + ############################################################################################ ### Recommended methods ############################################################################################ diff --git a/src/types.jl b/src/types.jl index 73a650e6..92ca2c28 100644 --- a/src/types.jl +++ b/src/types.jl @@ -79,11 +79,6 @@ end Base.:*(x::SemWeight{Nothing}, y) = y Base.:*(x::SemWeight, y) = x.w * y -""" -Supertype of all objects that can serve as the `optimizer` field of a SEM. -Connects the SEM to its optimization backend and controls options like the optimization algorithm. -If you want to connect the SEM package to a new optimization backend, you should implement a subtype of SemOptimizer. 
-""" abstract type SemOptimizer{E} end """ From d30a609ab1bbabaa33d56e17b5cdfcb880a9419b Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Wed, 28 Jan 2026 20:01:17 +0100 Subject: [PATCH 22/37] remove direct calls of SemOptimizerOptim and add optimizer to SemFit --- docs/src/performance/simulation.md | 2 +- .../tutorials/construction/build_by_parts.md | 2 +- .../construction/outer_constructor.md | 2 +- docs/src/tutorials/fitting/fitting.md | 3 +-- ext/SEMNLOptExt/NLopt.jl | 8 ++++--- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 1 + src/frontend/fit/SemFit.jl | 8 ++++++- src/optimizer/abstract.jl | 2 +- src/optimizer/optim.jl | 22 ++++++------------- test/examples/multigroup/multigroup.jl | 2 +- .../recover_parameters_twofact.jl | 2 +- 11 files changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/src/performance/simulation.md b/docs/src/performance/simulation.md index d268853f..85a0c0a0 100644 --- a/docs/src/performance/simulation.md +++ b/docs/src/performance/simulation.md @@ -67,7 +67,7 @@ For example, new_observed = SemObservedData(;data = data_2, specification = partable) -my_optimizer = SemOptimizerOptim() +my_optimizer = SemOptimizer() new_optimizer = update_observed(my_optimizer, new_observed) ``` diff --git a/docs/src/tutorials/construction/build_by_parts.md b/docs/src/tutorials/construction/build_by_parts.md index 680e2880..52e12f30 100644 --- a/docs/src/tutorials/construction/build_by_parts.md +++ b/docs/src/tutorials/construction/build_by_parts.md @@ -59,7 +59,7 @@ ml = SemML(observed = observed) loss_ml = SemLoss(ml) # optimizer ---------------------------------------------------------------------------- -optimizer = SemOptimizerOptim() +optimizer = SemOptimizer() # model -------------------------------------------------------------------------------- diff --git a/docs/src/tutorials/construction/outer_constructor.md b/docs/src/tutorials/construction/outer_constructor.md index e2772430..e0c69ef3 100644 --- a/docs/src/tutorials/construction/outer_constructor.md +++ b/docs/src/tutorials/construction/outer_constructor.md @@ -41,7 +41,7 @@ model = Sem( data = data, implied = RAMSymbolic, loss = SemWLS, - optimizer = SemOptimizerOptim + optimizer = SemOptimizer ) ``` diff --git a/docs/src/tutorials/fitting/fitting.md b/docs/src/tutorials/fitting/fitting.md index d7353c9f..1af03ce8 100644 --- a/docs/src/tutorials/fitting/fitting.md +++ b/docs/src/tutorials/fitting/fitting.md @@ -17,7 +17,6 @@ Structural Equation Model - Fields observed: SemObservedData implied: RAM - optimizer: SemOptimizerOptim ------------- Optimization result ------------- @@ -60,7 +59,7 @@ The available keyword arguments are listed in the sections [Using Optim.jl](@ref Alternative, you can also explicitely define a `SemOptimizer` and pass it as the first argument to `fit`: ```julia -my_optimizer = SemOptimizerOptim(algorithm = BFGS()) +my_optimizer = SemOptimizer(algorithm = BFGS()) fit(my_optimizer, model) ``` diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index da4d737b..42d67272 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -19,7 +19,8 @@ end """ Uses *NLopt.jl* as the optimization engine. For more information on the available algorithms -and options, see the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the [NLopt docs](https://nlopt.readthedocs.io/en/latest/). +and options, see the [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) package and the +[NLopt docs](https://nlopt.readthedocs.io/en/latest/). 
# Constructor @@ -138,12 +139,13 @@ SEM.n_iterations(res::NLoptResult) = res.problem.numevals SEM.convergence(res::NLoptResult) = res.result[3] # construct SemFit from fitted NLopt object -function SemFit_NLopt(optimization_result, model::AbstractSem, start_val, opt) +function SemFit_NLopt(optimization_result, model::AbstractSem, start_val, optim, opt) return SemFit( optimization_result[1], optimization_result[2], start_val, model, + optim, NLoptResult(optimization_result, opt), ) end @@ -180,7 +182,7 @@ function SEM.fit( # fit result = NLopt.optimize(opt, start_params) - return SemFit_NLopt(result, model, start_params, opt) + return SemFit_NLopt(result, model, start_params, optim, opt) end ############################################################################################ diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index f9e5bd2d..0bca2529 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -104,6 +104,7 @@ function SEM.fit( solution, start_params, model, + optim, ProximalResult(optimization_result), ) end diff --git a/src/frontend/fit/SemFit.jl b/src/frontend/fit/SemFit.jl index 438da4da..08f364ca 100644 --- a/src/frontend/fit/SemFit.jl +++ b/src/frontend/fit/SemFit.jl @@ -17,11 +17,12 @@ Fitted structural equation model. - `n_iterations(::SemFit)` -> number of iterations - `convergence(::SemFit)` -> convergence properties """ -mutable struct SemFit{Mi, So, St, Mo, O} +mutable struct SemFit{Mi, So, St, Mo, Op, O} minimum::Mi solution::So start_val::St model::Mo + optimizer::Op optimization_result::O end @@ -39,6 +40,10 @@ function Base.show(io::IO, semfit::SemFit) #print(io, "Objective value: $(round(semfit.minimum, digits = 4)) \n") print(io, "------------- Optimization result ------------- \n") print(io, "\n") + print(io, "engine: ") + print(io, engine(semfit)) + print(io, "\n") + print(io, "\n") print(io, semfit.optimization_result) end @@ -58,6 +63,7 @@ model(sem_fit::SemFit) = sem_fit.model optimization_result(sem_fit::SemFit) = sem_fit.optimization_result # optimizer properties +engine(sem_fit::SemFit) = engine(sem_fit.optimizer) optimizer(sem_fit::SemFit) = optimizer(optimization_result(sem_fit)) n_iterations(sem_fit::SemFit) = n_iterations(optimization_result(sem_fit)) convergence(sem_fit::SemFit) = convergence(optimization_result(sem_fit)) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index b7a9de54..84cc0815 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,5 +1,5 @@ engine(::Type{<:SemOptimizer{E}}) where {E} = E -engine(optimizer::SemOptimizer) = engine(typeof(optimizer)) +engine(::SemOptimizer{E}) where {E} = E """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index e77f72cb..52f09362 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -76,20 +76,6 @@ update_observed(optimizer::SemOptimizerOptim, observed::SemObserved; kwargs...) 
algorithm(optimizer::SemOptimizerOptim) = optimizer.algorithm options(optimizer::SemOptimizerOptim) = optimizer.options -function SemFit( - optimization_result::Optim.MultivariateOptimizationResults, - model::AbstractSem, - start_val, -) - return SemFit( - optimization_result.minimum, - optimization_result.minimizer, - start_val, - model, - optimization_result, - ) -end - optimizer(res::Optim.MultivariateOptimizationResults) = Optim.summary(res) n_iterations(res::Optim.MultivariateOptimizationResults) = Optim.iterations(res) convergence(res::Optim.MultivariateOptimizationResults) = Optim.converged(res) @@ -139,5 +125,11 @@ function fit( optim.options, ) end - return SemFit(result, model, start_params) + return SemFit( + result.minimum, + result.minimizer, + start_params, + model, + optim, + result) end diff --git a/test/examples/multigroup/multigroup.jl b/test/examples/multigroup/multigroup.jl index 43de554c..15cea61c 100644 --- a/test/examples/multigroup/multigroup.jl +++ b/test/examples/multigroup/multigroup.jl @@ -86,7 +86,7 @@ start_test = [ fill(0.05, 3) fill(0.01, 3) ] -semoptimizer = SemOptimizerOptim() +semoptimizer = SemOptimizer() @testset "RAMMatrices | constructor | Optim" begin include("build_models.jl") diff --git a/test/examples/recover_parameters/recover_parameters_twofact.jl b/test/examples/recover_parameters/recover_parameters_twofact.jl index ce7dc61f..9f9503af 100644 --- a/test/examples/recover_parameters/recover_parameters_twofact.jl +++ b/test/examples/recover_parameters/recover_parameters_twofact.jl @@ -68,7 +68,7 @@ loss_ml = SemLoss(SemML(; observed = semobserved, nparams = length(start))) model_ml = Sem(semobserved, implied_ml, loss_ml) objective!(model_ml, true_val) -optimizer = SemOptimizerOptim( +optimizer = SemOptimizer( BFGS(; linesearch = BackTracking(order = 3), alphaguess = InitialHagerZhang()),# m = 100), Optim.Options(; f_reltol = 1e-10, x_abstol = 1.5e-8), ) From 24506c8ac69b09e2b789bb438fc3d8de06ebf0d7 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Thu, 29 Jan 2026 11:10:23 +0100 Subject: [PATCH 23/37] rename engine related functions --- ext/SEMNLOptExt/NLopt.jl | 2 +- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 2 +- src/StructuralEquationModels.jl | 2 +- src/frontend/fit/SemFit.jl | 4 ++-- src/optimizer/Empty.jl | 2 +- src/optimizer/abstract.jl | 16 ++++++++-------- src/optimizer/optim.jl | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 42d67272..281199b1 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -109,7 +109,7 @@ end SEM.SemOptimizer(::Val{:NLopt}, args...; kwargs...) = SemOptimizerNLopt(args...; kwargs...) -SEM.engine_info(engine::Val{:NLopt}) = doc(SemOptimizerNLopt) +SEM.optimizer_engine_doc(engine::Val{:NLopt}) = doc(SemOptimizerNLopt) ############################################################################################ ### Recommended methods diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index 0bca2529..9a7ebf93 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -9,7 +9,7 @@ end SEM.SemOptimizer{:Proximal}(args...; kwargs...) = SemOptimizerProximal(args...; kwargs...) -SEM.engine_info(engine::Val{:Proximal}) = doc(SemOptimizerProximal) +SEM.optimizer_engine_doc(engine::Val{:Proximal}) = doc(SemOptimizerProximal) """ Connects to `ProximalAlgorithms.jl` as the optimization backend. 
For more information on diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index 286f1bdc..d3372f27 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -124,7 +124,7 @@ export AbstractSem, SemWLS, loss, SemOptimizer, - engine_info, + optimizer_engine_doc, optimizer_engines, optimizer, n_iterations, diff --git a/src/frontend/fit/SemFit.jl b/src/frontend/fit/SemFit.jl index 08f364ca..69d18541 100644 --- a/src/frontend/fit/SemFit.jl +++ b/src/frontend/fit/SemFit.jl @@ -41,7 +41,7 @@ function Base.show(io::IO, semfit::SemFit) print(io, "------------- Optimization result ------------- \n") print(io, "\n") print(io, "engine: ") - print(io, engine(semfit)) + print(io, optimizer_engine(semfit)) print(io, "\n") print(io, "\n") print(io, semfit.optimization_result) @@ -63,7 +63,7 @@ model(sem_fit::SemFit) = sem_fit.model optimization_result(sem_fit::SemFit) = sem_fit.optimization_result # optimizer properties -engine(sem_fit::SemFit) = engine(sem_fit.optimizer) +optimizer_engine(sem_fit::SemFit) = optimizer_engine(sem_fit.optimizer) optimizer(sem_fit::SemFit) = optimizer(optimization_result(sem_fit)) n_iterations(sem_fit::SemFit) = n_iterations(optimization_result(sem_fit)) convergence(sem_fit::SemFit) = convergence(optimization_result(sem_fit)) diff --git a/src/optimizer/Empty.jl b/src/optimizer/Empty.jl index a542d613..e7d027df 100644 --- a/src/optimizer/Empty.jl +++ b/src/optimizer/Empty.jl @@ -14,7 +14,7 @@ struct SemOptimizerEmpty <: SemOptimizer{:Empty} end SemOptimizer(::Val{:Empty}) = SemOptimizerEmpty() -engine_info(engine::Val{:Empty}) = doc(SemOptimizerEmpty) +optimizer_engine_doc(engine::Val{:Empty}) = doc(SemOptimizerEmpty) ############################################################################################ ### Recommended methods diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index 84cc0815..ccfa1993 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,5 +1,5 @@ -engine(::Type{<:SemOptimizer{E}}) where {E} = E -engine(::SemOptimizer{E}) where {E} = E +optimizer_engine(::Type{<:SemOptimizer{E}}) where {E} = E +optimizer_engine(::SemOptimizer{E}) where {E} = E """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) @@ -9,7 +9,7 @@ of the numerical optimization involved in fitting a SEM. The keyword `engine` controlls which Julia package is used, with `:Optim` being the default. - `optimizer_engines()` prints a list of currently available engines. -- `engine_info(EngineName)` prints information on the usage of a specific engine. +- `optimizer_engine_doc(EngineName)` prints information on the usage of a specific engine. More engines become available if specific packages are loaded, for example [*NLopt.jl*](https://github.com/JuliaOpt/NLopt.jl) (also see [Constrained optimization](@ref) @@ -19,7 +19,7 @@ in the online documentation) or The additional arguments `args...` and `kwargs...` are engine-specific and control further aspects of the optimization process, such as the algorithm, convergence criteria or constraints. -Information on those can be accessed with `engine_info`. +Information on those can be accessed with `optimizer_engine_doc`. To connect the SEM package to a completely new optimization backend, you can implement a new subtype of SemOptimizer. @@ -51,17 +51,17 @@ the [`SemOptimizer`](@ref) constructor. The list of engines depends on the Julia packages loaded (with the `using` directive) into the current session. 
""" -optimizer_engines() = Symbol[engine(opt_type) for opt_type in subtypes(SemOptimizer)] +optimizer_engines() = Symbol[optimizer_engine(opt_type) for opt_type in subtypes(SemOptimizer)] """ - engine_info(engine::Symbol) + optimizer_engine_doc(engine::Symbol) Shows information on the optimizer engine. For a list of available engines, call `optimizer_engines`. """ -engine_info(engine) = engine_info(Val(engine)) +optimizer_engine_doc(engine) = optimizer_engine_doc(Val(engine)) -engine_info(engine::Val) = +optimizer_engine_doc(engine::Val) = throw(ArgumentError("Unknown engine. Did you forget to load the necessary packages?")) """ diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 52f09362..0cda0bad 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -61,7 +61,7 @@ SemOptimizerOptim(; SemOptimizer(::Val{:Optim}, args...; kwargs...) = SemOptimizerOptim(args...; kwargs...) -SEM.engine_info(engine::Val{:Optim}) = doc(SemOptimizerOptim) +SEM.optimizer_engine_doc(engine::Val{:Optim}) = doc(SemOptimizerOptim) ############################################################################################ ### Recommended methods From c7c6566e5a8144991bdd92446229b4d4362369d6 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Thu, 29 Jan 2026 11:36:56 +0100 Subject: [PATCH 24/37] streamline engine error throwing --- src/optimizer/abstract.jl | 40 ++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index ccfa1993..b05d8c20 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,5 +1,17 @@ -optimizer_engine(::Type{<:SemOptimizer{E}}) where {E} = E -optimizer_engine(::SemOptimizer{E}) where {E} = E +const optimizer_engine_packages = Dict( + :NLopt => "NLopt", + :Proximal => "ProximalAlgorithms" +) + +function throw_engine_error(E) + if typeof(E) !== Symbol + throw(ArgumentError("engine argument must be a Symbol.")) + elseif haskey(optimizer_engine_packages, E) + error("optimizer \":$E\" requires \"using $(optimizer_engine_packages[E])\".") + else + error("optimizer engine \":$E\" is not supported.") + end +end """ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) @@ -28,20 +40,19 @@ SemOptimizer(args...; engine::Symbol = :Optim, kwargs...) = SemOptimizer{engine}(args...; kwargs...) # fallback optimizer constructor when the engine E is not supported -function SemOptimizer(::Val{E}, args...; kwargs...) where {E} - if typeof(E) !== Symbol - throw(ArgumentError("engine argument must be a Symbol.")) - elseif E == :NLOpt - error("$E optimizer requires \"using NLopt\".") - elseif E == :Proximal - error("$E optimizer requires \"using ProximalAlgorithms\".") - else - error("$E optimizer engine is not supported.") - end -end +SemOptimizer(::Val{E}, args...; kwargs...) where {E} = throw_engine_error(E) SemOptimizer{E}(args...; kwargs...) where {E} = SemOptimizer(Val(E), args...; kwargs...) +""" + (1) optimizer_engine(::Type{<:SemOptimizer{E}}) + (2) optimizer_engine(::SemOptimizer{E}) + +Returns `E`; the engine of a `SemOptimizer` object or a subtype of `SemOptimizer`. +""" +optimizer_engine(::Type{<:SemOptimizer{E}}) where {E} = E +optimizer_engine(::SemOptimizer{E}) where {E} = E + """ optimizer_engines() @@ -61,8 +72,7 @@ For a list of available engines, call `optimizer_engines`. """ optimizer_engine_doc(engine) = optimizer_engine_doc(Val(engine)) -optimizer_engine_doc(engine::Val) = - throw(ArgumentError("Unknown engine. 
Did you forget to load the necessary packages?")) +optimizer_engine_doc(::Val{E}) where {E} = throw_engine_error(E) """ fit([optim::SemOptimizer], model::AbstractSem; From b724771c89b1487904058275c585ed26b4cb85f0 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Thu, 29 Jan 2026 18:51:16 +0100 Subject: [PATCH 25/37] streamline optimization result methods --- docs/src/developer/optimizer.md | 3 +-- ext/SEMNLOptExt/NLopt.jl | 3 +-- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 7 ++++++- src/frontend/fit/SemFit.jl | 4 ++-- src/frontend/fit/summary.jl | 3 ++- src/optimizer/optim.jl | 3 +-- 6 files changed, 13 insertions(+), 10 deletions(-) diff --git a/docs/src/developer/optimizer.md b/docs/src/developer/optimizer.md index 9e01ac87..1a094a88 100644 --- a/docs/src/developer/optimizer.md +++ b/docs/src/developer/optimizer.md @@ -30,7 +30,6 @@ update_observed(optimizer::SemOptimizerName, observed::SemObserved; kwargs...) = ### additional methods ############################################################################################ -algorithm(optimizer::SemOptimizerName) = optimizer.algorithm options(optimizer::SemOptimizerName) = optimizer.options ``` @@ -68,7 +67,7 @@ The method has to return a `SemFit` object that consists of the minimum of the o In addition, you might want to provide methods to access properties of your optimization result: ```julia -optimizer(res::MyOptimizationResult) = ... +algorithm_name(res::MyOptimizationResult) = ... n_iterations(res::MyOptimizationResult) = ... convergence(res::MyOptimizationResult) = ... ``` \ No newline at end of file diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index 281199b1..e8409014 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -122,7 +122,6 @@ SEM.update_observed(optimizer::SemOptimizerNLopt, observed::SemObserved; kwargs. ### additional methods ############################################################################################ -SEM.algorithm(optimizer::SemOptimizerNLopt) = optimizer.algorithm local_algorithm(optimizer::SemOptimizerNLopt) = optimizer.local_algorithm SEM.options(optimizer::SemOptimizerNLopt) = optimizer.options local_options(optimizer::SemOptimizerNLopt) = optimizer.local_options @@ -134,7 +133,7 @@ struct NLoptResult problem::Any end -SEM.optimizer(res::NLoptResult) = res.problem.algorithm +SEM.algorithm_name(res::NLoptResult) = res.problem.algorithm SEM.n_iterations(res::NLoptResult) = res.problem.numevals SEM.convergence(res::NLoptResult) = res.result[3] diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index 9a7ebf93..a30d649a 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -47,7 +47,12 @@ SEM.update_observed(optimizer::SemOptimizerProximal, observed::SemObserved; kwar ### additional methods ############################################################################################ -SEM.algorithm(optimizer::SemOptimizerProximal) = optimizer.algorithm +SEM.algorithm_name(res::ProximalResult) = SEM.algorithm_name(res.result[:algorithm]) +SEM.algorithm_name(::ProximalAlgorithms.IterativeAlgorithm{I,H,S,D,K}) where + {I, H, S, D, K} = nameof(I) + +SEM.convergence(::ProximalResult) = "No standard convergence criteria for proximal \n algorithms available." 
+SEM.n_iterations(res::ProximalResult) = res.result[:iterations] ############################################################################ ### Pretty Printing diff --git a/src/frontend/fit/SemFit.jl b/src/frontend/fit/SemFit.jl index 69d18541..9ed73653 100644 --- a/src/frontend/fit/SemFit.jl +++ b/src/frontend/fit/SemFit.jl @@ -13,7 +13,7 @@ Fitted structural equation model. - `model(::SemFit)` - `optimization_result(::SemFit)` -- `optimizer(::SemFit)` -> optimization algorithm +- `algorithm_name(::SemFit)` -> optimization algorithm - `n_iterations(::SemFit)` -> number of iterations - `convergence(::SemFit)` -> convergence properties """ @@ -63,7 +63,7 @@ model(sem_fit::SemFit) = sem_fit.model optimization_result(sem_fit::SemFit) = sem_fit.optimization_result # optimizer properties +algorithm_name(sem_fit::SemFit) = algorithm_name(sem_fit.optimization_result) optimizer_engine(sem_fit::SemFit) = optimizer_engine(sem_fit.optimizer) -optimizer(sem_fit::SemFit) = optimizer(optimization_result(sem_fit)) n_iterations(sem_fit::SemFit) = n_iterations(optimization_result(sem_fit)) convergence(sem_fit::SemFit) = convergence(optimization_result(sem_fit)) diff --git a/src/frontend/fit/summary.jl b/src/frontend/fit/summary.jl index 3071d565..435b1747 100644 --- a/src/frontend/fit/summary.jl +++ b/src/frontend/fit/summary.jl @@ -7,7 +7,8 @@ function details(sem_fit::SemFit; show_fitmeasures = false, color = :light_cyan, color = color, ) print("\n") - println("Optimization algorithm: $(optimizer(sem_fit))") + println("Optimization engine: $(optimizer_engine(sem_fit))") + println("Optimization algorithm: $(algorithm_name(sem_fit))") println("Convergence: $(convergence(sem_fit))") println("No. iterations/evaluations: $(n_iterations(sem_fit))") print("\n") diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index 0cda0bad..6a5f617e 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -73,10 +73,9 @@ update_observed(optimizer::SemOptimizerOptim, observed::SemObserved; kwargs...) 
### additional methods ############################################################################################ -algorithm(optimizer::SemOptimizerOptim) = optimizer.algorithm options(optimizer::SemOptimizerOptim) = optimizer.options -optimizer(res::Optim.MultivariateOptimizationResults) = Optim.summary(res) +algorithm_name(res::Optim.MultivariateOptimizationResults) = Optim.summary(res) n_iterations(res::Optim.MultivariateOptimizationResults) = Optim.iterations(res) convergence(res::Optim.MultivariateOptimizationResults) = Optim.converged(res) From 1c6c19327c569f0cdec377eef83f035c852ef174 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Thu, 29 Jan 2026 19:00:48 +0100 Subject: [PATCH 26/37] try fixing the optimizer online docs --- docs/src/tutorials/concept.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 2b453925..7a970322 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -100,9 +100,20 @@ SemConstant ## optimizer +```@eval OptDocs +using StructuralEquationModels, NLopt + +StructuralEquationModels.optimizer_engine_doc(:NLopt) +``` + +```@setup OptDocs +using StructuralEquationModels, NLopt + +SemOptimizerNLopt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt).SemOptimizerNLopt +``` + ```@docs SemOptimizer SemOptimizerOptim SemOptimizerNLopt -SemOptimizerProximal ``` \ No newline at end of file From 1808029ff9635963cf4b377885b4b26ff8d5a983 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 10:39:13 +0100 Subject: [PATCH 27/37] fix proximal extension --- ext/SEMProximalOptExt/ProximalAlgorithms.jl | 38 ++++++++++----------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index a30d649a..b9bc18bb 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -43,24 +43,12 @@ SemOptimizerProximal(; SEM.update_observed(optimizer::SemOptimizerProximal, observed::SemObserved; kwargs...) = optimizer -############################################################################################ -### additional methods -############################################################################################ - -SEM.algorithm_name(res::ProximalResult) = SEM.algorithm_name(res.result[:algorithm]) -SEM.algorithm_name(::ProximalAlgorithms.IterativeAlgorithm{I,H,S,D,K}) where - {I, H, S, D, K} = nameof(I) - -SEM.convergence(::ProximalResult) = "No standard convergence criteria for proximal \n algorithms available." 
-SEM.n_iterations(res::ProximalResult) = res.result[:iterations] - ############################################################################ -### Pretty Printing +### Model fitting ############################################################################ -function Base.show(io::IO, struct_inst::SemOptimizerProximal) - print_type_name(io, struct_inst) - print_field_types(io, struct_inst) +mutable struct ProximalResult + result::Any end ## connect to ProximalAlgorithms.jl @@ -70,10 +58,6 @@ function ProximalAlgorithms.value_and_gradient(model::AbstractSem, params) return obj, grad end -mutable struct ProximalResult - result::Any -end - function SEM.fit( optim::SemOptimizerProximal, model::AbstractSem, @@ -114,10 +98,26 @@ function SEM.fit( ) end +############################################################################################ +### additional methods +############################################################################################ + +SEM.algorithm_name(res::ProximalResult) = SEM.algorithm_name(res.result[:algorithm]) +SEM.algorithm_name(::ProximalAlgorithms.IterativeAlgorithm{I,H,S,D,K}) where + {I, H, S, D, K} = nameof(I) + +SEM.convergence(::ProximalResult) = "No standard convergence criteria for proximal \n algorithms available." +SEM.n_iterations(res::ProximalResult) = res.result[:iterations] + ############################################################################################ # pretty printing ############################################################################################ +function Base.show(io::IO, struct_inst::SemOptimizerProximal) + print_type_name(io, struct_inst) + print_field_types(io, struct_inst) +end + function Base.show(io::IO, result::ProximalResult) print(io, "Minimum: $(round(result.result[:minimum]; digits = 2)) \n") print(io, "No. 
evaluations: $(result.result[:iterations]) \n") From b8212120f48d668890fe0ab32503322056330c79 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 10:53:32 +0100 Subject: [PATCH 28/37] fix tests --- test/examples/multigroup/multigroup.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/examples/multigroup/multigroup.jl b/test/examples/multigroup/multigroup.jl index 15cea61c..2d43c3d2 100644 --- a/test/examples/multigroup/multigroup.jl +++ b/test/examples/multigroup/multigroup.jl @@ -169,7 +169,7 @@ start_test = [ 0.01 0.05 ] -semoptimizer = SemOptimizerOptim() +semoptimizer = SemOptimizer() @testset "Graph → Partable → RAMMatrices | constructor | Optim" begin include("build_models.jl") From 2297b15dff705dd8898b550ae3dc0f5ae61efe39 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 10:59:31 +0100 Subject: [PATCH 29/37] start fixing docs --- docs/src/tutorials/concept.md | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 7a970322..439d2831 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -21,13 +21,13 @@ So everything that can be used as the 'observed' part has to be of type `SemObse Here is an overview on the available building blocks: -|[`SemObserved`](@ref) | [`SemImplied`](@ref) | [`SemLossFunction`](@ref) | [`SemOptimizer`](@ref) | -|---------------------------------|-----------------------|---------------------------|-------------------------------| -| [`SemObservedData`](@ref) | [`RAM`](@ref) | [`SemML`](@ref) | [`SemOptimizerOptim`](@ref) | -| [`SemObservedCovariance`](@ref) | [`RAMSymbolic`](@ref) | [`SemWLS`](@ref) | [`SemOptimizerNLopt`](@ref) | -| [`SemObservedMissing`](@ref) | [`ImpliedEmpty`](@ref)| [`SemFIML`](@ref) | | -| | | [`SemRidge`](@ref) | | -| | | [`SemConstant`](@ref) | | +|[`SemObserved`](@ref) | [`SemImplied`](@ref) | [`SemLossFunction`](@ref) | [`SemOptimizer`](@ref) | +|---------------------------------|-----------------------|---------------------------|----------------------------| +| [`SemObservedData`](@ref) | [`RAM`](@ref) | [`SemML`](@ref) | :Optim | +| [`SemObservedCovariance`](@ref) | [`RAMSymbolic`](@ref) | [`SemWLS`](@ref) | :NLopt | +| [`SemObservedMissing`](@ref) | [`ImpliedEmpty`](@ref)| [`SemFIML`](@ref) | :Proximal | +| | | [`SemRidge`](@ref) | | +| | | [`SemConstant`](@ref) | | The rest of this page explains the building blocks for each part. First, we explain every part and give an overview on the different options that are available. After that, the [API - model parts](@ref) section serves as a reference for detailed explanations about the different options. (How to stick them together to a final model is explained in the section on [Model Construction](@ref).) @@ -52,7 +52,7 @@ Available loss functions are ## The optimizer part aka `SemOptimizer` The optimizer part of a model connects to the numerical optimization backend used to fit the model. It can be used to control options like the optimization algorithm, linesearch, stopping criteria, etc. -There are currently three available backends, [`SemOptimizerOptim`](@ref) connecting to the [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) backend, [`SemOptimizerNLopt`](@ref) connecting to the [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) backend and [`SemOptimizerProximal`](@ref) connecting to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). 
+There are currently three available engines (i.e., backends used to carry out the numerical optimization), `:Optim` connecting to the [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) backend, `:NLopt` connecting to the [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) backend and `:Proximal` connecting to [ProximalAlgorithms.jl](https://github.com/JuliaFirstOrder/ProximalAlgorithms.jl). For more information about the available options see also the tutorials about [Using Optim.jl](@ref) and [Using NLopt.jl](@ref), as well as [Constrained optimization](@ref) and [Regularization](@ref) . # What to do next @@ -114,6 +114,4 @@ SemOptimizerNLopt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt).S ```@docs SemOptimizer -SemOptimizerOptim -SemOptimizerNLopt ``` \ No newline at end of file From 6f4414a9350f16a0cf8fa1363bbc0d74c5a11e17 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 11:23:12 +0100 Subject: [PATCH 30/37] remove undefined refs for now --- docs/src/tutorials/backends/nlopt.md | 2 +- docs/src/tutorials/backends/optim.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/tutorials/backends/nlopt.md b/docs/src/tutorials/backends/nlopt.md index 8bafe7c4..b9dac45e 100644 --- a/docs/src/tutorials/backends/nlopt.md +++ b/docs/src/tutorials/backends/nlopt.md @@ -2,7 +2,7 @@ When [`NLopt.jl`](https://github.com/jump-dev/NLopt.jl) is loaded in the running Julia session, it could be used by the [`SemOptimizer`](@ref) by specifying `engine = :NLopt` -(see [NLopt-specific options](@ref `SemOptimizer(Val(:NLopt))`)). +(see ...). Among other things, `NLopt` enables constrained optimization of the SEM models, which is explained in the [Constrained optimization](@ref) section. diff --git a/docs/src/tutorials/backends/optim.md b/docs/src/tutorials/backends/optim.md index 545de805..00fcbe94 100644 --- a/docs/src/tutorials/backends/optim.md +++ b/docs/src/tutorials/backends/optim.md @@ -1,7 +1,7 @@ # Using Optim.jl [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) is the default optimization engine of *SEM.jl*, -see [`SemOptimizer(Val(:Optim))`](@ref) for a full list of its parameters. +see ... for a full list of its parameters. 
It defaults to the LBFGS optimization, but we can load the `Optim` and `LineSearches` packages and specify BFGS (!not L-BFGS) with a back-tracking linesearch and Hager-Zhang initial step length guess: From 054bd30289c0137cb244af8579fb1b6428336f89 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 11:50:05 +0100 Subject: [PATCH 31/37] try to fix docs --- docs/src/tutorials/concept.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 439d2831..44e36f11 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -100,7 +100,7 @@ SemConstant ## optimizer -```@eval OptDocs +```@example OptDocs using StructuralEquationModels, NLopt StructuralEquationModels.optimizer_engine_doc(:NLopt) From 9741aadd1ca8099be7c297b88e942089df11e22b Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 12:32:11 +0100 Subject: [PATCH 32/37] try to fix docs --- docs/make.jl | 6 ++++++ docs/src/tutorials/concept.md | 7 +------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index 1bb68c4d..da682ef6 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,6 +1,12 @@ using Documenter, StructuralEquationModels +using NLopt, ProximalAlgorithms makedocs( + modules=[ + StructuralEquationModels, + Base.get_extension(StructuralEquationModels, :SEMNLOptExt), + Base.get_extension(StructuralEquationModels, :SEMProximalOptExt) + ], sitename = "StructuralEquationModels.jl", pages = [ "index.md", diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 44e36f11..5db0b011 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -106,12 +106,7 @@ using StructuralEquationModels, NLopt StructuralEquationModels.optimizer_engine_doc(:NLopt) ``` -```@setup OptDocs -using StructuralEquationModels, NLopt - -SemOptimizerNLopt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt).SemOptimizerNLopt -``` - ```@docs SemOptimizer +SemOptimizerNLopt ``` \ No newline at end of file From d2fdf7a6d73d379c1be3ad95c606641e857f34cf Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Fri, 30 Jan 2026 15:53:59 +0100 Subject: [PATCH 33/37] try to fix docs --- docs/make.jl | 1 + docs/src/tutorials/concept.md | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/make.jl b/docs/make.jl index da682ef6..cf273c05 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -66,6 +66,7 @@ makedocs( collapselevel = 1, ), doctest = false, + checkdocs = :none, ) # doctest(StructuralEquationModels, fix=true) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 5db0b011..f0825db5 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -106,7 +106,16 @@ using StructuralEquationModels, NLopt StructuralEquationModels.optimizer_engine_doc(:NLopt) ``` +```@setup OptDocs +using StructuralEquationModels, NLopt + +SemOptimizerNLopt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt).SemOptimizerNLopt +``` + ```@docs SemOptimizer -SemOptimizerNLopt +``` + +```@autodocs +Modules = [SEMNLOptExt,] ``` \ No newline at end of file From 79232f1e5d7a39e26b501e78a56741de3f610de3 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Sat, 31 Jan 2026 12:53:47 +0100 Subject: [PATCH 34/37] try to fix docs --- docs/src/tutorials/concept.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 
f0825db5..12c49560 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -117,5 +117,6 @@ SemOptimizer ``` ```@autodocs -Modules = [SEMNLOptExt,] +Modules = [Base.get_extension(StructuralEquationModels, :SEMNLOptExt)] +Order = [:type, :function] ``` \ No newline at end of file From cfe7aab6b152c632d2db4cc55da735e182e71472 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Sat, 31 Jan 2026 13:02:11 +0100 Subject: [PATCH 35/37] try to fix docs --- docs/src/tutorials/concept.md | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 12c49560..86a64168 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -100,23 +100,13 @@ SemConstant ## optimizer -```@example OptDocs -using StructuralEquationModels, NLopt - -StructuralEquationModels.optimizer_engine_doc(:NLopt) -``` - -```@setup OptDocs -using StructuralEquationModels, NLopt - -SemOptimizerNLopt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt).SemOptimizerNLopt -``` - ```@docs SemOptimizer ``` ```@autodocs -Modules = [Base.get_extension(StructuralEquationModels, :SEMNLOptExt)] +Modules = [ + Base.get_extension(StructuralEquationModels, :SEMNLOptExt), + Base.get_extension(StructuralEquationModels, :SEMProximalOptExt)] Order = [:type, :function] ``` \ No newline at end of file From 9ecc9c5b64bdf7e67b62603641328a57e2217346 Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Sat, 31 Jan 2026 13:18:27 +0100 Subject: [PATCH 36/37] try to fix docs --- docs/src/tutorials/concept.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 86a64168..7355c627 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -104,6 +104,8 @@ SemConstant SemOptimizer ``` +A reference: [NLopt engine](@ref Base.get_extension(StructuralEquationModels, :SEMNLOptExt).SemOptimizerNLopt) + ```@autodocs Modules = [ Base.get_extension(StructuralEquationModels, :SEMNLOptExt), From ece695b4b66119e56501de10632b90e929d895ae Mon Sep 17 00:00:00 2001 From: Maximilian Ernst Date: Sat, 31 Jan 2026 13:23:19 +0100 Subject: [PATCH 37/37] try to fix docs --- docs/src/tutorials/concept.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/tutorials/concept.md b/docs/src/tutorials/concept.md index 7355c627..8c2c8850 100644 --- a/docs/src/tutorials/concept.md +++ b/docs/src/tutorials/concept.md @@ -104,7 +104,7 @@ SemConstant SemOptimizer ``` -A reference: [NLopt engine](@ref Base.get_extension(StructuralEquationModels, :SEMNLOptExt).SemOptimizerNLopt) +A reference: [NLopt engine](@ref SEMNLOptExt.SemOptimizerNLopt) ```@autodocs Modules = [
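
The documentation fixes in the last several commits all revolve around one point: `SemOptimizerNLopt` and `SemOptimizerProximal` are defined inside package extensions (`SEMNLOptExt`, `SEMProximalOptExt`), so both `docs/make.jl` and the `@autodocs` block in `concept.md` have to reach them through `Base.get_extension`, and user code selects them via the `engine` keyword mentioned in the backend tutorials. As a rough sketch of how these pieces fit together after the final patch — the `engine` keyword, the default `SemOptimizer()` call, and the `Base.get_extension` path are taken from the diffs above, while the exact constructor behavior and the final `isa` check are assumptions, not code from this PR:

```julia
using StructuralEquationModels   # provides SemOptimizer and the default :Optim engine
using NLopt                      # loading NLopt.jl activates the SEMNLOptExt extension

# Default engine (Optim.jl), as in the fixed multigroup test:
opt_default = SemOptimizer()

# Select the NLopt backend via the engine keyword described in backends/nlopt.md:
opt_nlopt = SemOptimizer(engine = :NLopt)

# The concrete type lives in the extension module, which is why docs/make.jl and the
# @autodocs block in concept.md both retrieve it with Base.get_extension:
SEMNLOptExt = Base.get_extension(StructuralEquationModels, :SEMNLOptExt)
opt_nlopt isa SEMNLOptExt.SemOptimizerNLopt   # expected to hold under the assumptions above
```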