diff --git a/Project.toml b/Project.toml index 94ab214e8..2b0075e39 100644 --- a/Project.toml +++ b/Project.toml @@ -18,6 +18,7 @@ PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +StatsAPI = "82ae8749-77ed-4fe6-ae5f-f523153014b0" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" StenoGraphs = "78862bba-adae-4a83-bb4d-33c106177f81" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" diff --git a/README.md b/README.md index 79c11da21..9754a8c20 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Models you can fit include - Multigroup SEM - Sums of arbitrary loss functions (everything the optimizer can handle). -# What are the merrits? +# What are the merits? We provide fast objective functions, gradients, and for some cases hessians as well as approximations thereof. As a user, you can easily define custom loss functions. diff --git a/docs/src/developer/loss.md b/docs/src/developer/loss.md index 57a7b485d..931c2d0e5 100644 --- a/docs/src/developer/loss.md +++ b/docs/src/developer/loss.md @@ -79,7 +79,7 @@ model = SemFiniteDiff( loss = (SemML, myridge) ) -model_fit = sem_fit(model) +model_fit = fit(model) ``` This is one way of specifying the model - we now have **one model** with **multiple loss functions**. Because we did not provide a gradient for `Ridge`, we have to specify a `SemFiniteDiff` model that computes numerical gradients with finite difference approximation. @@ -117,7 +117,7 @@ model_new = Sem( loss = (SemML, myridge) ) -model_fit = sem_fit(model_new) +model_fit = fit(model_new) ``` The results are the same, but we can verify that the computational costs are way lower (for this, the julia package `BenchmarkTools` has to be installed): @@ -125,9 +125,9 @@ The results are the same, but we can verify that the computational costs are way ```julia using BenchmarkTools -@benchmark sem_fit(model) +@benchmark fit(model) -@benchmark sem_fit(model_new) +@benchmark fit(model_new) ``` The exact results of those benchmarks are of course highly depended an your system (processor, RAM, etc.), but you should see that the median computation time with analytical gradients drops to about 5% of the computation without analytical gradients. @@ -241,7 +241,7 @@ model_ml = SemFiniteDiff( loss = MaximumLikelihood() ) -model_fit = sem_fit(model_ml) +model_fit = fit(model_ml) ``` If you want to differentiate your own loss functions via automatic differentiation, check out the [AutoDiffSEM](https://github.com/StructuralEquationModels/AutoDiffSEM) package. diff --git a/docs/src/developer/optimizer.md b/docs/src/developer/optimizer.md index 82ec594d8..a651ec636 100644 --- a/docs/src/developer/optimizer.md +++ b/docs/src/developer/optimizer.md @@ -34,7 +34,7 @@ algorithm(optimizer::SemOptimizerName) = optimizer.algorithm options(optimizer::SemOptimizerName) = optimizer.options ``` -Note that your optimizer is a subtype of `SemOptimizer{:Name}`, where you can choose a `:Name` that can later be used as a keyword argument to `sem_fit(engine = :Name)`. +Note that your optimizer is a subtype of `SemOptimizer{:Name}`, where you can choose a `:Name` that can later be used as a keyword argument to `fit(engine = :Name)`. Similarly, `SemOptimizer{:Name}(args...; kwargs...) 
= SemOptimizerName(args...; kwargs...)` should be defined as well as a constructor that uses only keyword arguments: ´´´julia @@ -46,10 +46,10 @@ SemOptimizerName(; ´´´ A method for `update_observed` and additional methods might be usefull, but are not necessary. -Now comes the substantive part: We need to provide a method for `sem_fit`: +Now comes the substantive part: We need to provide a method for `fit`: ```julia -function sem_fit( +function fit( optim::SemOptimizerName, model::AbstractSem, start_params::AbstractVector; diff --git a/docs/src/internals/files.md b/docs/src/internals/files.md index 0872c2b02..90ceceaaf 100644 --- a/docs/src/internals/files.md +++ b/docs/src/internals/files.md @@ -11,7 +11,7 @@ Source code is in the `"src"` folder: - `"types.jl"` defines all abstract types and the basic type hierarchy - `"objective_gradient_hessian.jl"` contains methods for computing objective, gradient and hessian values for different model types as well as generic fallback methods - The four folders `"observed"`, `"implied"`, `"loss"` and `"diff"` contain implementations of specific subtypes (for example, the `"loss"` folder contains a file `"ML.jl"` that implements the `SemML` loss function). -- `"optimizer"` contains connections to different optimization backends (aka methods for `sem_fit`) +- `"optimizer"` contains connections to different optimization backends (aka methods for `fit`) - `"optim.jl"`: connection to the `Optim.jl` package - `"frontend"` contains user-facing functions - `"specification"` contains functionality for model specification diff --git a/docs/src/performance/mixed_differentiation.md b/docs/src/performance/mixed_differentiation.md index 2ac937077..b7ae333b5 100644 --- a/docs/src/performance/mixed_differentiation.md +++ b/docs/src/performance/mixed_differentiation.md @@ -19,7 +19,7 @@ model_ridge = SemFiniteDiff( model_ml_ridge = SemEnsemble(model_ml, model_ridge) -model_ml_ridge_fit = sem_fit(model_ml_ridge) +model_ml_ridge_fit = fit(model_ml_ridge) ``` The results of both methods will be the same, but we can verify that the computation costs differ (the package `BenchmarkTools` has to be installed for this): @@ -27,7 +27,7 @@ The results of both methods will be the same, but we can verify that the computa ```julia using BenchmarkTools -@benchmark sem_fit(model) +@benchmark fit(model) -@benchmark sem_fit(model_ml_ridge) +@benchmark fit(model_ml_ridge) ``` \ No newline at end of file diff --git a/docs/src/performance/mkl.md b/docs/src/performance/mkl.md index 0d5467658..4361ab445 100644 --- a/docs/src/performance/mkl.md +++ b/docs/src/performance/mkl.md @@ -27,9 +27,9 @@ To check the performance implications for fitting a SEM, you can use the [`Bench ```julia using BenchmarkTools -@benchmark sem_fit($your_model) +@benchmark fit($your_model) using MKL -@benchmark sem_fit($your_model) +@benchmark fit($your_model) ``` \ No newline at end of file diff --git a/docs/src/performance/simulation.md b/docs/src/performance/simulation.md index 881da6222..0cb2ea25d 100644 --- a/docs/src/performance/simulation.md +++ b/docs/src/performance/simulation.md @@ -100,7 +100,7 @@ models = [model1, model2] fits = Vector{SemFit}(undef, 2) Threads.@threads for i in 1:2 - fits[i] = sem_fit(models[i]) + fits[i] = fit(models[i]) end ``` diff --git a/docs/src/performance/starting_values.md b/docs/src/performance/starting_values.md index ba7b4f41d..2df8d94d4 100644 --- a/docs/src/performance/starting_values.md +++ b/docs/src/performance/starting_values.md @@ -1,9 +1,9 @@ # Starting values 
-The `sem_fit` function has a keyword argument that takes either a vector of starting values or a function that takes a model as input to compute starting values. Current options are `start_fabin3` for fabin 3 starting values [^Hägglund82] or `start_simple` for simple starting values. Additional keyword arguments to `sem_fit` are passed to the starting value function. For example, +The `fit` function has a keyword argument that takes either a vector of starting values or a function that takes a model as input to compute starting values. Current options are `start_fabin3` for fabin 3 starting values [^Hägglund82] or `start_simple` for simple starting values. Additional keyword arguments to `fit` are passed to the starting value function. For example, ```julia - sem_fit( + fit( model; start_val = start_simple, start_covariances_latent = 0.5 diff --git a/docs/src/tutorials/collection/multigroup.md b/docs/src/tutorials/collection/multigroup.md index 23c13b950..1007f4563 100644 --- a/docs/src/tutorials/collection/multigroup.md +++ b/docs/src/tutorials/collection/multigroup.md @@ -81,7 +81,7 @@ model_ml_multigroup = SemEnsemble( We now fit the model and inspect the parameter estimates: ```@example mg; ansicolor = true -fit = sem_fit(model_ml_multigroup) +fit = fit(model_ml_multigroup) update_estimate!(partable, fit) details(partable) ``` diff --git a/docs/src/tutorials/constraints/constraints.md b/docs/src/tutorials/constraints/constraints.md index cdd9111a2..338803cb3 100644 --- a/docs/src/tutorials/constraints/constraints.md +++ b/docs/src/tutorials/constraints/constraints.md @@ -48,7 +48,7 @@ model = Sem( data = data ) -model_fit = sem_fit(model) +model_fit = fit(model) update_estimate!(partable, model_fit) @@ -153,7 +153,7 @@ model_constrained = Sem( data = data ) -model_fit_constrained = sem_fit(constrained_optimizer, model_constrained) +model_fit_constrained = fit(constrained_optimizer, model_constrained) ``` As you can see, the optimizer converged (`:XTOL_REACHED`) and investigating the solution yields @@ -162,7 +162,7 @@ As you can see, the optimizer converged (`:XTOL_REACHED`) and investigating the update_partable!( partable, :estimate_constr, - params(model_fit_constrained), + param_labels(model_fit_constrained), solution(model_fit_constrained), ) diff --git a/docs/src/tutorials/construction/build_by_parts.md b/docs/src/tutorials/construction/build_by_parts.md index 606a6576e..45d2a2ea1 100644 --- a/docs/src/tutorials/construction/build_by_parts.md +++ b/docs/src/tutorials/construction/build_by_parts.md @@ -65,5 +65,5 @@ optimizer = SemOptimizerOptim() model_ml = Sem(observed, implied_ram, loss_ml) -sem_fit(optimizer, model_ml) +fit(optimizer, model_ml) ``` \ No newline at end of file diff --git a/docs/src/tutorials/construction/outer_constructor.md b/docs/src/tutorials/construction/outer_constructor.md index 6a3cd2cef..a1c0b8ad3 100644 --- a/docs/src/tutorials/construction/outer_constructor.md +++ b/docs/src/tutorials/construction/outer_constructor.md @@ -131,4 +131,4 @@ model = SemFiniteDiff( ) ``` -constructs a model that will use finite difference approximation if you estimate the parameters via `sem_fit(model)`. \ No newline at end of file +constructs a model that will use finite difference approximation if you estimate the parameters via `fit(model)`. 
\ No newline at end of file diff --git a/docs/src/tutorials/first_model.md b/docs/src/tutorials/first_model.md index 5b7284649..e8048966c 100644 --- a/docs/src/tutorials/first_model.md +++ b/docs/src/tutorials/first_model.md @@ -110,7 +110,7 @@ model = Sem( We can now fit the model via ```@example high_level; ansicolor = true -model_fit = sem_fit(model) +model_fit = fit(model) ``` and compute fit measures as diff --git a/docs/src/tutorials/fitting/fitting.md b/docs/src/tutorials/fitting/fitting.md index a3e4b9b91..fff06abaa 100644 --- a/docs/src/tutorials/fitting/fitting.md +++ b/docs/src/tutorials/fitting/fitting.md @@ -3,7 +3,7 @@ As we saw in [A first model](@ref), after you have build a model, you can fit it via ```julia -model_fit = sem_fit(model) +model_fit = fit(model) # output @@ -45,24 +45,24 @@ Structural Equation Model ## Choosing an optimizer -To choose a different optimizer, you can call `sem_fit` with the keyword argument `engine = ...`, and pass additional keyword arguments: +To choose a different optimizer, you can call `fit` with the keyword argument `engine = ...`, and pass additional keyword arguments: ```julia using Optim -model_fit = sem_fit(model; engine = :Optim, algorithm = BFGS()) +model_fit = fit(model; engine = :Optim, algorithm = BFGS()) ``` Available options for engine are `:Optim`, `:NLopt` and `:Proximal`, where `:NLopt` and `:Proximal` are only available if the `NLopt.jl` and `ProximalAlgorithms.jl` packages are loaded respectively. The available keyword arguments are listed in the sections [Using Optim.jl](@ref), [Using NLopt.jl](@ref) and [Regularization](@ref). -Alternative, you can also explicitely define a `SemOptimizer` and pass it as the first argument to `sem_fit`: +Alternatively, you can also explicitly define a `SemOptimizer` and pass it as the first argument to `fit`: ```julia my_optimizer = SemOptimizerOptim(algorithm = BFGS()) -sem_fit(my_optimizer, model) +fit(my_optimizer, model) ``` You may also optionally specify [Starting values](@ref). @@ -70,5 +70,5 @@ You may also optionally specify [Starting values](@ref). # API - model fitting ```@docs -sem_fit +fit ``` \ No newline at end of file diff --git a/docs/src/tutorials/inspection/inspection.md b/docs/src/tutorials/inspection/inspection.md index 2b6d3191f..abd416c1c 100644 --- a/docs/src/tutorials/inspection/inspection.md +++ b/docs/src/tutorials/inspection/inspection.md @@ -42,13 +42,13 @@ model = Sem( data = data ) -model_fit = sem_fit(model) +model_fit = fit(model) ``` After you fitted a model, ```julia -model_fit = sem_fit(model) +model_fit = fit(model) ``` you end up with an object of type [`SemFit`](@ref). 
@@ -87,8 +87,8 @@ We can also update the `ParameterTable` object with other information via [`upda se_bs = se_bootstrap(model_fit; n_boot = 20) se_he = se_hessian(model_fit) -update_partable!(partable, :se_hessian, params(model_fit), se_he) -update_partable!(partable, :se_bootstrap, params(model_fit), se_bs) +update_partable!(partable, :se_hessian, param_labels(model_fit), se_he) +update_partable!(partable, :se_bootstrap, param_labels(model_fit), se_bs) details(partable) ``` @@ -126,11 +126,11 @@ fit_measures AIC BIC χ² -df +dof minus2ll nobserved_vars nsamples -params +param_labels nparams p_value RMSEA diff --git a/docs/src/tutorials/meanstructure.md b/docs/src/tutorials/meanstructure.md index 60578224a..b2da5029a 100644 --- a/docs/src/tutorials/meanstructure.md +++ b/docs/src/tutorials/meanstructure.md @@ -96,7 +96,7 @@ model = Sem( meanstructure = true ) -sem_fit(model) +fit(model) ``` If we build the model by parts, we have to pass the `meanstructure = true` argument to every part that requires it (when in doubt, simply consult the documentation for the respective part). @@ -112,5 +112,5 @@ ml = SemML(observed = observed, meanstructure = true) model = Sem(observed, implied_ram, SemLoss(ml)) -sem_fit(model) +fit(model) ``` \ No newline at end of file diff --git a/docs/src/tutorials/regularization/regularization.md b/docs/src/tutorials/regularization/regularization.md index 37e42975a..3d82fcfba 100644 --- a/docs/src/tutorials/regularization/regularization.md +++ b/docs/src/tutorials/regularization/regularization.md @@ -120,25 +120,25 @@ Let's fit the regularized model ```@example reg -fit_lasso = sem_fit(optimizer_lasso, model_lasso) +fit_lasso = fit(optimizer_lasso, model_lasso) ``` and compare the solution to unregularizted estimates: ```@example reg -fit = sem_fit(model) +fit = fit(model) update_estimate!(partable, fit) -update_partable!(partable, :estimate_lasso, params(fit_lasso), solution(fit_lasso)) +update_partable!(partable, :estimate_lasso, param_labels(fit_lasso), solution(fit_lasso)) details(partable) ``` -Instead of explicitely defining a `SemOptimizerProximal` object, you can also pass `engine = :Proximal` and additional keyword arguments to `sem_fit`: +Instead of explicitly defining a `SemOptimizerProximal` object, you can also pass `engine = :Proximal` and additional keyword arguments to `fit`: ```@example reg -fit = sem_fit(model; engine = :Proximal, operator_g = NormL1(λ)) +fit = fit(model; engine = :Proximal, operator_g = NormL1(λ)) ``` ## Second example - mixed l1 and l0 regularization @@ -162,13 +162,13 @@ model_mixed = Sem( data = data, ) -fit_mixed = sem_fit(model_mixed; engine = :Proximal, operator_g = prox_operator) +fit_mixed = fit(model_mixed; engine = :Proximal, operator_g = prox_operator) ``` Let's again compare the different results: ```@example reg -update_partable!(partable, :estimate_mixed, params(fit_mixed), solution(fit_mixed)) +update_partable!(partable, :estimate_mixed, param_labels(fit_mixed), solution(fit_mixed)) details(partable) ``` \ No newline at end of file diff --git a/docs/src/tutorials/specification/ram_matrices.md b/docs/src/tutorials/specification/ram_matrices.md index 6e01eb38b..abe76ea6f 100644 --- a/docs/src/tutorials/specification/ram_matrices.md +++ b/docs/src/tutorials/specification/ram_matrices.md @@ -59,7 +59,7 @@ spec = RAMMatrices(; A = A, S = S, F = F, - params = θ, + param_labels = θ, vars = [:x1, :x2, :x3, :y1, :y2, :y3, :y4, :y5, :y6, :y7, :y8, :ind60, :dem60, :dem65] ) @@ -90,7 +90,7 @@ spec = RAMMatrices(; A = A, S = S, F 
= F, - params = θ, + param_labels = θ, vars = [:x1, :x2, :x3, :y1, :y2, :y3, :y4, :y5, :y6, :y7, :y8, :ind60, :dem60, :dem65] ) ``` diff --git a/ext/SEMNLOptExt/NLopt.jl b/ext/SEMNLOptExt/NLopt.jl index a614c501b..c5e0ad6cb 100644 --- a/ext/SEMNLOptExt/NLopt.jl +++ b/ext/SEMNLOptExt/NLopt.jl @@ -71,8 +71,8 @@ function SemFit_NLopt(optimization_result, model::AbstractSem, start_val, opt) ) end -# sem_fit method -function SEM.sem_fit( +# fit method +function SEM.fit( optim::SemOptimizerNLopt, model::AbstractSem, start_params::AbstractVector; diff --git a/ext/SEMProximalOptExt/ProximalAlgorithms.jl b/ext/SEMProximalOptExt/ProximalAlgorithms.jl index 2f1775e85..0d4748e3a 100644 --- a/ext/SEMProximalOptExt/ProximalAlgorithms.jl +++ b/ext/SEMProximalOptExt/ProximalAlgorithms.jl @@ -40,7 +40,7 @@ mutable struct ProximalResult result::Any end -function SEM.sem_fit( +function SEM.fit( optim::SemOptimizerProximal, model::AbstractSem, start_params::AbstractVector; diff --git a/src/StructuralEquationModels.jl b/src/StructuralEquationModels.jl index 6e1a934f3..f6068dc50 100644 --- a/src/StructuralEquationModels.jl +++ b/src/StructuralEquationModels.jl @@ -4,6 +4,7 @@ using LinearAlgebra, Optim, NLSolversBase, Statistics, + StatsAPI, StatsBase, SparseArrays, Symbolics, @@ -15,6 +16,8 @@ using LinearAlgebra, DelimitedFiles, DataFrames +import StatsAPI: params, coef, coefnames, dof, fit, nobs, coeftable + export StenoGraphs, @StenoGraph, meld const SEM = StructuralEquationModels @@ -37,6 +40,7 @@ include("frontend/specification/RAMMatrices.jl") include("frontend/specification/EnsembleParameterTable.jl") include("frontend/specification/StenoGraphs.jl") include("frontend/fit/summary.jl") +include("frontend/StatsAPI.jl") # pretty printing include("frontend/pretty_printing.jl") # observed @@ -74,7 +78,7 @@ include("additional_functions/simulation.jl") include("frontend/fit/fitmeasures/AIC.jl") include("frontend/fit/fitmeasures/BIC.jl") include("frontend/fit/fitmeasures/chi2.jl") -include("frontend/fit/fitmeasures/df.jl") +include("frontend/fit/fitmeasures/dof.jl") include("frontend/fit/fitmeasures/minus2ll.jl") include("frontend/fit/fitmeasures/p.jl") include("frontend/fit/fitmeasures/RMSEA.jl") @@ -89,6 +93,9 @@ include("package_extensions/SEMProximalOptExt.jl") export AbstractSem, AbstractSemSingle, AbstractSemCollection, + coef, + coefnames, + coeftable, Sem, SemFiniteDiff, SemEnsemble, @@ -129,8 +136,9 @@ export AbstractSem, obs_cov, obs_mean, nsamples, + nobs, samples, - sem_fit, + fit, SemFit, minimum, solution, @@ -165,13 +173,15 @@ export AbstractSem, sort_vars!, sort_vars, params, + params!, nparams, param_indices, + param_labels, fit_measures, AIC, BIC, χ², - df, + dof, fit_measures, minus2ll, p_value, diff --git a/src/additional_functions/params_array.jl b/src/additional_functions/params_array.jl index 3a58171aa..1031e349e 100644 --- a/src/additional_functions/params_array.jl +++ b/src/additional_functions/params_array.jl @@ -102,17 +102,17 @@ param_occurences(arr::ParamsArray, i::Integer) = """ materialize!(dest::AbstractArray{<:Any, N}, src::ParamsArray{<:Any, N}, - param_values::AbstractVector; + params::AbstractVector; set_constants::Bool = true, set_zeros::Bool = false) Materialize the parameterized array `src` into `dest` by substituting the parameter -references with the parameter values from `param_values`. +references with the parameter values from `params`. 
""" function materialize!( dest::AbstractArray{<:Any, N}, src::ParamsArray{<:Any, N}, - param_values::AbstractVector; + params::AbstractVector; set_constants::Bool = true, set_zeros::Bool = false, ) where {N} @@ -121,9 +121,9 @@ function materialize!( "Parameters ($(size(params_arr))) and destination ($(size(dest))) array sizes don't match", ), ) - nparams(src) == length(param_values) || throw( + nparams(src) == length(params) || throw( DimensionMismatch( - "Number of values ($(length(param_values))) does not match the number of parameters ($(nparams(src)))", + "Number of values ($(length(params))) does not match the number of parameters ($(nparams(src)))", ), ) Z = eltype(dest) <: Number ? eltype(dest) : eltype(src) @@ -133,7 +133,7 @@ function materialize!( dest[i] = val end end - @inbounds for (i, val) in enumerate(param_values) + @inbounds for (i, val) in enumerate(params) for j in param_occurences_range(src, i) dest[src.linear_indices[j]] = val end @@ -144,7 +144,7 @@ end function materialize!( dest::SparseMatrixCSC, src::ParamsMatrix, - param_values::AbstractVector; + params::AbstractVector; set_constants::Bool = true, set_zeros::Bool = false, ) @@ -154,9 +154,9 @@ function materialize!( "Parameters ($(size(params_arr))) and destination ($(size(dest))) array sizes don't match", ), ) - nparams(src) == length(param_values) || throw( + nparams(src) == length(params) || throw( DimensionMismatch( - "Number of values ($(length(param_values))) does not match the number of parameters ($(nparams(src)))", + "Number of values ($(length(params))) does not match the number of parameters ($(nparams(src)))", ), ) @@ -170,7 +170,7 @@ function materialize!( dest.nzval[j] = val end end - @inbounds for (i, val) in enumerate(param_values) + @inbounds for (i, val) in enumerate(params) for j in param_occurences_range(src, i) dest.nzval[src.nz_indices[j]] = val end @@ -180,33 +180,33 @@ end """ materialize([T], src::ParamsArray{<:Any, N}, - param_values::AbstractVector{T}) where T + params::AbstractVector{T}) where T Materialize the parameterized array `src` into a new array of type `T` -by substituting the parameter references with the parameter values from `param_values`. +by substituting the parameter references with the parameter values from `params`. """ -materialize(::Type{T}, arr::ParamsArray, param_values::AbstractVector) where {T} = - materialize!(similar(arr, T), arr, param_values, set_constants = true, set_zeros = true) +materialize(::Type{T}, arr::ParamsArray, params::AbstractVector) where {T} = + materialize!(similar(arr, T), arr, params, set_constants = true, set_zeros = true) -materialize(arr::ParamsArray, param_values::AbstractVector{T}) where {T} = - materialize(Union{T, eltype(arr)}, arr, param_values) +materialize(arr::ParamsArray, params::AbstractVector{T}) where {T} = + materialize(Union{T, eltype(arr)}, arr, params) # the hack to update the structured matrix (should be fine since the structure is imposed by ParamsMatrix) materialize!( dest::Union{Symmetric, LowerTriangular, UpperTriangular}, src::ParamsMatrix{<:Any}, - param_values::AbstractVector; + params::AbstractVector; kwargs..., -) = materialize!(parent(dest), src, param_values; kwargs...) +) = materialize!(parent(dest), src, params; kwargs...) 
function sparse_materialize( ::Type{T}, arr::ParamsMatrix, - param_values::AbstractVector, + params::AbstractVector, ) where {T} - nparams(arr) == length(param_values) || throw( + nparams(arr) == length(params) || throw( DimensionMismatch( - "Number of values ($(length(param_values))) does not match the number of parameter ($(nparams(arr)))", + "Number of values ($(length(params))) does not match the number of parameters ($(nparams(arr)))", ), ) @@ -218,7 +218,7 @@ function sparse_materialize( nz_lininds[nz_ind] = lin_ind end # fill parameters - @inbounds for (i, val) in enumerate(param_values) + @inbounds for (i, val) in enumerate(params) for j in param_occurences_range(arr, i) nz_ind = arr.nz_indices[j] nz_vals[nz_ind] = val diff --git a/src/frontend/StatsAPI.jl b/src/frontend/StatsAPI.jl new file mode 100644 index 000000000..edd677e34 --- /dev/null +++ b/src/frontend/StatsAPI.jl @@ -0,0 +1,78 @@ +""" + params!(out::AbstractVector, partable::ParameterTable, + col::Symbol = :estimate) + +Extract parameter values from the `col` column of `partable` +into the `out` vector. + +The `out` vector should be of `nparams(partable)` length. +The *i*-th element of the `out` vector will contain the +value of the *i*-th parameter from `param_labels(partable)`. + +Note that the function combines the duplicate occurrences of the +same parameter in `partable` and will raise an error if the +values do not match. +""" +function params!( + out::AbstractVector, + partable::ParameterTable, + col::Symbol = :estimate, +) + (length(out) == nparams(partable)) || throw( + DimensionMismatch( + "The length of parameter values vector ($(length(out))) does not match the number of parameters ($(nparams(partable)))", + ), + ) + param_index = param_indices(partable) + params_col = partable.columns[col] + for (i, label) in enumerate(partable.columns[:label]) + (label == :const) && continue + param_ind = get(param_index, label, nothing) + @assert !isnothing(param_ind) "Parameter table contains unregistered parameter :$label at row #$i" + param = params_col[i] + if !isnan(out[param_ind]) + @assert isequal(out[param_ind], param) "Parameter :$label value at row #$i ($param) differs from the earlier encountered value ($(out[param_ind]))" + else + out[param_ind] = param + end + end + return out +end + +""" + params(x::ParameterTable, col::Symbol = :estimate) + +Extract parameter values from the `col` column of `partable`. + +Returns the values vector. The *i*-th element corresponds to +the value of the *i*-th parameter from `param_labels(partable)`. + +Note that the function combines the duplicate occurrences of the +same parameter in `partable` and will raise an error if the +values do not match. +""" +params(partable::ParameterTable, col::Symbol = :estimate) = + params!(fill(NaN, nparams(partable)), partable, col) + +""" + coef(x::ParameterTable) + +For a `ParameterTable`, this function is synonymous with [`params`](@ref). +""" +coef(x::ParameterTable) = params(x) + +""" + coefnames(x::ParameterTable) + +Synonymous with [`param_labels`](@ref param_labels). +""" +coefnames(x::ParameterTable) = param_labels(x) + +""" + nobs(model::AbstractSem) -> Int + +Synonymous with [`nsamples`](@ref). 
+""" +nobs(model::AbstractSem) = nsamples(model) + +coeftable(model::AbstractSem; level::Real=0.95) = throw(ArgumentError("StructuralEquationModels does not support the `CoefTable` interface; see [`ParameterTable`](@ref) instead.")) \ No newline at end of file diff --git a/src/frontend/common.jl b/src/frontend/common.jl index 41d03effb..e89a6cf8b 100644 --- a/src/frontend/common.jl +++ b/src/frontend/common.jl @@ -14,7 +14,7 @@ Return the number of parameters in a SEM model associated with `semobj`. See also [`params`](@ref). """ -nparams(semobj) = length(params(semobj)) +nparams(semobj) = length(param_labels(semobj)) """ nvars(semobj) @@ -52,7 +52,7 @@ parind[:param_name] See also [`params`](@ref). """ -param_indices(semobj) = Dict(par => i for (i, par) in enumerate(params(semobj))) +param_indices(semobj) = Dict(par => i for (i, par) in enumerate(param_labels(semobj))) """ nsamples(semobj) diff --git a/src/frontend/fit/SemFit.jl b/src/frontend/fit/SemFit.jl index 84d2f502c..438da4da6 100644 --- a/src/frontend/fit/SemFit.jl +++ b/src/frontend/fit/SemFit.jl @@ -46,7 +46,7 @@ end # additional methods ############################################################################################ -params(fit::SemFit) = params(fit.model) +param_labels(fit::SemFit) = param_labels(fit.model) nparams(fit::SemFit) = nparams(fit.model) nsamples(fit::SemFit) = nsamples(fit.model) diff --git a/src/frontend/fit/fitmeasures/RMSEA.jl b/src/frontend/fit/fitmeasures/RMSEA.jl index b91e81d3e..b9fff648e 100644 --- a/src/frontend/fit/fitmeasures/RMSEA.jl +++ b/src/frontend/fit/fitmeasures/RMSEA.jl @@ -6,13 +6,13 @@ Return the RMSEA. function RMSEA end RMSEA(sem_fit::SemFit{Mi, So, St, Mo, O} where {Mi, So, St, Mo <: AbstractSemSingle, O}) = - RMSEA(df(sem_fit), χ²(sem_fit), nsamples(sem_fit)) + RMSEA(dof(sem_fit), χ²(sem_fit), nsamples(sem_fit)) RMSEA(sem_fit::SemFit{Mi, So, St, Mo, O} where {Mi, So, St, Mo <: SemEnsemble, O}) = - sqrt(length(sem_fit.model.sems)) * RMSEA(df(sem_fit), χ²(sem_fit), nsamples(sem_fit)) + sqrt(length(sem_fit.model.sems)) * RMSEA(dof(sem_fit), χ²(sem_fit), nsamples(sem_fit)) -function RMSEA(df, chi2, nsamples) - rmsea = (chi2 - df) / (nsamples * df) +function RMSEA(dof, chi2, nsamples) + rmsea = (chi2 - dof) / (nsamples * dof) rmsea > 0 ? nothing : rmsea = 0 return sqrt(rmsea) end diff --git a/src/frontend/fit/fitmeasures/df.jl b/src/frontend/fit/fitmeasures/dof.jl similarity index 62% rename from src/frontend/fit/fitmeasures/df.jl rename to src/frontend/fit/fitmeasures/dof.jl index 4d9025601..3df49d89d 100644 --- a/src/frontend/fit/fitmeasures/df.jl +++ b/src/frontend/fit/fitmeasures/dof.jl @@ -1,14 +1,14 @@ """ - df(sem_fit::SemFit) - df(model::AbstractSem) + dof(sem_fit::SemFit) + dof(model::AbstractSem) Return the degrees of freedom. 
""" -function df end +function dof end -df(sem_fit::SemFit) = df(sem_fit.model) +dof(sem_fit::SemFit) = dof(sem_fit.model) -df(model::AbstractSem) = n_dp(model) - nparams(model) +dof(model::AbstractSem) = n_dp(model) - nparams(model) function n_dp(model::AbstractSemSingle) nvars = nobserved_vars(model) diff --git a/src/frontend/fit/fitmeasures/fit_measures.jl b/src/frontend/fit/fitmeasures/fit_measures.jl index 40e3caae0..2fc4dfba0 100644 --- a/src/frontend/fit/fitmeasures/fit_measures.jl +++ b/src/frontend/fit/fitmeasures/fit_measures.jl @@ -1,5 +1,5 @@ fit_measures(sem_fit) = - fit_measures(sem_fit, nparams, df, AIC, BIC, RMSEA, χ², p_value, minus2ll) + fit_measures(sem_fit, nparams, dof, AIC, BIC, RMSEA, χ², p_value, minus2ll) function fit_measures(sem_fit, args...) measures = Dict{Symbol, Union{Float64, Missing}}() diff --git a/src/frontend/fit/fitmeasures/p.jl b/src/frontend/fit/fitmeasures/p.jl index 3d4275f95..8c69d5ec2 100644 --- a/src/frontend/fit/fitmeasures/p.jl +++ b/src/frontend/fit/fitmeasures/p.jl @@ -3,4 +3,4 @@ Return the p value computed from the χ² test statistic. """ -p_value(sem_fit::SemFit) = 1 - cdf(Chisq(df(sem_fit)), χ²(sem_fit)) +p_value(sem_fit::SemFit) = 1 - cdf(Chisq(dof(sem_fit)), χ²(sem_fit)) diff --git a/src/frontend/fit/standard_errors/bootstrap.jl b/src/frontend/fit/standard_errors/bootstrap.jl index e8d840d0c..4589dc020 100644 --- a/src/frontend/fit/standard_errors/bootstrap.jl +++ b/src/frontend/fit/standard_errors/bootstrap.jl @@ -1,5 +1,5 @@ """ - se_bootstrap(semfit::SemFit; n_boot = 3000, data = nothing, kwargs...) + se_bootstrap(sem_fit::SemFit; n_boot = 3000, data = nothing, kwargs...) Return boorstrap standard errors. Only works for single models. @@ -52,7 +52,7 @@ function se_bootstrap( new_solution .= 0.0 try - new_solution = solution(sem_fit(new_model; start_val = start)) + new_solution = solution(fit(new_model; start_val = start)) catch n_failed += 1 end diff --git a/src/frontend/specification/EnsembleParameterTable.jl b/src/frontend/specification/EnsembleParameterTable.jl index d5ac7e51b..14169dd94 100644 --- a/src/frontend/specification/EnsembleParameterTable.jl +++ b/src/frontend/specification/EnsembleParameterTable.jl @@ -4,7 +4,7 @@ struct EnsembleParameterTable <: AbstractParameterTable tables::Dict{Symbol, ParameterTable} - params::Vector{Symbol} + param_labels::Vector{Symbol} end ############################################################################################ @@ -12,35 +12,35 @@ end ############################################################################################ # constuct an empty table -EnsembleParameterTable(::Nothing; params::Union{Nothing, Vector{Symbol}} = nothing) = +EnsembleParameterTable(::Nothing; param_labels::Union{Nothing, Vector{Symbol}} = nothing) = EnsembleParameterTable( Dict{Symbol, ParameterTable}(), - isnothing(params) ? Symbol[] : copy(params), + isnothing(param_labels) ? 
Symbol[] : copy(param_labels), ) # convert pairs to dict -EnsembleParameterTable(ps::Pair{K, V}...; params = nothing) where {K, V} = - EnsembleParameterTable(Dict(ps...); params = params) +EnsembleParameterTable(ps::Pair{K, V}...; param_labels = nothing) where {K, V} = + EnsembleParameterTable(Dict(ps...); param_labels = param_labels) # dictionary of SEM specifications function EnsembleParameterTable( spec_ensemble::AbstractDict{K, V}; - params::Union{Nothing, Vector{Symbol}} = nothing, + param_labels::Union{Nothing, Vector{Symbol}} = nothing, ) where {K, V <: SemSpecification} - params = if isnothing(params) + param_labels = if isnothing(param_labels) # collect all SEM parameters in ensemble if not specified # and apply the set to all partables - unique(mapreduce(SEM.params, vcat, values(spec_ensemble), init = Vector{Symbol}())) + unique(mapreduce(SEM.param_labels, vcat, values(spec_ensemble), init = Vector{Symbol}())) else - copy(params) + copy(param_labels) end # convert each model specification to ParameterTable partables = Dict{Symbol, ParameterTable}( - Symbol(group) => convert(ParameterTable, spec; params) for + Symbol(group) => convert(ParameterTable, spec; param_labels) for (group, spec) in pairs(spec_ensemble) ) - return EnsembleParameterTable(partables, params) + return EnsembleParameterTable(partables, param_labels) end ############################################################################################ @@ -54,12 +54,12 @@ end function Base.convert( ::Type{Dict{K, RAMMatrices}}, partables::EnsembleParameterTable; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, ) where {K} - isnothing(params) || (params = SEM.params(partables)) + isnothing(param_labels) || (param_labels = SEM.param_labels(partables)) return Dict{K, RAMMatrices}( - K(key) => RAMMatrices(partable; params = params) for + K(key) => RAMMatrices(partable; param_labels = param_labels) for (key, partable) in pairs(partables.tables) ) end @@ -124,11 +124,11 @@ Base.getindex(partable::EnsembleParameterTable, group) = partable.tables[group] function update_partable!( partables::EnsembleParameterTable, column::Symbol, - param_values::AbstractDict{Symbol}, + params::AbstractDict{Symbol}, default::Any = nothing, ) for partable in values(partables.tables) - update_partable!(partable, column, param_values, default) + update_partable!(partable, column, params, default) end return partables end @@ -136,11 +136,11 @@ end function update_partable!( partables::EnsembleParameterTable, column::Symbol, - params::AbstractVector{Symbol}, + param_labels::AbstractVector{Symbol}, values::AbstractVector, default::Any = nothing, ) - return update_partable!(partables, column, Dict(zip(params, values)), default) + return update_partable!(partables, column, Dict(zip(param_labels, values)), default) end ############################################################################################ @@ -148,6 +148,6 @@ end ############################################################################################ function Base.:(==)(p1::EnsembleParameterTable, p2::EnsembleParameterTable) - out = (p1.tables == p2.tables) && (p1.params == p2.params) + out = (p1.tables == p2.tables) && (p1.param_labels == p2.param_labels) return out end diff --git a/src/frontend/specification/ParameterTable.jl b/src/frontend/specification/ParameterTable.jl index 74c963ccb..2af269372 100644 --- a/src/frontend/specification/ParameterTable.jl +++ 
b/src/frontend/specification/ParameterTable.jl @@ -7,7 +7,7 @@ struct ParameterTable{C} <: AbstractParameterTable observed_vars::Vector{Symbol} latent_vars::Vector{Symbol} sorted_vars::Vector{Symbol} - params::Vector{Symbol} + param_labels::Vector{Symbol} end ############################################################################################ @@ -24,7 +24,7 @@ empty_partable_columns(nrows::Integer = 0) = Dict{Symbol, Vector}( :value_fixed => fill(NaN, nrows), :start => fill(NaN, nrows), :estimate => fill(NaN, nrows), - :param => fill(Symbol(), nrows), + :label => fill(Symbol(), nrows), ) # construct using the provided columns data or create an empty table @@ -32,31 +32,31 @@ function ParameterTable( columns::Dict{Symbol, Vector}; observed_vars::Union{AbstractVector{Symbol}, Nothing} = nothing, latent_vars::Union{AbstractVector{Symbol}, Nothing} = nothing, - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, ) - params = isnothing(params) ? unique!(filter(!=(:const), columns[:param])) : copy(params) - check_params(params, columns[:param]) + param_labels = isnothing(param_labels) ? unique!(filter(!=(:const), columns[:label])) : copy(param_labels) + check_param_labels(param_labels, columns[:label]) return ParameterTable( columns, !isnothing(observed_vars) ? copy(observed_vars) : Vector{Symbol}(), !isnothing(latent_vars) ? copy(latent_vars) : Vector{Symbol}(), Vector{Symbol}(), - params, + param_labels, ) end # new parameter table with different parameters order function ParameterTable( partable::ParameterTable; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, ) - isnothing(params) || check_params(params, partable.columns[:param]) + isnothing(param_labels) || check_param_labels(param_labels, partable.columns[:label]) return ParameterTable( Dict(col => copy(values) for (col, values) in pairs(partable.columns)), observed_vars = copy(observed_vars(partable)), latent_vars = copy(latent_vars(partable)), - params = params, + param_labels = param_labels, ) end @@ -80,10 +80,10 @@ end function Base.convert( ::Type{ParameterTable}, partable::ParameterTable; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, ) - return isnothing(params) || partable.params == params ? partable : - ParameterTable(partable; params) + return isnothing(param_labels) || partable.param_labels == param_labels ? 
partable : + ParameterTable(partable; param_labels) end function DataFrames.DataFrame( @@ -102,7 +102,7 @@ end function Base.show(io::IO, partable::ParameterTable) relevant_columns = - [:from, :relation, :to, :free, :value_fixed, :start, :estimate, :se, :param] + [:from, :relation, :to, :free, :value_fixed, :start, :estimate, :se, :label] shown_columns = filter!( col -> haskey(partable.columns, col) && length(partable.columns[col]) > 0, relevant_columns, @@ -133,7 +133,7 @@ function Base.:(==)(p1::ParameterTable, p2::ParameterTable) (p1.observed_vars == p2.observed_vars) && (p1.latent_vars == p2.latent_vars) && (p1.sorted_vars == p2.sorted_vars) && - (p1.params == p2.params) + (p1.param_labels == p2.param_labels) return out end @@ -153,18 +153,17 @@ Base.getindex(partable::ParameterTable, i::Integer) = ( to = partable.columns[:to][i], free = partable.columns[:free][i], value_fixed = partable.columns[:value_fixed][i], - param = partable.columns[:param][i], + param = partable.columns[:label][i], ) -Base.length(partable::ParameterTable) = length(partable.columns[:param]) +Base.length(partable::ParameterTable) = length(partable.columns[:label]) Base.eachindex(partable::ParameterTable) = Base.OneTo(length(partable)) Base.eltype(::Type{<:ParameterTable}) = ParameterTableRow Base.iterate(partable::ParameterTable, i::Integer = 1) = i > length(partable) ? nothing : (partable[i], i + 1) -params(partable::ParameterTable) = partable.params -nparams(partable::ParameterTable) = length(params(partable)) +nparams(partable::ParameterTable) = length(param_labels(partable)) # Sorting ---------------------------------------------------------------------------------- @@ -264,18 +263,18 @@ end function update_partable!( partable::ParameterTable, column::Symbol, - param_values::AbstractDict{Symbol, T}, + params::AbstractDict{Symbol, T}, default::Any = nothing, ) where {T} coldata = get!(() -> Vector{T}(undef, length(partable)), partable.columns, column) isvec_def = (default isa AbstractVector) && (length(default) == length(partable)) - for (i, par) in enumerate(partable.columns[:param]) + for (i, par) in enumerate(partable.columns[:label]) if par == :const coldata[i] = !isnothing(default) ? (isvec_def ? default[i] : default) : zero(T) - elseif haskey(param_values, par) - coldata[i] = param_values[par] + elseif haskey(params, par) + coldata[i] = params[par] else if isnothing(default) throw(KeyError(par)) @@ -289,31 +288,29 @@ function update_partable!( end """ - update_partable!(partable::AbstractParameterTable, params::Vector{Symbol}, values, column) + update_partable!(partable::AbstractParameterTable, param_labels::Vector{Symbol}, params, column) Write parameter `values` into `column` of `partable`. -The `params` and `values` vectors define the pairs of value +The `param_labels` and `params` vectors define the pairs of parameters, which are being matched to the `:param` column of the `partable`. 
""" function update_partable!( partable::ParameterTable, column::Symbol, - params::AbstractVector{Symbol}, - values::AbstractVector, + param_labels::AbstractVector{Symbol}, + params::AbstractVector, default::Any = nothing, ) - length(params) == length(values) || throw( + length(param_labels) == length(params) || throw( ArgumentError( - "The length of `params` ($(length(params))) and their `values` ($(length(values))) must be the same", + "The length of `param_labels` ($(length(param_labels))) and their `params` ($(length(param_labels))) must be the same", ), ) - dup_params = nonunique(params) - isempty(dup_params) || - throw(ArgumentError("Duplicate parameters detected: $(join(dup_params, ", "))")) - param_values = Dict(zip(params, values)) - update_partable!(partable, column, param_values, default) + check_param_labels(param_labels, nothing) + params = Dict(zip(param_labels, params)) + update_partable!(partable, column, params, default) end # update estimates ------------------------------------------------------------------------- @@ -327,14 +324,14 @@ Write parameter estimates from `fit` to the `:estimate` column of `partable` update_estimate!(partable::ParameterTable, fit::SemFit) = update_partable!( partable, :estimate, - params(fit), + param_labels(fit), fit.solution, partable.columns[:value_fixed], ) # fallback method for ensemble update_estimate!(partable::AbstractParameterTable, fit::SemFit) = - update_partable!(partable, :estimate, params(fit), fit.solution) + update_partable!(partable, :estimate, param_labels(fit), fit.solution) # update starting values ------------------------------------------------------------------- """ @@ -351,7 +348,7 @@ Write starting values from `fit` or `start_val` to the `:start` column of `parta update_start!(partable::AbstractParameterTable, fit::SemFit) = update_partable!( partable, :start, - params(fit), + param_labels(fit), fit.start_val, partable.columns[:value_fixed], ) @@ -365,7 +362,7 @@ function update_start!( if !(start_val isa Vector) start_val = start_val(model; kwargs...) end - return update_partable!(partable, :start, params(model), start_val) + return update_partable!(partable, :start, param_labels(model), start_val) end # update partable standard errors ---------------------------------------------------------- @@ -389,67 +386,12 @@ function update_se_hessian!( method = :finitediff, ) se = se_hessian(fit; method) - return update_partable!(partable, :se, params(fit), se) + return update_partable!(partable, :se, param_labels(fit), se) end -""" - param_values!(out::AbstractVector, partable::ParameterTable, - col::Symbol = :estimate) - -Extract parameter values from the `col` column of `partable` -into the `out` vector. - -The `out` vector should be of `nparams(partable)` length. -The *i*-th element of the `out` vector will contain the -value of the *i*-th parameter from `params(partable)`. - -Note that the function combines the duplicate occurences of the -same parameter in `partable` and will raise an error if the -values do not match. 
-""" -function param_values!( - out::AbstractVector, - partable::ParameterTable, - col::Symbol = :estimate, -) - (length(out) == nparams(partable)) || throw( - DimensionMismatch( - "The length of parameter values vector ($(length(out))) does not match the number of parameters ($(nparams(partable)))", - ), - ) - param_index = Dict(param => i for (i, param) in enumerate(params(partable))) - param_values_col = partable.columns[col] - for (i, param) in enumerate(partable.columns[:param]) - (param == :const) && continue - param_ind = get(param_index, param, nothing) - @assert !isnothing(param_ind) "Parameter table contains unregistered parameter :$param at row #$i" - val = param_values_col[i] - if !isnan(out[param_ind]) - @assert isequal(out[param_ind], val) "Parameter :$param value at row #$i ($val) differs from the earlier encountered value ($(out[param_ind]))" - else - out[param_ind] = val - end - end - return out -end - -""" - param_values(out::AbstractVector, col::Symbol = :estimate) - -Extract parameter values from the `col` column of `partable`. - -Returns the values vector. The *i*-th element corresponds to -the value of *i*-th parameter from `params(partable)`. - -Note that the function combines the duplicate occurences of the -same parameter in `partable` and will raise an error if the -values do not match. -""" -param_values(partable::ParameterTable, col::Symbol = :estimate) = - param_values!(fill(NaN, nparams(partable)), partable, col) """ - lavaan_param_values!(out::AbstractVector, partable_lav, + lavaan_params!(out::AbstractVector, partable_lav, partable::ParameterTable, lav_col::Symbol = :est, lav_group = nothing) @@ -457,14 +399,14 @@ Extract parameter values from the `partable_lav` lavaan model that match the parameters of `partable` into the `out` vector. The method sets the *i*-th element of the `out` vector to -the value of *i*-th parameter from `params(partable)`. +the value of *i*-th parameter from `param_labels(partable)`. Note that the lavaan and `partable` models are matched by the the names of variables in the tables (`from` and `to` columns) as well as the type of their relationship (`relation` column), and not by the names of the model parameters. """ -function lavaan_param_values!( +function lavaan_params!( out::AbstractVector, partable_lav, partable::ParameterTable, @@ -481,13 +423,13 @@ function lavaan_param_values!( ), ) partable_mask = findall(partable.columns[:free]) - param_index = Dict(param => i for (i, param) in enumerate(params(partable))) + param_index = param_indices(partable) lav_values = partable_lav[:, lav_col] for (from, to, type, id) in zip( [ view(partable.columns[k], partable_mask) for - k in [:from, :to, :relation, :param] + k in [:from, :to, :relation, :label] ]..., ) lav_ind = nothing @@ -562,7 +504,7 @@ function lavaan_param_values!( end """ - lavaan_param_values(partable_lav, partable::ParameterTable, + lavaan_params(partable_lav, partable::ParameterTable, lav_col::Symbol = :est, lav_group = nothing) Extract parameter values from the `partable_lav` lavaan model that @@ -570,19 +512,19 @@ match the parameters of `partable`. The `out` vector should be of `nparams(partable)` length. The *i*-th element of the `out` vector will contain the -value of the *i*-th parameter from `params(partable)`. +value of the *i*-th parameter from `param_labels(partable)`. 
Note that the lavaan and `partable` models are matched by the the names of variables in the tables (`from` and `to` columns), and the type of their relationship (`relation` column), but not by the ids of the model parameters. """ -lavaan_param_values( +lavaan_params( partable_lav, partable::ParameterTable, lav_col::Symbol = :est, lav_group = nothing, -) = lavaan_param_values!( +) = lavaan_params!( fill(NaN, nparams(partable)), partable_lav, partable, diff --git a/src/frontend/specification/RAMMatrices.jl b/src/frontend/specification/RAMMatrices.jl index 4ebea95fb..75175a87d 100644 --- a/src/frontend/specification/RAMMatrices.jl +++ b/src/frontend/specification/RAMMatrices.jl @@ -8,7 +8,7 @@ struct RAMMatrices <: SemSpecification S::ParamsMatrix{Float64} F::SparseMatrixCSC{Float64} M::Union{ParamsVector{Float64}, Nothing} - params::Vector{Symbol} + param_labels::Vector{Symbol} vars::Union{Vector{Symbol}, Nothing} # better call it "variables": it's a mixture of observed and latent (and it gets confusing with get_vars()) end @@ -71,7 +71,7 @@ function RAMMatrices(; S::AbstractMatrix, F::AbstractMatrix, M::Union{AbstractVector, Nothing} = nothing, - params::AbstractVector{Symbol}, + param_labels::AbstractVector{Symbol}, vars::Union{AbstractVector{Symbol}, Nothing} = nothing, ) ncols = size(A, 2) @@ -101,16 +101,16 @@ function RAMMatrices(; ), ) end - check_params(params, nothing) + check_param_labels(param_labels, nothing) - A = ParamsMatrix{Float64}(A, params) - S = ParamsMatrix{Float64}(S, params) - M = !isnothing(M) ? ParamsVector{Float64}(M, params) : nothing + A = ParamsMatrix{Float64}(A, param_labels) + S = ParamsMatrix{Float64}(S, param_labels) + M = !isnothing(M) ? ParamsVector{Float64}(M, param_labels) : nothing spF = sparse(F) if any(!isone, spF.nzval) throw(ArgumentError("F should contain only 0s and 1s")) end - return RAMMatrices(A, S, F, M, copy(params), vars) + return RAMMatrices(A, S, F, M, copy(param_labels), vars) end ############################################################################################ @@ -119,11 +119,11 @@ end function RAMMatrices( partable::ParameterTable; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, ) - params = copy(isnothing(params) ? SEM.params(partable) : params) - check_params(params, partable.columns[:param]) - params_index = Dict(param => i for (i, param) in enumerate(params)) + param_labels = copy(isnothing(param_labels) ? SEM.param_labels(partable) : param_labels) + check_param_labels(param_labels, partable.columns[:label]) + param_labels_index = param_indices(partable) n_observed = length(partable.observed_vars) n_latent = length(partable.latent_vars) @@ -146,16 +146,16 @@ function RAMMatrices( # known_labels = Dict{Symbol, Int64}() T = nonmissingtype(eltype(partable.columns[:value_fixed])) - A_inds = [Vector{Int64}() for _ in 1:length(params)] + A_inds = [Vector{Int64}() for _ in 1:length(param_labels)] A_lin_ixs = LinearIndices((n_vars, n_vars)) - S_inds = [Vector{Int64}() for _ in 1:length(params)] + S_inds = [Vector{Int64}() for _ in 1:length(param_labels)] S_lin_ixs = LinearIndices((n_vars, n_vars)) A_consts = Vector{Pair{Int, T}}() S_consts = Vector{Pair{Int, T}}() # is there a meanstructure? M_inds = any(==(Symbol(1)), partable.columns[:from]) ? - [Vector{Int64}() for _ in 1:length(params)] : nothing + [Vector{Int64}() for _ in 1:length(param_labels)] : nothing M_consts = !isnothing(M_inds) ? 
Vector{Pair{Int, T}}() : nothing for r in partable @@ -185,7 +185,7 @@ function RAMMatrices( error("Unsupported relation: $(r.relation)") end else - par_ind = params_index[r.param] + par_ind = param_labels_index[r.param] if (r.relation == :→) && (r.from == Symbol(1)) push!(M_inds[par_ind], row_ind) elseif r.relation == :→ @@ -229,7 +229,7 @@ function RAMMatrices( n_vars, ), !isnothing(M_inds) ? ParamsVector{T}(M_inds, M_consts, (n_vars,)) : nothing, - params, + param_labels, vars_sorted, ) end @@ -237,8 +237,8 @@ end Base.convert( ::Type{RAMMatrices}, partable::ParameterTable; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, -) = RAMMatrices(partable; params) + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, +) = RAMMatrices(partable; param_labels) ############################################################################################ ### get parameter table from RAMMatrices @@ -246,7 +246,7 @@ Base.convert( function ParameterTable( ram::RAMMatrices; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, observed_var_prefix::Symbol = :obs, latent_var_prefix::Symbol = :var, ) @@ -266,17 +266,17 @@ function ParameterTable( partable = ParameterTable( observed_vars = observed_vars, latent_vars = latent_vars, - params = isnothing(params) ? SEM.params(ram) : params, + param_labels = isnothing(param_labels) ? SEM.param_labels(ram) : param_labels, ) # fill the table - append_rows!(partable, ram.S, :S, ram.params, vars, skip_symmetric = true) - append_rows!(partable, ram.A, :A, ram.params, vars) + append_rows!(partable, ram.S, :S, ram.param_labels, vars, skip_symmetric = true) + append_rows!(partable, ram.A, :A, ram.param_labels, vars) if !isnothing(ram.M) - append_rows!(partable, ram.M, :M, ram.params, vars) + append_rows!(partable, ram.M, :M, ram.param_labels, vars) end - check_params(SEM.params(partable), partable.columns[:param]) + check_param_labels(SEM.param_labels(partable), partable.columns[:label]) return partable end @@ -284,8 +284,8 @@ end Base.convert( ::Type{<:ParameterTable}, ram::RAMMatrices; - params::Union{AbstractVector{Symbol}, Nothing} = nothing, -) = ParameterTable(ram; params) + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, +) = ParameterTable(ram; param_labels) ############################################################################################ ### Pretty Printing @@ -343,7 +343,7 @@ function partable_row( value_fixed = free ? 0.0 : val, start = 0.0, estimate = 0.0, - param = free ? val : :const, + label = free ? 
val : :const, ) end @@ -351,20 +351,20 @@ function append_rows!( partable::ParameterTable, arr::ParamsArray, arr_name::Symbol, - params::AbstractVector, + param_labels::AbstractVector, varnames::AbstractVector{Symbol}; skip_symmetric::Bool = false, ) - nparams(arr) == length(params) || throw( + nparams(arr) == length(param_labels) || throw( ArgumentError( - "Length of parameters vector ($(length(params))) does not match the number of parameters in the matrix ($(nparams(arr)))", + "Length of parameters vector ($(length(param_labels))) does not match the number of parameters in the matrix ($(nparams(arr)))", ), ) arr_ixs = eachindex(arr) # add parameters visited_indices = Set{eltype(arr_ixs)}() - for (i, par) in enumerate(params) + for (i, par) in enumerate(param_labels) for j in param_occurences_range(arr, i) arr_ix = arr_ixs[arr.linear_indices[j]] skip_symmetric && (arr_ix ∈ visited_indices) && continue @@ -399,7 +399,7 @@ function Base.:(==)(mat1::RAMMatrices, mat2::RAMMatrices) (mat1.S == mat2.S) && (mat1.F == mat2.F) && (mat1.M == mat2.M) && - (mat1.params == mat2.params) && + (mat1.param_labels == mat2.param_labels) && (mat1.vars == mat2.vars) ) return res diff --git a/src/frontend/specification/Sem.jl b/src/frontend/specification/Sem.jl index 33440e257..7ba8f7fb7 100644 --- a/src/frontend/specification/Sem.jl +++ b/src/frontend/specification/Sem.jl @@ -35,7 +35,7 @@ vars(model::AbstractSemSingle) = vars(implied(model)) observed_vars(model::AbstractSemSingle) = observed_vars(implied(model)) latent_vars(model::AbstractSemSingle) = latent_vars(implied(model)) -params(model::AbstractSemSingle) = params(implied(model)) +param_labels(model::AbstractSemSingle) = param_labels(implied(model)) nparams(model::AbstractSemSingle) = nparams(implied(model)) """ @@ -45,6 +45,11 @@ Returns the [*observed*](@ref SemObserved) part of a model. """ observed(model::AbstractSemSingle) = model.observed +""" + nsamples(model::AbstractSem) -> Int + +Returns the number of samples from the [*observed*](@ref SemObserved) part of a model. 
+""" nsamples(model::AbstractSemSingle) = nsamples(observed(model)) """ diff --git a/src/frontend/specification/StenoGraphs.jl b/src/frontend/specification/StenoGraphs.jl index 65bace302..314abcc35 100644 --- a/src/frontend/specification/StenoGraphs.jl +++ b/src/frontend/specification/StenoGraphs.jl @@ -40,7 +40,7 @@ function ParameterTable( graph::AbstractStenoGraph; observed_vars::AbstractVector{Symbol}, latent_vars::AbstractVector{Symbol}, - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, group::Union{Integer, Nothing} = nothing, param_prefix::Symbol = :θ, ) @@ -54,7 +54,7 @@ function ParameterTable( free = columns[:free] value_fixed = columns[:value_fixed] start = columns[:start] - param_refs = columns[:param] + param_refs = columns[:label] # group = Vector{Symbol}(undef, n) for (i, element) in enumerate(graph) @@ -126,7 +126,7 @@ function ParameterTable( end end - return ParameterTable(columns; latent_vars, observed_vars, params) + return ParameterTable(columns; latent_vars, observed_vars, param_labels) end ############################################################################################ @@ -148,7 +148,7 @@ function EnsembleParameterTable( graph::AbstractStenoGraph; observed_vars::AbstractVector{Symbol}, latent_vars::AbstractVector{Symbol}, - params::Union{AbstractVector{Symbol}, Nothing} = nothing, + param_labels::Union{AbstractVector{Symbol}, Nothing} = nothing, groups, ) graph = unique(graph) @@ -158,11 +158,11 @@ function EnsembleParameterTable( graph; observed_vars, latent_vars, - params, + param_labels, group = i, param_prefix = Symbol(:g, group), ) for (i, group) in enumerate(groups) ) - return EnsembleParameterTable(partables; params) + return EnsembleParameterTable(partables; param_labels) end diff --git a/src/frontend/specification/checks.jl b/src/frontend/specification/checks.jl index 5326e535f..5ef41c59d 100644 --- a/src/frontend/specification/checks.jl +++ b/src/frontend/specification/checks.jl @@ -1,18 +1,18 @@ # check if params vector correctly matches the parameter references (from the ParameterTable) -function check_params( - params::AbstractVector{Symbol}, +function check_param_labels( + param_labels::AbstractVector{Symbol}, param_refs::Union{AbstractVector{Symbol}, Nothing}, ) - dup_params = nonunique(params) - isempty(dup_params) || - throw(ArgumentError("Duplicate parameters detected: $(join(dup_params, ", "))")) - any(==(:const), params) && + dup_param_labels = nonunique(param_labels) + isempty(dup_param_labels) || + throw(ArgumentError("Duplicate parameter labels detected: $(join(dup_param_labels, ", "))")) + any(==(:const), param_labels) && throw(ArgumentError("Parameters constain reserved :const name")) if !isnothing(param_refs) # check if all references parameters are present all_refs = Set(id for id in param_refs if id != :const) - undecl_params = setdiff(all_refs, params) + undecl_params = setdiff(all_refs, param_labels) if !isempty(undecl_params) throw( ArgumentError( diff --git a/src/frontend/specification/documentation.jl b/src/frontend/specification/documentation.jl index 72d95c6b4..54f43fa9c 100644 --- a/src/frontend/specification/documentation.jl +++ b/src/frontend/specification/documentation.jl @@ -1,4 +1,4 @@ -params(spec::SemSpecification) = spec.params +param_labels(spec::SemSpecification) = spec.param_labels """ vars(semobj) -> Vector{Symbol} @@ -65,7 +65,7 @@ function ParameterTable end (1) EnsembleParameterTable(;graph, observed_vars, latent_vars, groups) - 
(2) EnsembleParameterTable(ps::Pair...; params = nothing) + (2) EnsembleParameterTable(ps::Pair...; param_labels = nothing) Return an `EnsembleParameterTable` constructed from (1) a graph or (2) multiple specifications. @@ -73,7 +73,7 @@ Return an `EnsembleParameterTable` constructed from (1) a graph or (2) multiple - `graph`: graph defined via `@StenoGraph` - `observed_vars::Vector{Symbol}`: observed variable names - `latent_vars::Vector{Symbol}`: latent variable names -- `params::Vector{Symbol}`: (optional) a vector of parameter names +- `param_labels::Vector{Symbol}`: (optional) a vector of parameter names - `ps::Pair...`: `:group_name => specification`, where `specification` is either a `ParameterTable` or `RAMMatrices` # Examples @@ -88,7 +88,7 @@ function EnsembleParameterTable end (1) RAMMatrices(partable::ParameterTable) - (2) RAMMatrices(;A, S, F, M = nothing, params, vars) + (2) RAMMatrices(;A, S, F, M = nothing, param_labels, vars) (3) RAMMatrices(partable::EnsembleParameterTable) @@ -102,7 +102,7 @@ Return `RAMMatrices` constructed from (1) a parameter table or (2) individual ma - `S`: matrix of undirected effects - `F`: filter matrix - `M`: vector of mean effects -- `params::Vector{Symbol}`: parameter labels +- `param_labels::Vector{Symbol}`: parameter labels - `vars::Vector{Symbol}`: variable names corresponding to the A, S and F matrix columns # Examples diff --git a/src/implied/RAM/generic.jl b/src/implied/RAM/generic.jl index 30bd29bf4..301c455e9 100644 --- a/src/implied/RAM/generic.jl +++ b/src/implied/RAM/generic.jl @@ -34,7 +34,7 @@ and for models with a meanstructure, the model implied means are computed as ``` ## Interfaces -- `params(::RAM) `-> vector of parameter labels +- `param_labels(::RAM) `-> vector of parameter labels - `nparams(::RAM)` -> number of parameters - `Σ(::RAM)` -> model implied covariance matrix @@ -169,11 +169,11 @@ end ### methods ############################################################################################ -function update!(targets::EvaluationTargets, implied::RAM, model::AbstractSemSingle, params) - materialize!(implied.A, implied.ram_matrices.A, params) - materialize!(implied.S, implied.ram_matrices.S, params) +function update!(targets::EvaluationTargets, implied::RAM, model::AbstractSemSingle, param_labels) + materialize!(implied.A, implied.ram_matrices.A, param_labels) + materialize!(implied.S, implied.ram_matrices.S, param_labels) if !isnothing(implied.M) - materialize!(implied.M, implied.ram_matrices.M, params) + materialize!(implied.M, implied.ram_matrices.M, param_labels) end parent(implied.I_A) .= .-implied.A diff --git a/src/implied/RAM/symbolic.jl b/src/implied/RAM/symbolic.jl index 44ad4949d..eff193c17 100644 --- a/src/implied/RAM/symbolic.jl +++ b/src/implied/RAM/symbolic.jl @@ -29,7 +29,7 @@ Subtype of `SemImplied` that implements the RAM notation with symbolic precomput Subtype of `SemImplied`. 
## Interfaces -- `params(::RAMSymbolic) `-> vector of parameter ids +- `param_labels(::RAMSymbolic) `-> vector of parameter ids - `nparams(::RAMSymbolic)` -> number of parameters - `Σ(::RAMSymbolic)` -> model implied covariance matrix diff --git a/src/implied/abstract.jl b/src/implied/abstract.jl index 99bb4d68d..af51440c6 100644 --- a/src/implied/abstract.jl +++ b/src/implied/abstract.jl @@ -8,7 +8,7 @@ nvars(implied::SemImplied) = nvars(implied.ram_matrices) nobserved_vars(implied::SemImplied) = nobserved_vars(implied.ram_matrices) nlatent_vars(implied::SemImplied) = nlatent_vars(implied.ram_matrices) -params(implied::SemImplied) = params(implied.ram_matrices) +param_labels(implied::SemImplied) = param_labels(implied.ram_matrices) nparams(implied::SemImplied) = nparams(implied.ram_matrices) # checks if the A matrix is acyclic diff --git a/src/implied/empty.jl b/src/implied/empty.jl index a80f8c185..3b0292e73 100644 --- a/src/implied/empty.jl +++ b/src/implied/empty.jl @@ -19,7 +19,7 @@ model per group and an additional model with `ImpliedEmpty` and `SemRidge` for t # Extended help ## Interfaces -- `params(::RAMSymbolic) `-> Vector of parameter labels +- `param_labels(::RAMSymbolic) `-> Vector of parameter labels - `nparams(::RAMSymbolic)` -> Number of parameters ## Implementation diff --git a/src/loss/ML/FIML.jl b/src/loss/ML/FIML.jl index 0ef542f70..ca23ded97 100644 --- a/src/loss/ML/FIML.jl +++ b/src/loss/ML/FIML.jl @@ -95,12 +95,12 @@ function evaluate!( semfiml::SemFIML, implied::SemImplied, model::AbstractSemSingle, - params, + param_labels, ) isnothing(hessian) || error("Hessian not implemented for FIML") if !check_fiml(semfiml, model) - isnothing(objective) || (objective = non_posdef_return(params)) + isnothing(objective) || (objective = non_posdef_return(param_labels)) isnothing(gradient) || fill!(gradient, 1) return objective end @@ -109,7 +109,7 @@ function evaluate!( scale = inv(nsamples(observed(model))) isnothing(objective) || - (objective = scale * F_FIML(observed(model), semfiml, model, params)) + (objective = scale * F_FIML(observed(model), semfiml, model, param_labels)) isnothing(gradient) || (∇F_FIML!(gradient, observed(model), semfiml, model); gradient .*= scale) @@ -169,8 +169,8 @@ function ∇F_fiml_outer!(G, JΣ, Jμ, implied, model, semfiml) mul!(G, ∇μ', Jμ, -1, 1) end -function F_FIML(observed::SemObservedMissing, semfiml, model, params) - F = zero(eltype(params)) +function F_FIML(observed::SemObservedMissing, semfiml, model, param_labels) + F = zero(eltype(param_labels)) for (i, pat) in enumerate(observed.patterns) F += F_one_pattern( semfiml.meandiff[i], diff --git a/src/loss/regularization/ridge.jl b/src/loss/regularization/ridge.jl index 02f637270..aee521624 100644 --- a/src/loss/regularization/ridge.jl +++ b/src/loss/regularization/ridge.jl @@ -59,7 +59,7 @@ function SemRidge(; ), ) else - par2ind = Dict(par => ind for (ind, par) in enumerate(params(implied))) + par2ind = param_indices(implied) which_ridge = getindex.(Ref(par2ind), which_ridge) end end diff --git a/src/optimizer/abstract.jl b/src/optimizer/abstract.jl index 68bcc04ad..2487b7c52 100644 --- a/src/optimizer/abstract.jl +++ b/src/optimizer/abstract.jl @@ -1,5 +1,5 @@ """ - sem_fit([optim::SemOptimizer], model::AbstractSem; + fit([optim::SemOptimizer], model::AbstractSem; [engine::Symbol], start_val = start_val, kwargs...) Return the fitted `model`. @@ -20,25 +20,25 @@ the online documentation on [Starting values](@ref). 
# Examples ```julia -sem_fit( +fit( my_model; start_val = start_simple, start_covariances_latent = 0.5) ``` """ -function sem_fit(optim::SemOptimizer, model::AbstractSem; start_val = nothing, kwargs...) +function fit(optim::SemOptimizer, model::AbstractSem; start_val = nothing, kwargs...) start_params = prepare_start_params(start_val, model; kwargs...) @assert start_params isa AbstractVector @assert length(start_params) == nparams(model) - sem_fit(optim, model, start_params; kwargs...) + fit(optim, model, start_params; kwargs...) end -sem_fit(model::AbstractSem; engine::Symbol = :Optim, start_val = nothing, kwargs...) = - sem_fit(SemOptimizer(; engine, kwargs...), model; start_val, kwargs...) +fit(model::AbstractSem; engine::Symbol = :Optim, start_val = nothing, kwargs...) = +fit(SemOptimizer(; engine, kwargs...), model; start_val, kwargs...) # fallback method -sem_fit(optim::SemOptimizer, model::AbstractSem, start_params; kwargs...) = +fit(optim::SemOptimizer, model::AbstractSem, start_params; kwargs...) = error("Optimizer $(optim) support not implemented.") # FABIN3 is the default method for single models diff --git a/src/optimizer/optim.jl b/src/optimizer/optim.jl index cec37a77a..8f5404bc2 100644 --- a/src/optimizer/optim.jl +++ b/src/optimizer/optim.jl @@ -102,7 +102,7 @@ optimizer(res::Optim.MultivariateOptimizationResults) = Optim.summary(res) n_iterations(res::Optim.MultivariateOptimizationResults) = Optim.iterations(res) convergence(res::Optim.MultivariateOptimizationResults) = Optim.converged(res) -function sem_fit( +function fit( optim::SemOptimizerOptim, model::AbstractSem, start_params::AbstractVector; diff --git a/src/types.jl b/src/types.jl index e802e057a..64a4acbac 100644 --- a/src/types.jl +++ b/src/types.jl @@ -188,13 +188,13 @@ Returns a SemEnsemble with fields - `n::Int`: Number of models. - `sems::Tuple`: `AbstractSem`s. - `weights::Vector`: Weights for each model. -- `params::Vector`: Stores parameter labels and their position. +- `param_labels::Vector`: Stores parameter labels and their position. """ struct SemEnsemble{N, T <: Tuple, V <: AbstractVector, I} <: AbstractSemCollection n::N sems::T weights::V - params::I + param_labels::I end # constructor from multiple models @@ -209,16 +209,16 @@ function SemEnsemble(models...; weights = nothing, kwargs...) end # check parameters equality - params = SEM.params(models[1]) + param_labels = SEM.param_labels(models[1]) for model in models - if params != SEM.params(model) + if param_labels != SEM.param_labels(model) throw(ErrorException("The parameters of your models do not match. \n Maybe you tried to specify models of an ensemble via ParameterTables. \n In that case, you may use RAMMatrices instead.")) end end - return SemEnsemble(n, models, weights, params) + return SemEnsemble(n, models, weights, param_labels) end # constructor from EnsembleParameterTable and data set @@ -239,7 +239,7 @@ function SemEnsemble(; specification, data, groups, column = :group, kwargs...) return SemEnsemble(models...; weights = nothing, kwargs...) 
end -params(ensemble::SemEnsemble) = ensemble.params +param_labels(ensemble::SemEnsemble) = ensemble.param_labels """ n_models(ensemble::SemEnsemble) -> Integer diff --git a/test/examples/helper.jl b/test/examples/helper.jl index f35d2cac6..4ff9bd507 100644 --- a/test/examples/helper.jl +++ b/test/examples/helper.jl @@ -51,7 +51,7 @@ end fitmeasure_names_ml = Dict( :AIC => "aic", :BIC => "bic", - :df => "df", + :dof => "df", :χ² => "chisq", :p_value => "pvalue", :nparams => "npar", @@ -59,7 +59,7 @@ fitmeasure_names_ml = Dict( ) fitmeasure_names_ls = Dict( - :df => "df", + :dof => "df", :χ² => "chisq", :p_value => "pvalue", :nparams => "npar", @@ -89,8 +89,8 @@ function test_estimates( lav_group = nothing, skip::Bool = false, ) - actual = StructuralEquationModels.param_values(partable, col) - expected = StructuralEquationModels.lavaan_param_values( + actual = StructuralEquationModels.params(partable, col) + expected = StructuralEquationModels.lavaan_params( partable_lav, partable, lav_col, @@ -120,8 +120,8 @@ function test_estimates( actual = fill(NaN, nparams(ens_partable)) expected = fill(NaN, nparams(ens_partable)) for (key, partable) in pairs(ens_partable.tables) - StructuralEquationModels.param_values!(actual, partable, col) - StructuralEquationModels.lavaan_param_values!( + StructuralEquationModels.params!(actual, partable, col) + StructuralEquationModels.lavaan_params!( expected, partable_lav, partable, diff --git a/test/examples/multigroup/build_models.jl b/test/examples/multigroup/build_models.jl index 1e97617fc..f6a7a230d 100644 --- a/test/examples/multigroup/build_models.jl +++ b/test/examples/multigroup/build_models.jl @@ -8,7 +8,7 @@ model_g1 = Sem(specification = specification_g1, data = dat_g1, implied = RAMSym model_g2 = Sem(specification = specification_g2, data = dat_g2, implied = RAM) -@test SEM.params(model_g1.implied.ram_matrices) == SEM.params(model_g2.implied.ram_matrices) +@test SEM.param_labels(model_g1.implied.ram_matrices) == SEM.param_labels(model_g2.implied.ram_matrices) # test the different constructors model_ml_multigroup = SemEnsemble(model_g1, model_g2) @@ -28,7 +28,7 @@ end # fit @testset "ml_solution_multigroup" begin - solution = sem_fit(semoptimizer, model_ml_multigroup) + solution = fit(semoptimizer, model_ml_multigroup) update_estimate!(partable, solution) test_estimates( partable, @@ -36,7 +36,7 @@ end atol = 1e-4, lav_groups = Dict(:Pasteur => 1, :Grant_White => 2), ) - solution = sem_fit(semoptimizer, model_ml_multigroup2) + solution = fit(semoptimizer, model_ml_multigroup2) update_estimate!(partable, solution) test_estimates( partable, @@ -47,7 +47,7 @@ end end @testset "fitmeasures/se_ml" begin - solution_ml = sem_fit(model_ml_multigroup) + solution_ml = fit(model_ml_multigroup) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_ml]; @@ -64,7 +64,7 @@ end lav_groups = Dict(:Pasteur => 1, :Grant_White => 2), ) - solution_ml = sem_fit(model_ml_multigroup2) + solution_ml = fit(model_ml_multigroup2) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_ml]; @@ -113,7 +113,7 @@ grad_fd = FiniteDiff.finite_difference_gradient( # fit @testset "ml_solution_multigroup | sorted" begin - solution = sem_fit(model_ml_multigroup) + solution = fit(model_ml_multigroup) update_estimate!(partable_s, solution) test_estimates( partable_s, @@ -124,7 +124,7 @@ grad_fd = FiniteDiff.finite_difference_gradient( end @testset "fitmeasures/se_ml | sorted" begin - solution_ml = sem_fit(model_ml_multigroup) + solution_ml = 
fit(model_ml_multigroup) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_ml]; @@ -191,7 +191,7 @@ end # fit @testset "solution_user_defined_loss" begin - solution = sem_fit(model_ml_multigroup) + solution = fit(model_ml_multigroup) update_estimate!(partable, solution) test_estimates( partable, @@ -226,7 +226,7 @@ model_ls_multigroup = SemEnsemble(model_ls_g1, model_ls_g2; optimizer = semoptim end @testset "ls_solution_multigroup" begin - solution = sem_fit(model_ls_multigroup) + solution = fit(model_ls_multigroup) update_estimate!(partable, solution) test_estimates( partable, @@ -237,7 +237,7 @@ end end @testset "fitmeasures/se_ls" begin - solution_ls = sem_fit(model_ls_multigroup) + solution_ls = fit(model_ls_multigroup) test_fitmeasures( fit_measures(solution_ls), solution_lav[:fitmeasures_ls]; @@ -321,7 +321,7 @@ if !isnothing(specification_miss_g1) end @testset "fiml_solution_multigroup" begin - solution = sem_fit(semoptimizer, model_ml_multigroup) + solution = fit(semoptimizer, model_ml_multigroup) update_estimate!(partable_miss, solution) test_estimates( partable_miss, @@ -329,7 +329,7 @@ if !isnothing(specification_miss_g1) atol = 1e-4, lav_groups = Dict(:Pasteur => 1, :Grant_White => 2), ) - solution = sem_fit(semoptimizer, model_ml_multigroup2) + solution = fit(semoptimizer, model_ml_multigroup2) update_estimate!(partable_miss, solution) test_estimates( partable_miss, @@ -340,7 +340,7 @@ if !isnothing(specification_miss_g1) end @testset "fitmeasures/se_fiml" begin - solution = sem_fit(semoptimizer, model_ml_multigroup) + solution = fit(semoptimizer, model_ml_multigroup) test_fitmeasures( fit_measures(solution), solution_lav[:fitmeasures_fiml]; @@ -357,7 +357,7 @@ if !isnothing(specification_miss_g1) lav_groups = Dict(:Pasteur => 1, :Grant_White => 2), ) - solution = sem_fit(semoptimizer, model_ml_multigroup2) + solution = fit(semoptimizer, model_ml_multigroup2) test_fitmeasures( fit_measures(solution), solution_lav[:fitmeasures_fiml]; diff --git a/test/examples/multigroup/multigroup.jl b/test/examples/multigroup/multigroup.jl index eac2b38dd..239bf713c 100644 --- a/test/examples/multigroup/multigroup.jl +++ b/test/examples/multigroup/multigroup.jl @@ -59,7 +59,7 @@ specification_g1 = RAMMatrices(; A = A, S = S1, F = F, - params = x, + param_labels = x, vars = [:x1, :x2, :x3, :x4, :x5, :x6, :x7, :x8, :x9, :visual, :textual, :speed], ) @@ -67,7 +67,7 @@ specification_g2 = RAMMatrices(; A = A, S = S2, F = F, - params = x, + param_labels = x, vars = [:x1, :x2, :x3, :x4, :x5, :x6, :x7, :x8, :x9, :visual, :textual, :speed], ) diff --git a/test/examples/political_democracy/by_parts.jl b/test/examples/political_democracy/by_parts.jl index ddbbfc3fa..3397b5f0a 100644 --- a/test/examples/political_democracy/by_parts.jl +++ b/test/examples/political_democracy/by_parts.jl @@ -70,7 +70,7 @@ solution_names = Symbol.("parameter_estimates_" .* ["ml", "ls", "ml", "ml"]) for (model, name, solution_name) in zip(models, model_names, solution_names) try @testset "$(name)_solution" begin - solution = sem_fit(optimizer_obj, model) + solution = fit(optimizer_obj, model) update_estimate!(partable, solution) test_estimates(partable, solution_lav[solution_name]; atol = 1e-2) end @@ -79,9 +79,9 @@ for (model, name, solution_name) in zip(models, model_names, solution_names) end @testset "ridge_solution" begin - solution_ridge = sem_fit(optimizer_obj, model_ridge) - solution_ml = sem_fit(optimizer_obj, model_ml) - # solution_ridge_id = sem_fit(optimizer_obj, model_ridge_id) + 
solution_ridge = fit(optimizer_obj, model_ridge) + solution_ml = fit(optimizer_obj, model_ml) + # solution_ridge_id = fit(optimizer_obj, model_ridge_id) @test solution_ridge.minimum < solution_ml.minimum + 1 end @@ -97,8 +97,8 @@ end end @testset "ml_solution_weighted" begin - solution_ml = sem_fit(optimizer_obj, model_ml) - solution_ml_weighted = sem_fit(optimizer_obj, model_ml_weighted) + solution_ml = fit(optimizer_obj, model_ml) + solution_ml_weighted = fit(optimizer_obj, model_ml_weighted) @test solution(solution_ml) ≈ solution(solution_ml_weighted) rtol = 1e-3 @test nsamples(model_ml) * StructuralEquationModels.minimum(solution_ml) ≈ StructuralEquationModels.minimum(solution_ml_weighted) rtol = 1e-6 @@ -109,7 +109,7 @@ end ############################################################################################ @testset "fitmeasures/se_ml" begin - solution_ml = sem_fit(optimizer_obj, model_ml) + solution_ml = fit(optimizer_obj, model_ml) test_fitmeasures(fit_measures(solution_ml), solution_lav[:fitmeasures_ml]; atol = 1e-3) update_se_hessian!(partable, solution_ml) @@ -123,7 +123,7 @@ end end @testset "fitmeasures/se_ls" begin - solution_ls = sem_fit(optimizer_obj, model_ls_sym) + solution_ls = fit(optimizer_obj, model_ls_sym) fm = fit_measures(solution_ls) test_fitmeasures( fm, @@ -176,13 +176,13 @@ if opt_engine == :Optim end @testset "ml_solution_hessian" begin - solution = sem_fit(optimizer_obj, model_ml) + solution = fit(optimizer_obj, model_ml) update_estimate!(partable, solution) test_estimates(partable, solution_lav[:parameter_estimates_ml]; atol = 1e-2) end @testset "ls_solution_hessian" begin - solution = sem_fit(optimizer_obj, model_ls) + solution = fit(optimizer_obj, model_ls) update_estimate!(partable, solution) test_estimates( partable, @@ -254,7 +254,7 @@ solution_names = Symbol.("parameter_estimates_" .* ["ml", "ls", "ml"] .* "_mean" for (model, name, solution_name) in zip(models, model_names, solution_names) try @testset "$(name)_solution_mean" begin - solution = sem_fit(optimizer_obj, model) + solution = fit(optimizer_obj, model) update_estimate!(partable_mean, solution) test_estimates(partable_mean, solution_lav[solution_name]; atol = 1e-2) end @@ -267,7 +267,7 @@ end ############################################################################################ @testset "fitmeasures/se_ml_mean" begin - solution_ml = sem_fit(optimizer_obj, model_ml) + solution_ml = fit(optimizer_obj, model_ml) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_ml_mean]; @@ -285,7 +285,7 @@ end end @testset "fitmeasures/se_ls_mean" begin - solution_ls = sem_fit(optimizer_obj, model_ls) + solution_ls = fit(optimizer_obj, model_ls) fm = fit_measures(solution_ls) test_fitmeasures( fm, @@ -336,13 +336,13 @@ end ############################################################################################ @testset "fiml_solution" begin - solution = sem_fit(optimizer_obj, model_ml) + solution = fit(optimizer_obj, model_ml) update_estimate!(partable_mean, solution) test_estimates(partable_mean, solution_lav[:parameter_estimates_fiml]; atol = 1e-2) end @testset "fiml_solution_symbolic" begin - solution = sem_fit(optimizer_obj, model_ml_sym) + solution = fit(optimizer_obj, model_ml_sym) update_estimate!(partable_mean, solution) test_estimates(partable_mean, solution_lav[:parameter_estimates_fiml]; atol = 1e-2) end @@ -352,7 +352,7 @@ end ############################################################################################ @testset "fitmeasures/se_fiml" begin - 
solution_ml = sem_fit(optimizer_obj, model_ml) + solution_ml = fit(optimizer_obj, model_ml) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_fiml]; diff --git a/test/examples/political_democracy/constraints.jl b/test/examples/political_democracy/constraints.jl index fb2116023..cc1b0874d 100644 --- a/test/examples/political_democracy/constraints.jl +++ b/test/examples/political_democracy/constraints.jl @@ -39,14 +39,14 @@ constrained_optimizer = SemOptimizer(; ############################################################################################ @testset "ml_solution_maxeval" begin - solution_maxeval = sem_fit(model_ml, engine = :NLopt, options = Dict(:maxeval => 10)) + solution_maxeval = fit(model_ml, engine = :NLopt, options = Dict(:maxeval => 10)) @test solution_maxeval.optimization_result.problem.numevals == 10 @test solution_maxeval.optimization_result.result[3] == :MAXEVAL_REACHED end @testset "ml_solution_constrained" begin - solution_constrained = sem_fit(constrained_optimizer, model_ml) + solution_constrained = fit(constrained_optimizer, model_ml) @test solution_constrained.solution[31] * solution_constrained.solution[30] >= (0.6 - 1e-8) diff --git a/test/examples/political_democracy/constructor.jl b/test/examples/political_democracy/constructor.jl index 3f226b4c8..7a8adc72e 100644 --- a/test/examples/political_democracy/constructor.jl +++ b/test/examples/political_democracy/constructor.jl @@ -8,7 +8,7 @@ using Random, NLopt semoptimizer = SemOptimizer(engine = opt_engine) model_ml = Sem(specification = spec, data = dat) -@test SEM.params(model_ml.implied.ram_matrices) == SEM.params(spec) +@test SEM.param_labels(model_ml.implied.ram_matrices) == SEM.param_labels(spec) model_ml_cov = Sem( specification = spec, @@ -75,7 +75,7 @@ solution_names = Symbol.("parameter_estimates_" .* ["ml", "ml", "ls", "ml", "ml" for (model, name, solution_name) in zip(models, model_names, solution_names) try @testset "$(name)_solution" begin - solution = sem_fit(semoptimizer, model) + solution = fit(semoptimizer, model) update_estimate!(partable, solution) test_estimates(partable, solution_lav[solution_name]; atol = 1e-2) end @@ -84,9 +84,9 @@ for (model, name, solution_name) in zip(models, model_names, solution_names) end @testset "ridge_solution" begin - solution_ridge = sem_fit(semoptimizer, model_ridge) - solution_ml = sem_fit(semoptimizer, model_ml) - # solution_ridge_id = sem_fit(semoptimizer, model_ridge_id) + solution_ridge = fit(semoptimizer, model_ridge) + solution_ml = fit(semoptimizer, model_ml) + # solution_ridge_id = fit(semoptimizer, model_ridge_id) @test abs(solution_ridge.minimum - solution_ml.minimum) < 1 end @@ -102,8 +102,8 @@ end end @testset "ml_solution_weighted" begin - solution_ml = sem_fit(semoptimizer, model_ml) - solution_ml_weighted = sem_fit(semoptimizer, model_ml_weighted) + solution_ml = fit(semoptimizer, model_ml) + solution_ml_weighted = fit(semoptimizer, model_ml_weighted) @test isapprox(solution(solution_ml), solution(solution_ml_weighted), rtol = 1e-3) @test isapprox( nsamples(model_ml) * StructuralEquationModels.minimum(solution_ml), @@ -117,7 +117,7 @@ end ############################################################################################ @testset "fitmeasures/se_ml" begin - solution_ml = sem_fit(semoptimizer, model_ml) + solution_ml = fit(semoptimizer, model_ml) test_fitmeasures(fit_measures(solution_ml), solution_lav[:fitmeasures_ml]; atol = 1e-3) update_se_hessian!(partable, solution_ml) @@ -131,7 +131,7 @@ end end @testset 
"fitmeasures/se_ls" begin - solution_ls = sem_fit(semoptimizer, model_ls_sym) + solution_ls = fit(semoptimizer, model_ls_sym) fm = fit_measures(solution_ls) test_fitmeasures( fm, @@ -182,8 +182,8 @@ end obs_colnames = colnames, ) # fit models - sol_ml = solution(sem_fit(semoptimizer, model_ml_new)) - sol_ml_sym = solution(sem_fit(semoptimizer, model_ml_sym_new)) + sol_ml = solution(fit(semoptimizer, model_ml_new)) + sol_ml_sym = solution(fit(semoptimizer, model_ml_sym_new)) # check solution @test maximum(abs.(sol_ml - params)) < 0.01 @test maximum(abs.(sol_ml_sym - params)) < 0.01 @@ -225,13 +225,13 @@ if opt_engine == :Optim end @testset "ml_solution_hessian" begin - solution = sem_fit(semoptimizer, model_ml) + solution = fit(semoptimizer, model_ml) update_estimate!(partable, solution) test_estimates(partable, solution_lav[:parameter_estimates_ml]; atol = 1e-2) end @testset "ls_solution_hessian" begin - solution = sem_fit(semoptimizer, model_ls) + solution = fit(semoptimizer, model_ls) update_estimate!(partable, solution) test_estimates( partable, @@ -296,7 +296,7 @@ solution_names = Symbol.("parameter_estimates_" .* ["ml", "ml", "ls", "ml"] .* " for (model, name, solution_name) in zip(models, model_names, solution_names) try @testset "$(name)_solution_mean" begin - solution = sem_fit(semoptimizer, model) + solution = fit(semoptimizer, model) update_estimate!(partable_mean, solution) test_estimates(partable_mean, solution_lav[solution_name]; atol = 1e-2) end @@ -309,7 +309,7 @@ end ############################################################################################ @testset "fitmeasures/se_ml_mean" begin - solution_ml = sem_fit(semoptimizer, model_ml) + solution_ml = fit(semoptimizer, model_ml) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_ml_mean]; @@ -327,7 +327,7 @@ end end @testset "fitmeasures/se_ls_mean" begin - solution_ls = sem_fit(semoptimizer, model_ls) + solution_ls = fit(semoptimizer, model_ls) fm = fit_measures(solution_ls) test_fitmeasures( fm, @@ -381,8 +381,8 @@ end meanstructure = true, ) # fit models - sol_ml = solution(sem_fit(semoptimizer, model_ml_new)) - sol_ml_sym = solution(sem_fit(semoptimizer, model_ml_sym_new)) + sol_ml = solution(fit(semoptimizer, model_ml_new)) + sol_ml_sym = solution(fit(semoptimizer, model_ml_sym_new)) # check solution @test maximum(abs.(sol_ml - params)) < 0.01 @test maximum(abs.(sol_ml_sym - params)) < 0.01 @@ -427,13 +427,13 @@ end ############################################################################################ @testset "fiml_solution" begin - solution = sem_fit(semoptimizer, model_ml) + solution = fit(semoptimizer, model_ml) update_estimate!(partable_mean, solution) test_estimates(partable_mean, solution_lav[:parameter_estimates_fiml]; atol = 1e-2) end @testset "fiml_solution_symbolic" begin - solution = sem_fit(semoptimizer, model_ml_sym) + solution = fit(semoptimizer, model_ml_sym) update_estimate!(partable_mean, solution) test_estimates(partable_mean, solution_lav[:parameter_estimates_fiml]; atol = 1e-2) end @@ -443,7 +443,7 @@ end ############################################################################################ @testset "fitmeasures/se_fiml" begin - solution_ml = sem_fit(semoptimizer, model_ml) + solution_ml = fit(semoptimizer, model_ml) test_fitmeasures( fit_measures(solution_ml), solution_lav[:fitmeasures_fiml]; diff --git a/test/examples/political_democracy/political_democracy.jl b/test/examples/political_democracy/political_democracy.jl index 7394175b7..ad06e0fcd 100644 --- 
a/test/examples/political_democracy/political_democracy.jl +++ b/test/examples/political_democracy/political_democracy.jl @@ -77,13 +77,13 @@ spec = RAMMatrices(; A = A, S = S, F = F, - params = x, + param_labels = x, vars = [:x1, :x2, :x3, :y1, :y2, :y3, :y4, :y5, :y6, :y7, :y8, :ind60, :dem60, :dem65], ) partable = ParameterTable(spec) -@test SEM.params(spec) == SEM.params(partable) +@test SEM.param_labels(spec) == SEM.param_labels(partable) # w. meanstructure ------------------------------------------------------------------------- @@ -94,13 +94,13 @@ spec_mean = RAMMatrices(; S = S, F = F, M = M, - params = [SEM.params(spec); Symbol.("x", string.(32:38))], + param_labels = [SEM.param_labels(spec); Symbol.("x", string.(32:38))], vars = [:x1, :x2, :x3, :y1, :y2, :y3, :y4, :y5, :y6, :y7, :y8, :ind60, :dem60, :dem65], ) partable_mean = ParameterTable(spec_mean) -@test SEM.params(partable_mean) == SEM.params(spec_mean) +@test SEM.param_labels(partable_mean) == SEM.param_labels(spec_mean) start_test = [fill(1.0, 11); fill(0.05, 3); fill(0.05, 6); fill(0.5, 8); fill(0.05, 3)] start_test_mean = @@ -138,7 +138,7 @@ end spec = ParameterTable(spec) spec_mean = ParameterTable(spec_mean) -@test SEM.params(spec) == SEM.params(partable) +@test SEM.param_labels(spec) == SEM.param_labels(partable) partable = spec partable_mean = spec_mean diff --git a/test/examples/proximal/l0.jl b/test/examples/proximal/l0.jl index da20f3901..374f8e58a 100644 --- a/test/examples/proximal/l0.jl +++ b/test/examples/proximal/l0.jl @@ -35,7 +35,7 @@ ram_mat = RAMMatrices(partable) model = Sem(specification = partable, data = dat, loss = SemML) -fit = sem_fit(model) +sem_fit = fit(model) # use l0 from ProximalSEM # regularized @@ -44,11 +44,11 @@ prox_operator = model_prox = Sem(specification = partable, data = dat, loss = SemML) -fit_prox = sem_fit(model_prox, engine = :Proximal, operator_g = prox_operator) +fit_prox = fit(model_prox, engine = :Proximal, operator_g = prox_operator) @testset "l0 | solution_unregularized" begin @test fit_prox.optimization_result.result[:iterations] < 1000 - @test maximum(abs.(solution(fit) - solution(fit_prox))) < 0.002 + @test maximum(abs.(solution(sem_fit) - solution(fit_prox))) < 0.002 end # regularized @@ -56,12 +56,12 @@ prox_operator = SlicedSeparableSum((NormL0(0.0), NormL0(100.0)), ([1:30], [31])) model_prox = Sem(specification = partable, data = dat, loss = SemML) -fit_prox = sem_fit(model_prox, engine = :Proximal, operator_g = prox_operator) +fit_prox = fit(model_prox, engine = :Proximal, operator_g = prox_operator) @testset "l0 | solution_regularized" begin @test fit_prox.optimization_result.result[:iterations] < 1000 @test solution(fit_prox)[31] == 0.0 @test abs( - StructuralEquationModels.minimum(fit_prox) - StructuralEquationModels.minimum(fit), + StructuralEquationModels.minimum(fit_prox) - StructuralEquationModels.minimum(sem_fit), ) < 1.0 end diff --git a/test/examples/proximal/lasso.jl b/test/examples/proximal/lasso.jl index 314453df4..beb5cf529 100644 --- a/test/examples/proximal/lasso.jl +++ b/test/examples/proximal/lasso.jl @@ -35,18 +35,18 @@ ram_mat = RAMMatrices(partable) model = Sem(specification = partable, data = dat, loss = SemML) -fit = sem_fit(model) +sem_fit = fit(model) # use lasso from ProximalSEM λ = zeros(31) model_prox = Sem(specification = partable, data = dat, loss = SemML) -fit_prox = sem_fit(model_prox, engine = :Proximal, operator_g = NormL1(λ)) +fit_prox = fit(model_prox, engine = :Proximal, operator_g = NormL1(λ)) @testset "lasso | 
solution_unregularized" begin @test fit_prox.optimization_result.result[:iterations] < 1000 - @test maximum(abs.(solution(fit) - solution(fit_prox))) < 0.002 + @test maximum(abs.(solution(sem_fit) - solution(fit_prox))) < 0.002 end λ = zeros(31); @@ -54,11 +54,11 @@ end model_prox = Sem(specification = partable, data = dat, loss = SemML) -fit_prox = sem_fit(model_prox, engine = :Proximal, operator_g = NormL1(λ)) +fit_prox = fit(model_prox, engine = :Proximal, operator_g = NormL1(λ)) @testset "lasso | solution_regularized" begin @test fit_prox.optimization_result.result[:iterations] < 1000 - @test all(solution(fit_prox)[16:20] .< solution(fit)[16:20]) + @test all(solution(fit_prox)[16:20] .< solution(sem_fit)[16:20]) @test StructuralEquationModels.minimum(fit_prox) - - StructuralEquationModels.minimum(fit) < 0.03 + StructuralEquationModels.minimum(sem_fit) < 0.03 end diff --git a/test/examples/proximal/ridge.jl b/test/examples/proximal/ridge.jl index 3d116dcd4..fd7ae113d 100644 --- a/test/examples/proximal/ridge.jl +++ b/test/examples/proximal/ridge.jl @@ -35,7 +35,7 @@ ram_mat = RAMMatrices(partable) model = Sem(specification = partable, data = dat, loss = SemML) -fit = sem_fit(model) +sem_fit = fit(model) # use ridge from StructuralEquationModels model_ridge = Sem( @@ -46,7 +46,7 @@ model_ridge = Sem( which_ridge = 16:20, ) -solution_ridge = sem_fit(model_ridge) +solution_ridge = fit(model_ridge) # use ridge from ProximalSEM; SqrNormL2 uses λ/2 as penalty λ = zeros(31); @@ -54,7 +54,7 @@ solution_ridge = sem_fit(model_ridge) model_prox = Sem(specification = partable, data = dat, loss = SemML) -solution_prox = @suppress sem_fit(model_prox, engine = :Proximal, operator_g = SqrNormL2(λ)) +solution_prox = @suppress fit(model_prox, engine = :Proximal, operator_g = SqrNormL2(λ)) @testset "ridge_solution" begin @test isapprox(solution_prox.solution, solution_ridge.solution; rtol = 1e-3) diff --git a/test/examples/recover_parameters/recover_parameters_twofact.jl b/test/examples/recover_parameters/recover_parameters_twofact.jl index 6899fe7a7..a3e426cbc 100644 --- a/test/examples/recover_parameters/recover_parameters_twofact.jl +++ b/test/examples/recover_parameters/recover_parameters_twofact.jl @@ -40,7 +40,7 @@ A = [ 0 0 0 0 0 0 0 0 ] -ram_matrices = RAMMatrices(; A = A, S = S, F = F, params = x, vars = nothing) +ram_matrices = RAMMatrices(; A = A, S = S, F = F, param_labels = x, vars = nothing) true_val = [ repeat([1], 8) @@ -73,6 +73,6 @@ optimizer = SemOptimizerOptim( Optim.Options(; f_tol = 1e-10, x_tol = 1.5e-8), ) -solution_ml = sem_fit(optimizer, model_ml) +solution_ml = fit(optimizer, model_ml) @test true_val ≈ solution(solution_ml) atol = 0.05 diff --git a/test/unit_tests/StatsAPI.jl b/test/unit_tests/StatsAPI.jl new file mode 100644 index 000000000..8648fc363 --- /dev/null +++ b/test/unit_tests/StatsAPI.jl @@ -0,0 +1,29 @@ +using StructuralEquationModels +graph = @StenoGraph begin + a → b +end +partable = ParameterTable(graph, observed_vars = [:a, :b], latent_vars = Symbol[]) +update_partable!(partable, :estimate, param_labels(partable), [3.1415]) +data = randn(100, 2) +model = Sem( + specification = partable, + data = data +) +model_fit = fit(model) + +@testset "params" begin + out = [NaN] + StructuralEquationModels.params!(out, partable) + @test params(partable) == out == [3.1415] == coef(partable) +end +@testset "param_labels" begin + @test param_labels(partable) == [:θ_1] == coefnames(partable) +end + +@testset "nobs" begin + @test nobs(model) == nsamples(model) +end + +@testset 
"coeftable" begin + @test_throws "StructuralEquationModels does not support" coeftable(model) +end \ No newline at end of file diff --git a/test/unit_tests/bootstrap.jl b/test/unit_tests/bootstrap.jl index f30092865..a2d5b6832 100644 --- a/test/unit_tests/bootstrap.jl +++ b/test/unit_tests/bootstrap.jl @@ -1,4 +1,4 @@ -solution_ml = sem_fit(model_ml) +solution_ml = fit(model_ml) bs = se_bootstrap(solution_ml; n_boot = 20) update_se_hessian!(partable, solution_ml) diff --git a/test/unit_tests/model.jl b/test/unit_tests/model.jl index 7ed190c22..2bf5dedaf 100644 --- a/test/unit_tests/model.jl +++ b/test/unit_tests/model.jl @@ -25,6 +25,7 @@ graph = @StenoGraph begin y8 ↔ y4 + y6 end + ram_matrices = RAMMatrices(ParameterTable(graph, observed_vars = obs_vars, latent_vars = lat_vars)) @@ -43,7 +44,7 @@ end function test_params_api(semobj, spec::SemSpecification) @test @inferred(nparams(semobj)) == nparams(spec) - @test @inferred(params(semobj)) == params(spec) + @test @inferred(param_labels(semobj)) == param_labels(spec) end @testset "Sem(implied=$impliedtype, loss=$losstype)" for impliedtype in (RAM, RAMSymbolic), diff --git a/test/unit_tests/sorting.jl b/test/unit_tests/sorting.jl index 0908a6497..3c61e13c4 100644 --- a/test/unit_tests/sorting.jl +++ b/test/unit_tests/sorting.jl @@ -11,7 +11,7 @@ model_ml_sorted = Sem(specification = partable, data = dat) end @testset "ml_solution_sorted" begin - solution_ml_sorted = sem_fit(model_ml_sorted) + solution_ml_sorted = fit(model_ml_sorted) update_estimate!(partable, solution_ml_sorted) @test test_estimates(par_ml, partable, 0.01) end diff --git a/test/unit_tests/specification.jl b/test/unit_tests/specification.jl index ef9fc73a1..b69230d7f 100644 --- a/test/unit_tests/specification.jl +++ b/test/unit_tests/specification.jl @@ -58,8 +58,8 @@ end @test nvars(partable) == length(obs_vars) + length(lat_vars) @test issetequal(vars(partable), [obs_vars; lat_vars]) - # params API - @test params(partable) == [[:θ_1, :a₁, :λ₉]; Symbol.("θ_", 2:16)] + # param_labels API + @test param_labels(partable) == [[:θ_1, :a₁, :λ₉]; Symbol.("θ_", 2:16)] @test nparams(partable) == 18 # don't allow constructing ParameterTable from a graph for an ensemble @@ -116,7 +116,7 @@ end @test nparams(enspartable) == 36 @test issetequal( - params(enspartable), + param_labels(enspartable), [Symbol.("gPasteur_", 1:16); Symbol.("gGrant_White_", 1:17); [:a₁, :a₂, :λ₉]], ) end @@ -135,7 +135,7 @@ end @test nvars(ram_matrices) == length(obs_vars) + length(lat_vars) @test issetequal(vars(ram_matrices), [obs_vars; lat_vars]) - # params API + # param_labels API @test nparams(ram_matrices) == nparams(partable) - @test params(ram_matrices) == params(partable) + @test param_labels(ram_matrices) == param_labels(partable) end diff --git a/test/unit_tests/unit_tests.jl b/test/unit_tests/unit_tests.jl index a638b991d..7189addd4 100644 --- a/test/unit_tests/unit_tests.jl +++ b/test/unit_tests/unit_tests.jl @@ -1,21 +1,35 @@ using Test, SafeTestsets -@safetestset "Multithreading" begin - include("multithreading.jl") -end - -@safetestset "Matrix algebra helper functions" begin - include("matrix_helpers.jl") -end +# Define available test sets +available_tests = Dict( + "multithreading" => "Multithreading", + "matrix_helpers" => "Matrix algebra helper functions", + "data_input_formats" => "SemObserved", + "specification" => "SemSpecification", + "model" => "Sem model", + "StatsAPI" => "StatsAPI" +) -@safetestset "SemObserved" begin - include("data_input_formats.jl") -end - -@safetestset 
"SemSpecification" begin - include("specification.jl") -end +# Determine which tests to run based on command-line arguments +selected_tests = isempty(ARGS) ? collect(keys(available_tests)) : ARGS -@safetestset "Sem model" begin - include("model.jl") +@testset "All Tests" begin + for file in selected_tests + if haskey(available_tests, file) + let file_ = file, test_name = available_tests[file] + # Compute the literal values + test_sym = Symbol(file_) + file_jl = file_ * ".jl" + # Build the expression with no free variables: + ex = quote + @safetestset $(Symbol(test_sym)) = $test_name begin + include($file_jl) + end + end + eval(ex) + end + else + @warn "Test file '$file' not found in available tests. Skipping." + end + end end diff --git a/test/unit_tests/unit_tests_interactive.jl b/test/unit_tests/unit_tests_interactive.jl new file mode 100644 index 000000000..cf082fa60 --- /dev/null +++ b/test/unit_tests/unit_tests_interactive.jl @@ -0,0 +1,10 @@ +# requires: TestEnv to be installed globally, and the StructuralEquationModels package `]dev`ed +# example: julia test/unit_tests/unit_tests_interactive.jl matrix_helpers + +try + import TestEnv + TestEnv.activate("StructuralEquationModels") +catch e + @warn "Error initializing Test Env" exception=(e, catch_backtrace()) +end +include("unit_tests.jl") \ No newline at end of file