seminrExtras 1.0.0 is now on CRAN. It extends the
seminr PLS-SEM workflow with eight new assessment
methods:
| Method | Function(s) | What it tells you |
|---|---|---|
| COA | assess_coa() | Why / for whom your model fails to generalise |
| NCA | assess_nca() | Which predictors are necessary (not just sufficient) |
| NCA-ESSE | assess_nca_esse() | How robust NCA conclusions are to extreme responses |
| cIPMA | assess_cipma() / assess_ipma() | Managerial priority map, optionally with necessity |
| FIMIX-PLS | assess_fimix() / assess_fimix_compare() | EM-based latent-class segmentation |
| PLS-POS | assess_pos() / assess_pos_compare() / pos_segments() | Prediction-oriented segmentation (max ΣR²) |
| CTA-PLS | assess_cta() | Is this measurement model actually reflective? |
| PCM | assess_pcm() | Does the mediator improve out-of-sample prediction? |
Plus hardened assess_cvpat(),
assess_cvpat_compare(), and
congruence_test().
# Install from CRAN (run once, hence commented out):
# install.packages("seminrExtras")
library(seminr)                 # core PLS-SEM estimation workflow
library(seminrExtras)           # the assessment extensions shown below
packageVersion("seminrExtras")  # confirm the version this post was run with
## [1] '1.0.0'
Two bundled datasets are used throughout:

- mobi — MOBI customer satisfaction (from seminr)
- corp_rep_data — Corporate reputation (from seminr)

mobi_mm <- constructs(
  composite("Image", multi_items("IMAG", 1:5)),
  composite("Expectation", multi_items("CUEX", 1:3)),
  composite("Value", multi_items("PERV", 1:2)),
  composite("Satisfaction", multi_items("CUSA", 1:3)),
  composite("Loyalty", multi_items("CUSL", 1:3))
)
# Structural model for the MOBI example: Image drives Expectation,
# Satisfaction and Loyalty; Satisfaction is the central mediator.
mobi_sm <- relationships(
paths(from = "Image", to = c("Expectation", "Satisfaction", "Loyalty")),
paths(from = "Expectation", to = c("Value", "Satisfaction")),
paths(from = "Value", to = "Satisfaction"),
paths(from = "Satisfaction", to = "Loyalty")
)
# Baseline MOBI PLS model, reused by the NCA, cIPMA, CTA and PCM examples.
mobi_pls <- estimate_pls(
data = mobi,
measurement_model = mobi_mm,
structural_model = mobi_sm
)
# Corporate-reputation measurement model: the four driver constructs
# (QUAL, PERF, CSOR, ATTR) use mode B weights; the outcome constructs
# (COMP, LIKE, CUSA, CUSL) use the default mode.
corp_rep_mm <- constructs(
composite("QUAL", multi_items("qual_", 1:8), weights = mode_B),
composite("PERF", multi_items("perf_", 1:5), weights = mode_B),
composite("CSOR", multi_items("csor_", 1:5), weights = mode_B),
composite("ATTR", multi_items("attr_", 1:3), weights = mode_B),
composite("COMP", multi_items("comp_", 1:3)),
composite("LIKE", multi_items("like_", 1:3)),
composite("CUSA", single_item("cusa")),
composite("CUSL", multi_items("cusl_", 1:3))
)
# Structural model: four drivers -> COMP and LIKE -> CUSA and CUSL,
# plus the CUSA -> CUSL path.
corp_rep_sm <- relationships(
paths(from = c("QUAL", "PERF", "CSOR", "ATTR"), to = c("COMP", "LIKE")),
paths(from = c("COMP", "LIKE"), to = c("CUSA", "CUSL")),
paths(from = "CUSA", to = "CUSL")
)
# Full corporate-reputation model, used by the FIMIX, PLS-POS and CVPAT
# examples.  "-99" marks missing responses; they are mean-replaced.
corp_pls <- estimate_pls(
data = corp_rep_data,
measurement_model = corp_rep_mm,
structural_model = corp_rep_sm,
missing = mean_replacement,
missing_value = "-99"
)
# Simplified measurement model (outcome constructs only) for the COA demo.
corp_mm_simple <- constructs(
composite("COMP", multi_items("comp_", 1:3)),
composite("LIKE", multi_items("like_", 1:3)),
composite("CUSA", single_item("cusa")),
composite("CUSL", multi_items("cusl_", 1:3))
)
# Matching simplified structural model: COMP/LIKE -> CUSA -> CUSL.
corp_sm_simple <- relationships(
paths(from = c("COMP", "LIKE"), to = "CUSA"),
paths(from = "CUSA", to = "CUSL")
)
# Estimate the simplified model; same missing-data handling as corp_pls.
corp_model <- estimate_pls(
data = corp_rep_data,
measurement_model = corp_mm_simple,
structural_model = corp_sm_simple,
missing = mean_replacement,
missing_value = "-99"
)
Diagnoses why and for whom the model fails to generalise. Returns predictive deviance, a decision tree that groups deviant cases, and unstable-path analysis.
# COA: cross-validated predictive deviance for CUSL (10 folds, 1 rep,
# single core), then grouping of deviant cases; seed fixes the folds.
coa_result <- assess_coa(
corp_model,
focal_construct = "CUSL",
noFolds = 10, reps = 1, cores = 1,
seed = 123
)
print(coa_result)
## Composite Overfit Analysis (COA)
## ================================
## Focal construct: CUSL
## Deviance bounds: 0.025 - 0.975
## Observations: 344
##
## Prediction Metrics:
## In-sample MSE: 0.522
## Out-of-sample MSE: 0.5231
## Overfit ratio: 0.0022
##
## Deviant Cases:
## Groups: 4 ( A = 3, B = 3, C = 2, D = 6 )
## Unique deviants: 4
summary(coa_result)
## Composite Overfit Analysis (COA) Summary
## =========================================
## Focal construct: CUSL
## Observations: 344
## Deviance bounds: 0.025 - 0.975
##
## Prediction Metrics:
## Metric Value
## IS_MSE 0.521990663
## OOS_MSE 0.523138987
## Overfit_Ratio 0.002199894
##
## Deviant Groups:
## Group Size Root_Node
## A 3 268
## B 3 39
## C 2 5
## D 6 3
##
## Unique deviants: 4
##
## Parameter Instability (max |path_coef diff|):
## Group Max_Path_Diff
## A 0.009893759
## B 0.018626833
## C 0.005045965
## D 0.022216510
plot(coa_result, type = "pd") # predictive deviance distribution
plot(coa_result, type = "tree") # decision tree
plot(coa_result, type = "groups") # deviant group highlights
# The same COA pipeline, run step by step with the lower-level helpers:
# deviance per case -> tree over deviant cases -> path-coefficient
# instability per deviant group.
pd <- predictive_deviance(corp_model,
focal_construct = "CUSL",
noFolds = 10, reps = 1, cores = 1,
seed = 123)
tree <- deviance_tree(pd, deviance_bounds = c(0.025, 0.975))
instab <- unstable_params(corp_model, tree$deviant_groups, params = "path_coef")
print(instab)
## $A
## $A$cases
## [1] 12 162 259
##
## $A$param_diffs
## $A$param_diffs$path_coef
## COMP LIKE CUSA CUSL
## COMP 0 0 -0.007714185 0.000000000
## LIKE 0 0 0.009893759 0.000000000
## CUSA 0 0 0.000000000 0.001327771
## CUSL 0 0 0.000000000 0.000000000
##
##
##
## $B
## $B$cases
## [1] 35 114 186
##
## $B$param_diffs
## $B$param_diffs$path_coef
## COMP LIKE CUSA CUSL
## COMP 0 0 -0.01862683 0.000000000
## LIKE 0 0 0.01158709 0.000000000
## CUSA 0 0 0.00000000 -0.005318762
## CUSL 0 0 0.00000000 0.000000000
##
##
##
## $C
## $C$cases
## [1] 297 311
##
## $C$param_diffs
## $C$param_diffs$path_coef
## COMP LIKE CUSA CUSL
## COMP 0 0 0.0005863733 0.000000000
## LIKE 0 0 -0.0050459655 0.000000000
## CUSA 0 0 0.0000000000 -0.004058067
## CUSL 0 0 0.0000000000 0.000000000
##
##
##
## $D
## $D$cases
## [1] 29 61 63 106 149 197
##
## $D$param_diffs
## $D$param_diffs$path_coef
## COMP LIKE CUSA CUSL
## COMP 0 0 -0.02221651 0.0000000
## LIKE 0 0 0.01277128 0.0000000
## CUSA 0 0 0.00000000 -0.0212177
## CUSL 0 0 0.00000000 0.0000000
##
##
##
## attr(,"class")
## [1] "coa_unstable" "list"
# Inspect the rules and competing splits for one deviant group
first_group <- names(tree$deviant_groups)[1]
group_rules(first_group, coa_result) # decision rule for group "A"
## construct gte lt
## 1 CUSA -2.5050509 0.9029112
## 2 COMP -2.6219498 NA
## 3 LIKE 0.2285191 0.3594257
## 4 CUSL NA -0.2007597
# Alternative split candidates at the group's root node:
competes(tree$group_roots[[first_group]], tree) # competing splits at that node
## criterion sign value improve
## 1 LIKE < 0.3594257 0.6630509
## 2 CUSA < -0.8010698 0.2569164
## 3 COMP < -0.9193789 0.2504132
## 4 CUSL >= -1.5408427 0.1688486
Tests necessity, not sufficiency. No external
NCA package required — CE-FDH and CR-FDH ceiling lines are
built in.
# NCA on the drivers of Satisfaction; 1000 permutations for the
# effect-size significance test.
nca_sat <- assess_nca(
mobi_pls,
target = "Satisfaction",
test.rep = 1000,
seed = 123
)
print(nca_sat)
## Necessary Condition Analysis (NCA)
## ==================================
## Target: Satisfaction
## Predictors: Image, Expectation, Value
## Ceilings: ce_fdh, cr_fdh
## Observations: 250
##
## Effect Sizes (d):
## ce_fdh cr_fdh
## Image 0.292 0.261
## Expectation 0.193 0.199
## Value 0.000 0.000
##
## NCA Effect Sizes (d >= 0.1 indicates a necessary condition)
##
## Permutation p-values:
## ce_fdh cr_fdh
## Image 0.000 0.000
## Expectation 0.000 0.000
## Value 1.000 1.000
##
## NCA Significance (permutation test p-values)
##
## Necessary conditions (d >= 0.1, p < 0.05): Image, Expectation
summary(nca_sat)
## Necessary Condition Analysis (NCA) Summary
## ============================================
## Target: Satisfaction
## Predictors: Image, Expectation, Value
## Ceilings: ce_fdh, cr_fdh
## Observations: 250
##
## Effect Sizes (d):
## ce_fdh cr_fdh
## Image 0.292 0.261
## Expectation 0.193 0.199
## Value 0.000 0.000
##
## NCA Effect Sizes (d >= 0.1 indicates a necessary condition)
##
## Permutation p-values:
## ce_fdh cr_fdh
## Image 0.000 0.000
## Expectation 0.000 0.000
## Value 1.000 1.000
##
## NCA Significance (permutation test p-values)
##
## Necessary conditions (d >= 0.1, p < 0.05): Image, Expectation
##
## Bottleneck table (ce_fdh):
## Satisfaction Image Expectation Value
## 1 0 0.0 0.0 0
## 2 10 11.5 10.7 0
## 3 20 11.5 11.3 0
## 4 30 11.5 13.7 0
## 5 40 11.5 13.7 0
## 6 50 17.2 16.4 0
## 7 60 23.7 19.0 0
## 8 70 44.8 19.3 0
## 9 80 46.9 19.3 0
## 10 90 71.6 42.0 0
## 11 100 74.0 62.3 0
##
## Bottleneck table (cr_fdh):
## Satisfaction Image Expectation Value
## 1 0 NA NA 0
## 2 10 NA NA 0
## 3 20 NA NA 0
## 4 30 0.5 5.0 0
## 5 40 11.0 11.6 0
## 6 50 21.5 18.2 0
## 7 60 32.1 24.8 0
## 8 70 42.6 31.4 0
## 9 80 53.1 38.0 0
## 10 90 63.6 44.6 0
## 11 100 74.1 51.2 0
plot(nca_sat, type = "effects") # effect size bar plot
plot(nca_sat, type = "scatter") # ceiling-line scatter plots
Varies the ECDF threshold, removes upper-left observations, and compares the empirical effect size drop against a uniform benchmark (Becker et al., 2026).
# NCA-ESSE: recompute effect sizes while trimming upper-left observations
# at ECDF thresholds from 0% to 5% in 0.5% steps.
esse_sat <- assess_nca_esse(
mobi_pls,
target = "Satisfaction",
thresholds = seq(0, 0.05, by = 0.005),
seed = 123
)
print(esse_sat)
## NCA-ESSE: Effect Size Sensitivity Extension
## =============================================
## Target: Satisfaction
## Predictors: Image, Expectation, Value
## Ceiling: ce_fdh
## Observations: 250
## Thresholds: 0% to 5%
##
## Empirical effect sizes by ECDF threshold:
## Image Expectation Value
## 0% 0.2917 0.1930 0.0000
## 0.5% 0.2787 0.1821 0.1191
## 1% 0.3118 0.1923 0.2620
## 1.5% 0.3227 0.2095 0.3661
## 2% 0.3366 0.2228 0.2666
## 2.5% 0.3375 0.2361 0.2840
## 3% 0.3280 0.2410 0.3364
## 3.5% 0.3454 0.2510 0.3414
## 4% 0.2796 0.2117 0.1975
## 4.5% 0.3161 0.2072 0.2065
## 5% 0.3224 0.2097 0.2157
##
## Benchmark (uniform) effect sizes:
## 0% 0.5% 1% 1.5% 2% 2.5% 3% 3.5% 4% 4.5% 5%
## 0.0000 0.0315 0.0561 0.0780 0.0982 0.1172 0.1352 0.1523 0.1688 0.1845 0.1998
##
## Sensitivity (empirical - benchmark):
## Image Expectation Value
## 0% 0.2917 0.1930 0.0000
## 0.5% 0.2472 0.1506 0.0876
## 1% 0.2557 0.1362 0.2060
## 1.5% 0.2447 0.1315 0.2882
## 2% 0.2383 0.1245 0.1684
## 2.5% 0.2202 0.1189 0.1668
## 3% 0.1928 0.1058 0.2013
## 3.5% 0.1931 0.0987 0.1891
## 4% 0.1109 0.0429 0.0288
## 4.5% 0.1315 0.0226 0.0220
## 5% 0.1226 0.0099 0.0159
summary(esse_sat)
## NCA-ESSE Summary (Becker et al., 2026)
## =======================================
## Target: Satisfaction
## Ceiling: ce_fdh
## Observations: 250
##
## Predictor: Image
## ------------------------------------------------------------
## ECDF_threshold Empirical_d Benchmark_d Difference
## 0.000 0.2917 0.0000 0.2917
## 0.005 0.2787 0.0315 0.2472
## 0.010 0.3118 0.0561 0.2557
## 0.015 0.3227 0.0780 0.2447
## 0.020 0.3366 0.0982 0.2383
## 0.025 0.3375 0.1172 0.2202
## 0.030 0.3280 0.1352 0.1928
## 0.035 0.3454 0.1523 0.1931
## 0.040 0.2796 0.1688 0.1109
## 0.045 0.3161 0.1845 0.1315
## 0.050 0.3224 0.1998 0.1226
##
## Predictor: Expectation
## ------------------------------------------------------------
## ECDF_threshold Empirical_d Benchmark_d Difference
## 0.000 0.1930 0.0000 0.1930
## 0.005 0.1821 0.0315 0.1506
## 0.010 0.1923 0.0561 0.1362
## 0.015 0.2095 0.0780 0.1315
## 0.020 0.2228 0.0982 0.1245
## 0.025 0.2361 0.1172 0.1189
## 0.030 0.2410 0.1352 0.1058
## 0.035 0.2510 0.1523 0.0987
## 0.040 0.2117 0.1688 0.0429
## 0.045 0.2072 0.1845 0.0226
## 0.050 0.2097 0.1998 0.0099
##
## Predictor: Value
## ------------------------------------------------------------
## ECDF_threshold Empirical_d Benchmark_d Difference
## 0.000 0.0000 0.0000 0.0000
## 0.005 0.1191 0.0315 0.0876
## 0.010 0.2620 0.0561 0.2060
## 0.015 0.3661 0.0780 0.2882
## 0.020 0.2666 0.0982 0.1684
## 0.025 0.2840 0.1172 0.1668
## 0.030 0.3364 0.1352 0.2013
## 0.035 0.3414 0.1523 0.1891
## 0.040 0.1975 0.1688 0.0288
## 0.045 0.2065 0.1845 0.0220
## 0.050 0.2157 0.1998 0.0159
plot(esse_sat, type = "sensitivity") # Fig. 4, Becker et al. (2026)
plot(esse_sat, type = "difference") # Fig. 6, Becker et al. (2026)
assess_ipma() is a convenience wrapper for IPMA alone;
assess_cipma() overlays NCA necessity onto the IPMA
map.
# Plain IPMA for Loyalty; scale_min/scale_max give the indicators'
# original response range, used to rescale performance to 0-100.
ipma_result <- assess_ipma(
mobi_pls,
target = "Loyalty",
scale_min = 1,
scale_max = 10
)
print(ipma_result)
## Importance-Performance Map Analysis (IPMA)
## ============================================
## Target: Loyalty
## Constructs: Image, Expectation, Value, Satisfaction
## Scale range: 1 - 10
## Observations: 250
##
## Importance-Performance Results:
## Construct Unstd. Total Effect Std. Total Effect Performance
## Image 0.8167 0.5105 73.15
## Expectation 0.2267 0.1455 72.40
## Value 0.1739 0.1631 61.95
## Satisfaction 0.7512 0.5110 71.90
##
## IPMA Classification:
## Construct Priority
## Image Important driver
## Expectation Low priority
## Value Low priority
## Satisfaction Important driver
summary(ipma_result)
## Importance-Performance Map Analysis (IPMA) Summary
## ====================================================
## Target: Loyalty
## Scale range: 1 - 10
## Observations: 250
##
## Importance (Unstandardized Total Effects on Loyalty ):
## Image Expectation Value Satisfaction
## 0.8167 0.2267 0.1739 0.7512
##
## Importance (Standardized Total Effects on Loyalty ):
## Image Expectation Value Satisfaction
## 0.5105 0.1455 0.1631 0.5110
##
## Performance (0-100 rescaled):
## Image Expectation Value Satisfaction
## 73.15 72.40 61.95 71.90
##
## Construct Classification:
## Construct Importance Performance High_Importance Necessary Priority
## Image 0.8167 73.15 TRUE FALSE Important driver
## Expectation 0.2267 72.40 FALSE FALSE Low priority
## Value 0.1739 61.95 FALSE FALSE Low priority
## Satisfaction 0.7512 71.90 TRUE FALSE Important driver
plot(ipma_result, type = "ipma")
# cIPMA = IPMA plus an NCA necessity overlay; nca_test.rep sets the
# permutation count for the NCA significance test.
cipma_result <- assess_cipma(
mobi_pls,
target = "Loyalty",
scale_min = 1,
scale_max = 10,
nca_test.rep = 1000,
seed = 123
)
print(cipma_result)
## Combined Importance-Performance Map Analysis (cIPMA)
## =====================================================
## Target: Loyalty
## Constructs: Image, Expectation, Value, Satisfaction
## Scale range: 1 - 10
## Observations: 250
##
## Importance-Performance Results:
## Construct Unstd. Total Effect Std. Total Effect Performance
## Image 0.8167 0.5105 73.15
## Expectation 0.2267 0.1455 72.40
## Value 0.1739 0.1631 61.95
## Satisfaction 0.7512 0.5110 71.90
##
## Necessary Conditions (NCA):
## Construct d (ce_fdh) d (cr_fdh) Necessary
## Image 0.1512 0.1335 Yes
## Expectation 0.1287 0.1032 Yes
## Value 0.0150 0.0075 No
## Satisfaction 0.1430 0.1246 Yes
##
## cIPMA Classification:
## Construct Priority
## Image Top priority
## Expectation Bottleneck risk
## Value Low priority
## Satisfaction Top priority
summary(cipma_result)
## Combined Importance-Performance Map Analysis (cIPMA) Summary
## =============================================================
## Target: Loyalty
## Scale range: 1 - 10
## Observations: 250
##
## Importance (Unstandardized Total Effects on Loyalty ):
## Image Expectation Value Satisfaction
## 0.8167 0.2267 0.1739 0.7512
##
## Importance (Standardized Total Effects on Loyalty ):
## Image Expectation Value Satisfaction
## 0.5105 0.1455 0.1631 0.5110
##
## Performance (0-100 rescaled):
## Image Expectation Value Satisfaction
## 73.15 72.40 61.95 71.90
##
## NCA Effect Sizes:
## ce_fdh cr_fdh
## Image 0.1512 0.1335
## Expectation 0.1287 0.1032
## Value 0.0150 0.0075
## Satisfaction 0.1430 0.1246
##
## NCA Permutation p-values:
## ce_fdh cr_fdh
## Image 0.004 0.008
## Expectation 0.015 0.019
## Value 0.593 0.615
## Satisfaction 0.000 0.000
##
## Necessary conditions: Image, Expectation, Satisfaction
##
## Bottleneck table (ce_fdh):
## Loyalty Image Expectation Value Satisfaction
## 1 0 0.0 0.0 0.0 0.0
## 2 10 0.0 0.0 0.0 3.2
## 3 20 11.5 10.7 0.0 7.1
## 4 30 16.2 10.7 0.0 10.4
## 5 40 16.2 10.7 0.0 10.4
## 6 50 17.2 10.7 0.0 10.4
## 7 60 17.2 10.7 0.0 10.4
## 8 70 17.2 19.3 0.0 22.6
## 9 80 17.2 19.3 0.0 22.6
## 10 90 23.7 19.3 0.0 22.6
## 11 100 30.8 34.6 44.4 28.9
##
## Bottleneck table (cr_fdh):
## Loyalty Image Expectation Value Satisfaction
## 1 0 NA NA NA NA
## 2 10 1.0 NA NA 1.3
## 3 20 4.1 NA NA 4.1
## 4 30 7.1 0.6 NA 6.9
## 5 40 10.2 4.7 NA 9.7
## 6 50 13.3 8.7 NA 12.4
## 7 60 16.4 12.7 NA 15.2
## 8 70 19.4 16.8 NA 18.0
## 9 80 22.5 20.8 NA 20.7
## 10 90 25.6 24.8 NA 23.5
## 11 100 28.6 28.9 44.4 26.3
##
## Construct Classification:
## Construct Importance Performance High_Importance Necessary Priority
## Image 0.8167 73.15 TRUE TRUE Top priority
## Expectation 0.2267 72.40 FALSE TRUE Bottleneck risk
## Value 0.1739 61.95 FALSE FALSE Low priority
## Satisfaction 0.7512 71.90 TRUE TRUE Top priority
plot(cipma_result, type = "cipma") # IPMA + NCA overlay
plot(cipma_result, importance_metric = "standardized") # standardized total effects
Uncovers unobserved heterogeneity. Multi-start EM with information-criteria comparison across K.
# FIMIX-PLS with K = 2 latent classes; 10 random EM starts guard
# against local optima, seed makes the best start reproducible.
fimix_k2 <- assess_fimix(corp_pls, K = 2, nstart = 10, seed = 123)
print(fimix_k2)
## FIMIX-PLS Analysis
## ==================
## Segments: 2
## Observations: 344
## Converged: Yes ( 104 iterations )
## Random starts: 10
##
## Segment Proportions:
## Segment Proportion Size
## Segment 1 0.4087 118
## Segment 2 0.5913 226
##
## Fit Criteria:
## lnL AIC AIC3 AIC4 BIC CAIC EN
## -1376.69 2839.39 2882.39 2925.39 3004.54 3047.54 0.4873
##
## Segment Path Coefficients:
##
## Segment 1 :
## From To Coefficient
## QUAL COMP 0.6345
## PERF COMP 0.2744
## CSOR COMP -0.2286
## ATTR COMP 0.1038
## QUAL LIKE 0.5920
## PERF LIKE -0.0525
## CSOR LIKE -0.0318
## ATTR LIKE 0.1770
## COMP CUSA 0.3869
## LIKE CUSA 0.4486
## COMP CUSL 0.2501
## LIKE CUSL 0.3283
## CUSA CUSL 0.3536
##
## Segment 2 :
## From To Coefficient
## QUAL COMP 0.2952
## PERF COMP 0.2067
## CSOR COMP 0.3251
## ATTR COMP 0.1746
## QUAL LIKE 0.1915
## PERF LIKE 0.2480
## CSOR LIKE 0.3039
## ATTR LIKE 0.1881
## COMP CUSA 0.1864
## LIKE CUSA 0.2003
## COMP CUSL -0.0186
## LIKE CUSL 0.3270
## CUSA CUSL 0.3943
summary(fimix_k2)
## FIMIX-PLS Analysis Summary
## ==========================
## Segments: 2
## Observations: 344
## Free parameters: 43
## Converged: Yes ( 104 iterations )
## Random starts: 10
##
## Segment Proportions:
## Segment_1 Segment_2
## 0.4087 0.5913
##
## Fit Criteria:
## lnL AIC AIC3 AIC4 BIC CAIC HQ
## -1376.6943 2839.3885 2882.3885 2925.3885 3004.5361 3047.5361 2905.1648
## MDL5 EN
## 2878.9623 0.4873
##
## --- Segment 1 ---
## Intercepts:
## COMP LIKE CUSA CUSL
## 0.2189 -0.1076 -0.3943 -0.2994
## Path Coefficients:
## From To Coefficient
## QUAL COMP 0.6345
## PERF COMP 0.2744
## CSOR COMP -0.2286
## ATTR COMP 0.1038
## QUAL LIKE 0.5920
## PERF LIKE -0.0525
## CSOR LIKE -0.0318
## ATTR LIKE 0.1770
## COMP CUSA 0.3869
## LIKE CUSA 0.4486
## COMP CUSL 0.2501
## LIKE CUSL 0.3283
## CUSA CUSL 0.3536
## Residual Variances:
## COMP LIKE CUSA CUSL
## 0.3259 0.4965 0.7807 0.6977
##
## --- Segment 2 ---
## Intercepts:
## COMP LIKE CUSA CUSL
## -0.1785 0.0503 0.3006 0.1782
## Path Coefficients:
## From To Coefficient
## QUAL COMP 0.2952
## PERF COMP 0.2067
## CSOR COMP 0.3251
## ATTR COMP 0.1746
## QUAL LIKE 0.1915
## PERF LIKE 0.2480
## CSOR LIKE 0.3039
## ATTR LIKE 0.1881
## COMP CUSA 0.1864
## LIKE CUSA 0.2003
## COMP CUSL -0.0186
## LIKE CUSL 0.3270
## CUSA CUSL 0.3943
## Residual Variances:
## COMP LIKE CUSA CUSL
## 0.2659 0.3480 0.4050 0.1680
plot(fimix_k2)
# Re-run FIMIX for K = 2..4 and compare information criteria across K.
fimix_compare <- assess_fimix_compare(
corp_pls,
K_range = 2:4,
nstart = 10,
seed = 123
)
print(fimix_compare)
## FIMIX-PLS Segment Selection
## ===========================
## K range: 2 to 4
##
## Fit Criteria:
## K lnL AIC AIC3 AIC4 BIC CAIC HQ MDL5 EN
## 2 -1376.69 2839.39 2882.39 2925.39 3004.54 3047.54 2905.16 2878.96 0.4873
## 3 -1321.68 2773.37 2838.37 2903.37 3023.01 3088.01 2872.80 2833.19 0.6340
## 4 -1290.10 2754.20 2841.20 2928.20 3088.34 3175.34 2887.28 2834.27 0.6629
##
## Best K by criterion:
## AIC : 4
## AIC3 : 3
## AIC4 : 3
## BIC : 2
## CAIC : 2
## EN (best): 4
plot(fimix_compare) # IC across K
Maximises Σ R² across segments (Becker et al., 2013). No distributional assumptions, detects heterogeneity in both structural and formative measurement models.
# PLS-POS with K = 2 segments; reassignment stops after max_iter sweeps,
# 10 random starts, best objective (sum of R-squared) kept.
pos_k2 <- assess_pos(corp_pls, K = 2, nstart = 10, max_iter = 100, seed = 123)
print(pos_k2)
## PLS-POS Analysis
## ================
## Segments: 2
## Observations: 344
## Converged: Yes ( 68 iterations )
## Random starts: 10
## Objective (Sum R²): 4.8026
##
## Segment Sizes:
## Segment Size Proportion
## Segment 1 159 0.4622
## Segment 2 185 0.5378
##
## R² per Endogenous Construct:
## Segment 1 Segment 2 Global
## COMP 0.8043 0.5872 0.6309
## LIKE 0.6159 0.6614 0.5576
## CUSA 0.5348 0.2932 0.2919
## CUSL 0.6974 0.6084 0.5620
##
## Segment Path Coefficients:
##
## Segment 1 :
## From To Coefficient
## QUAL COMP 0.1765
## PERF COMP 0.5059
## CSOR COMP 0.2550
## ATTR COMP 0.0460
## QUAL LIKE -0.0668
## PERF LIKE 0.1745
## CSOR LIKE 0.3721
## ATTR LIKE 0.4025
## COMP CUSA 0.5583
## LIKE CUSA 0.2320
## COMP CUSL 0.1773
## LIKE CUSL -0.0389
## CUSA CUSL 0.7244
##
## Segment 2 :
## From To Coefficient
## QUAL COMP 0.7510
## PERF COMP 0.0283
## CSOR COMP -0.1526
## ATTR COMP 0.1267
## QUAL LIKE 0.7855
## PERF LIKE -0.0191
## CSOR LIKE 0.1165
## ATTR LIKE -0.0600
## COMP CUSA -0.3203
## LIKE CUSA 0.6817
## COMP CUSL -0.2490
## LIKE CUSL 0.7578
## CUSA CUSL 0.2309
summary(pos_k2)
## PLS-POS Analysis — Detailed Summary
## ======================================
## Segments: 2 | Observations: 344
## Objective (Sum R²): 4.8026
## Converged: Yes ( 68 iterations )
## Random starts: 10
## Start objectives: 4.8026, 4.5605, 4.6819, 4.7407, 4.5684, 4.4232, 4.7631, 4.5952, 4.721, 4.7688
##
## Segment Sizes:
## Segment 1: 159 (46.2%)
## Segment 2: 185 (53.8%)
##
## R² Comparison (Segment vs Global):
## Segment 1 Segment 2 Global
## COMP 0.8043 0.5872 0.6309
## LIKE 0.6159 0.6614 0.5576
## CUSA 0.5348 0.2932 0.2919
## CUSL 0.6974 0.6084 0.5620
##
## Path Coefficients per Segment:
## Path Global Seg.1 Seg.2
## QUAL -> COMP 0.4297 0.1765 0.7510
## PERF -> COMP 0.2955 0.5059 0.0283
## CSOR -> COMP 0.0589 0.2550 -0.1526
## ATTR -> COMP 0.0861 0.0460 0.1267
## QUAL -> LIKE 0.3800 -0.0668 0.7855
## PERF -> LIKE 0.1170 0.1745 -0.0191
## CSOR -> LIKE 0.1784 0.3721 0.1165
## ATTR -> LIKE 0.1671 0.4025 -0.0600
## COMP -> CUSA 0.1455 0.5583 -0.3203
## LIKE -> CUSA 0.4357 0.2320 0.6817
## COMP -> CUSL 0.0057 0.1773 -0.2490
## LIKE -> CUSL 0.3440 -0.0389 0.7578
## CUSA -> CUSL 0.5050 0.7244 0.2309
plot(pos_k2, type = "segments")
plot(pos_k2, type = "rsquared")
plot(pos_k2, type = "paths")
# Re-estimate a full seminr model on each POS segment's observations,
# then inspect segment 1 with the usual seminr summary.
seg_models <- pos_segments(pos_k2)
summary(seg_models[[1]])
##
## Results from package seminr (2.4.2)
##
## Path Coefficients:
## COMP LIKE CUSA CUSL
## R^2 0.804 0.616 0.535 0.697
## AdjR^2 0.799 0.606 0.529 0.692
## QUAL 0.177 -0.067 . .
## PERF 0.506 0.175 . .
## CSOR 0.255 0.372 . .
## ATTR 0.046 0.403 . .
## COMP . . 0.558 0.177
## LIKE . . 0.232 -0.039
## CUSA . . . 0.724
##
## Reliability:
## alpha rhoA rhoC AVE
## QUAL 0.885 1.000 0.896 0.522
## PERF 0.756 1.000 0.831 0.501
## CSOR 0.810 1.000 0.849 0.536
## ATTR 0.675 1.000 0.813 0.596
## COMP 0.823 0.829 0.894 0.738
## LIKE 0.828 0.844 0.897 0.743
## CUSA 1.000 1.000 1.000 1.000
## CUSL 0.846 0.847 0.907 0.765
##
## Alpha, rhoA, and rhoC should exceed 0.7 while AVE should exceed 0.5
# Compare PLS-POS solutions across K = 2..4 by objective and convergence.
pos_compare <- assess_pos_compare(
corp_pls,
K_range = 2:4,
nstart = 10,
max_iter = 100,
seed = 123
)
print(pos_compare)
## PLS-POS Comparison
## ==================
## K range: 2, 3, 4
##
## K Sum_R2 Avg_R2_per_segment Converged Iterations
## 2 4.802607 2.401303 TRUE 68
## 3 7.793681 2.597894 TRUE 85
## 4 10.758069 2.689517 FALSE 100
plot(pos_compare)
Tests whether a construct’s measurement model is consistent with a
reflective specification. With borrow = TRUE (default),
constructs with 2–3 indicators borrow from structurally connected
constructs so they can still be tested.
# CTA-PLS over all constructs: 5000 bootstrap samples; per the output,
# p-values are BH-corrected and small blocks borrow indicators from
# structurally connected constructs (borrow = TRUE is the default).
cta_result <- assess_cta(mobi_pls, nboot = 5000, seed = 123)
print(cta_result)
## Confirmatory Tetrad Analysis (CTA-PLS)
## =======================================
## Bootstrap samples: 5000 | Alpha: 0.05 | Correction: BH
##
## Construct Mode Indicators Tetrads Significant Verdict
## ----------------------------------------------------------------------------------------------------------------------
## Image Mode A (reflective) 5 10 0 Reflective supported
## Expectation Mode A (reflective) [borrowed from Image] 4 2 0 Reflective supported
## Value Mode A (reflective) [borrowed from Satisfaction] 4 1 0 Reflective supported
## Satisfaction Mode A (reflective) [borrowed from Image] 4 2 1 Reflective rejected
## Loyalty Mode A (reflective) [borrowed from Image] 4 2 0 Reflective supported
##
## Borrowing:
## Expectation: borrowed from Image (all pattern, 2 vanishing tetrad(s))
## Value: borrowed from Satisfaction (tau_1342 pattern, 1 vanishing tetrad(s))
## Satisfaction: borrowed from Image (all pattern, 2 vanishing tetrad(s))
## Loyalty: borrowed from Image (all pattern, 2 vanishing tetrad(s))
summary(cta_result)
## Confirmatory Tetrad Analysis (CTA-PLS) — Detailed Results
## ==========================================================
## Bootstrap samples: 5000 | Alpha: 0.05 | Correction: BH
##
## --- Image (Mode A (reflective)) ---
## Verdict: Reflective supported
##
## Tetrad Estimate T_Value
## s(IMAG1,IMAG2)s(IMAG3,IMAG4) - s(IMAG1,IMAG3)s(IMAG2,IMAG4) 0.9353 1.5287
## s(IMAG1,IMAG2)s(IMAG3,IMAG4) - s(IMAG1,IMAG4)s(IMAG2,IMAG3) 1.1614 1.8327
## s(IMAG1,IMAG2)s(IMAG3,IMAG5) - s(IMAG1,IMAG3)s(IMAG2,IMAG5) 0.5117 0.9215
## s(IMAG1,IMAG2)s(IMAG3,IMAG5) - s(IMAG1,IMAG5)s(IMAG2,IMAG3) 0.7169 1.4804
## s(IMAG1,IMAG2)s(IMAG4,IMAG5) - s(IMAG1,IMAG4)s(IMAG2,IMAG5) 0.2872 0.5121
## s(IMAG1,IMAG2)s(IMAG4,IMAG5) - s(IMAG1,IMAG5)s(IMAG2,IMAG4) 0.3176 0.6330
## s(IMAG1,IMAG3)s(IMAG4,IMAG5) - s(IMAG1,IMAG4)s(IMAG3,IMAG5) -0.3935 -1.1713
## s(IMAG1,IMAG3)s(IMAG4,IMAG5) - s(IMAG1,IMAG5)s(IMAG3,IMAG4) -0.6805 -1.7610
## s(IMAG2,IMAG3)s(IMAG4,IMAG5) - s(IMAG2,IMAG4)s(IMAG3,IMAG5) -0.4601 -1.4450
## s(IMAG2,IMAG3)s(IMAG4,IMAG5) - s(IMAG2,IMAG5)s(IMAG3,IMAG4) -0.6861 -1.5587
## Boot_Mean Boot_SD 2.5% CI 97.5% CI P_Value Adj_P Significant
## 0.9272 0.6118 -0.0974 2.2635 0.0792 0.1896
## 1.1520 0.6337 0.1238 2.5868 0.0280 0.1896
## 0.5037 0.5553 -0.5068 1.7029 0.3428 0.4285
## 0.7059 0.4842 -0.0768 1.7983 0.0884 0.1896
## 0.2872 0.5608 -0.7462 1.4528 0.6084 0.6084
## 0.3144 0.5017 -0.5746 1.4179 0.5352 0.5947
## -0.3829 0.3359 -1.0682 0.2580 0.2384 0.3406
## -0.6818 0.3864 -1.4993 0.0107 0.0560 0.1896
## -0.4524 0.3184 -1.1178 0.1307 0.1356 0.2260
## -0.6829 0.4402 -1.6298 0.1083 0.0948 0.1896
##
## --- Expectation (Mode A (reflective) [borrowed from Image]) ---
## Borrowed 2 vanishing tetrad(s) from Image (all)
## Verdict: Reflective supported
##
## Tetrad Estimate T_Value
## s(CUEX1,CUEX2)s(CUEX3,IMAG1) - s(CUEX1,CUEX3)s(CUEX2,IMAG1) 0.0937 0.2439
## s(CUEX1,CUEX2)s(CUEX3,IMAG1) - s(CUEX1,IMAG1)s(CUEX2,CUEX3) 0.4385 1.4817
## Boot_Mean Boot_SD 2.5% CI 97.5% CI P_Value Adj_P Significant
## 0.0870 0.3842 -0.6765 0.8310 0.8060 0.8060
## 0.4299 0.2959 -0.0921 1.0674 0.1060 0.2120
##
## --- Value (Mode A (reflective) [borrowed from Satisfaction]) ---
## Borrowed 1 vanishing tetrad(s) from Satisfaction (tau_1342)
## Verdict: Reflective supported
##
## Tetrad Estimate T_Value
## s(PERV1,CUSA1)s(PERV2,CUSA2) - s(PERV1,CUSA2)s(PERV2,CUSA1) 0.0377 0.1945
## Boot_Mean Boot_SD 2.5% CI 97.5% CI P_Value Adj_P Significant
## 0.0387 0.1938 -0.3445 0.4259 0.8368 0.8368
##
## --- Satisfaction (Mode A (reflective) [borrowed from Image]) ---
## Borrowed 2 vanishing tetrad(s) from Image (all)
## Verdict: Reflective rejected
##
## Tetrad Estimate T_Value
## s(CUSA1,CUSA2)s(CUSA3,IMAG1) - s(CUSA1,CUSA3)s(CUSA2,IMAG1) 0.6346 2.5061
## s(CUSA1,CUSA2)s(CUSA3,IMAG1) - s(CUSA1,IMAG1)s(CUSA2,CUSA3) 0.1103 0.3397
## Boot_Mean Boot_SD 2.5% CI 97.5% CI P_Value Adj_P Significant
## 0.6273 0.2532 0.1752 1.1605 0.0056 0.0112 *
## 0.1057 0.3248 -0.5275 0.7803 0.7520 0.7520
##
## --- Loyalty (Mode A (reflective) [borrowed from Image]) ---
## Borrowed 2 vanishing tetrad(s) from Image (all)
## Verdict: Reflective supported
##
## Tetrad Estimate T_Value
## s(CUSL1,CUSL2)s(CUSL3,IMAG1) - s(CUSL1,CUSL3)s(CUSL2,IMAG1) 0.2377 0.2280
## s(CUSL1,CUSL2)s(CUSL3,IMAG1) - s(CUSL1,IMAG1)s(CUSL2,CUSL3) -0.1880 -0.3163
## Boot_Mean Boot_SD 2.5% CI 97.5% CI P_Value Adj_P Significant
## 0.2216 1.0425 -1.8345 2.2893 0.8256 0.8256
## -0.1788 0.5946 -1.3537 0.9875 0.7544 0.8256
plot(cta_result)
# Without borrowing, only constructs with enough indicators are testable;
# the others are reported as skipped.
cta_no_borrow <- assess_cta(mobi_pls, nboot = 5000, borrow = FALSE, seed = 123)
print(cta_no_borrow)
## Confirmatory Tetrad Analysis (CTA-PLS)
## =======================================
## Bootstrap samples: 5000 | Alpha: 0.05 | Correction: BH
##
## Construct Mode Indicators Tetrads Significant Verdict
## --------------------------------------------------------------------------------------
## Image Mode A (reflective) 5 10 0 Reflective supported
##
## Skipped: Expectation, Value, Satisfaction, Loyalty
Does the mediator actually improve out-of-sample prediction? Compares Direct-Antecedent (DA) and Earliest-Antecedent (EA) predictions on the isolated mediation sub-model (Danks, 2021). Mediation paths are auto-detected.
# Use a minimal mediation model: Image -> Satisfaction -> Loyalty (+ direct)
pcm_mm <- constructs(
composite("Image", multi_items("IMAG", 1:5)),
composite("Satisfaction", multi_items("CUSA", 1:3)),
composite("Loyalty", multi_items("CUSL", 1:3))
)
# Both the indirect and the direct path, so the mediation triplet is
# auto-detected by assess_pcm().
pcm_sm <- relationships(
paths(from = "Image", to = "Satisfaction"),
paths(from = "Satisfaction", to = "Loyalty"),
paths(from = "Image", to = "Loyalty")
)
pcm_pls <- estimate_pls(mobi, pcm_mm, pcm_sm)
# PCM: compare DA vs EA out-of-sample predictions, 10-fold CV x 10 reps.
pcm_result <- assess_pcm(
pcm_pls,
target = "Loyalty",
noFolds = 10,
reps = 10
)
pcm_result
## Predictive Contribution of the Mediator (PCM)
## ==============================================
## Target: Loyalty
## Cross-validation: 10 folds, 10 reps
## Mediation paths: 1
##
## Image -> Satisfaction -> Loyalty
## Avg PCM (RMSE): 0.0473 [Weak]
## Avg PCM (MAE): 0.0618 [Moderate]
summary(pcm_result)
## Predictive Contribution of the Mediator (PCM)
## ==============================================
## Target: Loyalty
## Cross-validation: 10 folds, 10 reps
##
## Mediation: Image -> Satisfaction -> Loyalty
## ------------------------------------------------------------
## Indicator RMSE_DA RMSE_EA PCM_RMSE MAE_DA MAE_EA PCM_MAE Conclusion
## CUSL1 2.3675 2.4142 0.0194 1.7538 1.8101 0.0311 Weak
## CUSL2 2.8289 2.8523 0.0082 2.3983 2.4078 0.0039 Weak
## CUSL3 1.6664 1.8812 0.1142 1.1793 1.3878 0.1503 Strong
##
## PCM thresholds: < 0 Negative | 0-0.05 Weak | 0.05-0.10 Moderate | > 0.10 Strong
## Reference: Danks (2021), The DATA BASE for Advances in IS, 52(SI), 24-42.
plot(pcm_result)
# PCM on the full MOBI model: all three mediation paths through
# Satisfaction are detected and assessed.
pcm_full <- assess_pcm(
mobi_pls,
target = "Loyalty",
noFolds = 10,
reps = 10
)
pcm_full
## Predictive Contribution of the Mediator (PCM)
## ==============================================
## Target: Loyalty
## Cross-validation: 10 folds, 10 reps
## Mediation paths: 3
##
## Image -> Satisfaction -> Loyalty
## Avg PCM (RMSE): 0.0457 [Weak]
## Avg PCM (MAE): 0.0601 [Moderate]
##
## Expectation -> Satisfaction -> Loyalty
## Avg PCM (RMSE): 0.0852 [Moderate]
## Avg PCM (MAE): 0.1113 [Strong]
##
## Value -> Satisfaction -> Loyalty
## Avg PCM (RMSE): 0.0516 [Moderate]
## Avg PCM (MAE): 0.0744 [Moderate]
# Detailed summary: per-indicator PCM values (direct-antecedents vs.
# earliest-antecedents predictions) for each mediation path. Expected output:
summary(pcm_full)
## Predictive Contribution of the Mediator (PCM)
## ==============================================
## Target: Loyalty
## Cross-validation: 10 folds, 10 reps
##
## Mediation: Image -> Satisfaction -> Loyalty
## ------------------------------------------------------------
## Indicator RMSE_DA RMSE_EA PCM_RMSE MAE_DA MAE_EA PCM_MAE Conclusion
## CUSL1 2.3644 2.4148 0.0209 1.7485 1.8084 0.0331 Weak
## CUSL2 2.8288 2.8367 0.0028 2.3972 2.3922 -0.0021 Weak
## CUSL3 1.6715 1.8854 0.1134 1.1792 1.3860 0.1492 Strong
##
## Mediation: Expectation -> Satisfaction -> Loyalty
## ------------------------------------------------------------
## Indicator RMSE_DA RMSE_EA PCM_RMSE MAE_DA MAE_EA PCM_MAE Conclusion
## CUSL1 2.3848 2.5764 0.0744 1.7836 2.0174 0.1159 Moderate
## CUSL2 2.8627 2.8371 -0.0090 2.4217 2.3934 -0.0118 Negative
## CUSL3 1.6961 2.0948 0.1904 1.2065 1.5663 0.2297 Strong
##
## Mediation: Value -> Satisfaction -> Loyalty
## ------------------------------------------------------------
## Indicator RMSE_DA RMSE_EA PCM_RMSE MAE_DA MAE_EA PCM_MAE Conclusion
## CUSL1 2.3649 2.4422 0.0316 1.7500 1.8357 0.0467 Weak
## CUSL2 2.8470 2.8298 -0.0061 2.4116 2.3942 -0.0073 Negative
## CUSL3 1.6854 1.9353 0.1292 1.1853 1.4524 0.1839 Strong
##
## PCM thresholds: < 0 Negative | 0-0.05 Weak | 0.05-0.10 Moderate | > 0.10 Strong
## Reference: Danks (2021), The DATA BASE for Advances in IS, 52(SI), 24-42.
plot(pcm_full)
# Alternative structural model: the direct COMP/LIKE -> CUSL paths are
# dropped, so CUSL is reached only through CUSA.
alt_driver_paths   <- paths(from = c("QUAL", "PERF", "CSOR", "ATTR"),
                            to = c("COMP", "LIKE"))
alt_attitude_paths <- paths(from = c("COMP", "LIKE"), to = "CUSA")
alt_loyalty_path   <- paths(from = "CUSA", to = "CUSL")
alt_sm <- relationships(alt_driver_paths, alt_attitude_paths, alt_loyalty_path)
# Estimate the alternative model with the same data and measurement model;
# "-99" codes missing responses, replaced by indicator means before estimation.
corp_pls_alt <- estimate_pls(data = corp_rep_data,
                             measurement_model = corp_rep_mm,
                             structural_model = alt_sm,
                             missing_value = "-99",
                             missing = mean_replacement)
# CVPAT assessment of the focal model: two-sided bootstrap test (2000 draws,
# fixed seed) of PLS out-of-sample loss against the LM and IA benchmarks,
# with direct-antecedents prediction and repeated 10-fold cross-validation.
cvpat_focal <- assess_cvpat(corp_pls,
                            technique = predict_DA,
                            noFolds = 10, reps = 10,
                            testtype = "two.sided",
                            nboot = 2000, seed = 123)
# PLS vs. linear-model (LM) benchmark: negative Diff means the PLS model has
# lower (better) average out-of-sample loss. Expected output:
print(cvpat_focal$CVPAT_compare_LM, digits = 3)
## PLS Loss LM Loss Diff Boot T value Boot P Value
## COMP 1.198 1.232 -0.034 1.156 0.249
## LIKE 1.923 2.084 -0.162 4.014 0.000
## CUSA 0.988 0.973 0.015 -0.612 0.541
## CUSL 1.562 1.621 -0.058 3.510 0.001
## Overall 1.418 1.477 -0.060 4.177 0.000
##
## CVPAT as per Sharma et al. (2023).
# PLS vs. indicator-average (IA) benchmark: same comparison against the naive
# indicator-mean predictor. Expected output:
print(cvpat_focal$CVPAT_compare_IA, digits = 3)
## PLS Loss IA Loss Diff Boot T value Boot P Value
## COMP 1.198 2.023 -0.825 8.578 0.000
## LIKE 1.923 3.103 -1.180 8.249 0.000
## CUSA 0.988 1.374 -0.386 5.070 0.000
## CUSL 1.562 2.663 -1.100 7.542 0.000
## Overall 1.418 2.290 -0.873 10.269 0.000
##
## CVPAT as per Sharma et al. (2023).
# CVPAT model comparison: bootstrap test of the out-of-sample loss difference
# between the focal (established) and alternative models, using the same
# cross-validation and bootstrap settings as the focal assessment above.
# cores = NULL lets the implementation pick the degree of parallelism.
cvpat_cmp <- assess_cvpat_compare(established_model = corp_pls,
                                  alternative_model = corp_pls_alt,
                                  technique = predict_DA,
                                  noFolds = 10, reps = 10,
                                  testtype = "two.sided",
                                  nboot = 2000, seed = 123,
                                  cores = NULL)
# Loss comparison per endogenous construct and overall; negative Diff favours
# the base (focal) model. Expected output:
print(cvpat_cmp, digits = 3)
## Base Model Loss Alt Model Loss Diff Boot T value Boot P Value
## COMP 1.198 1.195 0.003 -0.460 0.645
## LIKE 1.923 1.933 -0.010 0.883 0.378
## CUSA 0.988 0.992 -0.004 0.809 0.419
## CUSL 1.562 1.715 -0.152 3.286 0.001
## Overall 1.418 1.459 -0.041 3.293 0.001
##
## CVPAT as per Sharma, Liengaard, Hair, Sarstedt, & Ringle, (2023).
## Both models under comparison have identical endogenous constructs with identical measurement models.
## Purely exogenous constructs can differ in regards to their relationships with both nomological
## partners and measurement indicators.
congruence_test() tests whether the congruence coefficients between the original PLS composite weights and their bootstrapped counterparts differ significantly from 1 — that is, whether the composites are estimated stably across bootstrap samples.
# Bootstrap congruence test on the mobi model: 2000 bootstrap draws with a
# fixed seed, 5% significance level, and the ideal congruence threshold of 1.
cong_result <- congruence_test(mobi_pls,
                               threshold = 1, alpha = 0.05,
                               nboot = 2000, seed = 123)
# Per-relationship congruence estimates with bootstrap SDs, T statistics, and
# percentile confidence intervals. Expected output:
print(cong_result)
## $results
## Original Est. Diff Bootstrap SD T Stat. 2.5% CI
## Image -> Expectation 0.947 0.053 0.024 2.222 0.888
## Image -> Value 0.924 0.076 0.028 2.700 0.852
## Image -> Satisfaction 0.974 0.026 0.016 1.680 0.932
## Image -> Loyalty 0.910 0.090 0.037 2.453 0.822
## Expectation -> Value 0.987 0.013 0.005 2.290 0.973
## Expectation -> Satisfaction 0.862 0.138 0.053 2.601 0.731
## Expectation -> Loyalty 0.947 0.053 0.025 2.122 0.883
## Value -> Satisfaction 0.937 0.063 0.027 2.355 0.875
## Value -> Loyalty 0.947 0.053 0.023 2.336 0.888
## Satisfaction -> Loyalty 0.989 0.011 0.013 0.846 0.950
## 97.5% CI
## Image -> Expectation 0.980
## Image -> Value 0.962
## Image -> Satisfaction 0.992
## Image -> Loyalty 0.962
## Expectation -> Value 0.995
## Expectation -> Satisfaction 0.939
## Expectation -> Loyalty 0.978
## Value -> Satisfaction 0.976
## Value -> Loyalty 0.977
## Satisfaction -> Loyalty 0.999
# Default list summary of the result object (a single "results" table).
summary(cong_result)
## Length Class Mode
## results 60 table_output numeric
# Record the session for reproducibility: R version, platform, locale, and
# the exact package versions used to produce the outputs above.
sessionInfo()
## R version 4.5.1 (2025-06-13)
## Platform: aarch64-apple-darwin20
## Running under: macOS Tahoe 26.3.1
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.5-arm64/Resources/lib/libRlapack.dylib; LAPACK version 3.12.1
##
## locale:
## [1] en_IE.UTF-8/en_IE.UTF-8/en_IE.UTF-8/C/en_IE.UTF-8/en_IE.UTF-8
##
## time zone: Europe/Dublin
## tzcode source: internal
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] seminrExtras_1.0.0 seminr_2.4.2
##
## loaded via a namespace (and not attached):
## [1] digest_0.6.39 R6_2.6.1 fastmap_1.2.0 xfun_0.56
## [5] rpart_4.1.24 cachem_1.1.0 knitr_1.51 htmltools_0.5.9
## [9] rmarkdown_2.30 lifecycle_1.0.5 cli_3.6.5 sass_0.4.10
## [13] jquerylib_0.1.4 compiler_4.5.1 tools_4.5.1 evaluate_1.0.5
## [17] bslib_0.10.0 yaml_2.3.12 rlang_1.1.7 jsonlite_2.0.0