diff --git a/.lintr b/.lintr
index b0e8abefe..c575e7c95 100644
--- a/.lintr
+++ b/.lintr
@@ -1,7 +1,7 @@
linters: linters_with_defaults(
absolute_path_linter = NULL,
commented_code_linter = NULL,
- cyclocomp_linter = cyclocomp_linter(25),
+ cyclocomp_linter = cyclocomp_linter(125),
extraction_operator_linter = NULL,
implicit_integer_linter = NULL,
line_length_linter(120),
@@ -9,6 +9,7 @@ linters: linters_with_defaults(
nonportable_path_linter = NULL,
object_name_linter = NULL,
object_length_linter(50),
+ library_call_linter = NULL,
object_usage_linter = NULL,
todo_comment_linter = NULL,
undesirable_function_linter(c("mapply" = NA, "sapply" = NA, "setwd" = NA)),
diff --git a/CRAN-SUBMISSION b/CRAN-SUBMISSION
index 6e4648664..521ab7ac2 100644
--- a/CRAN-SUBMISSION
+++ b/CRAN-SUBMISSION
@@ -1,3 +1,3 @@
-Version: 0.21.6
-Date: 2024-03-17 15:06:23 UTC
-SHA: fa0a2223f09a2b641762866e202cd54ddc65b3b3
+Version: 0.22.2
+Date: 2024-09-03 21:16:57 UTC
+SHA: 367dcec5489ffa53d5b768af6cfaadf1afe69666
diff --git a/DESCRIPTION b/DESCRIPTION
index 2ab5124f9..950418f45 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,18 +1,18 @@
Type: Package
Package: parameters
Title: Processing of Model Parameters
-Version: 0.21.6.7
+Version: 0.22.2.18
Authors@R:
c(person(given = "Daniel",
family = "Lüdecke",
role = c("aut", "cre"),
email = "d.luedecke@uke.de",
- comment = c(ORCID = "0000-0002-8895-3206", Twitter = "@strengejacke")),
+ comment = c(ORCID = "0000-0002-8895-3206")),
person(given = "Dominique",
family = "Makowski",
role = "aut",
email = "dom.makowski@gmail.com",
- comment = c(ORCID = "0000-0001-5375-9967", Twitter = "@Dom_Makowski")),
+ comment = c(ORCID = "0000-0001-5375-9967")),
person(given = "Mattan S.",
family = "Ben-Shachar",
role = "aut",
@@ -22,7 +22,7 @@ Authors@R:
family = "Patil",
role = "aut",
email = "patilindrajeet.science@gmail.com",
- comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@patilindrajeets")),
+ comment = c(ORCID = "0000-0003-1995-6531")),
person(given = "Søren",
family = "Højsgaard",
role = "aut",
@@ -31,7 +31,7 @@ Authors@R:
family = "Wiernik",
role = "aut",
email = "brenton@wiernik.org",
- comment = c(ORCID = "0000-0001-9560-6336", Twitter = "@bmwiernik")),
+ comment = c(ORCID = "0000-0001-9560-6336")),
person(given = "Zen J.",
family = "Lau",
role = "ctb",
@@ -40,12 +40,12 @@ Authors@R:
family = "Arel-Bundock",
role = "ctb",
email = "vincent.arel-bundock@umontreal.ca",
- comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@vincentab")),
+ comment = c(ORCID = "0000-0003-2042-7063")),
person(given = "Jeffrey",
family = "Girard",
role = "ctb",
email = "me@jmgirard.com",
- comment = c(ORCID = "0000-0002-7359-3746", Twitter = "@jeffreymgirard")),
+ comment = c(ORCID = "0000-0002-7359-3746")),
person(given = "Christina",
family = "Maimone",
role = "rev",
@@ -58,7 +58,7 @@ Authors@R:
family = "Morrison",
role = "ctb",
email = "dmorrison01@ucla.edu",
- comment = c(ORCID = "0000-0002-7195-830X", Twitter = "@demstats1")),
+ comment = c(ORCID = "0000-0002-7195-830X")),
person(given = "Joseph",
family = "Luchman",
role = "ctb",
@@ -79,9 +79,9 @@ BugReports: https://github.com/easystats/parameters/issues
Depends:
R (>= 3.6)
Imports:
- bayestestR (>= 0.13.2),
- datawizard (>= 0.10.0),
- insight (>= 0.19.10),
+ bayestestR (>= 0.14.0),
+ datawizard (>= 0.13.0),
+ insight (>= 0.20.5),
graphics,
methods,
stats,
@@ -101,6 +101,7 @@ Suggests:
brglm2,
brms,
broom,
+ broom.mixed,
cAIC4,
car,
carData,
@@ -108,10 +109,12 @@ Suggests:
ClassDiscovery,
clubSandwich,
cluster,
+ cobalt,
coda,
coxme,
cplm,
dbscan,
+ distributional,
domir (>= 0.2.0),
drc,
DRR,
@@ -133,11 +136,12 @@ Suggests:
ggeffects (>= 1.3.2),
ggplot2,
GLMMadaptive,
- glmmTMB,
+ glmmTMB (>= 1.1.10),
+ glmtoolbox,
GPArotation,
gt,
haven,
- httr,
+ httr2,
Hmisc,
ivreg,
knitr,
@@ -151,7 +155,7 @@ Suggests:
logspline,
lqmm,
M3C,
- marginaleffects (>= 0.16.0),
+ marginaleffects (>= 0.20.1),
MASS,
Matrix,
mclogit,
@@ -177,7 +181,7 @@ Suggests:
panelr,
pbkrtest,
PCDimension,
- performance (>= 0.10.8),
+ performance (>= 0.12.0),
plm,
PMCMRplus,
poorman,
@@ -188,8 +192,10 @@ Suggests:
pvclust,
quantreg,
randomForest,
+ RcppEigen,
rmarkdown,
rms,
+ rstan,
rstanarm,
sandwich,
see (>= 0.8.1),
@@ -197,25 +203,25 @@ Suggests:
sparsepca,
survey,
survival,
+ svylme,
testthat (>= 3.2.1),
tidyselect,
tinytable (>= 0.1.0),
TMB,
truncreg,
+ vdiffr,
VGAM,
+ WeightIt (>= 1.2.0),
withr,
WRS2
VignetteBuilder:
knitr
Encoding: UTF-8
Language: en-US
-RoxygenNote: 7.3.1
+RoxygenNote: 7.3.2
Roxygen: list(markdown = TRUE)
Config/testthat/edition: 3
Config/testthat/parallel: true
-Config/Needs/website:
- rstudio/bslib,
- r-lib/pkgdown,
- easystats/easystatstemplate
+Config/Needs/website: easystats/easystatstemplate
Config/rcmdcheck/ignore-inconsequential-notes: true
-Remotes: easystats/insight
+Remotes: easystats/bayestestR#678
diff --git a/NAMESPACE b/NAMESPACE
index b4974aef4..172919606 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -42,6 +42,7 @@ S3method(ci,gamm4)
S3method(ci,geeglm)
S3method(ci,glht)
S3method(ci,glm)
+S3method(ci,glmgee)
S3method(ci,glmm)
S3method(ci,glmmTMB)
S3method(ci,hglm)
@@ -81,11 +82,13 @@ S3method(ci,mmrm_fit)
S3method(ci,mmrm_tmb)
S3method(ci,model_fit)
S3method(ci,multinom)
+S3method(ci,multinom_weightit)
S3method(ci,negbin)
S3method(ci,negbinirr)
S3method(ci,negbinmfx)
S3method(ci,nestedLogit)
S3method(ci,nlrq)
+S3method(ci,ordinal_weightit)
S3method(ci,parameters_standardized)
S3method(ci,pgmm)
S3method(ci,phyloglm)
@@ -117,61 +120,6 @@ S3method(convert_efa_to_cfa,fa)
S3method(convert_efa_to_cfa,fa.ci)
S3method(convert_efa_to_cfa,parameters_efa)
S3method(convert_efa_to_cfa,parameters_pca)
-S3method(degrees_of_freedom,BBmm)
-S3method(degrees_of_freedom,BBreg)
-S3method(degrees_of_freedom,betamfx)
-S3method(degrees_of_freedom,betaor)
-S3method(degrees_of_freedom,bfsl)
-S3method(degrees_of_freedom,bigglm)
-S3method(degrees_of_freedom,biglm)
-S3method(degrees_of_freedom,cgam)
-S3method(degrees_of_freedom,cgamm)
-S3method(degrees_of_freedom,coeftest)
-S3method(degrees_of_freedom,complmrob)
-S3method(degrees_of_freedom,default)
-S3method(degrees_of_freedom,emmGrid)
-S3method(degrees_of_freedom,emm_list)
-S3method(degrees_of_freedom,fixest)
-S3method(degrees_of_freedom,fixest_multi)
-S3method(degrees_of_freedom,garch)
-S3method(degrees_of_freedom,glht)
-S3method(degrees_of_freedom,gls)
-S3method(degrees_of_freedom,hglm)
-S3method(degrees_of_freedom,ivFixed)
-S3method(degrees_of_freedom,ivprobit)
-S3method(degrees_of_freedom,logitmfx)
-S3method(degrees_of_freedom,logitor)
-S3method(degrees_of_freedom,lqm)
-S3method(degrees_of_freedom,lqmm)
-S3method(degrees_of_freedom,mediate)
-S3method(degrees_of_freedom,merModList)
-S3method(degrees_of_freedom,mhurdle)
-S3method(degrees_of_freedom,mipo)
-S3method(degrees_of_freedom,mira)
-S3method(degrees_of_freedom,mmrm)
-S3method(degrees_of_freedom,mmrm_fit)
-S3method(degrees_of_freedom,mmrm_tmb)
-S3method(degrees_of_freedom,model_fit)
-S3method(degrees_of_freedom,multinom)
-S3method(degrees_of_freedom,negbinirr)
-S3method(degrees_of_freedom,negbinmfx)
-S3method(degrees_of_freedom,nestedLogit)
-S3method(degrees_of_freedom,nlrq)
-S3method(degrees_of_freedom,nnet)
-S3method(degrees_of_freedom,plm)
-S3method(degrees_of_freedom,poissonirr)
-S3method(degrees_of_freedom,poissonmfx)
-S3method(degrees_of_freedom,probitmfx)
-S3method(degrees_of_freedom,rlm)
-S3method(degrees_of_freedom,rq)
-S3method(degrees_of_freedom,rqs)
-S3method(degrees_of_freedom,rqss)
-S3method(degrees_of_freedom,selection)
-S3method(degrees_of_freedom,serp)
-S3method(degrees_of_freedom,summary.lm)
-S3method(degrees_of_freedom,systemfit)
-S3method(degrees_of_freedom,truncreg)
-S3method(degrees_of_freedom,vgam)
S3method(display,compare_parameters)
S3method(display,equivalence_test_lm)
S3method(display,parameters_brms_meta)
@@ -197,6 +145,7 @@ S3method(equivalence_test,lm)
S3method(equivalence_test,lme)
S3method(equivalence_test,merMod)
S3method(equivalence_test,mixed)
+S3method(equivalence_test,parameters_model)
S3method(equivalence_test,parameters_simulate_model)
S3method(equivalence_test,rma)
S3method(equivalence_test,wbm)
@@ -347,6 +296,7 @@ S3method(model_parameters,mmrm_fit)
S3method(model_parameters,mmrm_tmb)
S3method(model_parameters,model_fit)
S3method(model_parameters,multinom)
+S3method(model_parameters,multinom_weightit)
S3method(model_parameters,mvord)
S3method(model_parameters,negbin)
S3method(model_parameters,negbinirr)
@@ -354,6 +304,7 @@ S3method(model_parameters,negbinmfx)
S3method(model_parameters,nestedLogit)
S3method(model_parameters,omega)
S3method(model_parameters,onesampb)
+S3method(model_parameters,ordinal_weightit)
S3method(model_parameters,osrt)
S3method(model_parameters,pairwise.htest)
S3method(model_parameters,pam)
@@ -381,6 +332,7 @@ S3method(model_parameters,stanfit)
S3method(model_parameters,stanmvreg)
S3method(model_parameters,stanreg)
S3method(model_parameters,summary_emm)
+S3method(model_parameters,svy2lme)
S3method(model_parameters,svyglm)
S3method(model_parameters,svytable)
S3method(model_parameters,systemfit)
@@ -399,6 +351,39 @@ S3method(model_parameters,zeroinfl)
S3method(model_parameters,zoo)
S3method(p_calibrate,default)
S3method(p_calibrate,numeric)
+S3method(p_direction,coxph)
+S3method(p_direction,feis)
+S3method(p_direction,felm)
+S3method(p_direction,gee)
+S3method(p_direction,glm)
+S3method(p_direction,glmmTMB)
+S3method(p_direction,gls)
+S3method(p_direction,hurdle)
+S3method(p_direction,lm)
+S3method(p_direction,lme)
+S3method(p_direction,merMod)
+S3method(p_direction,mixed)
+S3method(p_direction,rma)
+S3method(p_direction,svyglm)
+S3method(p_direction,wbm)
+S3method(p_direction,zeroinfl)
+S3method(p_significance,coxph)
+S3method(p_significance,feis)
+S3method(p_significance,felm)
+S3method(p_significance,gee)
+S3method(p_significance,glm)
+S3method(p_significance,glmmTMB)
+S3method(p_significance,gls)
+S3method(p_significance,hurdle)
+S3method(p_significance,lm)
+S3method(p_significance,lme)
+S3method(p_significance,merMod)
+S3method(p_significance,mixed)
+S3method(p_significance,parameters_model)
+S3method(p_significance,rma)
+S3method(p_significance,svyglm)
+S3method(p_significance,wbm)
+S3method(p_significance,zeroinfl)
S3method(p_value,BBmm)
S3method(p_value,BBreg)
S3method(p_value,BFBayesFactor)
@@ -462,6 +447,7 @@ S3method(p_value,gee)
S3method(p_value,geeglm)
S3method(p_value,glht)
S3method(p_value,glimML)
+S3method(p_value,glmgee)
S3method(p_value,glmm)
S3method(p_value,glmx)
S3method(p_value,gls)
@@ -502,6 +488,7 @@ S3method(p_value,mmrm_fit)
S3method(p_value,mmrm_tmb)
S3method(p_value,model_fit)
S3method(p_value,multinom)
+S3method(p_value,multinom_weightit)
S3method(p_value,mvord)
S3method(p_value,negbin)
S3method(p_value,negbinirr)
@@ -510,6 +497,7 @@ S3method(p_value,nestedLogit)
S3method(p_value,nlrq)
S3method(p_value,numeric)
S3method(p_value,ols)
+S3method(p_value,ordinal_weightit)
S3method(p_value,pggls)
S3method(p_value,pglm)
S3method(p_value,plm)
@@ -534,6 +522,7 @@ S3method(p_value,speedlm)
S3method(p_value,stanreg)
S3method(p_value,summary.lm)
S3method(p_value,survreg)
+S3method(p_value,svy2lme)
S3method(p_value,svyglm)
S3method(p_value,svyglm.nb)
S3method(p_value,svyglm.zip)
@@ -592,6 +581,8 @@ S3method(print,n_clusters_hclust)
S3method(print,n_clusters_silhouette)
S3method(print,n_factors)
S3method(print,p_calibrate)
+S3method(print,p_direction_lm)
+S3method(print,p_significance_lm)
S3method(print,parameters_brms_meta)
S3method(print,parameters_clusters)
S3method(print,parameters_da)
@@ -718,6 +709,7 @@ S3method(simulate_model,zeroinfl)
S3method(simulate_parameters,default)
S3method(simulate_parameters,glmmTMB)
S3method(simulate_parameters,hurdle)
+S3method(simulate_parameters,mblogit)
S3method(simulate_parameters,mlm)
S3method(simulate_parameters,multinom)
S3method(simulate_parameters,nestedLogit)
@@ -792,6 +784,7 @@ S3method(standard_error,gee)
S3method(standard_error,geeglm)
S3method(standard_error,glht)
S3method(standard_error,glimML)
+S3method(standard_error,glmgee)
S3method(standard_error,glmm)
S3method(standard_error,glmmTMB)
S3method(standard_error,glmx)
@@ -837,6 +830,7 @@ S3method(standard_error,mmrm_fit)
S3method(standard_error,mmrm_tmb)
S3method(standard_error,model_fit)
S3method(standard_error,multinom)
+S3method(standard_error,multinom_weightit)
S3method(standard_error,mvord)
S3method(standard_error,mvstanreg)
S3method(standard_error,negbin)
@@ -846,6 +840,7 @@ S3method(standard_error,nestedLogit)
S3method(standard_error,nlrq)
S3method(standard_error,numeric)
S3method(standard_error,ols)
+S3method(standard_error,ordinal_weightit)
S3method(standard_error,parameters_kurtosis)
S3method(standard_error,parameters_skewness)
S3method(standard_error,parameters_standardized)
@@ -869,6 +864,7 @@ S3method(standard_error,sem)
S3method(standard_error,stanreg)
S3method(standard_error,summary.lm)
S3method(standard_error,survreg)
+S3method(standard_error,svy2lme)
S3method(standard_error,svyglm)
S3method(standard_error,svyglm.nb)
S3method(standard_error,svyglm.zip)
@@ -954,7 +950,9 @@ export(n_components)
export(n_factors)
export(n_parameters)
export(p_calibrate)
+export(p_direction)
export(p_function)
+export(p_significance)
export(p_value)
export(p_value_betwithin)
export(p_value_kenward)
@@ -992,6 +990,8 @@ export(supported_models)
export(visualisation_recipe)
importFrom(bayestestR,ci)
importFrom(bayestestR,equivalence_test)
+importFrom(bayestestR,p_direction)
+importFrom(bayestestR,p_significance)
importFrom(datawizard,demean)
importFrom(datawizard,describe_distribution)
importFrom(datawizard,kurtosis)
diff --git a/NEWS.md b/NEWS.md
index a4e219d06..397bb62c8 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,5 +1,122 @@
+# parameters 0.23.0
+
+## Breaking Changes
+
+* Argument `summary` in `model_parameters()` is now deprecated. Please use
+ `include_info` instead.
+
+* Changed output style for the included additional information on model formula,
+ sigma and R2 when printing model parameters. This information now also includes
+ the RMSE.
+
+## Changes
+
+* Used more accurate analytic approach to calculate normal distributions for
+ the SGPV in `equivalence_test()` and used in `p_significance()`.
+
+* Added `p_direction()` methods for frequentist models. This is a convenient
+ way to test the direction of the effect, which formerly was already (and still
+ is) possible with `pd = TRUE` in `model_parameters()`.
+
+* `p_function()`, `p_significance()` and `equivalence_test()` get a `vcov` and
+ `vcov_args` argument, so that results can be based on robust standard errors
+ and confidence intervals.
+
+* `equivalence_test()` and `p_significance()` work with objects returned by
+ `model_parameters()`.
+
+* `pool_parameters()` now better deals with models with multiple components
+ (e.g. zero-inflation or dispersion).
+
+* Revision / enhancement of some documentation.
+
+* Updated *glmmTMB* methods to work with the latest version of the package.
+
+* Improved printing for `simulate_parameters()` for models from package *mclogit*.
+
+# parameters 0.22.2
+
+## New supported models
+
+* Support for models `glm_weightit`, `multinom_weightit` and `ordinal_weightit`
+ from package *WeightIt*.
+
+## Changes
+
+* Added `p_significance()` methods for frequentist models.
+
+* Methods for `degrees_of_freedom()` have been removed. `degrees_of_freedom()`
+ now calls `insight::get_df()`.
+
+* `model_parameters()` for data frames and `draws` objects from package
+ *posterior* also gets an `exponentiate` argument.
+
+## Bug fixes
+
+* Fixed issue with warning for spuriously high coefficients for Stan-models
+ (non-Gaussian).
+
+# parameters 0.22.1
+
+## Breaking changes
+
+* Revised calculation of the second generation p-value (SGPV) in `equivalence_test()`,
+ which should now be more accurate related to the proportion of the interval
+ that falls inside the ROPE. Formerly, the confidence interval was simply treated
+ as uniformly distributed when calculating the SGPV, now the interval is assumed
+ to be normally distributed.
+
+## New supported models
+
+* Support for `svy2lme` models from package *svylme*.
+
+## Changes
+
+* `standardize_parameters()` now also prettifies labels of factors.
+
+## Bug fixes
+
+* Fixed issue with `equivalence_test()` when ROPE range was not symmetrically
+ centered around zero (e.g., `range = c(-99, 0.1)`).
+
+* `model_parameters()` for `anova()` from mixed models now also includes the
+ denominator degrees of freedom in the output (`df_error`).
+
+* `print(..., pretty_names = "labels")` for tobit-models from package *AER* now
+ include value labels, if available.
+
+* Patch release, to ensure that performance runs with older version of datawizard
+ on Mac OS X with R (old-release).
+
+# parameters 0.22.0
+
+## Breaking changes
+
+* Deprecated arguments in `model_parameters()` for `htest`, `aov` and
+ `BFBayesFactor` objects were removed.
+
+* Argument `effectsize_type` is deprecated. Please use `es_type` now. This change
+ was necessary to avoid conflicts with partial matching of argument names (here:
+ `effects`).
+
+## New supported models
+
+* Support for objects from `stats::Box.test()`.
+
+* Support for `glmgee` models from package *glmtoolbox*.
+
+## Bug fixes
+
+* Fixed edge case in `predict()` for `factor_analysis()`.
+
+* Fixed wrong ORCID in `DESCRIPTION`.
+
# parameters 0.21.7
+## Changes
+
+* Fixed issues related to latest release from _marginaleffects_.
+
## Bug fixes
* Fixes issue in `compare_parameters()` for models from package *blme*.
@@ -170,7 +287,7 @@
* `as.data.frame` methods for extracting posterior draws via `bootstrap_model()`
have been retired. Instead, directly using `bootstrap_model()` is recommended.
-
+
## Changes to functions
* `equivalence_test()` gets a method for `ggeffects` objects from package
@@ -322,7 +439,7 @@
* Following functions were moved from package *parameters* to *performance*:
`check_sphericity_bartlett()`, `check_kmo()`, `check_factorstructure()` and
`check_clusterstructure()`.
-
+
## Changes to functions
* Added `sparse` option to `principal_components()` for sparse PCA.
diff --git a/R/1_model_parameters.R b/R/1_model_parameters.R
index 991a3704d..ab2c75968 100644
--- a/R/1_model_parameters.R
+++ b/R/1_model_parameters.R
@@ -41,10 +41,10 @@
#' the number of digits for the output. If `s_value = TRUE`, the p-value will
#' be replaced by the S-value in the output (cf. _Rafi and Greenland 2020_).
#' `pd` adds an additional column with the _probability of direction_ (see
-#' [bayestestR::p_direction()] for details). `groups` can be used to group
+#' [`bayestestR::p_direction()`] for details). `groups` can be used to group
#' coefficients. It will be passed to the print-method, or can directly be used
-#' in `print()`, see documentation in [print.parameters_model()]. Furthermore,
-#' see 'Examples' in [model_parameters.default()]. For developers, whose
+#' in `print()`, see documentation in [`print.parameters_model()`]. Furthermore,
+#' see 'Examples' in [`model_parameters.default()`]. For developers, whose
#' interest mainly is to get a "tidy" data frame of model summaries, it is
#' recommended to set `pretty_names = FALSE` to speed up computation of the
#' summary table.
@@ -74,10 +74,7 @@
#' packages or other software packages (like SPSS). To mimic behaviour of SPSS
#' or packages such as **lm.beta**, use `standardize = "basic"`.
#'
-#' @section
-#'
-#' Standardization Methods:
-#'
+#' @section Standardization Methods:
#' - **refit**: This method is based on a complete model re-fit with a
#' standardized version of the data. Hence, this method is equal to
#' standardizing the variables before fitting the model. It is the "purest" and
@@ -87,7 +84,7 @@
#' include interactions or transformations (e.g., polynomial or spline terms).
#' The `robust` (default to `FALSE`) argument enables a robust standardization
#' of data, i.e., based on the `median` and `MAD` instead of the `mean` and
-#' `SD`. **See [standardize()] for more details.**
+#' `SD`. **See [`datawizard::standardize()`] for more details.**
#' **Note** that `standardize_parameters(method = "refit")` may not return
#' the same results as fitting a model on data that has been standardized with
#' `standardize()`; `standardize_parameters()` used the data used by the model
@@ -280,21 +277,105 @@
#' p-values are based on the probability of direction ([`bayestestR::p_direction()`]),
#' which is converted into a p-value using [`bayestestR::pd_to_p()`].
#'
+#' @section Statistical inference - how to quantify evidence:
+#' There is no standardized approach to drawing conclusions based on the
+#' available data and statistical models. A frequently chosen but also much
+#' criticized approach is to evaluate results based on their statistical
+#' significance (*Amrhein et al. 2017*).
+#'
+#' A more sophisticated way would be to test whether estimated effects exceed
+#' the "smallest effect size of interest", to avoid even the smallest effects
+#' being considered relevant simply because they are statistically significant,
+#' but clinically or practically irrelevant (*Lakens et al. 2018, Lakens 2024*).
+#'
+#' A rather unconventional approach, which is nevertheless advocated by various
+#' authors, is to interpret results from classical regression models either in
+#' terms of probabilities, similar to the usual approach in Bayesian statistics
+#' (*Schweder 2018; Schweder and Hjort 2003; Vos 2022*) or in terms of relative
+#' measure of "evidence" or "compatibility" with the data (*Greenland et al. 2022;
+#' Rafi and Greenland 2020*), which nevertheless comes close to a probabilistic
+#' interpretation.
+#'
+#' A more detailed discussion of this topic is found in the documentation of
+#' [`p_function()`].
+#'
+#' The **parameters** package provides several options or functions to aid
+#' statistical inference. These are, for example:
+#' - [`equivalence_test()`][equivalence_test.lm], to compute the (conditional)
+#' equivalence test for frequentist models
+#' - [`p_significance()`][p_significance.lm], to compute the probability of
+#' *practical significance*, which can be conceptualized as a unidirectional
+#' equivalence test
+#' - [`p_function()`], or _consonance function_, to compute p-values and
+#' compatibility (confidence) intervals for statistical models
+#' - the `pd` argument (setting `pd = TRUE`) in `model_parameters()` includes
+#' a column with the *probability of direction*, i.e. the probability that a
+#' parameter is strictly positive or negative. See [`bayestestR::p_direction()`]
+#' for details. If plotting is desired, the [`p_direction()`][p_direction.lm]
+#' function can be used, together with `plot()`.
+#' - the `s_value` argument (setting `s_value = TRUE`) in `model_parameters()`
+#' replaces the p-values with their related _S_-values (*Rafi and Greenland 2020*)
+#' - finally, it is possible to generate distributions of model coefficients by
+#' generating bootstrap-samples (setting `bootstrap = TRUE`) or simulating
+#' draws from model coefficients using [`simulate_model()`]. These samples
+#' can then be treated as "posterior samples" and used in many functions from
+#' the **bayestestR** package.
+#'
+#' Most of the above shown options or functions derive from methods originally
+#' implemented for Bayesian models (*Makowski et al. 2019*). However, assuming
+#' that model assumptions are met (which means, the model fits well to the data,
+#' the correct model is chosen that reflects the data generating process
+#' (distributional model family) etc.), it seems appropriate to interpret
+#' results from classical frequentist models in a "Bayesian way" (more details:
+#' documentation in [`p_function()`]).
+#'
#' @inheritSection format_parameters Interpretation of Interaction Terms
#' @inheritSection print.parameters_model Global Options to Customize Messages and Tables when Printing
#'
#' @references
#'
+#' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is
+#' flat (p > 0.05): Significance thresholds and the crisis of unreplicable
+#' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544}
+#'
+#' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference,
+#' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022)
+#' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
+#'
#' - Hoffman, L. (2015). Longitudinal analysis: Modeling within-person
-#' fluctuation and change. Routledge.
+#' fluctuation and change. Routledge.
#'
-#' - Neter, J., Wasserman, W., & Kutner, M. H. (1989). Applied linear
-#' regression models.
+#' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1).
+#' Retrieved from https://lakens.github.io/statistical_inferences/.
+#' \doi{10.5281/ZENODO.6409077}
+#'
+#' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing
+#' for Psychological Research: A Tutorial. Advances in Methods and Practices
+#' in Psychological Science, 1(2), 259–269. \doi{10.1177/2515245918770963}
+#'
+#' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019).
+#' Indices of Effect Existence and Significance in the Bayesian Framework.
+#' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767}
+#'
+#' - Neter, J., Wasserman, W., and Kutner, M. H. (1989). Applied linear
+#' regression models.
#'
#' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical
-#' science: replace confidence and significance by compatibility and surprise.
-#' BMC Medical Research Methodology (2020) 20:244.
-
+#' science: replace confidence and significance by compatibility and surprise.
+#' BMC Medical Research Methodology (2020) 20:244.
+#'
+#' - Schweder T. Confidence is epistemic probability for empirical science.
+#' Journal of Statistical Planning and Inference (2018) 195:116–125.
+#' \doi{10.1016/j.jspi.2017.09.016}
+#'
+#' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors.
+#' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory
+#' Data Confrontation in Economics, pp. 285-317. Princeton University Press,
+#' Princeton, NJ, 2003
+#'
+#' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling.
+#' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
+#'
#' @return A data frame of indices related to the model's parameters.
#' @export
model_parameters <- function(model, ...) {
@@ -317,8 +398,8 @@ model_parameters <- function(model, ...) {
# Add new options to the docs in "print.parameters_model"
-# getOption("parameters_summary"): show model summary
-# getOption("parameters_mixed_summary"): show model summary for mixed models
+# getOption("parameters_info"): show model summary
+# getOption("parameters_mixed_info"): show model summary for mixed models
# getOption("parameters_cimethod"): show message about CI approximation
# getOption("parameters_exponentiate"): show warning about exp for log/logit links
# getOption("parameters_labels"): use value/variable labels instead pretty names
@@ -331,10 +412,11 @@ model_parameters <- function(model, ...) {
parameters <- model_parameters
-#' Parameters from (General) Linear Models
+#' @title Parameters from (General) Linear Models
+#' @name model_parameters.default
#'
-#' Extract and compute indices and measures to describe parameters of (general)
-#' linear models (GLMs).
+#' @description Extract and compute indices and measures to describe parameters
+#' of (generalized) linear models (GLMs).
#'
#' @param model Model object.
#' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`).
@@ -384,9 +466,10 @@ parameters <- model_parameters
#' _Confidence intervals and approximation of degrees of freedom_ in
#' [`model_parameters()`] for further details. When `ci_method=NULL`, in most
#' cases `"wald"` is used then.
-#' @param summary Logical, if `TRUE`, prints summary information about the
+#' @param include_info Logical, if `TRUE`, prints summary information about the
#' model (model formula, number of observations, residual standard deviation
#' and more).
+#' @param summary Deprecated, please use `include_info` instead.
#' @param keep Character containing a regular expression pattern that
#' describes the parameters that should be included (for `keep`) or excluded
#' (for `drop`) in the returned data frame. `keep` may also be a
@@ -407,14 +490,25 @@ parameters <- model_parameters
#' `$Parameter` column of the parameters table to get the exact parameter
#' names.
#' @param ... Arguments passed to or from other methods. For instance, when
-#' `bootstrap = TRUE`, arguments like `type` or `parallel` are
-#' passed down to `bootstrap_model()`.
+#' `bootstrap = TRUE`, arguments like `type` or `parallel` are passed down to
+#' `bootstrap_model()`. Further non-documented arguments are `digits`,
+#' `p_digits`, `ci_digits` and `footer_digits` to set the number of digits for
+#' the output. If `s_value = TRUE`, the p-value will be replaced by the
+#' S-value in the output (cf. _Rafi and Greenland 2020_). `pd` adds an
+#' additional column with the _probability of direction_ (see
+#' [`bayestestR::p_direction()`] for details). `groups` can be used to group
+#' coefficients. It will be passed to the print-method, or can directly be
+#' used in `print()`, see documentation in [`print.parameters_model()`].
+#' Furthermore, see 'Examples' for this function. For developers, whose
+#' interest mainly is to get a "tidy" data frame of model summaries, it is
+#' recommended to set `pretty_names = FALSE` to speed up computation of the
+#' summary table.
#' @param drop See `keep`.
#' @param verbose Toggle warnings and messages.
#' @inheritParams standard_error
#'
-#' @seealso [`insight::standardize_names()`] to
-#' rename columns into a consistent, standardized naming scheme.
+#' @seealso [`insight::standardize_names()`] to rename columns into a
+#' consistent, standardized naming scheme.
#'
#' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom
#'
@@ -441,6 +535,11 @@ parameters <- model_parameters
#' # different p-value style in output
#' model_parameters(model, p_digits = 5)
#' model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific")
+#'
+#' # report S-value or probability of direction for parameters
+#' model_parameters(model, s_value = TRUE)
+#' model_parameters(model, pd = TRUE)
+#'
#' \donttest{
#' # logistic regression model
#' model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial")
@@ -468,16 +567,23 @@ model_parameters.default <- function(model,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
+ vcov = NULL,
+ vcov_args = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
- vcov = NULL,
- vcov_args = NULL,
...) {
# validation check for inputs
.is_model_valid(model)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# validation check, warn if unsupported argument is used.
# unsupported arguments will be removed from the argument list.
dots <- .check_dots(
@@ -499,7 +605,7 @@ model_parameters.default <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep_parameters = keep,
drop_parameters = drop,
vcov = vcov,
@@ -556,12 +662,12 @@ model_parameters.default <- function(model,
component = "conditional",
ci_method = NULL,
p_adjust = NULL,
- summary = FALSE,
+ include_info = FALSE,
keep_parameters = NULL,
drop_parameters = NULL,
- verbose = TRUE,
vcov = NULL,
vcov_args = NULL,
+ verbose = TRUE,
...) {
dots <- list(...)
@@ -628,7 +734,7 @@ model_parameters.default <- function(model,
iterations,
ci_method = ci_method,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
verbose = verbose,
...
)
@@ -653,15 +759,22 @@ model_parameters.glm <- function(model,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
+ vcov = NULL,
+ vcov_args = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
- vcov = NULL,
- vcov_args = NULL,
verbose = TRUE,
...) {
dots <- list(...)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# set default
if (is.null(ci_method)) {
if (isTRUE(bootstrap)) {
@@ -699,7 +812,7 @@ model_parameters.glm <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep_parameters = keep,
drop_parameters = drop,
vcov = vcov,
diff --git a/R/2_ci.R b/R/2_ci.R
index df2b3b966..c010dcf37 100644
--- a/R/2_ci.R
+++ b/R/2_ci.R
@@ -7,10 +7,9 @@
#' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`).
#' @param dof Number of degrees of freedom to be used when calculating
#' confidence intervals. If `NULL` (default), the degrees of freedom are
-#' retrieved by calling [`degrees_of_freedom()`] with
-#' approximation method defined in `method`. If not `NULL`, use this argument
-#' to override the default degrees of freedom used to compute confidence
-#' intervals.
+#' retrieved by calling [`insight::get_df()`] with approximation method
+#' defined in `method`. If not `NULL`, use this argument to override the
+#' default degrees of freedom used to compute confidence intervals.
#' @param method Method for computing degrees of freedom for
#' confidence intervals (CI) and the related p-values. Allowed are following
#' options (which vary depending on the model class): `"residual"`,
@@ -25,13 +24,25 @@
#' @param iterations The number of bootstrap replicates. Only applies to models
#' of class `merMod` when `method=boot`.
#' @param verbose Toggle warnings and messages.
-#' @param ... Additional arguments
+#' @param ... Additional arguments passed down to the underlying functions.
+#' E.g., arguments like `vcov` or `vcov_args` can be used to compute confidence
+#' intervals using a specific variance-covariance matrix for the standard
+#' errors.
#'
#' @return A data frame containing the CI bounds.
#'
#' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom
#'
-#' @examplesIf require("glmmTMB")
+#' @examplesIf require("glmmTMB") && requireNamespace("sandwich")
+#' data(qol_cancer)
+#' model <- lm(QoL ~ time + age + education, data = qol_cancer)
+#'
+#' # regular confidence intervals
+#' ci(model)
+#'
+#' # using heteroscedasticity-robust standard errors
+#' ci(model, vcov = "HC3")
+#'
#' \donttest{
#' library(parameters)
#' data(Salamanders, package = "glmmTMB")
diff --git a/R/3_p_value.R b/R/3_p_value.R
index a357443f2..c2c76c886 100644
--- a/R/3_p_value.R
+++ b/R/3_p_value.R
@@ -48,6 +48,7 @@ p_value.default <- function(model,
.is_model_valid(model)
dots <- list(...)
+ p <- NULL
if (is.character(method)) {
method <- tolower(method)
@@ -66,33 +67,29 @@ p_value.default <- function(model,
}
if (method == "ml1") {
- p <- p_value_ml1(model)
- return(p)
+ return(p_value_ml1(model))
}
if (method == "betwithin") {
- p <- p_value_betwithin(model)
- return(p)
+ return(p_value_betwithin(model))
}
if (method %in% c("residual", "wald", "normal", "satterthwaite", "kenward", "kr")) {
if (is.null(dof)) {
- dof <- degrees_of_freedom(model, method = method, verbose = FALSE)
+ dof <- insight::get_df(x = model, type = method, verbose = FALSE)
}
- p <- .p_value_dof(
+ return(.p_value_dof(
model,
dof = dof,
method = method,
component = component,
verbose = verbose,
...
- )
- return(p)
+ ))
}
if (method %in% c("hdi", "eti", "si", "bci", "bcai", "quantile")) {
- p <- bayestestR::p_direction(model, ...)
- return(p)
+ return(bayestestR::p_direction(model, ...))
}
# robust standard errors
@@ -114,7 +111,7 @@ p_value.default <- function(model,
se <- do.call("standard_error", fun_args)
}
- dof <- degrees_of_freedom(model, method = "wald", verbose = FALSE)
+ dof <- insight::get_df(x = model, type = "wald", verbose = FALSE)
se <- merge(se, co, sort = FALSE)
se$Statistic <- se$Estimate / se$SE
se$p <- 2 * stats::pt(abs(se$Statistic), df = dof, lower.tail = FALSE)
@@ -144,20 +141,20 @@ p_value.default <- function(model,
})
}
- # output
- if (!is.null(p)) {
- params <- insight::get_parameters(model, component = component)
- if (length(p) == nrow(params) && "Component" %in% colnames(params)) {
- p <- .data_frame(Parameter = params$Parameter, p = as.vector(p), Component = params$Component)
- } else {
- p <- .data_frame(Parameter = names(p), p = as.vector(p))
+ # failure warning
+ if (is.null(p)) {
+ if (isTRUE(verbose)) {
+ insight::format_warning("Could not extract p-values from model object.")
}
- return(p)
+ return(NULL)
}
- # failure warning
- if (is.null(p) && isTRUE(verbose)) {
- insight::format_warning("Could not extract p-values from model object.")
+ # output
+ params <- insight::get_parameters(model, component = component)
+ if (length(p) == nrow(params) && "Component" %in% colnames(params)) {
+ .data_frame(Parameter = params$Parameter, p = as.vector(p), Component = params$Component)
+ } else {
+ .data_frame(Parameter = names(p), p = as.vector(p))
}
}
@@ -172,23 +169,22 @@ p_value.default <- function(model,
if (ncol(cs) >= 4) {
# do we have a p-value column based on t?
pvcn <- which(colnames(cs) == "Pr(>|t|)")
-
# if not, do we have a p-value column based on z?
if (length(pvcn) == 0) {
pvcn <- which(colnames(cs) == "Pr(>|z|)")
}
-
# if not, default to 4
- if (length(pvcn) == 0) pvcn <- 4
-
+ if (length(pvcn) == 0) {
+ pvcn <- 4
+ }
p <- cs[, pvcn]
-
if (is.null(names(p))) {
coef_names <- rownames(cs)
- if (length(coef_names) == length(p)) names(p) <- coef_names
+ if (length(coef_names) == length(p)) {
+ names(p) <- coef_names
+ }
}
}
-
names(p) <- .remove_backticks_from_string(names(p))
p
}
diff --git a/R/4_standard_error.R b/R/4_standard_error.R
index 6dc4b0292..88a2466cb 100644
--- a/R/4_standard_error.R
+++ b/R/4_standard_error.R
@@ -18,11 +18,12 @@
#' * A string which indicates the kind of uncertainty estimates to return.
#' - Heteroskedasticity-consistent: `"vcovHC"`, `"HC"`, `"HC0"`, `"HC1"`,
#' `"HC2"`, `"HC3"`, `"HC4"`, `"HC4m"`, `"HC5"`. See `?sandwich::vcovHC`.
-#' - Cluster-robust: `"vcovCR"`, `"CR0"`, `"CR1"`, `"CR1p"`, `"CR1S"`, `"CR2"`,
-#' `"CR3"`. See `?clubSandwich::vcovCR`.
-#' - Bootstrap: `"vcovBS"`, `"xy"`, `"residual"`, `"wild"`, `"mammen"`, `"webb"`.
-#' See `?sandwich::vcovBS`.
-#' - Other `sandwich` package functions: `"vcovHAC"`, `"vcovPC"`, `"vcovCL"`, `"vcovPL"`.
+#' - Cluster-robust: `"vcovCR"`, `"CR0"`, `"CR1"`, `"CR1p"`, `"CR1S"`,
+#' `"CR2"`, `"CR3"`. See `?clubSandwich::vcovCR`.
+#' - Bootstrap: `"vcovBS"`, `"xy"`, `"residual"`, `"wild"`, `"mammen"`,
+#' `"webb"`. See `?sandwich::vcovBS`.
+#' - Other `sandwich` package functions: `"vcovHAC"`, `"vcovPC"`, `"vcovCL"`,
+#' `"vcovPL"`.
#' @param vcov_args List of arguments to be passed to the function identified by
#' the `vcov` argument. This function is typically supplied by the **sandwich**
#' or **clubSandwich** packages. Please refer to their documentation (e.g.,
@@ -46,18 +47,18 @@
#' standard errors. Depending on the model, may also include columns for model
#' components etc.
#'
-#' @examples
+#' @examplesIf require("sandwich") && require("clubSandwich")
#' model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris)
#' standard_error(model)
#'
-#' if (require("sandwich") && require("clubSandwich")) {
-#' standard_error(model, vcov = "HC3")
+#' # robust standard errors
+#' standard_error(model, vcov = "HC3")
#'
-#' standard_error(model,
-#' vcov = "vcovCL",
-#' vcov_args = list(cluster = iris$Species)
-#' )
-#' }
+#' # cluster-robust standard errors
+#' standard_error(model,
+#' vcov = "vcovCL",
+#' vcov_args = list(cluster = iris$Species)
+#' )
#' @export
standard_error <- function(model, ...) {
UseMethod("standard_error")
@@ -81,6 +82,10 @@ standard_error.default <- function(model,
dots <- list(...)
se <- NULL
+ # if a vcov is provided, we calculate standard errors based on that matrix
+ # this is usually the case for HC (robust) standard errors
+ # ------------------------------------------------------------------------
+
# vcov: matrix
if (is.matrix(vcov)) {
se <- sqrt(diag(vcov))
@@ -105,7 +110,9 @@ standard_error.default <- function(model,
se <- sqrt(diag(.vcov))
}
- # classical se from summary()
+ # classical SE from summary()
+ # ------------------------------------------------------------------------
+
if (is.null(se)) {
se <- .safe({
if (grepl("Zelig-", class(model)[1], fixed = TRUE)) {
@@ -116,7 +123,10 @@ standard_error.default <- function(model,
})
}
- # classical se from get_varcov()
+ # if retrieving SE from summary() failed, we try to calculate SE based
+ # on classical se from get_varcov()
+ # ------------------------------------------------------------------------
+
if (is.null(se)) {
se <- .safe({
varcov <- insight::get_varcov(model, component = component)
@@ -150,26 +160,27 @@ standard_error.default <- function(model,
.get_se_from_summary <- function(model, component = NULL) {
- cs <- suppressWarnings(stats::coef(summary(model)))
+ cs <- .safe(suppressWarnings(stats::coef(summary(model))))
se <- NULL
- if (is.list(cs) && !is.null(component)) cs <- cs[[component]]
-
+ if (is.list(cs) && !is.null(component)) {
+ cs <- cs[[component]]
+ }
if (!is.null(cs)) {
# do we have a se column?
se_col <- which(colnames(cs) == "Std. Error")
-
# if not, default to 2
- if (length(se_col) == 0) se_col <- 2
-
+ if (length(se_col) == 0) {
+ se_col <- 2
+ }
se <- as.vector(cs[, se_col])
-
if (is.null(names(se))) {
coef_names <- rownames(cs)
- if (length(coef_names) == length(se)) names(se) <- coef_names
+ if (length(coef_names) == length(se)) {
+ names(se) <- coef_names
+ }
}
}
-
names(se) <- .remove_backticks_from_string(names(se))
se
}
diff --git a/R/bootstrap_model.R b/R/bootstrap_model.R
index 949bc415a..cdfc66109 100644
--- a/R/bootstrap_model.R
+++ b/R/bootstrap_model.R
@@ -178,7 +178,7 @@ bootstrap_model.merMod <- function(model,
} else {
params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector
}
- return(params)
+ params
}
if (verbose) {
@@ -244,9 +244,7 @@ bootstrap_model.nestedLogit <- function(model,
}
params <- insight::get_parameters(fit, verbose = FALSE)
- params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector
-
- return(params)
+ stats::setNames(params$Estimate, params$Parameter) # Transform to named vector
}
results <- boot::boot(
diff --git a/R/bootstrap_parameters.R b/R/bootstrap_parameters.R
index 474731c09..98c284ab7 100644
--- a/R/bootstrap_parameters.R
+++ b/R/bootstrap_parameters.R
@@ -9,6 +9,8 @@
#' or `"all"` to compute all tests. For each "test", the corresponding
#' **bayestestR** function is called (e.g. [bayestestR::rope()] or
#' [bayestestR::p_direction()]) and its results included in the summary output.
+#' @param ... Arguments passed to other methods, like [`bootstrap_model()`] or
+#' [`bayestestR::describe_posterior()`].
#' @inheritParams bootstrap_model
#' @inheritParams bayestestR::describe_posterior
#'
@@ -40,6 +42,11 @@
#' b <- bootstrap_parameters(model)
#' print(b)
#'
+#' # different type of bootstrapping
+#' set.seed(2)
+#' b <- bootstrap_parameters(model, type = "balanced")
+#' print(b)
+#'
#' est <- emmeans::emmeans(b, trt.vs.ctrl ~ Species)
#' print(model_parameters(est))
#' }
diff --git a/R/ci_generic.R b/R/ci_generic.R
index d026adc13..1bfad93e3 100644
--- a/R/ci_generic.R
+++ b/R/ci_generic.R
@@ -97,10 +97,14 @@
}
method <- tolower(method)
  # First, we want standard errors for parameters
+ # --------------------------------------------
+
# if we have adjusted SE, e.g. from kenward-roger, don't recompute
# standard errors to save time...
if (is.null(se)) {
if (!is.null(vcov) || isTRUE(list(...)[["robust"]])) {
+ # robust (HC) standard errors?
stderror <- standard_error(model,
component = component,
vcov = vcov,
@@ -109,6 +113,7 @@
...
)
} else {
+ # normal standard errors, including small-sample approximations
stderror <- switch(method,
kenward = se_kenward(model),
kr = se_kenward(model),
@@ -133,10 +138,13 @@
se <- stderror$SE
}
+ # Next, we need degrees of freedom
+ # --------------------------------
+
# check if we have a valid dof vector
if (is.null(dof)) {
# residual df
- dof <- degrees_of_freedom(model, method = method, verbose = FALSE)
+ dof <- insight::get_df(x = model, type = method, verbose = FALSE)
# make sure we have a value for degrees of freedom
if (is.null(dof) || length(dof) == 0 || .is_chi2_model(model, dof)) {
dof <- Inf
@@ -146,7 +154,9 @@
}
}
- # calculate CIs
+ # Now we can calculate CIs
+ # ------------------------
+
alpha <- (1 + ci) / 2
fac <- suppressWarnings(stats::qt(alpha, df = dof))
out <- cbind(
@@ -171,7 +181,6 @@
}
-
.is_chi2_model <- function(model, dof) {
statistic <- insight::find_statistic(model)
(all(dof == 1) && identical(statistic, "chi-squared statistic"))
diff --git a/R/compare_parameters.R b/R/compare_parameters.R
index 05305e244..2f03e4576 100644
--- a/R/compare_parameters.R
+++ b/R/compare_parameters.R
@@ -310,3 +310,19 @@ compare_models <- compare_parameters
x
}
+
+
+.is_bayesian_model <- function(x, exclude = NULL) {
+ bayes_classes <- c(
+ "brmsfit", "stanfit", "MCMCglmm", "stanreg",
+ "stanmvreg", "bmerMod", "BFBayesFactor", "bamlss",
+ "bayesx", "mcmc", "bcplm", "bayesQR", "BGGM",
+ "meta_random", "meta_fixed", "meta_bma", "blavaan",
+ "blrm", "blmerMod"
+ )
+ # if exclude is not NULL, remove elements in exclude from bayes_class
+ if (!is.null(exclude)) {
+ bayes_classes <- bayes_classes[!bayes_classes %in% exclude]
+ }
+ inherits(x, bayes_classes)
+}
diff --git a/R/display.R b/R/display.R
index 334854250..6b348bc9e 100644
--- a/R/display.R
+++ b/R/display.R
@@ -2,15 +2,13 @@
#' @name display.parameters_model
#'
#' @description Prints tables (i.e. data frame) in different output formats.
-#' `print_md()` is a alias for `display(format = "markdown")`, `print_html()`
-#' is a alias for `display(format = "html")`. `print_table()` is for specific
+#' `print_md()` is an alias for `display(format = "markdown")`, `print_html()`
+#' is an alias for `display(format = "html")`. `print_table()` is for specific
#' use cases only, and currently only works for `compare_parameters()` objects.
#'
-#' @param x An object returned by [`model_parameters()`][model_parameters].
-#' @param object An object returned by [`model_parameters()`][model_parameters],
-#' [`simulate_parameters()`][simulate_parameters],
-#' [`equivalence_test()`][equivalence_test.lm] or
-#' [`principal_components()`][principal_components].
+#' @param x An object returned by [`model_parameters()`].
+#' @param object An object returned by [`model_parameters()`],[`simulate_parameters()`],
+#' [`equivalence_test()`] or [`principal_components()`].
#' @param format String, indicating the output format. Can be `"markdown"`
#' or `"html"`.
#' @param align Only applies to HTML tables. May be one of `"left"`,
@@ -56,7 +54,7 @@
#' effects in the table. See 'Examples'. An alternative is to set `engine = "tt"`
#' in `print_html()` to use the _tinytable_ package for creating HTML tables.
#'
-#' @seealso [print.parameters_model()]
+#' @seealso [print.parameters_model()] and [print.compare_parameters()]
#'
#' @examplesIf require("gt", quietly = TRUE)
#' model <- lm(mpg ~ wt + cyl, data = mtcars)
diff --git a/R/dof.R b/R/dof.R
index f0848eb2c..fb777adde 100644
--- a/R/dof.R
+++ b/R/dof.R
@@ -3,39 +3,32 @@
#' Estimate or extract degrees of freedom of models parameters.
#'
#' @param model A statistical model.
-#' @param method Can be `"analytical"` (default, DoFs are estimated based
-#' on the model type), `"residual"` in which case they are directly taken
-#' from the model if available (for Bayesian models, the goal (looking for
-#' help to make it happen) would be to refit the model as a frequentist one
-#' before extracting the DoFs), `"ml1"` (see [dof_ml1()]), `"betwithin"`
-#' (see [dof_betwithin()]), `"satterthwaite"` (see [`dof_satterthwaite()`]),
-#' `"kenward"` (see [`dof_kenward()`]) or `"any"`, which tries to extract DoF
-#' by any of those methods, whichever succeeds. See 'Details'.
-#' @param ... Currently not used.
-#'
-#' @details
-#' Methods for calculating degrees of freedom:
+#' @param method Type of approximation for the degrees of freedom. Can be one of
+#' the following:
#'
-#' - `"analytical"` for models of class `lmerMod`, Kenward-Roger approximated
-#' degrees of freedoms are calculated, for other models, `n-k` (number of
-#' observations minus number of parameters).
-#' - `"residual"` tries to extract residual degrees of freedom, and returns
-#' `Inf` if residual degrees of freedom could not be extracted.
-#' - `"any"` first tries to extract residual degrees of freedom, and if these
-#' are not available, extracts analytical degrees of freedom.
-#' - `"nokr"` same as `"analytical"`, but does not Kenward-Roger approximation
-#' for models of class `lmerMod`. Instead, always uses `n-k` to calculate df
-#' for any model.
-#' - `"normal"` returns `Inf`.
-#' - `"wald"` returns residual df for models with t-statistic, and `Inf` for all other models.
-#' - `"kenward"` calls [`dof_kenward()`].
-#' - `"satterthwaite"` calls [`dof_satterthwaite()`].
-#' - `"ml1"` calls [`dof_ml1()`].
-#' - `"betwithin"` calls [`dof_betwithin()`].
+#' + `"residual"` (aka `"analytical"`) returns the residual degrees of
+#' freedom, which usually is what [`stats::df.residual()`] returns. If a
+#' model object has no method to extract residual degrees of freedom, these
+#' are calculated as `n-p`, i.e. the number of observations minus the number
+#' of estimated parameters. If residual degrees of freedom cannot be extracted
+#' by either approach, returns `Inf`.
+#' + `"wald"` returns residual (aka analytical) degrees of freedom for models
+#' with t-statistic, `1` for models with Chi-squared statistic, and `Inf` for
+#' all other models. Also returns `Inf` if residual degrees of freedom cannot
+#' be extracted.
+#' + `"normal"` always returns `Inf`.
+#' + `"model"` returns model-based degrees of freedom, i.e. the number of
+#' (estimated) parameters.
+#' + For mixed models, can also be `"ml1"` (or `"m-l-1"`, approximation of
+#' degrees of freedom based on a "m-l-1" heuristic as suggested by _Elff et
+#' al. 2019_) or `"between-within"` (or `"betwithin"`).
+#' + For mixed models of class `merMod`, `type` can also be `"satterthwaite"`
+#' or `"kenward-roger"` (or `"kenward"`). See 'Details'.
#'
-#' For models with z-statistic, the returned degrees of freedom for model parameters
-#' is `Inf` (unless `method = "ml1"` or `method = "betwithin"`), because there is
-#' only one distribution for the related test statistic.
+#' Usually, when degrees of freedom are required to calculate p-values or
+#' confidence intervals, `type = "wald"` is likely to be the best choice in
+#' most cases.
+#' @param ... Currently not used.
#'
#' @note
#' In many cases, `degrees_of_freedom()` returns the same as `df.residual()`,
@@ -47,17 +40,15 @@
#' `"satterthwaite"`, each model parameter can have a different degree of
#' freedom.
#'
-#' @examples
+#' @examplesIf require("lme4", quietly = TRUE)
#' model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris)
#' dof(model)
#'
#' model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial")
#' dof(model)
#' \donttest{
-#' if (require("lme4", quietly = TRUE)) {
-#' model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris)
-#' dof(model)
-#' }
+#' model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris)
+#' dof(model)
#'
#' if (require("rstanarm", quietly = TRUE)) {
#' model <- stan_glm(
@@ -70,84 +61,8 @@
#' }
#' }
#' @export
-degrees_of_freedom <- function(model, ...) {
- UseMethod("degrees_of_freedom")
-}
-
-
-
-
-#' @rdname degrees_of_freedom
-#' @export
-degrees_of_freedom.default <- function(model, method = "analytical", ...) {
- # check for valid input
- .is_model_valid(model)
-
- if (is.null(method)) {
- method <- "wald"
- }
- method <- tolower(method)
-
- method <- match.arg(method, choices = c(
- "analytical", "any", "fit", "ml1", "betwithin", "satterthwaite", "kenward",
- "nokr", "wald", "kr", "profile", "boot", "uniroot", "residual", "normal",
- "likelihood"
- ))
-
- if (!.dof_method_ok(model, method, ...) || method %in% c("profile", "likelihood", "boot", "uniroot")) {
- method <- "any"
- }
-
- stat <- insight::find_statistic(model)
-
- # for z-statistic, always return Inf
- if (!is.null(stat) && stat == "z-statistic" && !(method %in% c("ml1", "betwithin"))) {
- if (method == "residual") {
- return(.degrees_of_freedom_residual(model, verbose = FALSE))
- } else {
- return(Inf)
- }
- }
-
- # Chi2-distributions usually have 1 df
- if (!is.null(stat) && stat == "chi-squared statistic") {
- if (method == "residual") {
- return(.degrees_of_freedom_residual(model, verbose = FALSE))
- } else {
- return(1)
- }
- }
-
- if (method == "any") { # nolint
- dof <- .degrees_of_freedom_residual(model, verbose = FALSE)
- if (is.null(dof) || all(is.infinite(dof)) || anyNA(dof)) {
- dof <- .degrees_of_freedom_analytical(model, kenward = FALSE)
- }
- } else if (method == "ml1") {
- dof <- dof_ml1(model)
- } else if (method == "wald") {
- dof <- .degrees_of_freedom_residual(model, verbose = FALSE)
- } else if (method == "normal") {
- dof <- Inf
- } else if (method == "satterthwaite") {
- dof <- dof_satterthwaite(model)
- } else if (method == "betwithin") {
- dof <- dof_betwithin(model)
- } else if (method %in% c("kenward", "kr")) {
- dof <- dof_kenward(model)
- } else if (method == "analytical") {
- dof <- .degrees_of_freedom_analytical(model, kenward = TRUE)
- } else if (method == "nokr") {
- dof <- .degrees_of_freedom_analytical(model, kenward = FALSE)
- } else {
- dof <- .degrees_of_freedom_residual(model)
- }
-
- if (!is.null(dof) && length(dof) > 0 && all(dof == 0)) {
- insight::format_warning("Model has zero degrees of freedom!")
- }
-
- dof
+degrees_of_freedom <- function(model, method = "analytical", ...) {
+ insight::get_df(x = model, type = method, ...)
}
#' @rdname degrees_of_freedom
@@ -155,110 +70,6 @@ degrees_of_freedom.default <- function(model, method = "analytical", ...) {
dof <- degrees_of_freedom
-
-
-
-# Analytical approach ------------------------------
-
-
-#' @keywords internal
-.degrees_of_freedom_analytical <- function(model, kenward = TRUE) {
- nparam <- n_parameters(model)
- n <- insight::n_obs(model)
-
- if (is.null(n)) {
- n <- Inf
- }
-
- if (isTRUE(kenward) && inherits(model, "lmerMod")) {
- dof <- as.numeric(dof_kenward(model))
- } else {
- dof <- rep(n - nparam, nparam)
- }
-
- dof
-}
-
-
-
-
-
-# Model approach (Residual df) ------------------------------
-
-
-#' @keywords internal
-.degrees_of_freedom_residual <- function(model, verbose = TRUE) {
- if (.is_bayesian_model(model, exclude = c("bmerMod", "bayesx", "blmerMod", "bglmerMod"))) {
- model <- bayestestR::bayesian_as_frequentist(model)
- }
-
- # 1st try
- dof <- try(stats::df.residual(model), silent = TRUE)
-
- # 2nd try
- if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) {
- junk <- utils::capture.output(dof = try(summary(model)$df[2], silent = TRUE))
- }
-
- # 3rd try, nlme
- if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) {
- dof <- try(unname(model$fixDF$X), silent = TRUE)
- }
-
- # last try
- if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) {
- dof <- Inf
- if (verbose) {
- insight::format_alert("Could not extract degrees of freedom.")
- }
- }
-
-
- # special cases
- # if (inherits(model, "gam")) {
- # dof <- .dof_fit_gam(model, dof)
- # }
-
- dof
-}
-
-
-
-
-# residual df - for models with residual df, but no "df.residual()" method --------------
-
-
-#' @keywords internal
-.degrees_of_freedom_no_dfresid_method <- function(model, method = NULL) {
- if (identical(method, "normal")) {
- return(Inf)
- } else if (!is.null(method) && method %in% c("ml1", "satterthwaite", "betwithin")) {
- degrees_of_freedom.default(model, method = method)
- } else {
- .degrees_of_freedom_analytical(model, kenward = FALSE)
- }
-}
-
-
-
-
-
-
-# helper --------------
-
-.dof_fit_gam <- function(model, dof) {
- params <- insight::find_parameters(model)
- if (!is.null(params$conditional)) {
- dof <- rep(dof, length(params$conditional))
- }
- if (!is.null(params$smooth_terms)) {
- s <- summary(model)
- dof <- c(dof, s$s.table[, "Ref.df"])
- }
- dof
-}
-
-
# Helper, check args ------------------------------
.dof_method_ok <- function(model, method, type = "df_method", verbose = TRUE, ...) {
@@ -329,23 +140,3 @@ dof <- degrees_of_freedom
return(TRUE)
}
-
-
-
-
-# helper
-
-.is_bayesian_model <- function(x, exclude = NULL) {
- bayes_classes <- c(
- "brmsfit", "stanfit", "MCMCglmm", "stanreg",
- "stanmvreg", "bmerMod", "BFBayesFactor", "bamlss",
- "bayesx", "mcmc", "bcplm", "bayesQR", "BGGM",
- "meta_random", "meta_fixed", "meta_bma", "blavaan",
- "blrm", "blmerMod"
- )
- # if exclude is not NULL, remove elements in exclude from bayes_class
- if (!is.null(exclude)) {
- bayes_classes <- bayes_classes[!bayes_classes %in% exclude]
- }
- inherits(x, bayes_classes)
-}
diff --git a/R/dof_kenward.R b/R/dof_kenward.R
index aa9f00c59..4d8976ace 100644
--- a/R/dof_kenward.R
+++ b/R/dof_kenward.R
@@ -263,7 +263,7 @@ dof_kenward <- function(model) {
Gp <- lme4::getME(model, "Gp")
n.RT <- length(Gp) - 1 ## Number of random terms (i.e. of (|)'s)
- n.lev.by.RT <- sapply(lme4::getME(model, "flist"), function(x) length(levels(x)))
+ n.lev.by.RT <- sapply(lme4::getME(model, "flist"), nlevels)
n.comp.by.RT <- .get.RT.dim.by.RT(model)
n.parm.by.RT <- (n.comp.by.RT + 1) * n.comp.by.RT / 2
n.RE.by.RT <- diff(Gp)
diff --git a/R/equivalence_test.R b/R/equivalence_test.R
index 6f26f60b0..8247de837 100644
--- a/R/equivalence_test.R
+++ b/R/equivalence_test.R
@@ -10,35 +10,34 @@ bayestestR::equivalence_test
#'
#' @param x A statistical model.
#' @param range The range of practical equivalence of an effect. May be
-#' `"default"`, to automatically define this range based on properties of the
-#' model's data.
+#' `"default"`, to automatically define this range based on properties of the
+#' model's data.
#' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`).
#' @param rule Character, indicating the rules when testing for practical
-#' equivalence. Can be `"bayes"`, `"classic"` or `"cet"`. See
-#' 'Details'.
+#' equivalence. Can be `"bayes"`, `"classic"` or `"cet"`. See 'Details'.
#' @param test Hypothesis test for computing contrasts or pairwise comparisons.
-#' See [`?ggeffects::test_predictions`](https://strengejacke.github.io/ggeffects/reference/test_predictions.html)
-#' for details.
+#' See [`?ggeffects::test_predictions`](https://strengejacke.github.io/ggeffects/reference/test_predictions.html)
+#' for details.
#' @param verbose Toggle warnings and messages.
#' @param ... Arguments passed to or from other methods.
#' @inheritParams model_parameters.merMod
#' @inheritParams p_value
#'
-#' @seealso For more details, see [bayestestR::equivalence_test()].
-#' Further readings can be found in the references.
+#' @seealso For more details, see [bayestestR::equivalence_test()]. Further
+#' readings can be found in the references. See also [`p_significance()`] for
+#' a unidirectional equivalence test.
#'
-#' @details
-#' In classical null hypothesis significance testing (NHST) within a frequentist
-#' framework, it is not possible to accept the null hypothesis, H0 - unlike
-#' in Bayesian statistics, where such probability statements are possible.
-#' "[...] one can only reject the null hypothesis if the test
+#' @details In classical null hypothesis significance testing (NHST) within a
+#' frequentist framework, it is not possible to accept the null hypothesis, H0 -
+#' unlike in Bayesian statistics, where such probability statements are
+#' possible. "[...] one can only reject the null hypothesis if the test
#' statistics falls into the critical region(s), or fail to reject this
#' hypothesis. In the latter case, all we can say is that no significant effect
#' was observed, but one cannot conclude that the null hypothesis is true."
-#' (_Pernet 2017_). One way to address this issues without Bayesian methods
-#' is *Equivalence Testing*, as implemented in `equivalence_test()`.
-#' While you either can reject the null hypothesis or claim an inconclusive result
-#' in NHST, the equivalence test - according to _Pernet_ - adds a third category,
+#' (_Pernet 2017_). One way to address this issue without Bayesian methods is
+#' *Equivalence Testing*, as implemented in `equivalence_test()`. While you
+#' either can reject the null hypothesis or claim an inconclusive result in
+#' NHST, the equivalence test - according to _Pernet_ - adds a third category,
#' *"accept"*. Roughly speaking, the idea behind equivalence testing in a
#' frequentist framework is to check whether an estimate and its uncertainty
#' (i.e. confidence interval) falls within a region of "practical equivalence".
@@ -62,14 +61,17 @@ bayestestR::equivalence_test
#' - "classic" - The TOST rule (Lakens 2017)
#'
#' This rule follows the "TOST rule", i.e. a two one-sided test procedure
-#' (_Lakens 2017_). Following this rule, practical equivalence of an effect
-#' (i.e. H0) is *rejected*, when the coefficient is statistically significant
-#' *and* the narrow confidence intervals (i.e. `1-2*alpha`) *include* or
-#' *exceed* the ROPE boundaries. Practical equivalence is assumed
-#' (i.e. H0 "accepted") when the narrow confidence intervals are completely
-#' inside the ROPE, no matter if the effect is statistically significant
-#' or not. Else, the decision whether to accept or reject practical
-#' equivalence is undecided.
+#' (_Lakens 2017_). Following this rule...
+#' - practical equivalence is assumed (i.e. H0 *"accepted"*) when the narrow
+#' confidence intervals are completely inside the ROPE, no matter if the
+#' effect is statistically significant or not;
+#' - practical equivalence (i.e. H0) is *rejected*, when the coefficient is
+#' statistically significant, both when the narrow confidence intervals
+#' (i.e. `1-2*alpha`) include or exclude the ROPE boundaries, but the
+#' narrow confidence intervals are *not fully covered* by the ROPE;
+#' - else the decision whether to accept or reject practical equivalence is
+#' undecided (i.e. when effects are *not* statistically significant *and*
+#' the narrow confidence intervals overlaps the ROPE).
#'
#' - "cet" - Conditional Equivalence Testing (Campbell/Gustafson 2018)
#'
@@ -91,63 +93,141 @@ bayestestR::equivalence_test
#' ## p-Values
#' The equivalence p-value is the area of the (cumulative) confidence
#' distribution that is outside of the region of equivalence. It can be
-#' interpreted as p-value for *rejecting* the alternative hypothesis
-#' and *accepting* the "null hypothesis" (i.e. assuming practical
-#' equivalence). That is, a high p-value means we reject the assumption of
-#' practical equivalence and accept the alternative hypothesis.
+#' interpreted as p-value for *rejecting* the alternative hypothesis and
+#' *accepting* the "null hypothesis" (i.e. assuming practical equivalence). That
+#' is, a high p-value means we reject the assumption of practical equivalence
+#' and accept the alternative hypothesis.
#'
#' ## Second Generation p-Value (SGPV)
#' Second generation p-values (SGPV) were proposed as a statistic that
#' represents _the proportion of data-supported hypotheses that are also null
#' hypotheses_ _(Blume et al. 2018, Lakens and Delacre 2020)_. It represents the
-#' proportion of the confidence interval range that is inside the ROPE.
+#' proportion of the _full_ confidence interval range (assuming a normally or
+#' t-distributed, equal-tailed interval, based on the model) that is inside the
+#' ROPE. The SGPV ranges from zero to one. Higher values indicate that the
+#' effect is more likely to be practically equivalent ("not of interest").
+#'
+#' Note that the assumed interval, which is used to calculate the SGPV, is an
+#' estimation of the _full interval_ based on the chosen confidence level. For
+#' example, if the 95% confidence interval of a coefficient ranges from -1 to 1,
+#' the underlying _full (normally or t-distributed) interval_ approximately
+#' ranges from -1.9 to 1.9, see also following code:
+#'
+#' ```
+#' # simulate full normal distribution
+#' out <- bayestestR::distribution_normal(10000, 0, 0.5)
+#' # range of "full" distribution
+#' range(out)
+#' # range of 95% CI
+#' round(quantile(out, probs = c(0.025, 0.975)), 2)
+#' ```
+#'
+#' This ensures that the SGPV always refers to the general compatible parameter
+#' space of coefficients, independent from the confidence interval chosen for
+#' testing practical equivalence. Therefore, the SGPV of the _full interval_ is
+#' similar to the ROPE coverage of Bayesian equivalence tests, see following
+#' code:
+#'
+#' ```
+#' library(bayestestR)
+#' library(brms)
+#' m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
+#' m2 <- brm(mpg ~ gear + wt + cyl + hp, data = mtcars)
+#' # SGPV for frequentist models
+#' equivalence_test(m)
+#' # similar to ROPE coverage of Bayesian models
+#' equivalence_test(m2)
+#' # similar to ROPE coverage of simulated draws / bootstrap samples
+#' equivalence_test(simulate_model(m))
+#' ```
#'
#' ## ROPE range
#' Some attention is required for finding suitable values for the ROPE limits
#' (argument `range`). See 'Details' in [bayestestR::rope_range()]
#' for further information.
#'
+#' @inheritSection model_parameters Statistical inference - how to quantify evidence
+#'
#' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html)
#' implemented in the [**see**-package](https://easystats.github.io/see/).
#'
#' @references
#'
+#' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is
+#' flat (p > 0.05): Significance thresholds and the crisis of unreplicable
+#' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544}
+#'
#' - Blume, J. D., D'Agostino McGowan, L., Dupont, W. D., & Greevy, R. A.
-#' (2018). Second-generation p-values: Improved rigor, reproducibility, &
-#' transparency in statistical analyses. PLOS ONE, 13(3), e0188299.
-#' https://doi.org/10.1371/journal.pone.0188299
+#' (2018). Second-generation p-values: Improved rigor, reproducibility, &
+#' transparency in statistical analyses. PLOS ONE, 13(3), e0188299.
+#' https://doi.org/10.1371/journal.pone.0188299
#'
#' - Campbell, H., & Gustafson, P. (2018). Conditional equivalence
-#' testing: An alternative remedy for publication bias. PLOS ONE, 13(4),
-#' e0195145. doi: 10.1371/journal.pone.0195145
+#' testing: An alternative remedy for publication bias. PLOS ONE, 13(4),
+#' e0195145. doi: 10.1371/journal.pone.0195145
+#'
+#' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference,
+#' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022)
+#' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
#'
#' - Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with
-#' R, JAGS, and Stan. Academic Press
+#' R, JAGS, and Stan. Academic Press
#'
#' - Kruschke, J. K. (2018). Rejecting or accepting parameter values in
-#' Bayesian estimation. Advances in Methods and Practices in Psychological
-#' Science, 1(2), 270-280. doi: 10.1177/2515245918771304
+#' Bayesian estimation. Advances in Methods and Practices in Psychological
+#' Science, 1(2), 270-280. doi: 10.1177/2515245918771304
#'
#' - Lakens, D. (2017). Equivalence Tests: A Practical Primer for t Tests,
-#' Correlations, and Meta-Analyses. Social Psychological and Personality
-#' Science, 8(4), 355–362. doi: 10.1177/1948550617697177
+#' Correlations, and Meta-Analyses. Social Psychological and Personality
+#' Science, 8(4), 355–362. doi: 10.1177/1948550617697177
+#'
+#' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1).
+#' Retrieved from https://lakens.github.io/statistical_inferences/.
+#' \doi{10.5281/ZENODO.6409077}
#'
-#' - Lakens, D., & Delacre, M. (2020). Equivalence Testing and the Second
-#' Generation P-Value. Meta-Psychology, 4.
-#' https://doi.org/10.15626/MP.2018.933
+#' - Lakens, D., and Delacre, M. (2020). Equivalence Testing and the Second
+#' Generation P-Value. Meta-Psychology, 4.
+#' https://doi.org/10.15626/MP.2018.933
+#'
+#' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing
+#' for Psychological Research: A Tutorial. Advances in Methods and Practices
+#' in Psychological Science, 1(2), 259–269. \doi{10.1177/2515245918770963}
+#'
+#' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019).
+#' Indices of Effect Existence and Significance in the Bayesian Framework.
+#' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767}
#'
#' - Pernet, C. (2017). Null hypothesis significance testing: A guide to
-#' commonly misunderstood concepts and recommendations for good practice.
-#' F1000Research, 4, 621. doi: 10.12688/f1000research.6963.5
+#' commonly misunderstood concepts and recommendations for good practice.
+#' F1000Research, 4, 621. doi: 10.12688/f1000research.6963.5
+#'
+#' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical
+#' science: replace confidence and significance by compatibility and surprise.
+#' BMC Medical Research Methodology (2020) 20:244.
+#'
+#' - Schweder T. Confidence is epistemic probability for empirical science.
+#' Journal of Statistical Planning and Inference (2018) 195:116–125.
+#' \doi{10.1016/j.jspi.2017.09.016}
+#'
+#' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors.
+#' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory
+#' Data Confrontation in Economics, pp. 285-317. Princeton University Press,
+#' Princeton, NJ, 2003
+#'
+#' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling.
+#' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
#'
#' @return A data frame.
-#' @examples
+#' @examplesIf requireNamespace("sandwich")
#' data(qol_cancer)
#' model <- lm(QoL ~ time + age + education, data = qol_cancer)
#'
#' # default rule
#' equivalence_test(model)
#'
+#' # using heteroscedasticity-robust standard errors
+#' equivalence_test(model, vcov = "HC3")
+#'
#' # conditional equivalence test
#' equivalence_test(model, rule = "cet")
#'
@@ -161,10 +241,21 @@ equivalence_test.lm <- function(x,
range = "default",
ci = 0.95,
rule = "classic",
+ vcov = NULL,
+ vcov_args = NULL,
verbose = TRUE,
...) {
rule <- match.arg(tolower(rule), choices = c("bayes", "classic", "cet"))
- out <- .equivalence_test_frequentist(x, range, ci, rule, verbose, ...)
+ out <- .equivalence_test_frequentist(
+ x,
+ range = range,
+ ci = ci,
+ rule = rule,
+ vcov = vcov,
+ vcov_args = vcov_args,
+ verbose,
+ ...
+ )
if (is.null(attr(out, "pretty_names", exact = TRUE))) {
attr(out, "pretty_names") <- format_parameters(x)
@@ -225,6 +316,8 @@ equivalence_test.merMod <- function(x,
ci = 0.95,
rule = "classic",
effects = c("fixed", "random"),
+ vcov = NULL,
+ vcov_args = NULL,
verbose = TRUE,
...) {
# ==== argument matching ====
@@ -236,7 +329,16 @@ equivalence_test.merMod <- function(x,
# ==== equivalent testing for fixed or random effects ====
if (effects == "fixed") {
- out <- .equivalence_test_frequentist(x, range, ci, rule, verbose, ...)
+ out <- .equivalence_test_frequentist(
+ x,
+ range = range,
+ ci = ci,
+ rule = rule,
+ vcov = vcov,
+ vcov_args = vcov_args,
+ verbose,
+ ...
+ )
} else {
out <- .equivalence_test_frequentist_random(x, range, ci, rule, verbose, ...)
}
@@ -297,6 +399,18 @@ equivalence_test.parameters_simulate_model <- function(x,
}
+#' @export
+equivalence_test.parameters_model <- function(x,
+ range = "default",
+ ci = 0.95,
+ rule = "classic",
+ verbose = TRUE,
+ ...) {
+ model <- .get_object(x)
+ equivalence_test(x = model, range = range, ci = ci, rule = rule, verbose = verbose, ...)
+}
+
+
#' @rdname equivalence_test.lm
#' @export
equivalence_test.ggeffects <- function(x,
@@ -311,6 +425,7 @@ equivalence_test.ggeffects <- function(x,
focal <- attributes(x)$original.terms
obj_name <- attributes(x)$model.name
ci <- attributes(x)$ci.lvl
+ dof <- attributes(x)$df
x <- .get_ggeffects_model(x)
@@ -335,10 +450,12 @@ equivalence_test.ggeffects <- function(x,
l <- Map(
function(ci_wide, ci_narrow) {
.equivalence_test_numeric(
+ ci = ci,
ci_wide,
ci_narrow,
range_rope = range,
rule = rule,
+ dof = dof,
verbose = verbose
)
}, conf_int, conf_int2
@@ -398,6 +515,8 @@ equivalence_test.ggeffects <- function(x,
range = "default",
ci = 0.95,
rule = "classic",
+ vcov = NULL,
+ vcov_args = NULL,
verbose = TRUE,
...) {
# ==== define rope range ====
@@ -410,16 +529,28 @@ equivalence_test.ggeffects <- function(x,
}
+ # ==== check degrees of freedom ====
+
+ df_column <- grep("(df|df_error)", colnames(x))
+ if (length(df_column) > 0) {
+ dof <- unique(x[[df_column]])
+ if (length(dof) > 1) {
+ dof <- Inf
+ }
+ } else {
+ dof <- Inf
+ }
+
# ==== requested confidence intervals ====
- params <- conf_int <- .ci_generic(x, ci = ci)
+ params <- conf_int <- .ci_generic(x, ci = ci, vcov = vcov, vcov_args = vcov_args, ...)
conf_int <- as.data.frame(t(conf_int[, c("CI_low", "CI_high")]))
# ==== the "narrower" intervals (1-2*alpha) for CET-rules. ====
alpha <- 1 - ci
- conf_int2 <- .ci_generic(x, ci = (ci - alpha))
+ conf_int2 <- .ci_generic(x, ci = (ci - alpha), vcov = vcov, vcov_args = vcov_args, ...)
conf_int2 <- as.data.frame(t(conf_int2[, c("CI_low", "CI_high")]))
@@ -428,11 +559,12 @@ equivalence_test.ggeffects <- function(x,
l <- Map(
function(ci_wide, ci_narrow) {
.equivalence_test_numeric(
+ ci = ci,
ci_wide,
ci_narrow,
range_rope = range,
rule = rule,
- ci = ci,
+ dof = dof,
verbose = verbose
)
}, conf_int, conf_int2
@@ -451,7 +583,7 @@ equivalence_test.ggeffects <- function(x,
# ==== (adjusted) p-values for tests ====
- out$p <- .add_p_to_equitest(x, ci, range)
+ out$p <- .add_p_to_equitest(x, ci, range, vcov = vcov, vcov_args = vcov_args, ...)
attr(out, "rope") <- range
out
@@ -493,7 +625,7 @@ equivalence_test.ggeffects <- function(x,
out <- do.call(rbind, lapply(names(params), function(np) {
est <- params[[np]][, "(Intercept)"]
- stderr <- se[[np]][, "(Intercept)"]
+ std_err <- se[[np]][, "(Intercept)"]
d <- data.frame(
Parameter = rownames(params[[np]]),
@@ -504,23 +636,23 @@ equivalence_test.ggeffects <- function(x,
)
conf_int <- as.data.frame(t(data.frame(
- CI_low = est - stderr * fac,
- CI_high = est + stderr * fac
+ CI_low = est - std_err * fac,
+ CI_high = est + std_err * fac
)))
conf_int2 <- as.data.frame(t(data.frame(
- CI_low = est - stderr * fac_narrow,
- CI_high = est + stderr * fac_narrow
+ CI_low = est - std_err * fac_narrow,
+ CI_high = est + std_err * fac_narrow
)))
l <- Map(
function(ci_wide, ci_narrow) {
.equivalence_test_numeric(
+ ci = ci,
ci_wide,
ci_narrow,
range_rope = range,
rule = rule,
- ci = ci,
verbose = verbose
)
}, conf_int, conf_int2
@@ -538,7 +670,13 @@ equivalence_test.ggeffects <- function(x,
#' @keywords internal
-.equivalence_test_numeric <- function(ci_wide, ci_narrow, range_rope, rule, ci = 0.95, verbose) {
+.equivalence_test_numeric <- function(ci = 0.95,
+ ci_wide,
+ ci_narrow,
+ range_rope,
+ rule,
+ dof = Inf,
+ verbose) {
final_ci <- NULL
# ==== HDI+ROPE decision rule, by Kruschke ====
@@ -559,22 +697,14 @@ equivalence_test.ggeffects <- function(x,
if (rule == "classic") {
final_ci <- ci_narrow
- # significant result?
- if (min(ci_narrow) > 0 || max(ci_narrow) < 0) {
- # check if CI are entirely inside ROPE. If CI crosses ROPE, reject H0, else accept
- if (min(abs(ci_narrow)) < max(abs(range_rope)) && max(abs(ci_narrow)) < max(abs(range_rope))) {
- decision <- "Accepted"
- } else {
- decision <- "Rejected"
- }
- # non-significant results
+ if (all(ci_narrow < max(range_rope)) && all(ci_narrow > min(range_rope))) {
+ # narrow CI is fully inside ROPE - always accept
+ decision <- "Accepted"
+ } else if (min(ci_narrow) < 0 && max(ci_narrow) > 0) {
+ # non-significant results - undecided
+ decision <- "Undecided"
} else {
- # check if CI are entirely inside ROPE. If CI crosses ROPE, reject H0, else accept
- if (min(abs(ci_narrow)) < max(abs(range_rope)) && max(abs(ci_narrow)) < max(abs(range_rope))) {
- decision <- "Accepted"
- } else {
- decision <- "Undecided"
- }
+ decision <- "Rejected"
}
}
@@ -587,7 +717,7 @@ equivalence_test.ggeffects <- function(x,
if (min(ci_wide) > 0 || max(ci_wide) < 0) {
decision <- "Rejected"
# non-significant results, all narrow CI inside ROPE
- } else if (min(abs(ci_narrow)) < max(abs(range_rope)) && max(abs(ci_narrow)) < max(abs(range_rope))) {
+ } else if (all(ci_narrow < max(range_rope)) && all(ci_narrow > min(range_rope))) {
decision <- "Accepted"
} else {
decision <- "Undecided"
@@ -597,10 +727,9 @@ equivalence_test.ggeffects <- function(x,
data.frame(
CI_low = final_ci[1],
CI_high = final_ci[2],
- SGPV = .sgpv(range_rope, final_ci),
+ SGPV = .rope_coverage(ci = ci, range_rope, ci_range = final_ci, dof = dof),
ROPE_low = range_rope[1],
ROPE_high = range_rope[2],
- # ROPE_Percentage = .rope_coverage(range_rope, final_ci),
ROPE_Equivalence = decision,
stringsAsFactors = FALSE
)
@@ -611,31 +740,34 @@ equivalence_test.ggeffects <- function(x,
# helper ---------------------
-
-.sgpv <- function(rope, ci) {
- diff_rope <- abs(diff(rope))
+# this function simply takes the length of the range and calculates the proportion
+# of that range that is inside the rope. However, this assumes a "flat", i.e.
+# uniformly distributed interval, which is not accurate for standard confidence
+# intervals. thus, we no longer use this function, but switch to ".rope_coverage()".
+.sgpv <- function(range_rope, ci) {
+ diff_rope <- abs(diff(range_rope))
diff_ci <- abs(diff(ci))
# inside?
- if (min(ci) >= min(rope) && max(ci) <= max(rope)) {
+ if (min(ci) >= min(range_rope) && max(ci) <= max(range_rope)) {
coverage <- 1
# outside?
- } else if (max(ci) < min(rope) || min(ci) > max(rope)) {
+ } else if (max(ci) < min(range_rope) || min(ci) > max(range_rope)) {
coverage <- 0
# CI covers completely rope?
- } else if (max(ci) > max(rope) && min(ci) < min(rope)) {
+ } else if (max(ci) > max(range_rope) && min(ci) < min(range_rope)) {
coverage <- diff_rope / diff_ci
# CI inside rope and outside max rope?
- } else if (min(ci) >= min(rope) && max(ci) > max(rope)) {
- diff_in_rope <- max(rope) - min(ci)
+ } else if (min(ci) >= min(range_rope) && max(ci) > max(range_rope)) {
+ diff_in_rope <- max(range_rope) - min(ci)
coverage <- diff_in_rope / diff_ci
# CI inside rope and outside min rope?
- } else if (max(ci) <= max(rope) && min(ci) < min(rope)) {
- diff_in_rope <- max(ci) - min(rope)
+ } else if (max(ci) <= max(range_rope) && min(ci) < min(range_rope)) {
+ diff_in_rope <- max(ci) - min(range_rope)
coverage <- diff_in_rope / diff_ci
}
@@ -643,36 +775,72 @@ equivalence_test.ggeffects <- function(x,
}
-## FIXME make sure this works for different CI levels
-.rope_coverage <- function(rope, ci_range, ci) {
- diff_ci <- abs(diff(ci_range))
- out <- bayestestR::distribution_normal(
- n = 1000,
- mean = ci_range[2] - (diff_ci / 2),
- sd = diff_ci / 3.28
- )
-
- rc <- bayestestR::rope(out, range = rope, ci = ci)
+# this function simulates a normal distribution, which approximately has the
+# same range / limits as the confidence interval, thus indeed representing a
+# normally distributed confidence interval. We then calculate the probability
+# mass of this interval that is inside the ROPE.
+.rope_coverage <- function(ci = 0.95, range_rope, ci_range, dof = Inf) {
+ out <- .generate_posterior_from_ci(ci, ci_range, dof = dof)
+ # compare: ci_range and range(out)
+ # The SGPV refers to the proportion of the full confidence interval inside
+ # the ROPE - thus, we set ci = 1 here
+ rc <- bayestestR::rope(out, range = range_rope, ci = 1)
rc$ROPE_Percentage
}
-.add_p_to_equitest <- function(model, ci, range) {
+.generate_posterior_from_ci <- function(ci = 0.95, ci_range, dof = Inf, precision = 10000) {
+ # this function creates an approximate normal distribution that covers the
+ # CI-range, i.e. we "simulate" a posterior distribution from a frequentist CI
+
+ # sanity check - dof argument
+ if (is.null(dof)) {
+ dof <- Inf
+ }
+ # first we need the range of the CI (in units), also to calculate the mean of
+ # the normal distribution
+ diff_ci <- abs(diff(ci_range))
+ mean_dist <- ci_range[2] - (diff_ci / 2)
+ # then we need the critical values of the quantiles from the CI range
+ z_value <- stats::qt((1 + ci) / 2, df = dof)
+ # the range of Z-scores (from lower to upper quantile) gives us the range of
+ # the provided interval in terms of standard deviations. now we divide the
+ # known range of the provided CI (in units) by the z-score-range, which will
+ # give us the standard deviation of the distribution.
+ sd_dist <- diff_ci / diff(c(-1 * z_value, z_value))
+ # generate normal-distribution if we don't have t-distribution, or if
+ # we don't have necessary packages installed
+ if (is.infinite(dof) || !insight::check_if_installed("distributional", quietly = TRUE)) {
+ # tell user to install "distributional"
+ if (!is.infinite(dof)) {
+ insight::format_alert("For models with only few degrees of freedom, install the {distributional} package to increase accuracy of `p_direction()`, `p_significance()` and `equivalence_test()`.") # nolint
+ }
+ # we now know all parameters (mean and sd) to simulate a normal distribution
+ bayestestR::distribution_normal(n = precision, mean = mean_dist, sd = sd_dist)
+ } else {
+ insight::check_if_installed("distributional")
+ out <- distributional::dist_student_t(df = dof, mu = mean_dist, sigma = sd_dist)
+ sort(unlist(distributional::generate(out, times = precision), use.names = FALSE))
+ }
+}
+
+
+.add_p_to_equitest <- function(model, ci, range, vcov = NULL, vcov_args = NULL, ...) {
tryCatch(
{
params <- insight::get_parameters(model)
# degrees of freedom
- df <- degrees_of_freedom(model, method = "any")
+ dof <- insight::get_df(x = model, type = "wald")
# mu
params$mu <- params$Estimate * -1
# se
- se <- standard_error(model)
+ se <- standard_error(model, vcov = vcov, vcov_args = vcov_args, ...)
- stats::pt((range[1] - params$mu) / se$SE, df, lower.tail = TRUE) +
- stats::pt((range[2] - params$mu) / se$SE, df, lower.tail = FALSE)
+ stats::pt((range[1] - params$mu) / se$SE, df = dof, lower.tail = TRUE) +
+ stats::pt((range[2] - params$mu) / se$SE, df = dof, lower.tail = FALSE)
},
error = function(e) {
NULL
@@ -743,14 +911,12 @@ print.equivalence_test_lm <- function(x,
orig_x <- x
rule <- attributes(x)$rule
- if (!is.null(rule)) {
- if (rule == "cet") {
- insight::print_color("# Conditional Equivalence Testing\n\n", "blue")
- } else if (rule == "classic") {
- insight::print_color("# TOST-test for Practical Equivalence\n\n", "blue")
- } else {
- insight::print_color("# Test for Practical Equivalence\n\n", "blue")
- }
+ if (is.null(rule)) {
+ insight::print_color("# Test for Practical Equivalence\n\n", "blue")
+ } else if (rule == "cet") {
+ insight::print_color("# Conditional Equivalence Testing\n\n", "blue")
+ } else if (rule == "classic") {
+ insight::print_color("# TOST-test for Practical Equivalence\n\n", "blue")
} else {
insight::print_color("# Test for Practical Equivalence\n\n", "blue")
}
@@ -788,55 +954,3 @@ plot.equivalence_test_lm <- function(x, ...) {
insight::check_if_installed("see")
NextMethod()
}
-#'
-#'
-#' #' @export
-#' print_md.equivalence_test_lm <- function(x,
-#' digits = 2,
-#' ci_digits = digits,
-#' p_digits = 3,
-#' ci_brackets = NULL,
-#' zap_small = FALSE,
-#' ...) {
-#' orig_x <- x
-#'
-#' rule <- attributes(x)$rule
-#' if (!is.null(rule)) {
-#' if (rule == "cet") {
-#' title <- "Conditional Equivalence Testing"
-#' } else if (rule == "classic") {
-#' title <- "TOST-test for Practical Equivalence"
-#' } else {
-#' title <- "Test for Practical Equivalence"
-#' }
-#' } else {
-#' title <- "Test for Practical Equivalence"
-#' }
-#'
-#' .rope <- attr(x, "rope", exact = TRUE)
-#' subtitle <- sprintf(" ROPE: [%.*f %.*f]\n\n", digits, .rope[1], digits, .rope[2])
-#'
-#' # formatting
-#' x <- format(x,
-#' digits = digits,
-#' ci_digits = ci_digits,
-#' p_digits = p_digits,
-#' ci_width = NULL,
-#' ci_brackets = ci_brackets,
-#' format = "md",
-#' zap_small = zap_small,
-#' ...)
-#'
-#' if ("Group" %in% colnames(x)) {
-#' group_by <- "Group"
-#' } else {
-#' group_by <- NULL
-#' }
-#'
-#' cat(insight::export_table(x,
-#' format = "md",
-#' title = title,
-#' subtitle = subtitle,
-#' group_by = group_by))
-#' invisible(orig_x)
-#' }
diff --git a/R/extract_parameters.R b/R/extract_parameters.R
index dcccdf5c9..6f6653bd4 100644
--- a/R/extract_parameters.R
+++ b/R/extract_parameters.R
@@ -16,7 +16,7 @@
keep_parameters = NULL,
drop_parameters = NULL,
include_sigma = TRUE,
- summary = FALSE,
+ include_info = FALSE,
vcov = NULL,
vcov_args = NULL,
...) {
@@ -91,6 +91,9 @@
c("intercept", "location", "scale"),
lengths(model[c("Alpha", "beta", "zeta")])
)
+ } else if (inherits(model, "ordinal_weightit")) {
+ intercept_groups <- rep("conditional", nrow(parameters))
+ intercept_groups[grep("|", parameters$Parameter, fixed = TRUE)] <- "intercept"
} else {
intercept_groups <- NULL
}
@@ -103,9 +106,9 @@
# ==== CI - only if we don't already have CI for std. parameters
- if (is.null(ci)) {
- ci_cols <- NULL
- } else {
+ ci_cols <- NULL
+ if (!is.null(ci)) {
+ # set up arguments for CI function
fun_args <- list(model,
ci = ci,
component = component,
@@ -114,14 +117,13 @@
verbose = verbose
)
fun_args <- c(fun_args, dots)
+ # add method argument if provided
if (!is.null(ci_method)) {
fun_args[["method"]] <- ci_method
}
ci_df <- suppressMessages(do.call("ci", fun_args))
-
- if (is.null(ci_df)) {
- ci_cols <- NULL
- } else {
+ # success? merge CI into parameters
+ if (!is.null(ci_df)) {
# for multiple CI columns, reshape CI-dataframe to match parameters df
if (length(ci) > 1) {
ci_df <- datawizard::reshape_ci(ci_df)
@@ -145,7 +147,7 @@
)
fun_args <- c(fun_args, dots)
pval <- do.call("p_value", fun_args)
-
+ # success? merge p-values into parameters
if (!is.null(pval)) {
parameters <- merge(parameters, pval, by = merge_by, sort = FALSE)
}
@@ -166,7 +168,7 @@
fun_args[["method"]] <- ci_method
}
std_err <- do.call("standard_error", fun_args)
-
+ # success? merge SE into parameters
if (!is.null(std_err)) {
parameters <- merge(parameters, std_err, by = merge_by, sort = FALSE)
}
@@ -174,7 +176,6 @@
# ==== test statistic - fix values for robust vcov
-
# deprecated argument `robust = TRUE`
if (!is.null(vcov) || isTRUE(dots[["robust"]])) {
parameters$Statistic <- parameters$Estimate / parameters$SE
@@ -186,9 +187,9 @@
# ==== degrees of freedom
if (is.null(ci_method)) {
- df_error <- degrees_of_freedom(model, method = "any", verbose = FALSE)
+ df_error <- insight::get_df(x = model, type = "wald", verbose = FALSE)
} else {
- df_error <- degrees_of_freedom(model, method = ci_method, verbose = FALSE)
+ df_error <- insight::get_df(x = model, type = ci_method, verbose = FALSE)
}
if (!is.null(df_error) && (length(df_error) == 1 || length(df_error) == nrow(parameters))) {
if (length(df_error) == 1) {
@@ -224,7 +225,7 @@
if (inherits(model, "polr") && !is.null(intercept_groups)) {
parameters$Component <- "beta"
parameters$Component[intercept_groups] <- "alpha"
- } else if (inherits(model, c("clm", "clm2")) && !is.null(intercept_groups)) {
+ } else if (inherits(model, c("clm", "clm2", "ordinal_weightit")) && !is.null(intercept_groups)) {
parameters$Component <- intercept_groups
}
@@ -300,7 +301,7 @@
# ==== add sigma and residual df
- if (isTRUE(include_sigma) || isTRUE(summary)) {
+ if (isTRUE(include_sigma) || isTRUE(include_info)) {
parameters <- .add_sigma_residual_df(parameters, model)
}
@@ -318,7 +319,7 @@
sig <- .safe(suppressWarnings(insight::get_sigma(model, ci = NULL, verbose = FALSE)))
attr(params, "sigma") <- as.numeric(sig)
- resdf <- .safe(suppressWarnings(insight::get_df(model, type = "residual")))
+ resdf <- .safe(suppressWarnings(insight::get_df(x = model, type = "residual")))
attr(params, "residual_df") <- as.numeric(resdf)
}
params
@@ -355,7 +356,7 @@
verbose = TRUE) {
# check pattern
if (!is.null(keep) && length(keep) > 1) {
- keep <- paste0("(", paste0(keep, collapse = "|"), ")")
+ keep <- paste0("(", paste(keep, collapse = "|"), ")")
if (verbose) {
insight::format_alert(
sprintf("The `keep` argument has more than 1 element. Merging into following regular expression: `%s`.", keep)
@@ -365,7 +366,7 @@
# check pattern
if (!is.null(drop) && length(drop) > 1) {
- drop <- paste0("(", paste0(drop, collapse = "|"), ")")
+ drop <- paste0("(", paste(drop, collapse = "|"), ")")
if (verbose) {
insight::format_alert(
sprintf("The `drop` argument has more than 1 element. Merging into following regular expression: `%s`.", drop)
@@ -423,7 +424,7 @@
keep_parameters = NULL,
drop_parameters = NULL,
include_sigma = FALSE,
- summary = FALSE,
+ include_info = FALSE,
vcov = NULL,
vcov_args = NULL,
verbose = TRUE,
@@ -451,7 +452,7 @@
# Degrees of freedom
if (.dof_method_ok(model, ci_method)) {
- dof <- degrees_of_freedom(model, method = ci_method, verbose = FALSE)
+ dof <- insight::get_df(x = model, type = ci_method, verbose = FALSE)
} else {
dof <- Inf
}
@@ -467,9 +468,8 @@
# CI - only if we don't already have CI for std. parameters
- if (is.null(ci)) {
- ci_cols <- NULL
- } else {
+ ci_cols <- NULL
+ if (!is.null(ci)) {
# robust (current or deprecated)
if (!is.null(vcov) || isTRUE(list(...)[["robust"]])) {
fun_args <- list(model,
@@ -486,7 +486,9 @@
} else {
ci_df <- ci(model, ci = ci, method = ci_method, effects = "fixed")
}
- if (length(ci) > 1) ci_df <- datawizard::reshape_ci(ci_df)
+ if (length(ci) > 1) {
+ ci_df <- datawizard::reshape_ci(ci_df)
+ }
ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", "Parameter")]
parameters <- merge(parameters, ci_df, by = "Parameter", sort = FALSE)
}
@@ -563,7 +565,7 @@
if (!ci_method %in% special_ci_methods) {
df_error <- data.frame(
Parameter = parameters$Parameter,
- df_error = degrees_of_freedom(model, method = "any"),
+ df_error = insight::get_df(x = model, type = "wald"),
stringsAsFactors = FALSE
)
}
@@ -637,7 +639,7 @@
# add sigma
- if (isTRUE(include_sigma) || isTRUE(summary)) {
+ if (isTRUE(include_sigma) || isTRUE(include_info)) {
parameters <- .add_sigma_residual_df(parameters, model)
}
diff --git a/R/extract_parameters_anova.R b/R/extract_parameters_anova.R
index ab3e2df9b..b46b750ed 100644
--- a/R/extract_parameters_anova.R
+++ b/R/extract_parameters_anova.R
@@ -49,6 +49,7 @@
names(parameters) <- gsub("Resid. Dev", "Deviance_error", names(parameters), fixed = TRUE)
# error-df
if (!"df_error" %in% names(parameters)) {
+ names(parameters) <- gsub("DenDF", "df_error", names(parameters), fixed = TRUE)
names(parameters) <- gsub("den Df", "df_error", names(parameters), fixed = TRUE)
names(parameters) <- gsub("Res.Df", "df_error", names(parameters), fixed = TRUE)
names(parameters) <- gsub("Resid. Df", "df_error", names(parameters), fixed = TRUE)
@@ -68,8 +69,13 @@
# Reorder
row.names(parameters) <- NULL
- order <- c("Response", "Group", "Parameter", "Coefficient", "SE", "Pillai", "AIC", "BIC", "Log_Likelihood", "Chi2", "Chi2_df", "RSS", "Sum_Squares", "Sum_Squares_Partial", "Sum_Squares_Error", "df", "Deviance", "Statistic", "df_num", "df_error", "Deviance_error", "Mean_Square", "F", "Rao", "p")
- parameters <- parameters[order[order %in% names(parameters)]]
+ col_order <- c(
+ "Response", "Group", "Parameter", "Coefficient", "SE", "Pillai", "AIC",
+ "BIC", "Log_Likelihood", "Chi2", "Chi2_df", "RSS", "Sum_Squares",
+ "Sum_Squares_Partial", "Sum_Squares_Error", "df", "Deviance", "Statistic",
+ "df_num", "df_error", "Deviance_error", "Mean_Square", "F", "Rao", "p"
+ )
+ parameters <- parameters[col_order[col_order %in% names(parameters)]]
insight::text_remove_backticks(parameters, verbose = FALSE)
}
@@ -235,7 +241,7 @@
Parameter = model$terms[i],
df = model$df[i],
Statistic = test[1],
- `F` = test[2],
+ `F` = test[2], # nolint
df_num = test[3],
df_error = test[4],
p = stats::pf(test[2], test[3], test[4], lower.tail = FALSE),
@@ -311,7 +317,7 @@
.power_for_aov <- function(model, params) {
if (requireNamespace("effectsize", quietly = TRUE)) {
- power <- tryCatch(
+ power_aov <- tryCatch(
{
cohens_f2 <- effectsize::cohens_f_squared(model, partial = TRUE, verbose = FALSE)
@@ -329,8 +335,8 @@
)
}
- if (!is.null(power)) {
- params <- merge(params, cohens_f2[c("Parameter", "Power")], sort = FALSE, all = TRUE)
+ if (!is.null(power_aov)) {
+ params <- merge(params, power_aov[c("Parameter", "Power")], sort = FALSE, all = TRUE)
}
params
diff --git a/R/extract_random_parameters.R b/R/extract_random_parameters.R
index c8dc8325d..6b5fbb03f 100644
--- a/R/extract_random_parameters.R
+++ b/R/extract_random_parameters.R
@@ -24,7 +24,7 @@
out$CI_high <- out$Coefficient + fac * out$SE
ci_cols <- c("CI_low", "CI_high")
} else {
- ci_cols <- c()
+ ci_cols <- NULL
for (i in ci) {
fac <- stats::qnorm((1 + i) / 2)
ci_low <- paste0("CI_low_", i)
@@ -66,7 +66,10 @@
out <- switch(component,
zi = ,
zero_inflated = out[out$Component == "zi", ],
+ cond = ,
conditional = out[out$Component == "cond", ],
+ disp = ,
+ dispersion = out[out$Component == "disp", ],
out
)
@@ -79,6 +82,7 @@
# rename
out$Component[out$Component == "zi"] <- "zero_inflated"
out$Component[out$Component == "cond"] <- "conditional"
+ out$Component[out$Component == "disp"] <- "dispersion"
if (length(ci) == 1) {
fac <- stats::qnorm((1 + ci) / 2)
@@ -86,7 +90,7 @@
out$CI_high <- out$Coefficient + fac * out$SE
ci_cols <- c("CI_low", "CI_high")
} else {
- ci_cols <- c()
+ ci_cols <- NULL
for (i in ci) {
fac <- stats::qnorm((1 + i) / 2)
ci_low <- paste0("CI_low_", i)
@@ -104,7 +108,10 @@
out$df_error <- NA
out$p <- NA
- out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Component", "Effects", "Group")]
+ out <- out[c(
+ "Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column,
+ "df_error", "p", "Component", "Effects", "Group"
+ )]
if (effects == "random") {
out[c(stat_column, "df_error", "p")] <- NULL
diff --git a/R/extract_random_variances.R b/R/extract_random_variances.R
index 620aabfa0..9b042a79e 100644
--- a/R/extract_random_variances.R
+++ b/R/extract_random_variances.R
@@ -108,6 +108,42 @@
.extract_random_variances.MixMod <- .extract_random_variances.glmmTMB
+# svy2lme ------------------------
+
+.extract_random_variances.svy2lme <- function(model, ci = 0.95, effects = "random", ...) {
+ s <- sqrt(as.vector(model$s2))
+ stdev <- matrix(s * sqrt(diag(model$L)), ncol = 1)
+ vcnames <- c(paste0("SD (", model$znames, ")"), "SD (Observations)")
+ grp_names <- names(model$znames)
+ if (is.null(grp_names)) {
+ grp_names <- model$znames
+ }
+
+ out <- data.frame(
+ Parameter = vcnames,
+ Level = NA,
+ Coefficient = c(as.vector(stdev), s),
+ SE = NA,
+ CI_low = NA,
+ CI_high = NA,
+ t = NA,
+ df_error = NA,
+ p = NA,
+ Effects = "random",
+ Group = c(grp_names, "Residual"),
+ stringsAsFactors = FALSE
+ )
+
+ # fix intercept names
+ out$Parameter <- gsub("(Intercept)", "Intercept", out$Parameter, fixed = TRUE)
+
+ if (effects == "random") {
+ out[c("t", "df_error", "p")] <- NULL
+ }
+
+ rownames(out) <- NULL
+ out
+}
diff --git a/R/format.R b/R/format.R
index 790489abf..857068863 100644
--- a/R/format.R
+++ b/R/format.R
@@ -1,7 +1,7 @@
# usual models ---------------------------------
#' @inheritParams print.parameters_model
-#' @rdname display.parameters_model
+#' @rdname print.parameters_model
#' @export
format.parameters_model <- function(x,
pretty_names = TRUE,
@@ -240,6 +240,7 @@ format.parameters_brms_meta <- format.parameters_model
# Compare parameters ----------------------
+#' @rdname print.compare_parameters
#' @inheritParams print.parameters_model
#' @export
format.compare_parameters <- function(x,
@@ -311,7 +312,7 @@ format.compare_parameters <- function(x,
# since we merged all models together, and we only have model-specific
# columns for estimates, CI etc. but not for Effects and Component, we
# extract "valid" rows via non-NA values in the coefficient column
- coef_column <- which(colnames(cols) %in% c(.all_coefficient_types(), "Coefficient"))
+ coef_column <- which(colnames(cols) %in% c(.all_coefficient_types, "Coefficient"))
valid_rows <- which(!is.na(cols[[coef_column]]))
# check if we have mixed models with random variance parameters
# in such cases, we don't need the group-column, but we rather
@@ -541,6 +542,7 @@ format.parameters_sem <- function(x,
show_sigma = FALSE,
show_formula = FALSE,
show_r2 = FALSE,
+ show_rmse = FALSE,
format = "text") {
# prepare footer
footer <- NULL
@@ -548,6 +550,7 @@ format.parameters_sem <- function(x,
sigma_value <- attributes(x)$sigma
r2 <- attributes(x)$r2
+ rmse <- attributes(x)$rmse
residual_df <- attributes(x)$residual_df
p_adjust <- attributes(x)$p_adjust
model_formula <- attributes(x)$model_formula
@@ -574,29 +577,35 @@ format.parameters_sem <- function(x,
footer <- .add_footer_r2(footer, digits, r2, type)
}
+ # footer: RMSE
+ if (isTRUE(show_rmse)) {
+ footer <- .add_footer_values(footer, digits, value = rmse, text = "RMSE ", type)
+ }
+
# footer: p-adjustment
- if ("p" %in% colnames(x) && isTRUE(verbose)) {
- footer <- .add_footer_padjust(footer, p_adjust, type)
+ if ("p" %in% colnames(x) && isTRUE(verbose) && !is.null(p_adjust) && p_adjust != "none") {
+ footer <- .add_footer_text(footer, text = paste("p-value adjustment method:", format_p_adjust(p_adjust)))
}
# footer: anova test
if (!is.null(anova_test)) {
- footer <- .add_footer_anova_test(footer, anova_test, type)
+ footer <- .add_footer_text(footer, text = sprintf("%s test statistic", anova_test))
}
- # footer: anova test
+ # footer: anova type
if (!is.null(anova_type)) {
- footer <- .add_footer_anova_type(footer, anova_type, type)
+ footer <- .add_footer_text(footer, text = sprintf("Anova Table (Type %s tests)", anova_type))
}
+
# footer: marginaleffects::comparisons()
if (!is.null(prediction_type)) {
- footer <- .add_footer_prediction_type(footer, prediction_type, type)
+ footer <- .add_footer_text(footer, text = sprintf("Prediction type: %s", prediction_type))
}
# footer: htest alternative
if (!is.null(text_alternative)) {
- footer <- .add_footer_alternative(footer, text_alternative, type)
+ footer <- .add_footer_text(footer, text = text_alternative)
}
# footer: generic text
@@ -626,7 +635,7 @@ format.parameters_sem <- function(x,
# footer: generic text
.add_footer_text <- function(footer = NULL, text = NULL, type = "text", is_ggeffects = FALSE) {
- if (!is.null(text)) {
+ if (!is.null(text) && length(text)) {
if (type == "text" || type == "markdown") {
if (is.null(footer)) {
fill <- "\n"
@@ -643,6 +652,29 @@ format.parameters_sem <- function(x,
}
+# footer: generic values
+.add_footer_values <- function(footer = NULL,
+ digits = 3,
+ value = NULL,
+ text = NULL,
+ type = "text") {
+ if (!is.null(value) && !is.null(text)) {
+ string <- sprintf("%s: %s", text, insight::format_value(value, digits = digits))
+ if (type == "text" || type == "markdown") {
+ if (is.null(footer)) {
+ fill <- "\n"
+ } else {
+ fill <- ""
+ }
+ footer <- paste0(footer, fill, string, "\n")
+ } else if (type == "html") {
+ footer <- c(footer, string)
+ }
+ }
+ footer
+}
+
+
# footer: residual standard deviation
.add_footer_sigma <- function(footer = NULL, digits = 3, sigma = NULL, residual_df = NULL, type = "text") {
if (!is.null(sigma)) {
@@ -659,9 +691,9 @@ format.parameters_sem <- function(x,
} else {
fill <- ""
}
- footer <- paste0(footer, sprintf("%sResidual standard deviation: %.*f%s\n", fill, digits, sigma, res_df))
+ footer <- paste0(footer, sprintf("%sSigma: %.*f%s\n", fill, digits, sigma, res_df))
} else if (type == "html") {
- footer <- c(footer, insight::trim_ws(sprintf("Residual standard deviation: %.*f%s", digits, sigma, res_df)))
+ footer <- c(footer, insight::trim_ws(sprintf("Sigma: %.*f%s", digits, sigma, res_df)))
}
}
footer
@@ -671,7 +703,7 @@ format.parameters_sem <- function(x,
# footer: r-squared
.add_footer_r2 <- function(footer = NULL, digits = 3, r2 = NULL, type = "text") {
if (!is.null(r2)) {
- rsq <- .safe(paste0(unlist(lapply(r2, function(i) {
+ rsq <- .safe(paste(unlist(lapply(r2, function(i) {
paste0(attributes(i)$names, ": ", insight::format_value(i, digits = digits))
})), collapse = "; "))
@@ -692,96 +724,6 @@ format.parameters_sem <- function(x,
}
-# footer: anova type
-.add_footer_anova_type <- function(footer = NULL, aov_type = NULL, type = "text") {
- if (!is.null(aov_type)) {
- if (type == "text" || type == "markdown") {
- if (is.null(footer)) {
- fill <- "\n"
- } else {
- fill <- ""
- }
- footer <- paste0(footer, sprintf("%sAnova Table (Type %s tests)\n", fill, aov_type))
- } else if (type == "html") {
- footer <- c(footer, sprintf("Anova Table (Type %s tests)", aov_type))
- }
- }
- footer
-}
-
-
-# footer: marginaleffects::comparisions() prediction_type
-.add_footer_prediction_type <- function(footer = NULL, prediction_type = NULL, type = "text") {
- if (!is.null(prediction_type)) {
- if (type == "text" || type == "markdown") {
- if (is.null(footer)) {
- fill <- "\n"
- } else {
- fill <- ""
- }
- footer <- paste0(footer, sprintf("%sPrediction type: %s\n", fill, prediction_type))
- } else if (type == "html") {
- footer <- c(footer, sprintf("Prediction type: %s", prediction_type))
- }
- }
- footer
-}
-
-
-# footer: anova test
-.add_footer_anova_test <- function(footer = NULL, test = NULL, type = "text") {
- if (!is.null(test)) {
- if (type == "text" || type == "markdown") {
- if (is.null(footer)) {
- fill <- "\n"
- } else {
- fill <- ""
- }
- footer <- paste0(footer, sprintf("%s%s test statistic\n", fill, test))
- } else if (type == "html") {
- footer <- c(footer, sprintf("%s test statistic", test))
- }
- }
- footer
-}
-
-
-# footer: htest alternative
-.add_footer_alternative <- function(footer = NULL, text_alternative = NULL, type = "text") {
- if (!is.null(text_alternative)) {
- if (type == "text" || type == "markdown") {
- if (is.null(footer)) {
- fill <- "\n"
- } else {
- fill <- ""
- }
- footer <- paste0(footer, sprintf("%s%s\n", fill, text_alternative))
- } else if (type == "html") {
- footer <- c(footer, text_alternative)
- }
- }
- footer
-}
-
-
-# footer: p-adjustment
-.add_footer_padjust <- function(footer = NULL, p_adjust = NULL, type = "text") {
- if (!is.null(p_adjust) && p_adjust != "none") {
- if (type == "text" || type == "markdown") {
- if (is.null(footer)) {
- fill <- "\n"
- } else {
- fill <- ""
- }
- footer <- paste0(footer, fill, "p-value adjustment method: ", format_p_adjust(p_adjust), "\n")
- } else if (type == "html") {
- footer <- c(footer, paste0("p-value adjustment method: ", format_p_adjust(p_adjust)))
- }
- }
- footer
-}
-
-
# footer: model formula
.add_footer_formula <- function(footer = NULL, model_formula = NULL, n_obs = NULL, type = "text") {
if (!is.null(model_formula)) {
@@ -918,11 +860,13 @@ format.parameters_sem <- function(x,
.print_footer_exp <- function(x) {
+ # we need this to check whether we have extremely large coefficients
if (isTRUE(getOption("parameters_exponentiate", TRUE))) {
msg <- NULL
- # we need this to check whether we have extremely large cofficients
- if (all(c("Coefficient", "Parameter") %in% colnames(x))) {
- spurious_coefficients <- abs(x$Coefficient[!.in_intercepts(x$Parameter)])
+ # try to find out the name of the coefficient column
+ coef_column <- intersect(colnames(x), .all_coefficient_names)
+ if (length(coef_column) && "Parameter" %in% colnames(x)) {
+ spurious_coefficients <- abs(x[[coef_column[1]]][!.in_intercepts(x$Parameter)])
} else {
spurious_coefficients <- NULL
}
@@ -930,8 +874,10 @@ format.parameters_sem <- function(x,
if (!.is_valid_exponentiate_argument(exponentiate)) {
if (isTRUE(.additional_arguments(x, "log_link", FALSE))) {
msg <- "The model has a log- or logit-link. Consider using `exponentiate = TRUE` to interpret coefficients as ratios." # nolint
- # we only check for exp(coef), so exp() here soince coefficients are on logit-scale
- spurious_coefficients <- exp(spurious_coefficients)
+ # we only check for exp(coef), so exp() here since coefficients are on logit-scale
+ if (!is.null(spurious_coefficients)) {
+ spurious_coefficients <- exp(spurious_coefficients)
+ }
} else if (isTRUE(.additional_arguments(x, "log_response", FALSE))) {
msg <- "The model has a log-transformed response variable. Consider using `exponentiate = TRUE` to interpret coefficients as ratios." # nolint
# don't show warning about complete separation
diff --git a/R/format_parameters.R b/R/format_parameters.R
index 818556416..c9f5851da 100644
--- a/R/format_parameters.R
+++ b/R/format_parameters.R
@@ -11,7 +11,7 @@
#' @section Interpretation of Interaction Terms:
#' Note that the *interpretation* of interaction terms depends on many
#' characteristics of the model. The number of parameters, and overall
-#' performance of the model, can differ *or not* between `a * b`
+#' performance of the model, can differ *or not* between `a * b`,
#' `a : b`, and `a / b`, suggesting that sometimes interaction terms
#' give different parameterizations of the same model, but other times it gives
#' completely different models (depending on `a` or `b` being factors
@@ -72,10 +72,10 @@ format_parameters.parameters_model <- function(model, ...) {
# save some time, if model info is passed as argument
dot_args <- list(...)
- if (!is.null(dot_args$model_info)) {
- info <- dot_args$model_info
- } else {
+ if (is.null(dot_args$model_info)) {
info <- insight::model_info(model, verbose = FALSE)
+ } else {
+ info <- dot_args$model_info
}
## TODO remove is.list() when insight 0.8.3 on CRAN
@@ -299,16 +299,16 @@ format_parameters.parameters_model <- function(model, ...) {
if (type == "interaction") {
components <- paste0(
"(",
- paste0(utils::head(components, -1), collapse = sep),
+ paste(utils::head(components, -1), collapse = sep),
")",
sep,
utils::tail(components, 1)
)
} else {
- components <- paste0(components, collapse = sep)
+ components <- paste(components, collapse = sep)
}
} else {
- components <- paste0(components, collapse = sep)
+ components <- paste(components, collapse = sep)
}
components
}
@@ -378,6 +378,12 @@ format_parameters.parameters_model <- function(model, ...) {
if (!is.null(model) && insight::is_regression_model(model) && !is.data.frame(model)) {
# get data, but exclude response - we have no need for that label
mf <- insight::get_data(model, source = "mf", verbose = FALSE)
+ # sanity check - any labels?
+ has_labels <- vapply(mf, function(i) !is.null(attr(i, "labels", exact = TRUE)), logical(1))
+ # if we don't have labels, we try to get data from environment
+ if (!any(has_labels)) {
+ mf <- insight::get_data(model, source = "environment", verbose = FALSE)
+ }
resp <- insight::find_response(model, combine = FALSE)
mf <- mf[, setdiff(colnames(mf), resp), drop = FALSE]
@@ -432,7 +438,7 @@ format_parameters.parameters_model <- function(model, ...) {
# extract single coefficient names from interaction term
out <- unlist(strsplit(i, ":", fixed = TRUE))
# combine labels
- labs <- c(labs, paste0(sapply(out, function(l) pretty_labels[l]), collapse = " * "))
+ labs <- c(labs, paste(sapply(out, function(l) pretty_labels[l]), collapse = " * "))
}
# add interaction terms to labels string
names(labs) <- names(interactions)
@@ -461,10 +467,10 @@ format_parameters.parameters_model <- function(model, ...) {
win_os <- tryCatch(
{
si <- Sys.info()
- if (!is.null(si["sysname"])) {
- si["sysname"] == "Windows" || startsWith(R.version$os, "mingw")
- } else {
+ if (is.null(si["sysname"])) {
FALSE
+ } else {
+ si["sysname"] == "Windows" || startsWith(R.version$os, "mingw")
}
},
error = function(e) {
diff --git a/R/methods_BBMM.R b/R/methods_BBMM.R
index 912b16b0c..15ae03155 100644
--- a/R/methods_BBMM.R
+++ b/R/methods_BBMM.R
@@ -64,18 +64,3 @@ p_value.BBreg <- function(model, ...) {
p = as.data.frame(summary(model)$coefficients)$p.value
)
}
-
-
-
-#' @export
-degrees_of_freedom.BBmm <- function(model, method = "residual", ...) {
- if (method %in% c("residual", "wald")) {
- return(model$df)
- } else {
- return(degrees_of_freedom.default(model = model, method = method, ...))
- }
-}
-
-
-#' @export
-degrees_of_freedom.BBreg <- degrees_of_freedom.BBmm
diff --git a/R/methods_BayesFactor.R b/R/methods_BayesFactor.R
index 99573ef62..cb8b85cd8 100644
--- a/R/methods_BayesFactor.R
+++ b/R/methods_BayesFactor.R
@@ -13,7 +13,6 @@
#' @inheritParams bayestestR::describe_posterior
#' @inheritParams p_value
#' @inheritParams model_parameters.htest
-#' @param cohens_d,cramers_v Deprecated. Please use `effectsize_type`.
#'
#' @details
#' The meaning of the extracted parameters:
@@ -29,24 +28,26 @@
#' the *g* parameters; See the *Bayes Factors for ANOVAs* paper
#' (\doi{10.1016/j.jmp.2012.08.001}).
#'
-#' @examples
+#' @examplesIf require("BayesFactor")
#' \donttest{
-#' if (require("BayesFactor")) {
-#' # Bayesian t-test
-#' model <- ttestBF(x = rnorm(100, 1, 1))
-#' model_parameters(model)
-#' model_parameters(model, cohens_d = TRUE, ci = .9)
+#' # Bayesian t-test
+#' model <- BayesFactor::ttestBF(x = rnorm(100, 1, 1))
+#' model_parameters(model)
+#' model_parameters(model, es_type = "cohens_d", ci = 0.9)
#'
-#' # Bayesian contingency table analysis
-#' data(raceDolls)
-#' bf <- contingencyTableBF(raceDolls, sampleType = "indepMulti", fixedMargin = "cols")
-#' model_parameters(bf,
-#' centrality = "mean",
-#' dispersion = TRUE,
-#' verbose = FALSE,
-#' effectsize_type = "cramers_v"
-#' )
-#' }
+#' # Bayesian contingency table analysis
+#' data(raceDolls)
+#' bf <- BayesFactor::contingencyTableBF(
+#' raceDolls,
+#' sampleType = "indepMulti",
+#' fixedMargin = "cols"
+#' )
+#' model_parameters(bf,
+#' centrality = "mean",
+#' dispersion = TRUE,
+#' verbose = FALSE,
+#' es_type = "cramers_v"
+#' )
#' }
#' @return A data frame of indices related to the model's parameters.
#' @export
@@ -59,19 +60,12 @@ model_parameters.BFBayesFactor <- function(model,
rope_range = "default",
rope_ci = 0.95,
priors = TRUE,
- effectsize_type = NULL,
+ es_type = NULL,
include_proportions = FALSE,
verbose = TRUE,
- cohens_d = NULL,
- cramers_v = NULL,
...) {
insight::check_if_installed("BayesFactor")
- ## TODO: remove in a later update
- # handle deprected arguments ------
- if (!is.null(cramers_v)) effectsize_type <- "cramers_v"
- if (!is.null(cohens_d)) effectsize_type <- "cohens_d"
-
if (any(startsWith(names(model@numerator), "Null"))) {
if (isTRUE(verbose)) {
insight::format_alert(
@@ -121,10 +115,10 @@ model_parameters.BFBayesFactor <- function(model,
tryCatch(
{
bfm <- as.data.frame(bayestestR::bayesfactor_models(model)[-1, ])
- if (!is.null(bfm$log_BF)) {
- out$BF <- exp(bfm$log_BF)
- } else {
+ if (is.null(bfm$log_BF)) {
out$BF <- bfm$BF
+ } else {
+ out$BF <- exp(bfm$log_BF)
}
},
error = function(e) {
@@ -138,19 +132,19 @@ model_parameters.BFBayesFactor <- function(model,
}
# Effect size?
- if (!is.null(effectsize_type)) {
+ if (!is.null(es_type)) {
# needs {effectsize} to be installed
insight::check_if_installed("effectsize")
- ## TODO: add back ci-argument, once effectsize >= 0.7.1 is on CRAN.
tryCatch(
{
effsize <- effectsize::effectsize(model,
centrality = centrality,
dispersion = dispersion,
+ ci = ci,
ci_method = ci_method,
rope_ci = rope_ci,
- type = effectsize_type,
+ type = es_type,
...
)
diff --git a/R/methods_DirichletReg.R b/R/methods_DirichletReg.R
index eb97718f3..300a8c2a0 100644
--- a/R/methods_DirichletReg.R
+++ b/R/methods_DirichletReg.R
@@ -21,20 +21,22 @@ model_parameters.DirichletRegModel <- function(model,
## TODO check merge by
- junk <- utils::capture.output(out <- .model_parameters_generic( # nolint
- model = model,
- ci = ci,
- component = component,
- bootstrap = bootstrap,
- iterations = iterations,
- merge_by = merge_by,
- standardize = standardize,
- exponentiate = exponentiate,
- p_adjust = p_adjust,
- keep_parameters = keep,
- drop_parameters = drop,
- ...
- ))
+ junk <- utils::capture.output({
+ out <- .model_parameters_generic(
+ model = model,
+ ci = ci,
+ component = component,
+ bootstrap = bootstrap,
+ iterations = iterations,
+ merge_by = merge_by,
+ standardize = standardize,
+ exponentiate = exponentiate,
+ p_adjust = p_adjust,
+ keep_parameters = keep,
+ drop_parameters = drop,
+ ...
+ )
+ })
out$Response[is.na(out$Response)] <- ""
attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
diff --git a/R/methods_aod.R b/R/methods_aod.R
index fe98716ae..ef067d3e2 100644
--- a/R/methods_aod.R
+++ b/R/methods_aod.R
@@ -18,6 +18,7 @@ model_parameters.glimML <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -29,6 +30,12 @@ model_parameters.glimML <- function(model,
merge_by <- "Parameter"
}
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# dispersion is just an alias...
if (component == "dispersion") {
component <- "random"
@@ -46,7 +53,7 @@ model_parameters.glimML <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
verbose = verbose,
...
)
diff --git a/R/methods_aov.R b/R/methods_aov.R
index 61ca4cb36..0d13590c0 100644
--- a/R/methods_aov.R
+++ b/R/methods_aov.R
@@ -7,7 +7,7 @@
#' @param model Object of class [aov()], [anova()],
#' `aovlist`, `Gam`, [manova()], `Anova.mlm`,
#' `afex_aov` or `maov`.
-#' @param effectsize_type The effect size of interest. Not that possibly not all
+#' @param es_type The effect size of interest. Note that possibly not all
#' effect sizes are applicable to the model object. See 'Details'. For Anova
#' models, can also be a character vector with multiple effect size names.
#' @param df_error Denominator degrees of freedom (or degrees of freedom of the
@@ -18,7 +18,7 @@
#' ANOVA-tables using `car::Anova()` will be returned. (Ignored for
#' `afex_aov`.)
#' @param ci Confidence Interval (CI) level for effect sizes specified in
-#' `effectsize_type`. The default, `NULL`, will compute no confidence
+#' `es_type`. The default, `NULL`, will compute no confidence
#' intervals. `ci` should be a scalar between 0 and 1.
#' @param test String, indicating the type of test for `Anova.mlm` to be
#' returned. If `"multivariate"` (or `NULL`), returns the summary of
@@ -35,7 +35,6 @@
#' (e.g., `"g"`, `"l"`, `"two"`...). See section *One-Sided CIs* in
#' the [effectsize_CIs vignette](https://easystats.github.io/effectsize/).
#' @inheritParams model_parameters.default
-#' @param omega_squared,eta_squared,epsilon_squared Deprecated. Please use `effectsize_type`.
#' @param ... Arguments passed to [`effectsize::effectsize()`]. For example,
#' to calculate _partial_ effect sizes types, use `partial = TRUE`. For objects
#' of class `htest` or `BFBayesFactor`, `adjust = TRUE` can be used to return
@@ -66,13 +65,13 @@
#' model <- aov(Sepal.Length ~ Sepal.Big, data = df)
#' model_parameters(model)
#'
-#' model_parameters(model, effectsize_type = c("omega", "eta"), ci = 0.9)
+#' model_parameters(model, es_type = c("omega", "eta"), ci = 0.9)
#'
#' model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df))
#' model_parameters(model)
#' model_parameters(
#' model,
-#' effectsize_type = c("omega", "eta", "epsilon"),
+#' es_type = c("omega", "eta", "epsilon"),
#' alternative = "greater"
#' )
#'
@@ -92,7 +91,7 @@
#' # parameters table including effect sizes
#' model_parameters(
#' model,
-#' effectsize_type = "eta",
+#' es_type = "eta",
#' ci = 0.9,
#' df_error = dof_satterthwaite(mm)[2:3]
#' )
@@ -105,48 +104,21 @@ model_parameters.aov <- function(model,
alternative = NULL,
test = NULL,
power = FALSE,
- effectsize_type = NULL,
+ es_type = NULL,
keep = NULL,
drop = NULL,
table_wide = FALSE,
verbose = TRUE,
- omega_squared = NULL,
- eta_squared = NULL,
- epsilon_squared = NULL,
...) {
- ## TODO: remove in a later update
- # handle deprected arguments ------
- if (!is.null(omega_squared)) {
- insight::format_warning(
- "Argument `omega_squared` is deprecated.",
- "Please use `effectsize_type = \"omega\"` instead."
- )
- effectsize_type <- "omega"
- }
- if (!is.null(eta_squared)) {
- insight::format_warning(
- "Argument `eta_squared` is deprecated.",
- "Please use `effectsize_type = \"eta\"` instead."
- )
- effectsize_type <- "eta"
- }
- if (!is.null(epsilon_squared)) {
- insight::format_warning(
- "Argument `epsilon_squared` is deprecated.",
- "Please use `effectsize_type = \"epsilon\"` instead."
- )
- effectsize_type <- "epsilon"
- }
-
# save model object, for later checks
original_model <- model
object_name <- insight::safe_deparse_symbol(substitute(model))
if (inherits(model, "aov") && !is.null(type) && type > 1) {
- if (!requireNamespace("car", quietly = TRUE)) {
- insight::format_warning("Package {.pkg car} required for type-2 or type-3 Anova. Defaulting to type-1.")
- } else {
+ if (requireNamespace("car", quietly = TRUE)) {
model <- car::Anova(model, type = type)
+ } else {
+ insight::format_warning("Package {.pkg car} required for type-2 or type-3 Anova. Defaulting to type-1.")
}
}
@@ -172,7 +144,7 @@ model_parameters.aov <- function(model,
params <- .effectsizes_for_aov(
model,
params = params,
- effectsize_type = effectsize_type,
+ es_type = es_type,
df_error = df_error,
ci = ci,
alternative = alternative,
@@ -278,7 +250,7 @@ model_parameters.aovlist <- model_parameters.aov
#' @rdname model_parameters.aov
#' @export
model_parameters.afex_aov <- function(model,
- effectsize_type = NULL,
+ es_type = NULL,
df_error = NULL,
type = NULL,
keep = NULL,
@@ -298,7 +270,7 @@ model_parameters.afex_aov <- function(model,
out <- .effectsizes_for_aov(
model,
params = out,
- effectsize_type = effectsize_type,
+ es_type = es_type,
df_error = df_error,
verbose = verbose,
...
@@ -349,6 +321,7 @@ model_parameters.maov <- model_parameters.aov
if (is.numeric(type)) {
return(type)
}
+ # nolint start
switch(type,
`1` = ,
`I` = 1,
@@ -358,6 +331,7 @@ model_parameters.maov <- model_parameters.aov
`III` = 3,
1
)
+ # nolint end
}
# default to 1
@@ -430,28 +404,26 @@ model_parameters.maov <- model_parameters.aov
predictors <- .safe(insight::get_predictors(model))
# if data available, check contrasts and mean centering
- if (!is.null(predictors)) {
+ if (is.null(predictors)) {
+ treatment_contrasts_or_not_centered <- FALSE
+ } else {
treatment_contrasts_or_not_centered <- vapply(predictors, function(i) {
if (is.factor(i)) {
cn <- stats::contrasts(i)
if (is.null(cn) || (all(cn %in% c(0, 1)))) {
return(TRUE)
}
- } else {
- if (abs(mean(i, na.rm = TRUE)) > 1e-2) {
- return(TRUE)
- }
+ } else if (abs(mean(i, na.rm = TRUE)) > 1e-2) {
+ return(TRUE)
}
- return(FALSE)
+ FALSE
}, TRUE)
- } else {
- treatment_contrasts_or_not_centered <- FALSE
}
# successfully checked predictors, or if not possible, at least found interactions?
if (!is.null(interaction_terms) && (any(treatment_contrasts_or_not_centered) || is.null(predictors))) {
insight::format_alert(
- "Type 3 ANOVAs only give sensible and informative results when covariates are mean-centered and factors are coded with orthogonal contrasts (such as those produced by `contr.sum`, `contr.poly`, or `contr.helmert`, but *not* by the default `contr.treatment`)."
+ "Type 3 ANOVAs only give sensible and informative results when covariates are mean-centered and factors are coded with orthogonal contrasts (such as those produced by `contr.sum`, `contr.poly`, or `contr.helmert`, but *not* by the default `contr.treatment`)." # nolint
)
}
}
@@ -460,19 +432,19 @@ model_parameters.maov <- model_parameters.aov
.effectsizes_for_aov <- function(model,
params,
- effectsize_type = NULL,
+ es_type = NULL,
df_error = NULL,
ci = NULL,
alternative = NULL,
verbose = TRUE,
...) {
# user actually does not want to compute effect sizes
- if (is.null(effectsize_type)) {
+ if (is.null(es_type)) {
return(params)
}
# is valid effect size?
- if (!all(effectsize_type %in% c("eta", "omega", "epsilon", "f", "f2"))) {
+ if (!all(es_type %in% c("eta", "omega", "epsilon", "f", "f2"))) {
return(params)
}
@@ -490,7 +462,7 @@ model_parameters.maov <- model_parameters.aov
}
# multiple effect sizes possible
- for (es in effectsize_type) {
+ for (es in es_type) {
fx <- effectsize::effectsize(
model,
type = es,
diff --git a/R/methods_averaging.R b/R/methods_averaging.R
index 5c3d9f690..f3c7e00a3 100644
--- a/R/methods_averaging.R
+++ b/R/methods_averaging.R
@@ -41,11 +41,19 @@ model_parameters.averaging <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
component <- match.arg(component)
+
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
out <- .model_parameters_generic(
model = model,
ci = ci,
@@ -55,7 +63,7 @@ model_parameters.averaging <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
...
)
diff --git a/R/methods_base.R b/R/methods_base.R
index 955660264..08236bab4 100644
--- a/R/methods_base.R
+++ b/R/methods_base.R
@@ -1,9 +1,13 @@
#' @rdname model_parameters.stanreg
#' @export
-model_parameters.data.frame <- function(model, as_draws = FALSE, verbose = TRUE, ...) {
+model_parameters.data.frame <- function(model,
+ as_draws = FALSE,
+ exponentiate = FALSE,
+ verbose = TRUE,
+ ...) {
# treat data frame as bootstraps/posteriors?
if (isTRUE(as_draws)) {
- return(model_parameters.draws(model, verbose = verbose, ...))
+ return(model_parameters.draws(model, exponentiate = exponentiate, verbose = verbose, ...))
}
if (isTRUE(verbose)) {
insight::format_warning(
@@ -22,14 +26,13 @@ model_parameters.data.frame <- function(model, as_draws = FALSE, verbose = TRUE,
#' @rdname standard_error
#' @export
standard_error.factor <- function(model, force = FALSE, verbose = TRUE, ...) {
- if (force) {
- standard_error(as.numeric(model), ...)
- } else {
+ if (!force) {
if (verbose) {
insight::format_warning("Can't compute standard error of non-numeric variables.")
}
return(NA)
}
+ standard_error(as.numeric(model), ...)
}
@@ -55,10 +58,8 @@ standard_error.list <- function(model, verbose = TRUE, ...) {
model <- model$gam
class(model) <- c("gam", "lm", "glm")
standard_error(model)
- } else {
- if (isTRUE(verbose)) {
- insight::print_color("\nCould not extract standard errors from model object.\n", "red")
- }
+ } else if (isTRUE(verbose)) {
+ insight::print_color("\nCould not extract standard errors from model object.\n", "red")
}
}
@@ -136,10 +137,10 @@ p_value.numeric <- function(model, null = 0, ...) {
#' @export
p_value.data.frame <- function(model, ...) {
- data <- model[vapply(model, is.numeric, TRUE)]
+ model_data <- model[vapply(model, is.numeric, TRUE)]
.data_frame(
- Parameter = names(data),
- p = vapply(data, p_value, 1)
+ Parameter = names(model_data),
+ p = vapply(model_data, p_value, 1)
)
}
@@ -150,9 +151,7 @@ p_value.list <- function(model, method = NULL, verbose = TRUE, ...) {
model <- model$gam
class(model) <- c("gam", "lm", "glm")
p_value(model, method = method)
- } else {
- if (isTRUE(verbose)) {
- insight::format_warning("Could not extract p-values from model object.")
- }
+ } else if (isTRUE(verbose)) {
+ insight::format_warning("Could not extract p-values from model object.")
}
}
diff --git a/R/methods_betareg.R b/R/methods_betareg.R
index 4c4ce273b..37fbb8124 100644
--- a/R/methods_betareg.R
+++ b/R/methods_betareg.R
@@ -11,6 +11,7 @@ model_parameters.betareg <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -23,6 +24,12 @@ model_parameters.betareg <- function(model,
verbose = verbose
)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
component <- match.arg(component)
if (component == "all") {
merge_by <- c("Parameter", "Component")
@@ -44,7 +51,7 @@ model_parameters.betareg <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
vcov = NULL,
vcov_args = NULL
)
diff --git a/R/methods_bfsl.R b/R/methods_bfsl.R
index 5297e0632..69aa7c3f4 100644
--- a/R/methods_bfsl.R
+++ b/R/methods_bfsl.R
@@ -4,10 +4,17 @@ model_parameters.bfsl <- function(model,
ci_method = "residual",
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
out <- .model_parameters_generic(
model = model,
ci = ci,
@@ -16,7 +23,7 @@ model_parameters.bfsl <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
...
)
@@ -37,20 +44,3 @@ standard_error.bfsl <- function(model, ...) {
)
insight::text_remove_backticks(params, verbose = FALSE)
}
-
-
-
-#' @export
-degrees_of_freedom.bfsl <- function(model, method = "residual", ...) {
- if (is.null(method)) {
- method <- "wald"
- }
-
- method <- match.arg(tolower(method), choices = c("analytical", "any", "fit", "wald", "residual", "normal"))
-
- if (method %in% c("wald", "residual", "fit")) {
- model$df.residual
- } else {
- degrees_of_freedom.default(model, method = method, ...)
- }
-}
diff --git a/R/methods_biglm.R b/R/methods_biglm.R
index 56454116a..ddb9e47a1 100644
--- a/R/methods_biglm.R
+++ b/R/methods_biglm.R
@@ -8,12 +8,3 @@ standard_error.biglm <- function(model, ...) {
SE = as.vector(cs[, 4])
)
}
-
-
-#' @export
-degrees_of_freedom.biglm <- function(model, method = NULL, ...) {
- .degrees_of_freedom_no_dfresid_method(model, method)
-}
-
-#' @export
-degrees_of_freedom.bigglm <- degrees_of_freedom.biglm
diff --git a/R/methods_brglm2.R b/R/methods_brglm2.R
index 2b9eb4ca9..acfd9a1c5 100644
--- a/R/methods_brglm2.R
+++ b/R/methods_brglm2.R
@@ -15,6 +15,7 @@ model_parameters.bracl <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -27,6 +28,12 @@ model_parameters.bracl <- function(model,
verbose = verbose
)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# detect number of levels of response
resp <- insight::get_response(model)
@@ -56,7 +63,7 @@ model_parameters.bracl <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
vcov = NULL,
vcov_args = NULL
)
@@ -153,10 +160,17 @@ model_parameters.multinom <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
model_parameters.bracl(
model,
ci = ci,
@@ -166,7 +180,7 @@ model_parameters.multinom <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep = keep,
drop = drop,
verbose = verbose,
@@ -181,19 +195,6 @@ ci.multinom <- function(x, ci = 0.95, method = "normal", verbose = TRUE, ...) {
}
-#' @export
-degrees_of_freedom.multinom <- function(model, method = NULL, ...) {
- if (is.null(method) || identical(method, "normal")) {
- Inf
- } else {
- insight::n_obs(model) - model$edf
- }
-}
-
-#' @export
-degrees_of_freedom.nnet <- degrees_of_freedom.multinom
-
-
#' @export
standard_error.multinom <- function(model, ...) {
se <- tryCatch(
diff --git a/R/methods_car.R b/R/methods_car.R
index 0284cb283..3d2502cbc 100644
--- a/R/methods_car.R
+++ b/R/methods_car.R
@@ -46,7 +46,7 @@ model_parameters.deltaMethod <- function(model, p_adjust = NULL, verbose = TRUE,
iterations = NULL,
ci_method = "residual",
p_adjust = p_adjust,
- summary = FALSE,
+ include_info = FALSE,
verbose = verbose
)
fun_args <- c(fun_args, dots)
diff --git a/R/methods_cgam.R b/R/methods_cgam.R
index a70416cd2..0204efa0f 100644
--- a/R/methods_cgam.R
+++ b/R/methods_cgam.R
@@ -166,38 +166,3 @@ standard_error.cgam <- function(model, ...) {
Component = params$Component
)
}
-
-
-#' @export
-degrees_of_freedom.cgam <- function(model, method = "wald", ...) {
- if (is.null(method)) {
- method <- "wald"
- }
- method <- match.arg(tolower(method), choices = c("analytical", "any", "fit", "wald", "residual", "normal"))
-
- if (method %in% c("wald", "residual", "fit")) {
- stats::df.residual(model)
- } else {
- degrees_of_freedom.default(model, method = method, ...)
- }
-}
-
-
-#' @export
-degrees_of_freedom.cgamm <- function(model, method = "wald", ...) {
- if (is.null(method)) {
- method <- "wald"
- }
- method <- match.arg(tolower(method), choices = c("analytical", "any", "fit", "wald", "residual", "normal"))
-
- if (method %in% c("wald", "residual", "fit")) {
- dof <- model$resid_df_obs
- if (is.null(dof)) {
- dof <- degrees_of_freedom.default(model, method = method, ...)
- }
- } else {
- dof <- degrees_of_freedom.default(model, method = method, ...)
- }
-
- dof
-}
diff --git a/R/methods_coxme.R b/R/methods_coxme.R
index 9bd652b47..ede50ca14 100644
--- a/R/methods_coxme.R
+++ b/R/methods_coxme.R
@@ -1,10 +1,10 @@
#' @export
standard_error.coxme <- function(model, ...) {
- beta <- model$coefficients
+ beta_coef <- model$coefficients
- if (length(beta) > 0) {
+ if (length(beta_coef) > 0) {
.data_frame(
- Parameter = .remove_backticks_from_string(names(beta)),
+ Parameter = .remove_backticks_from_string(names(beta_coef)),
SE = sqrt(diag(stats::vcov(model)))
)
}
diff --git a/R/methods_cplm.R b/R/methods_cplm.R
index 3ba6bf0ee..ad8efa25f 100644
--- a/R/methods_cplm.R
+++ b/R/methods_cplm.R
@@ -36,9 +36,10 @@ model_parameters.zcpglm <- function(model,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
+ summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
- summary = getOption("parameters_summary", FALSE),
verbose = TRUE,
...) {
component <- match.arg(component)
@@ -48,6 +49,11 @@ model_parameters.zcpglm <- function(model,
component <- "conditional"
}
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
# Processing
if (bootstrap) {
@@ -75,7 +81,7 @@ model_parameters.zcpglm <- function(model,
ci,
exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
verbose = verbose,
...
)
diff --git a/R/methods_effect_size.R b/R/methods_effect_size.R
index 579477b25..06896670c 100644
--- a/R/methods_effect_size.R
+++ b/R/methods_effect_size.R
@@ -17,21 +17,21 @@ ci.parameters_standardized <- function(x, ci = 0.95, verbose = TRUE, ...) {
# check if we have model. if so, use df from model
model <- .get_object(x)
if (!is.null(model)) {
- df <- degrees_of_freedom(model, method = "any")
- if (!is.null(df)) {
- if (length(df) > 1 && length(df) != nrow(x)) {
- df <- Inf
+ dof <- insight::get_df(model, type = "wald")
+ if (!is.null(dof)) {
+ if (length(dof) > 1 && length(dof) != nrow(x)) {
+ dof <- Inf
}
} else {
- df <- Inf
+ dof <- Inf
}
} else {
- df <- Inf
+ dof <- Inf
}
out <- lapply(ci, function(i) {
alpha <- (1 + i) / 2
- fac <- stats::qt(alpha, df = df)
+ fac <- stats::qt(alpha, df = dof)
data.frame(
Parameter = x$Parameter,
CI = i,
diff --git a/R/methods_emmeans.R b/R/methods_emmeans.R
index 0c5e1bb84..c8ed97188 100644
--- a/R/methods_emmeans.R
+++ b/R/methods_emmeans.R
@@ -27,23 +27,7 @@ model_parameters.emmGrid <- function(model,
s <- summary(model, level = ci, adjust = "none")
params <- as.data.frame(s)
- # we assume frequentist here...
- if (!.is_bayesian_emmeans(model)) {
- # get statistic, se and p
- statistic <- insight::get_statistic(model, ci = ci, adjust = "none")
- SE <- standard_error(model)
- p <- p_value(model, ci = ci, adjust = "none")
-
- params$Statistic <- statistic$Statistic
- params$SE <- SE$SE
- params$p <- p$p
-
- # ==== adjust p-values?
-
- if (!is.null(p_adjust)) {
- params <- .p_adjust(params, p_adjust, model, verbose)
- }
- } else {
+ if (.is_bayesian_emmeans(model)) {
# Bayesian models go here...
params <- bayestestR::describe_posterior(
model,
@@ -60,8 +44,22 @@ model_parameters.emmGrid <- function(model,
verbose = verbose,
...
)
-
statistic <- NULL
+ } else {
+ # we assume frequentist here...
+ statistic <- insight::get_statistic(model, ci = ci, adjust = "none")
+ SE <- standard_error(model)
+ p <- p_value(model, ci = ci, adjust = "none")
+
+ params$Statistic <- statistic$Statistic
+ params$SE <- SE$SE
+ params$p <- p$p
+
+ # ==== adjust p-values?
+
+ if (!is.null(p_adjust)) {
+ params <- .p_adjust(params, p_adjust, model, verbose)
+ }
}
@@ -88,11 +86,11 @@ model_parameters.emmGrid <- function(model,
if (!any(startsWith(colnames(params), "CI_"))) {
df_column <- grep("(df|df_error)", colnames(params))
if (length(df_column) > 0) {
- df <- params[[df_column[1]]]
+ dof <- params[[df_column[1]]]
} else {
- df <- Inf
+ dof <- Inf
}
- fac <- stats::qt((1 + ci) / 2, df = df)
+ fac <- stats::qt((1 + ci) / 2, df = dof)
params$CI_low <- params$Estimate - fac * params$SE
params$CI_high <- params$Estimate + fac * params$SE
}
@@ -105,12 +103,12 @@ model_parameters.emmGrid <- function(model,
# Reorder
estimate_pos <- which(colnames(s) == estName)
parameter_names <- colnames(params)[seq_len(estimate_pos - 1)]
- order <- c(
+ col_order <- c(
parameter_names, "Estimate", "Median", "Mean", "SE", "SD", "MAD",
"CI_low", "CI_high", "F", "t", "z", "df", "df_error", "p", "pd",
"ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage"
)
- params <- params[order[order %in% names(params)]]
+ params <- params[col_order[col_order %in% names(params)]]
# rename
names(params) <- gsub("Estimate", "Coefficient", names(params), fixed = TRUE)
@@ -174,7 +172,15 @@ model_parameters.emm_list <- function(model,
# exponentiate coefficients and SE/CI, if requested
params <- .exponentiate_parameters(params, model, exponentiate)
- params <- .add_model_parameters_attributes(params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ...)
+ params <- .add_model_parameters_attributes(
+ params,
+ model,
+ ci,
+ exponentiate,
+ p_adjust = p_adjust,
+ verbose = verbose,
+ ...
+ )
attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
class(params) <- c("parameters_model", "see_parameters_model", class(params))
@@ -210,12 +216,12 @@ model_parameters.summary_emm <- function(model,
# Reorder
estimate_pos <- which(colnames(model) == estName)
parameter_names <- colnames(params)[seq_len(estimate_pos - 1)]
- order <- c(
+ col_order <- c(
parameter_names, "Estimate", "Median", "Mean", "SE", "SD", "MAD",
"CI_low", "CI_high", "F", "t", "z", "df", "df_error", "p", "pd",
"ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage"
)
- params <- params[order[order %in% names(params)]]
+ params <- params[col_order[col_order %in% names(params)]]
# rename
names(params) <- gsub("Estimate", "Coefficient", names(params), fixed = TRUE)
@@ -315,42 +321,6 @@ boot_em_standard_error <- function(model) {
-
-# degrees of freedom --------------------
-
-
-#' @export
-degrees_of_freedom.emmGrid <- function(model, ...) {
- if (!is.null(model@misc$is_boot) && model@misc$is_boot) {
- return(boot_em_df(model))
- }
-
- summary(model)$df
-}
-
-
-#' @export
-degrees_of_freedom.emm_list <- function(model, ...) {
- if (!is.null(model[[1]]@misc$is_boot) && model[[1]]@misc$is_boot) {
- return(boot_em_df(model))
- }
-
- s <- summary(model)
- unlist(lapply(s, function(i) {
- if (is.null(i$df)) {
- rep(Inf, nrow(i))
- } else {
- i$df
- }
- }), use.names = FALSE)
-}
-
-boot_em_df <- function(model) {
- est <- insight::get_parameters(model, summary = FALSE)
- rep(NA, ncol(est))
-}
-
-
# p values ----------------------
@@ -361,21 +331,20 @@ p_value.emmGrid <- function(model, ci = 0.95, adjust = "none", ...) {
return(boot_em_pval(model, adjust))
}
-
s <- summary(model, level = ci, adjust = adjust)
estimate_pos <- which(colnames(s) == attr(s, "estName"))
- if (length(estimate_pos)) {
- stat <- insight::get_statistic(model, ci = ci, adjust = adjust)
- p <- 2 * stats::pt(abs(stat$Statistic), df = s$df, lower.tail = FALSE)
-
- .data_frame(
- Parameter = .pretty_emmeans_Parameter_names(model),
- p = as.vector(p)
- )
- } else {
+ if (!length(estimate_pos)) {
return(NULL)
}
+
+ stat <- insight::get_statistic(model, ci = ci, adjust = adjust)
+ p <- 2 * stats::pt(abs(stat$Statistic), df = s$df, lower.tail = FALSE)
+
+ .data_frame(
+ Parameter = .pretty_emmeans_Parameter_names(model),
+ p = as.vector(p)
+ )
}
@@ -418,8 +387,8 @@ p_value.emm_list <- function(model, adjust = "none", ...) {
# test statistic and p-values
stat <- params$Estimate / se
- df <- degrees_of_freedom(model)
- p_val <- 2 * stats::pt(abs(stat), df = df, lower.tail = FALSE)
+ dof <- insight::get_df(model)
+ p_val <- 2 * stats::pt(abs(stat), df = dof, lower.tail = FALSE)
out$p[is.na(out$p)] <- p_val[is.na(out$p)]
}
diff --git a/R/methods_fixest.R b/R/methods_fixest.R
index bb2a04d17..7bfc9a945 100644
--- a/R/methods_fixest.R
+++ b/R/methods_fixest.R
@@ -9,12 +9,13 @@ model_parameters.fixest <- function(model,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
+ vcov = NULL,
+ vcov_args = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
- vcov = NULL,
- vcov_args = NULL,
...) {
# default ci-method, based on statistic
if (is.null(ci_method)) {
@@ -25,6 +26,12 @@ model_parameters.fixest <- function(model,
}
}
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# extract model parameters table, as data frame
out <- tryCatch(
{
@@ -38,7 +45,7 @@ model_parameters.fixest <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep_parameters = keep,
drop_parameters = drop,
vcov = vcov,
@@ -65,14 +72,14 @@ model_parameters.fixest <- function(model,
standard_error.fixest <- function(model, vcov = NULL, vcov_args = NULL, ...) {
params <- insight::get_parameters(model)
- if (!is.null(vcov)) {
+ if (is.null(vcov)) {
+ stats <- summary(model)
+ SE <- as.vector(stats$se)
+ } else {
# we don't want to wrap this in a tryCatch because the `fixest` error is
# informative when `vcov` is wrong.
V <- insight::get_varcov(model, vcov = vcov, vcov_args = vcov_args)
SE <- sqrt(diag(V))
- } else {
- stats <- summary(model)
- SE <- as.vector(stats$se)
}
.data_frame(
@@ -82,34 +89,6 @@ standard_error.fixest <- function(model, vcov = NULL, vcov_args = NULL, ...) {
}
-#' @export
-degrees_of_freedom.fixest <- function(model, method = "wald", ...) {
- # fixest degrees of freedom can be tricky. best to use the function by the
- # package.
- insight::check_if_installed("fixest")
- if (is.null(method)) {
- method <- "wald"
- }
- method <- match.arg(
- tolower(method),
- choices = c("wald", "residual", "normal")
- )
-
- # we may have Inf DF, too
- if (method == "normal") {
- return(Inf)
- }
-
- method <- switch(method,
- wald = "t",
- residual = "resid"
- )
- fixest::degrees_freedom(model, type = method)
-}
-
-
-
-
# .feglm -----------------------
#' @export
@@ -152,13 +131,20 @@ model_parameters.fixest_multi <- function(model,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
+ vcov = NULL,
+ vcov_args = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
- vcov = NULL,
- vcov_args = NULL,
...) {
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# iterate over responses
out <- lapply(
model,
@@ -170,7 +156,7 @@ model_parameters.fixest_multi <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep = keep,
drop = drop,
verbose = verbose,
@@ -226,22 +212,6 @@ standard_error.fixest_multi <- function(model, ...) {
}
-#' @export
-degrees_of_freedom.fixest_multi <- function(model, ...) {
- out <- do.call(rbind, lapply(model, degrees_of_freedom, ...))
-
- # add response and group columns
- id_columns <- .get_fixest_multi_columns(model)
-
- # add response column
- out$Response <- id_columns$Response
- out$Group <- id_columns$Group
-
- row.names(out) <- NULL
- out
-}
-
-
#' @export
p_value.fixest_multi <- function(model, ...) {
out <- do.call(rbind, lapply(model, p_value, ...))
diff --git a/R/methods_flexsurvreg.R b/R/methods_flexsurvreg.R
index 8cf8c9113..c213abec4 100644
--- a/R/methods_flexsurvreg.R
+++ b/R/methods_flexsurvreg.R
@@ -17,7 +17,7 @@ p_value.flexsurvreg <- function(model, ...) {
params <- insight::get_parameters(model)
est <- params$Estimate
se <- standard_error(model)$SE
- p <- 2 * stats::pt(abs(est / se), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE)
+ p <- 2 * stats::pt(abs(est / se), df = insight::get_df(x = model, type = "wald"), lower.tail = FALSE)
.data_frame(
Parameter = params$Parameter,
p = as.vector(p)
diff --git a/R/methods_gee.R b/R/methods_gee.R
index cc0723edc..9dcaa3b65 100644
--- a/R/methods_gee.R
+++ b/R/methods_gee.R
@@ -26,13 +26,13 @@ p_value.gee <- function(model, method = NULL, ...) {
if (isTRUE(list(...)$robust) || "vcov" %in% names(list(...))) {
p <- 2 * stats::pt(
abs(cs[, "Estimate"] / cs[, "Robust S.E."]),
- df = degrees_of_freedom(model, method = method),
+ df = insight::get_df(x = model, type = method),
lower.tail = FALSE
)
} else {
p <- 2 * stats::pt(
abs(cs[, "Estimate"] / cs[, "Naive S.E."]),
- df = degrees_of_freedom(model, method = method),
+ df = insight::get_df(x = model, type = method),
lower.tail = FALSE
)
}
@@ -56,7 +56,7 @@ p_value.geeglm <- function(model, method = "wald", ...) {
if (!is.null(stat)) {
if (identical(method, "residual")) {
- dof <- degrees_of_freedom(model, method = "residual")
+ dof <- insight::get_df(model, type = "residual")
p <- as.vector(2 * stats::pt(
sqrt(abs(stat$Statistic)),
df = dof,
diff --git a/R/methods_glmgee.R b/R/methods_glmgee.R
new file mode 100644
index 000000000..82b8e5b58
--- /dev/null
+++ b/R/methods_glmgee.R
@@ -0,0 +1,68 @@
+#' @export
+standard_error.glmgee <- function(model,
+ vcov = c("robust", "df-adjusted", "model", "bias-corrected", "jackknife"),
+ verbose = TRUE,
+ ...) {
+ vcov <- match.arg(vcov)
+ se <- NULL
+
+ .vcov <- insight::get_varcov(
+ model,
+ vcov = vcov,
+ verbose = verbose,
+ ...
+ )
+ se <- sqrt(diag(.vcov))
+ .data_frame(Parameter = names(se), SE = as.vector(se))
+}
+
+
+#' @export
+p_value.glmgee <- function(model,
+ method = NULL,
+ vcov = c("robust", "df-adjusted", "model", "bias-corrected", "jackknife"),
+ ...) {
+ vcov <- match.arg(vcov)
+ est <- insight::get_parameters(model, component = "conditional")
+ se <- standard_error(model, vcov = vcov, verbose = FALSE)
+
+ if (is.null(method)) {
+ method <- "wald"
+ }
+
+ p <- 2 * stats::pt(
+ abs(est$Estimate / se$SE),
+ df = insight::get_df(x = model, type = method),
+ lower.tail = FALSE
+ )
+
+ .data_frame(
+ Parameter = est$Parameter,
+ p = as.vector(p)
+ )
+}
+
+
+#' @export
+ci.glmgee <- function(x,
+ ci = 0.95,
+ dof = NULL,
+ method = NULL,
+ vcov = c("robust", "df-adjusted", "model", "bias-corrected", "jackknife"),
+ verbose = TRUE,
+ ...) {
+ vcov <- match.arg(vcov)
+ out <- .ci_generic(
+ model = x,
+ ci = ci,
+ dof = dof,
+ method = method,
+ vcov = vcov,
+ vcov_args = NULL,
+ component = "conditional",
+ verbose = verbose
+ )
+ # Return the CI bounds as a data frame.
+ row.names(out) <- NULL
+ out
+}
diff --git a/R/methods_glmmTMB.R b/R/methods_glmmTMB.R
index 291a4ca7a..26ad72dd1 100644
--- a/R/methods_glmmTMB.R
+++ b/R/methods_glmmTMB.R
@@ -21,13 +21,20 @@ model_parameters.glmmTMB <- function(model,
p_adjust = NULL,
wb_component = TRUE,
summary = getOption("parameters_mixed_summary", FALSE),
+ include_info = getOption("parameters_mixed_info", FALSE),
+ include_sigma = FALSE,
keep = NULL,
drop = NULL,
verbose = TRUE,
- include_sigma = FALSE,
...) {
insight::check_if_installed("glmmTMB")
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# validation check, warn if unsupported argument is used.
dot_args <- .check_dots(
dots = list(...),
@@ -98,7 +105,7 @@ model_parameters.glmmTMB <- function(model,
keep_component_column = component != "conditional",
include_sigma = include_sigma,
wb_component = wb_component,
- summary = summary
+ include_info = include_info
)
fun_args <- c(fun_args, dot_args)
params <- do.call(".extract_parameters_generic", fun_args)
@@ -236,7 +243,7 @@ model_parameters.glmmTMB <- function(model,
p_adjust = p_adjust,
verbose = verbose,
group_level = group_level,
- summary = summary,
+ include_info = include_info,
wb_component = wb_component,
...
)
diff --git a/R/methods_hglm.R b/R/methods_hglm.R
index 4f237e112..a8f27fea7 100644
--- a/R/methods_hglm.R
+++ b/R/methods_hglm.R
@@ -23,6 +23,7 @@ model_parameters.hglm <- function(model,
component = "all",
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -31,13 +32,19 @@ model_parameters.hglm <- function(model,
effects <- match.arg(effects, choices = c("fixed", "random", "all"))
component <- match.arg(component, choices = c("all", "conditional", "dispersion"))
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# fixed effects
mp <- model_parameters.default(
model,
ci = ci, ci_method = ci_method, bootstrap = bootstrap,
effects = "fixed", component = "conditional", iterations = iterations,
- exponentiate = exponentiate, p_adjust = p_adjust, summary = summary,
+ exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info,
keep = keep, drop = drop, verbose = verbose, ...
)
@@ -154,15 +161,6 @@ standard_error.hglm <- function(model,
}
-#' @export
-degrees_of_freedom.hglm <- function(model, method = "residual", ...) {
- if (method == "any") {
- method <- "residual"
- }
- insight::get_df(model, type = method, ...)
-}
-
-
#' @export
ci.hglm <- function(x,
ci = 0.95,
diff --git a/R/methods_htest.R b/R/methods_htest.R
index 4d4648025..4cf38ba3f 100644
--- a/R/methods_htest.R
+++ b/R/methods_htest.R
@@ -8,8 +8,6 @@
#' only applies to objects from `chisq.test()` or `oneway.test()`.
#' @inheritParams model_parameters.default
#' @inheritParams model_parameters.aov
-#' @param cramers_v,phi,cohens_g,standardized_d,hedges_g,omega_squared,eta_squared,epsilon_squared,rank_biserial,rank_epsilon_squared,kendalls_w Deprecated. Please use `effectsize_type`.
-#'
#' @inherit effectsize::effectsize details
#'
#' @examples
@@ -18,13 +16,13 @@
#' model_parameters(model)
#'
#' model <- t.test(iris$Sepal.Width, iris$Sepal.Length)
-#' model_parameters(model, effectsize_type = "hedges_g")
+#' model_parameters(model, es_type = "hedges_g")
#'
#' model <- t.test(mtcars$mpg ~ mtcars$vs)
-#' model_parameters(model, effectsize_type = "hedges_g")
+#' model_parameters(model, es_type = "hedges_g")
#'
#' model <- t.test(iris$Sepal.Width, mu = 1)
-#' model_parameters(model, effectsize_type = "cohens_d")
+#' model_parameters(model, es_type = "cohens_d")
#'
#' data(airquality)
#' airquality$Month <- factor(airquality$Month, labels = month.abb[5:9])
@@ -37,7 +35,7 @@
#' model_parameters(model)
#'
#' model <- suppressWarnings(chisq.test(table(mtcars$am, mtcars$cyl)))
-#' model_parameters(model, effectsize_type = "cramers_v")
+#' model_parameters(model, es_type = "cramers_v")
#'
#' @return A data frame of indices related to the model's parameters.
#'
@@ -46,41 +44,15 @@ model_parameters.htest <- function(model,
ci = 0.95,
alternative = NULL,
bootstrap = FALSE,
- effectsize_type = NULL,
+ es_type = NULL,
verbose = TRUE,
- cramers_v = NULL,
- phi = NULL,
- standardized_d = NULL,
- hedges_g = NULL,
- omega_squared = NULL,
- eta_squared = NULL,
- epsilon_squared = NULL,
- cohens_g = NULL,
- rank_biserial = NULL,
- rank_epsilon_squared = NULL,
- kendalls_w = NULL,
...) {
- ## TODO: remove in a later update
- # handle deprected arguments ------
- if (!is.null(cramers_v)) effectsize_type <- "cramers_v"
- if (!is.null(phi)) effectsize_type <- "phi"
- if (!is.null(standardized_d)) effectsize_type <- "standardized_d"
- if (!is.null(hedges_g)) effectsize_type <- "hedges_g"
- if (!is.null(omega_squared)) effectsize_type <- "omega_squared"
- if (!is.null(eta_squared)) effectsize_type <- "eta_squared"
- if (!is.null(epsilon_squared)) effectsize_type <- "epsilon_squared"
- if (!is.null(cohens_g)) effectsize_type <- "cohens_g"
- if (!is.null(rank_biserial)) effectsize_type <- "rank_biserial"
- if (!is.null(rank_epsilon_squared)) effectsize_type <- "rank_epsilon_squared"
- if (!is.null(kendalls_w)) effectsize_type <- "rank_epsilon_squared"
-
-
if (bootstrap) {
insight::format_error("Bootstrapped h-tests are not yet implemented.")
} else {
parameters <- .extract_parameters_htest(
model,
- effectsize_type = effectsize_type,
+ es_type = es_type,
ci = ci,
alternative = alternative,
verbose = verbose,
@@ -149,14 +121,17 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
#' @keywords internal
.extract_parameters_htest <- function(model,
- effectsize_type = NULL,
+ es_type = NULL,
ci = 0.95,
alternative = NULL,
verbose = TRUE,
...) {
m_info <- insight::model_info(model, verbose = FALSE)
- if (m_info$is_correlation) {
+ if (!is.null(model$method) && startsWith(model$method, "Box-")) {
+ # Box-Pierce ---------
+ out <- .extract_htest_boxpierce(model)
+ } else if (m_info$is_correlation) {
# correlation ---------
out <- .extract_htest_correlation(model)
} else if (.is_levenetest(model)) {
@@ -189,7 +164,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
out <- .add_effectsize_htest(model,
out,
- effectsize_type = effectsize_type,
+ es_type = es_type,
ci = ci,
alternative = alternative,
verbose = verbose,
@@ -203,14 +178,30 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
+# extract htest Box-Pierce ----------------------
+
+#' @keywords internal
+.extract_htest_boxpierce <- function(model) {
+ data.frame(
+ Parameter = model$data.name,
+ Chi2 = model$statistic,
+ df_error = model$parameter,
+ p = model$p.value,
+ Method = model$method,
+ stringsAsFactors = FALSE
+ )
+}
+
+
+
# extract htest correlation ----------------------
#' @keywords internal
.extract_htest_correlation <- function(model) {
- names <- unlist(strsplit(model$data.name, " (and|by) "))
+ data_names <- unlist(strsplit(model$data.name, " (and|by) "))
out <- data.frame(
- Parameter1 = names[1],
- Parameter2 = names[2],
+ Parameter1 = data_names[1],
+ Parameter2 = data_names[2],
stringsAsFactors = FALSE
)
@@ -258,10 +249,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
.extract_htest_ranktest <- function(model) {
# survey
if (grepl("design-based", tolower(model$method), fixed = TRUE)) {
- names <- gsub("~", "", unlist(strsplit(model$data.name, " + ", fixed = TRUE)), fixed = TRUE)
+ data_names <- gsub("~", "", unlist(strsplit(model$data.name, " + ", fixed = TRUE)), fixed = TRUE)
out <- data.frame(
- Parameter1 = names[1],
- Parameter2 = names[2],
+ Parameter1 = data_names[1],
+ Parameter2 = data_names[2],
Statistic = model$statistic[[1]],
df_error = model$parameter[[1]],
Method = model$method,
@@ -272,10 +263,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
colnames(out)[colnames(out) == "Statistic"] <- names(model$statistic)[1]
} else {
if (grepl(" (and|by) ", model$data.name)) {
- names <- unlist(strsplit(model$data.name, " (and|by) "))
+ data_names <- unlist(strsplit(model$data.name, " (and|by) "))
out <- data.frame(
- Parameter1 = names[1],
- Parameter2 = names[2],
+ Parameter1 = data_names[1],
+ Parameter2 = data_names[2],
stringsAsFactors = FALSE
)
} else {
@@ -312,7 +303,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
data.frame(
df = model$Df[1],
df_error = model$Df[2],
- `F` = model$`F value`[1],
+ `F` = model$`F value`[1], # nolint
p = model$`Pr(>F)`[1],
Method = "Levene's Test for Homogeneity of Variance",
stringsAsFactors = FALSE
@@ -331,7 +322,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
Estimate = model$estimate,
df = model$parameter[1],
df_error = model$parameter[2],
- `F` = model$statistic,
+ `F` = model$statistic, # nolint
CI_low = model$conf.int[1],
CI_high = model$conf.int[2],
p = model$p.value,
@@ -349,10 +340,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
.extract_htest_ttest <- function(model, standardized_d = NULL, hedges_g = NULL) {
# survey
if (grepl("design-based", tolower(model$method), fixed = TRUE)) {
- names <- unlist(strsplit(model$data.name, " ~ ", fixed = TRUE))
+ data_names <- unlist(strsplit(model$data.name, " ~ ", fixed = TRUE))
out <- data.frame(
- Parameter1 = names[1],
- Parameter2 = names[2],
+ Parameter1 = data_names[1],
+ Parameter2 = data_names[2],
Difference = model$estimate[[1]],
t = model$statistic[[1]],
df_error = model$parameter[[1]],
@@ -365,10 +356,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
} else {
paired_test <- startsWith(model$method, "Paired") && length(model$estimate) == 1
if (grepl(" and ", model$data.name, fixed = TRUE) && isFALSE(paired_test)) {
- names <- unlist(strsplit(model$data.name, " and ", fixed = TRUE))
+ data_names <- unlist(strsplit(model$data.name, " and ", fixed = TRUE))
out <- data.frame(
- Parameter1 = names[1],
- Parameter2 = names[2],
+ Parameter1 = data_names[1],
+ Parameter2 = data_names[2],
Mean_Parameter1 = model$estimate[1],
Mean_Parameter2 = model$estimate[2],
Difference = model$estimate[1] - model$estimate[2],
@@ -382,10 +373,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
)
attr(out, "mean_group_values") <- gsub("mean in group ", "", names(model$estimate), fixed = TRUE)
} else if (isTRUE(paired_test)) {
- names <- unlist(strsplit(model$data.name, " (and|by) "))
+ data_names <- unlist(strsplit(model$data.name, " (and|by) "))
out <- data.frame(
- Parameter = names[1],
- Group = names[2],
+ Parameter = data_names[1],
+ Group = data_names[2],
Difference = model$estimate,
t = model$statistic,
df_error = model$parameter,
@@ -397,10 +388,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
)
} else if (grepl(" by ", model$data.name, fixed = TRUE)) {
if (length(model$estimate) == 1) {
- names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE))
+ data_names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE))
out <- data.frame(
- Parameter = names[1],
- Group = names[2],
+ Parameter = data_names[1],
+ Group = data_names[2],
Difference = model$estimate,
CI = 0.95,
CI_low = as.vector(model$conf.int[, 1]),
@@ -412,10 +403,10 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
stringsAsFactors = FALSE
)
} else {
- names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE))
+ data_names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE))
out <- data.frame(
- Parameter = names[1],
- Group = names[2],
+ Parameter = data_names[1],
+ Group = data_names[2],
Mean_Group1 = model$estimate[1],
Mean_Group2 = model$estimate[2],
Difference = model$estimate[1] - model$estimate[2],
@@ -458,7 +449,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
#' @keywords internal
.extract_htest_oneway <- function(model) {
data.frame(
- `F` = model$statistic,
+ `F` = model$statistic, # nolint
df = model$parameter[1],
df_error = model$parameter[2],
p = model$p.value,
@@ -482,7 +473,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
}
if (names(model$statistic) == "F") {
data.frame(
- `F` = model$statistic,
+ `F` = model$statistic, # nolint
df = model$parameter[1],
df_error = model$parameter[2],
p = model$p.value,
@@ -498,27 +489,25 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
stringsAsFactors = FALSE
)
}
+ } else if (!is.null(model$estimate) && identical(names(model$estimate), "odds ratio")) {
+ data.frame(
+ `Odds Ratio` = model$estimate,
+ # CI = attributes(model$conf.int)$conf.level,
+ CI_low = model$conf.int[1],
+ CI_high = model$conf.int[2],
+ p = model$p.value,
+ Method = model$method,
+ stringsAsFactors = FALSE,
+ check.names = FALSE
+ )
} else {
- if (!is.null(model$estimate) && identical(names(model$estimate), "odds ratio")) {
- data.frame(
- `Odds Ratio` = model$estimate,
- # CI = attributes(model$conf.int)$conf.level,
- CI_low = model$conf.int[1],
- CI_high = model$conf.int[2],
- p = model$p.value,
- Method = model$method,
- stringsAsFactors = FALSE,
- check.names = FALSE
- )
- } else {
- data.frame(
- Chi2 = model$statistic,
- df = model$parameter,
- p = model$p.value,
- Method = model$method,
- stringsAsFactors = FALSE
- )
- }
+ data.frame(
+ Chi2 = model$statistic,
+ df = model$parameter,
+ p = model$p.value,
+ Method = model$method,
+ stringsAsFactors = FALSE
+ )
}
}
@@ -530,7 +519,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
#' @keywords internal
.extract_htest_prop <- function(model) {
out <- data.frame(
- Proportion = paste0(insight::format_value(model$estimate, as_percent = TRUE), collapse = " / "),
+ Proportion = paste(insight::format_value(model$estimate, as_percent = TRUE), collapse = " / "),
stringsAsFactors = FALSE
)
if (length(model$estimate) == 2) {
@@ -579,13 +568,20 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
.add_effectsize_htest <- function(model,
out,
- effectsize_type = NULL,
+ es_type = NULL,
ci = 0.95,
alternative = NULL,
verbose = TRUE,
...) {
# check if effect sizes are requested
- if (!requireNamespace("effectsize", quietly = TRUE) || is.null(effectsize_type)) {
+ if (!requireNamespace("effectsize", quietly = TRUE) || is.null(es_type)) {
+ return(out)
+ }
+
+ # return on invalid options. We may have partial matching with argument
+ # `effects` for `es_type`, and thus all "effects" options should be
+ # ignored.
+ if (es_type %in% c("fixed", "random", "all")) {
return(out)
}
@@ -594,7 +590,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
{
effectsize::effectsize(
model,
- type = effectsize_type,
+ type = es_type,
ci = ci,
alternative = alternative,
verbose = verbose,
@@ -604,7 +600,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
error = function(e) {
if (verbose) {
msg <- c(
- paste0("Could not compute effectsize ", effectsize::get_effectsize_label(effectsize_type), "."),
+ paste0("Could not compute effectsize ", effectsize::get_effectsize_label(es_type), "."),
paste0("Possible reason: ", e$message)
)
insight::format_alert(msg)
@@ -620,7 +616,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
## TODO: check if effectsize prefixes are correct @mattansb
# Find prefix for CI-columns
- prefix <- switch(effectsize_type,
+ prefix <- switch(es_type,
cohens_g = "Cohens_",
cramers_v = "Cramers_",
phi = "phi_",
@@ -667,24 +663,22 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
if (!is.null(model$alternative)) {
h1_text <- "Alternative hypothesis: "
- if (!is.null(model$null.value)) {
- if (length(model$null.value) == 1L) {
- alt.char <- switch(model$alternative,
- two.sided = "not equal to",
- less = "less than",
- greater = "greater than"
- )
- h1_text <- paste0(h1_text, "true ", names(model$null.value), " is ", alt.char, " ", model$null.value)
- } else {
- h1_text <- paste0(h1_text, model$alternative)
- }
+ if (is.null(model$null.value)) {
+ h1_text <- paste0(h1_text, model$alternative)
+ } else if (length(model$null.value) == 1L) {
+ alt.char <- switch(model$alternative,
+ two.sided = "not equal to",
+ less = "less than",
+ greater = "greater than"
+ )
+ h1_text <- paste0(h1_text, "true ", names(model$null.value), " is ", alt.char, " ", model$null.value)
} else {
h1_text <- paste0(h1_text, model$alternative)
}
attr(params, "text_alternative") <- h1_text
}
- dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x)
+ dot.arguments <- lapply(match.call(expand.dots = FALSE)[["..."]], function(x) x)
if ("digits" %in% names(dot.arguments)) {
attr(params, "digits") <- eval(dot.arguments[["digits"]])
} else {
@@ -731,7 +725,7 @@ model_parameters.svytable <- function(model, verbose = TRUE, ...) {
p_adjust = NULL,
verbose = TRUE,
...) {
- dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x)
+ dot.arguments <- lapply(match.call(expand.dots = FALSE)[["..."]], function(x) x)
attr(params, "p_adjust") <- p_adjust
attr(params, "model_class") <- class(model)
diff --git a/R/methods_ivfixed.R b/R/methods_ivfixed.R
index 6c175646c..25cb92734 100644
--- a/R/methods_ivfixed.R
+++ b/R/methods_ivfixed.R
@@ -6,28 +6,17 @@ ci.ivFixed <- ci.default
standard_error.ivFixed <- standard_error.coxr
-#' @export
-degrees_of_freedom.ivFixed <- function(model, method = "wald", ...) {
- if (is.null(method)) {
- method <- "wald"
- }
- method <- match.arg(tolower(method), choices = c("analytical", "any", "fit", "wald", "residual", "normal"))
-
- if (method %in% c("wald", "residual", "fit")) {
- as.vector(model$df)
- } else {
- degrees_of_freedom.default(model, method = method, ...)
- }
-}
-
-
#' @export
p_value.ivFixed <- function(model, method = "wald", ...) {
stat <- insight::get_statistic(model)
if (!is.null(stat)) {
.data_frame(
Parameter = stat$Parameter,
- p = as.vector(2 * stats::pt(abs(stat$Statistic), df = degrees_of_freedom(model, method = method), lower.tail = FALSE))
+ p = as.vector(2 * stats::pt(
+ abs(stat$Statistic),
+ df = insight::get_df(model, type = method),
+ lower.tail = FALSE
+ ))
)
}
}
diff --git a/R/methods_ivprobit.R b/R/methods_ivprobit.R
index 826719b71..d2fe1cec4 100644
--- a/R/methods_ivprobit.R
+++ b/R/methods_ivprobit.R
@@ -2,11 +2,6 @@
ci.ivprobit <- ci.default
-
-#' @export
-degrees_of_freedom.ivprobit <- degrees_of_freedom.ivFixed
-
-
#' @export
standard_error.ivprobit <- function(model, ...) {
.data_frame(
diff --git a/R/methods_kmeans.R b/R/methods_kmeans.R
index 22b630f79..e28a7a11f 100644
--- a/R/methods_kmeans.R
+++ b/R/methods_kmeans.R
@@ -102,10 +102,12 @@ model_parameters.hkmeans <- model_parameters.kmeans
#' @export
print.parameters_clusters <- function(x, digits = 2, ...) {
- title <- "# Clustering Solution"
- if ("title" %in% attributes(x)) title <- attributes(x)$title
+ clusterHeading <- "# Clustering Solution"
+ if ("title" %in% attributes(x)) {
+ clusterHeading <- attributes(x)$title
+ }
- insight::print_color(title, "blue")
+ insight::print_color(clusterHeading, "blue")
cat("\n\n")
insight::print_colour(.text_components_variance(x), "yellow")
diff --git a/R/methods_lm.R b/R/methods_lm.R
index 800776144..42c13e0d5 100644
--- a/R/methods_lm.R
+++ b/R/methods_lm.R
@@ -45,11 +45,5 @@ p_value.summary.lm <- function(model, ...) {
#' @export
ci.summary.lm <- function(x, ci = 0.95, method = "residual", ...) {
- .ci_generic(model = x, ci = ci, method = method, dof = degrees_of_freedom(x), ...)
-}
-
-
-#' @export
-degrees_of_freedom.summary.lm <- function(model, ...) {
- model$fstatistic[3]
+ .ci_generic(model = x, ci = ci, method = method, dof = insight::get_df(x), ...)
}
diff --git a/R/methods_lme4.R b/R/methods_lme4.R
index 854edd29a..4d55dac15 100644
--- a/R/methods_lme4.R
+++ b/R/methods_lme4.R
@@ -33,8 +33,20 @@
#' intervals will be included. Set explicitly to `TRUE` or `FALSE` to enforce
#' or omit calculation of confidence intervals.
#' @param ... Arguments passed to or from other methods. For instance, when
-#' `bootstrap = TRUE`, arguments like `type` or `parallel` are
-#' passed down to `bootstrap_model()`.
+#' `bootstrap = TRUE`, arguments like `type` or `parallel` are passed down to
+#' `bootstrap_model()`. Further non-documented arguments are `digits`,
+#' `p_digits`, `ci_digits` and `footer_digits` to set the number of digits for
+#' the output. If `s_value = TRUE`, the p-value will be replaced by the
+#' S-value in the output (cf. _Rafi and Greenland 2020_). `pd` adds an
+#' additional column with the _probability of direction_ (see
+#' [`bayestestR::p_direction()`] for details). `groups` can be used to group
+#' coefficients. It will be passed to the print-method, or can directly be
+#' used in `print()`, see documentation in [`print.parameters_model()`].
+#' Furthermore, see 'Examples' in [`model_parameters.default()`]. For
+#' developers, whose interest mainly is to get a "tidy" data frame of model
+#' summaries, it is recommended to set `pretty_names = FALSE` to speed up
+#' computation of the summary table.
+#'
#' @inheritParams model_parameters.default
#' @inheritParams model_parameters.stanreg
#'
@@ -147,17 +159,24 @@ model_parameters.merMod <- function(model,
group_level = FALSE,
exponentiate = FALSE,
p_adjust = NULL,
+ vcov = NULL,
+ vcov_args = NULL,
wb_component = TRUE,
summary = getOption("parameters_mixed_summary", FALSE),
+ include_info = getOption("parameters_mixed_info", FALSE),
+ include_sigma = FALSE,
keep = NULL,
drop = NULL,
verbose = TRUE,
- include_sigma = FALSE,
- vcov = NULL,
- vcov_args = NULL,
...) {
dots <- list(...)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# set default
if (is.null(ci_method)) {
if (isTRUE(bootstrap)) {
@@ -236,7 +255,7 @@ model_parameters.merMod <- function(model,
drop_parameters = drop,
verbose = verbose,
include_sigma = include_sigma,
- summary = summary,
+ include_info = include_info,
vcov = vcov,
vcov_args = vcov_args
)
@@ -301,7 +320,7 @@ model_parameters.merMod <- function(model,
ci_method = ci_method,
p_adjust = p_adjust,
verbose = verbose,
- summary = summary,
+ include_info = include_info,
group_level = group_level,
wb_component = wb_component,
...
diff --git a/R/methods_lmtest.R b/R/methods_lmtest.R
index c73d0a528..3809572e2 100644
--- a/R/methods_lmtest.R
+++ b/R/methods_lmtest.R
@@ -1,8 +1,3 @@
-#' @export
-degrees_of_freedom.coeftest <- function(model, ...) {
- attributes(model)$df
-}
-
#' @export
ci.coeftest <- ci.default
diff --git a/R/methods_lqmm.R b/R/methods_lqmm.R
index 5b3c8ecae..108af5274 100644
--- a/R/methods_lqmm.R
+++ b/R/methods_lqmm.R
@@ -67,17 +67,6 @@ standard_error.lqmm <- function(model, ...) {
standard_error.lqm <- standard_error.lqmm
-#' @export
-degrees_of_freedom.lqmm <- function(model, ...) {
- out <- model_parameters(model, ...)
- out$df_error
-}
-
-
-#' @export
-degrees_of_freedom.lqm <- degrees_of_freedom.lqmm
-
-
#' @export
p_value.lqmm <- function(model, ...) {
out <- model_parameters(model, ...)
diff --git a/R/methods_lrm.R b/R/methods_lrm.R
index f919e0cb9..b5915fc4d 100644
--- a/R/methods_lrm.R
+++ b/R/methods_lrm.R
@@ -47,13 +47,13 @@ p_value.lrm <- function(model, ...) {
# Issue: 697: typically the degrees of freedom are the same for every
# observation, but the value is repeated. This poses problems in multiple
# imputation models with Hmisc when we get more df values than parameters.
- df <- degrees_of_freedom(model, method = "any")
- dfu <- unique(df)
+ dof <- insight::get_df(model, type = "wald")
+ dfu <- unique(dof)
if (length(dfu) == 1) {
- df <- dfu
+ dof <- dfu
}
- p <- 2 * stats::pt(abs(stat$Statistic), df = df, lower.tail = FALSE)
+ p <- 2 * stats::pt(abs(stat$Statistic), df = dof, lower.tail = FALSE)
.data_frame(
Parameter = .remove_backticks_from_string(stat$Parameter),
diff --git a/R/methods_marginaleffects.R b/R/methods_marginaleffects.R
index 87a6eae3f..803970fa1 100644
--- a/R/methods_marginaleffects.R
+++ b/R/methods_marginaleffects.R
@@ -23,6 +23,9 @@ model_parameters.marginaleffects <- function(model,
attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
+ # do not print or report these columns
+ out <- out[, !colnames(out) %in% c("predicted_lo", "predicted_hi"), drop = FALSE]
+
if (inherits(model, "marginalmeans")) {
attr(out, "coefficient_name") <- "Marginal Means"
} else if (inherits(model, "comparisons")) {
diff --git a/R/methods_mass.R b/R/methods_mass.R
index 57804441d..7d1f54728 100644
--- a/R/methods_mass.R
+++ b/R/methods_mass.R
@@ -1,15 +1,3 @@
-# degrees of freedom -----------------
-
-#' @export
-degrees_of_freedom.rlm <- function(model, method = "residual", ...) {
- .degrees_of_freedom_no_dfresid_method(model, method)
-}
-
-
-
-
-
-
# ci -----------------
#' @export
@@ -49,9 +37,6 @@ ci.polr <- function(x, ci = 0.95, dof = NULL, method = "profile", ...) {
-
-
-
# SE -----------------
#' @export
@@ -73,9 +58,6 @@ standard_error.polr <- function(model, method = NULL, ...) {
-
-
-
# p -----------------
#' @export
@@ -85,7 +67,7 @@ p_value.negbin <- p_value.default
#' @export
p_value.rlm <- function(model, ...) {
cs <- stats::coef(summary(model))
- p <- 2 * stats::pt(abs(cs[, 3]), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE)
+ p <- 2 * stats::pt(abs(cs[, 3]), df = insight::get_df(model, type = "wald"), lower.tail = FALSE)
.data_frame(
Parameter = .remove_backticks_from_string(names(p)),
p = as.vector(p)
@@ -102,7 +84,7 @@ p_value.polr <- function(model, method = NULL, ...) {
smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
tstat <- smry[[3]]
- p <- 2 * stats::pt(abs(tstat), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE)
+ p <- 2 * stats::pt(abs(tstat), df = insight::get_df(x = model, type = "wald"), lower.tail = FALSE)
names(p) <- rownames(smry)
.data_frame(
@@ -113,8 +95,6 @@ p_value.polr <- function(model, method = NULL, ...) {
-
-
# parameters -----------------
#' @rdname model_parameters.default
diff --git a/R/methods_mclogit.R b/R/methods_mclogit.R
index 7189da17b..802e851a3 100644
--- a/R/methods_mclogit.R
+++ b/R/methods_mclogit.R
@@ -7,10 +7,17 @@ model_parameters.mblogit <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
out <- .model_parameters_generic(
model = model,
ci = ci,
@@ -22,7 +29,7 @@ model_parameters.mblogit <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
...
)
@@ -53,3 +60,6 @@ p_value.mblogit <- function(model, ...) {
row.names = NULL
)
}
+
+#' @export
+simulate_parameters.mblogit <- simulate_parameters.multinom
diff --git a/R/methods_mediate.R b/R/methods_mediate.R
index ef907a9ca..a728ecd6d 100644
--- a/R/methods_mediate.R
+++ b/R/methods_mediate.R
@@ -95,12 +95,6 @@ standard_error.mediate <- function(model, ...) {
}
-#' @export
-degrees_of_freedom.mediate <- function(model, ...) {
- NULL
-}
-
-
#' @export
p_value.mediate <- function(model, ...) {
info <- insight::model_info(model$model.y, verbose = FALSE)
diff --git a/R/methods_merTools.R b/R/methods_merTools.R
index c5f5ff060..4276c5a91 100644
--- a/R/methods_merTools.R
+++ b/R/methods_merTools.R
@@ -43,13 +43,6 @@ standard_error.merModList <- function(model, ...) {
}
-#' @export
-degrees_of_freedom.merModList <- function(model, ...) {
- s <- suppressWarnings(summary(model))
- s$fe$df
-}
-
-
#' @export
format_parameters.merModList <- function(model, brackets = c("[", "]"), ...) {
.format_parameter_default(model[[1]], brackets = brackets)
diff --git a/R/methods_mfx.R b/R/methods_mfx.R
index 7709fb31d..847eb79a7 100644
--- a/R/methods_mfx.R
+++ b/R/methods_mfx.R
@@ -296,50 +296,6 @@ standard_error.betamfx <- function(model,
-
-# degrees of freedom ------------------
-
-
-#' @export
-degrees_of_freedom.logitor <- function(model, ...) {
- degrees_of_freedom.default(model$fit, ...)
-}
-
-
-#' @export
-degrees_of_freedom.poissonirr <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.negbinirr <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.poissonmfx <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.logitmfx <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.negbinmfx <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.probitmfx <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.betaor <- degrees_of_freedom.logitor
-
-
-#' @export
-degrees_of_freedom.betamfx <- degrees_of_freedom.logitor
-
-
-
-
# p values ------------------
diff --git a/R/methods_mhurdle.R b/R/methods_mhurdle.R
index a28befd11..47775b8c2 100644
--- a/R/methods_mhurdle.R
+++ b/R/methods_mhurdle.R
@@ -61,12 +61,6 @@ ci.mhurdle <- function(x, ci = 0.95, ...) {
}
-#' @export
-degrees_of_freedom.mhurdle <- function(model, method = NULL, ...) {
- .degrees_of_freedom_no_dfresid_method(model, method)
-}
-
-
#' @export
standard_error.mhurdle <- function(model, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) {
component <- match.arg(component)
diff --git a/R/methods_mice.R b/R/methods_mice.R
index cdf326399..ecadff56c 100644
--- a/R/methods_mice.R
+++ b/R/methods_mice.R
@@ -11,21 +11,6 @@ ci.mira <- function(x, ci = 0.95, ...) {
}
-# degrees of freedom ----------------------------
-
-#' @export
-degrees_of_freedom.mira <- function(model, ...) {
- insight::check_if_installed("mice")
- degrees_of_freedom(mice::pool(model), ...)
-}
-
-
-#' @export
-degrees_of_freedom.mipo <- function(model, ...) {
- as.vector(summary(model)$df)
-}
-
-
# p values ---------------------------------------
#' @export
@@ -140,41 +125,33 @@ model_parameters.mipo <- function(model,
#' similar to `summary(mice::pool())`, i.e. it generates the pooled summary
#' of multiple imputed repeated regression analyses.
#'
-#' @examples
+#' @examplesIf require("mice", quietly = TRUE) && require("gee", quietly = TRUE)
#' library(parameters)
-#' if (require("mice", quietly = TRUE)) {
-#' data(nhanes2)
-#' imp <- mice(nhanes2)
-#' fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl))
-#' model_parameters(fit)
-#' }
+#' data(nhanes2, package = "mice")
+#' imp <- mice::mice(nhanes2)
+#' fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl))
+#' model_parameters(fit)
#' \donttest{
#' # model_parameters() also works for models that have no "tidy"-method in mice
-#' if (require("mice", quietly = TRUE) && require("gee", quietly = TRUE)) {
-#' data(warpbreaks)
-#' set.seed(1234)
-#' warpbreaks$tension[sample(1:nrow(warpbreaks), size = 10)] <- NA
-#' imp <- mice(warpbreaks)
-#' fit <- with(data = imp, expr = gee(breaks ~ tension, id = wool))
+#' data(warpbreaks)
+#' set.seed(1234)
+#' warpbreaks$tension[sample(1:nrow(warpbreaks), size = 10)] <- NA
+#' imp <- mice::mice(warpbreaks)
+#' fit <- with(data = imp, expr = gee::gee(breaks ~ tension, id = wool))
#'
-#' # does not work:
-#' # summary(pool(fit))
+#' # does not work:
+#' # summary(mice::pool(fit))
#'
-#' model_parameters(fit)
-#' }
+#' model_parameters(fit)
#' }
#'
-#'
-#'
#' # and it works with pooled results
-#' if (require("mice")) {
-#' data("nhanes2")
-#' imp <- mice(nhanes2)
-#' fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl))
-#' pooled <- pool(fit)
+#' data("nhanes2", package = "mice")
+#' imp <- mice::mice(nhanes2)
+#' fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl))
+#' pooled <- mice::pool(fit)
#'
-#' model_parameters(pooled)
-#' }
+#' model_parameters(pooled)
#' @export
model_parameters.mira <- function(model,
ci = 0.95,
diff --git a/R/methods_mmrm.R b/R/methods_mmrm.R
index b97598df5..d786dca5d 100644
--- a/R/methods_mmrm.R
+++ b/R/methods_mmrm.R
@@ -10,6 +10,7 @@ model_parameters.mmrm <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -19,6 +20,12 @@ model_parameters.mmrm <- function(model,
"kenward"
)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# extract model parameters table, as data frame
out <- tryCatch(
.model_parameters_generic(
@@ -31,7 +38,7 @@ model_parameters.mmrm <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep_parameters = keep,
drop_parameters = drop,
vcov = NULL,
@@ -126,18 +133,3 @@ standard_error.mmrm_fit <- standard_error.mmrm
#' @export
standard_error.mmrm_tmb <- standard_error.mmrm
-
-
-# degrees of freedom ------------------
-
-#' @export
-degrees_of_freedom.mmrm <- function(model, ...) {
- summary_table <- stats::coef(summary(model))
- unname(summary_table[, "df"])
-}
-
-#' @export
-degrees_of_freedom.mmrm_fit <- degrees_of_freedom.mmrm
-
-#' @export
-degrees_of_freedom.mmrm_tmb <- degrees_of_freedom.mmrm
diff --git a/R/methods_model_fit.R b/R/methods_model_fit.R
index 1f02b2032..306a305a8 100644
--- a/R/methods_model_fit.R
+++ b/R/methods_model_fit.R
@@ -54,16 +54,6 @@ standard_error.model_fit <- function(model, ...) {
-# degrees of freedom ------------------
-
-
-#' @export
-degrees_of_freedom.model_fit <- function(model, ...) {
- degrees_of_freedom(model$fit, ...)
-}
-
-
-
# p values ------------------
diff --git a/R/methods_multcomp.R b/R/methods_multcomp.R
index f906c1560..963cc85d1 100644
--- a/R/methods_multcomp.R
+++ b/R/methods_multcomp.R
@@ -103,12 +103,6 @@ standard_error.glht <- function(model, ...) {
}
-#' @export
-degrees_of_freedom.glht <- function(model, ...) {
- model$df
-}
-
-
#' @export
p_value.glht <- function(model, ...) {
s <- summary(model)
diff --git a/R/methods_mvord.R b/R/methods_mvord.R
index 563a03e8a..43ec187f1 100644
--- a/R/methods_mvord.R
+++ b/R/methods_mvord.R
@@ -12,11 +12,19 @@ model_parameters.mvord <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
component <- match.arg(component)
+
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
out <- .model_parameters_generic(
model = model,
ci = ci,
@@ -29,7 +37,7 @@ model_parameters.mvord <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
...
)
attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
diff --git a/R/methods_nestedLogit.R b/R/methods_nestedLogit.R
index 90bc86169..bec97e33a 100644
--- a/R/methods_nestedLogit.R
+++ b/R/methods_nestedLogit.R
@@ -8,15 +8,22 @@ model_parameters.nestedLogit <- function(model,
standardize = NULL,
exponentiate = FALSE,
p_adjust = NULL,
+ vcov = NULL,
+ vcov_args = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
- vcov = NULL,
- vcov_args = NULL,
verbose = TRUE,
...) {
dots <- list(...)
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# set default
if (is.null(ci_method)) {
if (isTRUE(bootstrap)) {
@@ -61,7 +68,7 @@ model_parameters.nestedLogit <- function(model,
standardize = standardize,
exponentiate = exponentiate,
p_adjust = p_adjust,
- summary = summary,
+ include_info = include_info,
keep_parameters = keep,
drop_parameters = drop,
vcov = vcov,
@@ -75,40 +82,6 @@ model_parameters.nestedLogit <- function(model,
}
-#' @export
-degrees_of_freedom.nestedLogit <- function(model,
- method = NULL,
- component = "all",
- verbose = TRUE,
- ...) {
- if (is.null(method)) {
- method <- "wald"
- }
- if (tolower(method) == "residual") {
- cf <- as.data.frame(stats::coef(model))
- dof <- rep(vapply(model$models, stats::df.residual, numeric(1)), each = nrow(cf))
- if (!is.null(component) && !identical(component, "all")) {
- comp <- intersect(names(dof), component)
- if (length(comp)) {
- dof <- dof[comp]
- } else {
- if (verbose) {
- insight::format_alert(paste0(
- "No matching model found. Possible values for `component` are ",
- toString(paste0("'", names(model$models), "'")),
- "."
- ))
- }
- dof <- Inf
- }
- }
- } else {
- dof <- Inf
- }
- dof
-}
-
-
#' @export
standard_error.nestedLogit <- function(model,
component = "all",
diff --git a/R/methods_nlme.R b/R/methods_nlme.R
index 9b589b32e..ecd99429d 100644
--- a/R/methods_nlme.R
+++ b/R/methods_nlme.R
@@ -70,8 +70,8 @@ p_value.lme <- function(model,
se <- standard_error(model, vcov = vcov, vcov_args = vcov_args, ...)
tstat <- b$Estimate / se$SE
# residuals are defined like this in `nlme:::summary.lme`
- df <- model$fixDF[["X"]]
- p <- 2 * stats::pt(-abs(tstat), df = df)
+ dof <- model$fixDF[["X"]]
+ p <- 2 * stats::pt(-abs(tstat), df = dof)
param <- se$Parameter
}
@@ -97,9 +97,3 @@ standard_error.gls <- standard_error.default
#' @export
p_value.gls <- p_value.default
-
-
-#' @export
-degrees_of_freedom.gls <- function(model, method = NULL, ...) {
- .degrees_of_freedom_no_dfresid_method(model, method)
-}
diff --git a/R/methods_ordinal.R b/R/methods_ordinal.R
index aab826e99..09ada5454 100644
--- a/R/methods_ordinal.R
+++ b/R/methods_ordinal.R
@@ -12,6 +12,7 @@ model_parameters.clm2 <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -23,6 +24,12 @@ model_parameters.clm2 <- function(model,
merge_by <- "Parameter"
}
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
## TODO check merge by
out <- .model_parameters_generic(
@@ -37,7 +44,7 @@ model_parameters.clm2 <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
...
)
diff --git a/R/methods_other.R b/R/methods_other.R
index f1afb42e1..1da9fccef 100644
--- a/R/methods_other.R
+++ b/R/methods_other.R
@@ -20,13 +20,6 @@ p_value.complmrob <- p_value.default
ci.complmrob <- ci.default
-#' @export
-degrees_of_freedom.complmrob <- function(model, method = "wald", ...) {
- .degrees_of_freedom_no_dfresid_method(model, method)
-}
-
-
-
############# .Gam --------------
@@ -36,7 +29,7 @@ degrees_of_freedom.complmrob <- function(model, method = "wald", ...) {
#' @inheritParams model_parameters.aov
#' @export
model_parameters.Gam <- function(model,
- effectsize_type = NULL,
+ es_type = NULL,
df_error = NULL,
type = NULL,
table_wide = FALSE,
@@ -44,7 +37,7 @@ model_parameters.Gam <- function(model,
...) {
model_parameters(
summary(model)$parametric.anova,
- effectsize_type = effectsize_type,
+ es_type = es_type,
df_error = df_error,
type = type,
table_wide = table_wide,
diff --git a/R/methods_plm.R b/R/methods_plm.R
index 00a120df9..271c9ee93 100644
--- a/R/methods_plm.R
+++ b/R/methods_plm.R
@@ -4,16 +4,6 @@
# plm ---------------------------
-#' @export
-degrees_of_freedom.plm <- function(model, method = "wald", ...) {
- if (identical(method, "normal")) {
- return(Inf)
- } else {
- model$df.residual
- }
-}
-
-
#' @export
standard_error.plm <- function(model, vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) {
dots <- list(...)
diff --git a/R/methods_posterior.R b/R/methods_posterior.R
index 3b58b59bf..a79e9bb08 100644
--- a/R/methods_posterior.R
+++ b/R/methods_posterior.R
@@ -8,6 +8,7 @@ model_parameters.draws <- function(model,
test = "pd",
rope_range = "default",
rope_ci = 0.95,
+ exponentiate = FALSE,
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -33,6 +34,9 @@ model_parameters.draws <- function(model,
...
)
+ # exponentiate coefficients and SE/CI, if requested
+ params <- .exponentiate_parameters(params, exponentiate = exponentiate)
+
attr(params, "ci") <- ci
attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
class(params) <- c("parameters_model", "see_parameters_model", class(params))
diff --git a/R/methods_quantreg.R b/R/methods_quantreg.R
index 9e61e68f7..bed08b33e 100644
--- a/R/methods_quantreg.R
+++ b/R/methods_quantreg.R
@@ -323,36 +323,3 @@ p_value.rqss <- function(model,
p
}
-
-
-
-
-# degrees of freedom ---------------------
-
-
-#' @export
-degrees_of_freedom.rqs <- function(model, ...) {
- tryCatch(
- {
- s <- suppressWarnings(summary(model, covariance = TRUE))
- cs <- lapply(s, function(i) i$rdf)
- unique(unlist(cs))
- },
- error = function(e) {
- NULL
- }
- )
-}
-
-
-
-#' @export
-degrees_of_freedom.rqss <- degrees_of_freedom.multinom
-
-
-#' @export
-degrees_of_freedom.rq <- degrees_of_freedom.rqs
-
-
-#' @export
-degrees_of_freedom.nlrq <- degrees_of_freedom.mhurdle
diff --git a/R/methods_rstanarm.R b/R/methods_rstanarm.R
index f7a40353a..625e2b68b 100644
--- a/R/methods_rstanarm.R
+++ b/R/methods_rstanarm.R
@@ -101,7 +101,7 @@ model_parameters.stanreg <- function(model,
if (effects != "fixed") {
random_effect_levels <- which(
- params$Effects %in% "random" & !startsWith(params$Parameter, "Sigma[")
+ params$Effects == "random" & !startsWith(params$Parameter, "Sigma[")
)
if (length(random_effect_levels) && isFALSE(group_level)) {
params <- params[-random_effect_levels, , drop = FALSE]
diff --git a/R/methods_selection.R b/R/methods_selection.R
index 81582060b..728adba85 100644
--- a/R/methods_selection.R
+++ b/R/methods_selection.R
@@ -9,11 +9,19 @@ model_parameters.selection <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
component <- match.arg(component)
+
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
out <- .model_parameters_generic(
model = model,
ci = ci,
@@ -25,7 +33,7 @@ model_parameters.selection <- function(model,
exponentiate = exponentiate,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
p_adjust = p_adjust,
...
)
@@ -83,7 +91,6 @@ standard_error.selection <- function(model, component = c("all", "selection", "o
}
-
#' @export
simulate_model.selection <- function(model,
iterations = 1000,
@@ -100,10 +107,3 @@ simulate_model.selection <- function(model,
#' @export
ci.selection <- ci.default
-
-
-#' @export
-degrees_of_freedom.selection <- function(model, ...) {
- s <- summary(model)
- s$param$df
-}
diff --git a/R/methods_serp.R b/R/methods_serp.R
deleted file mode 100644
index c4f0fff71..000000000
--- a/R/methods_serp.R
+++ /dev/null
@@ -1,14 +0,0 @@
-#' @export
-degrees_of_freedom.serp <- function(model, method = "normal", ...) {
- if (is.null(method)) {
- method <- "wald"
- }
-
- method <- match.arg(tolower(method), choices = c("analytical", "any", "fit", "wald", "residual", "normal"))
-
- if (method %in% c("residual", "fit")) {
- model$rdf
- } else {
- degrees_of_freedom.default(model, method = method, ...)
- }
-}
diff --git a/R/methods_survey.R b/R/methods_survey.R
index 04674bf8f..7112b822b 100644
--- a/R/methods_survey.R
+++ b/R/methods_survey.R
@@ -8,6 +8,7 @@ model_parameters.svyglm <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = getOption("parameters_summary", FALSE),
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -18,6 +19,12 @@ model_parameters.svyglm <- function(model,
)
}
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
# validation check, warn if unsupported argument is used.
dot_args <- .check_dots(
dots = list(...),
@@ -35,7 +42,7 @@ model_parameters.svyglm <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
verbose = verbose
)
fun_args <- c(fun_args, dot_args)
@@ -141,7 +148,7 @@ p_value.svyglm.nb <- function(model, ...) {
est <- stats::coef(model)
se <- sqrt(diag(stats::vcov(model, stderr = "robust")))
- p <- 2 * stats::pt(abs(est / se), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE)
+ p <- 2 * stats::pt(abs(est / se), df = insight::get_df(model, type = "wald"), lower.tail = FALSE)
.data_frame(
Parameter = .remove_backticks_from_string(names(p)),
diff --git a/R/methods_svy2lme.R b/R/methods_svy2lme.R
new file mode 100644
index 000000000..cc7e9f83a
--- /dev/null
+++ b/R/methods_svy2lme.R
@@ -0,0 +1,107 @@
+#' @export
+model_parameters.svy2lme <- function(model,
+ ci = 0.95,
+ effects = "all",
+ include_sigma = FALSE,
+ keep = NULL,
+ drop = NULL,
+ verbose = TRUE,
+ ...) {
+ dots <- list(...)
+ # which component to return?
+ effects <- match.arg(effects, choices = c("fixed", "random", "all"))
+ params <- params_variance <- NULL
+
+ if (effects %in% c("fixed", "all")) {
+ # Processing
+ fun_args <- list(
+ model,
+ ci = ci,
+ ci_method = "wald",
+ standardize = NULL,
+ p_adjust = NULL,
+ wb_component = FALSE,
+ keep_parameters = keep,
+ drop_parameters = drop,
+ verbose = verbose,
+ include_sigma = include_sigma,
+ summary = FALSE,
+ vcov = NULL,
+ vcov_args = NULL
+ )
+ fun_args <- c(fun_args, dots)
+ params <- do.call(".extract_parameters_mixed", fun_args)
+
+ params$Effects <- "fixed"
+ }
+
+ att <- attributes(params)
+
+ if (effects %in% c("random", "all")) {
+ params_variance <- .extract_random_variances(
+ model,
+ ci = ci,
+ effects = effects
+ )
+ }
+
+ # merge random and fixed effects, if necessary
+ if (!is.null(params) && !is.null(params_variance)) {
+ params$Level <- NA
+ params$Group <- ""
+ params <- params[match(colnames(params_variance), colnames(params))]
+ }
+
+ params <- rbind(params, params_variance)
+ # remove empty column
+ if (!is.null(params$Level) && all(is.na(params$Level))) {
+ params$Level <- NULL
+ }
+
+ # due to rbind(), we lose attributes from "extract_parameters()",
+ # so we add those attributes back here...
+ if (!is.null(att)) {
+ attributes(params) <- utils::modifyList(att, attributes(params))
+ }
+
+ params <- .add_model_parameters_attributes(
+ params,
+ model,
+ ci = ci,
+ exponentiate = FALSE,
+ bootstrap = FALSE,
+ iterations = 1000,
+ ci_method = "wald",
+ p_adjust = NULL,
+ verbose = verbose,
+ summary = FALSE,
+ group_level = FALSE,
+ wb_component = FALSE,
+ ...
+ )
+
+ attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
+ class(params) <- c("parameters_model", "see_parameters_model", class(params))
+
+ params
+}
+
+
+#' @export
+standard_error.svy2lme <- function(model, ...) {
+ .data_frame(
+ Parameter = .remove_backticks_from_string(colnames(model$Vbeta)),
+ SE = as.vector(sqrt(diag(model$Vbeta)))
+ )
+}
+
+
+#' @export
+p_value.svy2lme <- function(model, ...) {
+ stat <- insight::get_statistic(model)
+ p <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE)
+ .data_frame(
+ Parameter = stat$Parameter,
+ p = as.vector(p)
+ )
+}
diff --git a/R/methods_systemfit.R b/R/methods_systemfit.R
index 7e087d77b..4df115737 100644
--- a/R/methods_systemfit.R
+++ b/R/methods_systemfit.R
@@ -8,10 +8,17 @@ model_parameters.systemfit <- function(model,
exponentiate = FALSE,
p_adjust = NULL,
summary = FALSE,
+ include_info = getOption("parameters_info", FALSE),
keep = NULL,
drop = NULL,
verbose = TRUE,
...) {
+ ## TODO remove deprecated later
+ if (!missing(summary)) {
+ .deprecated_warning("summary", "include_info", verbose)
+ include_info <- summary
+ }
+
out <- .model_parameters_generic(
model = model,
ci = ci,
@@ -24,7 +31,7 @@ model_parameters.systemfit <- function(model,
p_adjust = p_adjust,
keep_parameters = keep,
drop_parameters = drop,
- summary = summary,
+ include_info = include_info,
...
)
@@ -33,7 +40,6 @@ model_parameters.systemfit <- function(model,
}
-
#' @export
standard_error.systemfit <- function(model, ...) {
cf <- stats::coef(summary(model))
@@ -57,7 +63,6 @@ standard_error.systemfit <- function(model, ...) {
}
-
#' @export
p_value.systemfit <- function(model, ...) {
cf <- stats::coef(summary(model))
@@ -81,25 +86,5 @@ p_value.systemfit <- function(model, ...) {
}
-
-#' @export
-degrees_of_freedom.systemfit <- function(model, ...) {
- df <- NULL
- s <- summary(model)$eq
- params <- insight::find_parameters(model)
- f <- insight::find_formula(model)
- system_names <- names(f)
-
- for (i in seq_along(system_names)) {
- dfs <- rep(s[[i]]$df[2], length(params[[i]]))
- df_names <- rep(names(params[i]), length(params[[i]]))
- df <- c(df, stats::setNames(dfs, df_names))
- }
-
- df
-}
-
-
-
#' @export
ci.systemfit <- ci.lm
diff --git a/R/methods_truncreg.R b/R/methods_truncreg.R
index dd260b243..ca04cd950 100644
--- a/R/methods_truncreg.R
+++ b/R/methods_truncreg.R
@@ -7,7 +7,3 @@ standard_error.truncreg <- standard_error.default
#' @export
p_value.truncreg <- p_value.default
-
-
-#' @export
-degrees_of_freedom.truncreg <- degrees_of_freedom.mhurdle
diff --git a/R/methods_tseries.R b/R/methods_tseries.R
deleted file mode 100644
index eefb3256d..000000000
--- a/R/methods_tseries.R
+++ /dev/null
@@ -1,4 +0,0 @@
-# classes: .garch
-
-#' @export
-degrees_of_freedom.garch <- degrees_of_freedom.mhurdle
diff --git a/R/methods_vgam.R b/R/methods_vgam.R
index 750189fac..a0018afdb 100644
--- a/R/methods_vgam.R
+++ b/R/methods_vgam.R
@@ -22,19 +22,10 @@ standard_error.vgam <- function(model, ...) {
}
-#' @export
-degrees_of_freedom.vgam <- function(model, ...) {
- params <- insight::get_parameters(model)
- out <- stats::setNames(rep(NA, nrow(params)), params$Parameter)
- out[names(model@nl.df)] <- model@nl.df
- out
-}
-
-
#' @export
p_value.vgam <- function(model, ...) {
stat <- insight::get_statistic(model)
- stat$p <- as.vector(stats::pchisq(stat$Statistic, df = degrees_of_freedom(model), lower.tail = FALSE))
+ stat$p <- as.vector(stats::pchisq(stat$Statistic, df = insight::get_df(model), lower.tail = FALSE))
stat[c("Parameter", "p", "Component")]
}
diff --git a/R/methods_weightit.R b/R/methods_weightit.R
new file mode 100644
index 000000000..440323d9b
--- /dev/null
+++ b/R/methods_weightit.R
@@ -0,0 +1,31 @@
+# model parameters -------------------
+
+#' @export
+model_parameters.ordinal_weightit <- model_parameters.clm2
+
+#' @export
+model_parameters.multinom_weightit <- model_parameters.bracl
+
+# CI ---------------------
+
+#' @export
+ci.ordinal_weightit <- ci.clm2
+
+#' @export
+ci.multinom_weightit <- ci.bracl
+
+# standard errors -----------------
+
+#' @export
+standard_error.ordinal_weightit <- standard_error.clm2
+
+#' @export
+standard_error.multinom_weightit <- standard_error.bracl
+
+# p values ----------------
+
+#' @export
+p_value.ordinal_weightit <- p_value.clm2
+
+#' @export
+p_value.multinom_weightit <- p_value.bracl
diff --git a/R/n_clusters_easystats.R b/R/n_clusters_easystats.R
index f069e0d6a..486d1c4c9 100644
--- a/R/n_clusters_easystats.R
+++ b/R/n_clusters_easystats.R
@@ -26,9 +26,9 @@ n_clusters_elbow <- function(x,
names(out) <- c("n_Clusters", "WSS")
gradient <- c(0, diff(out$WSS))
- optim <- out$n_Clusters[which.min(gradient)]
+ optimal <- out$n_Clusters[which.min(gradient)]
- attr(out, "n") <- optim
+ attr(out, "n") <- optimal
attr(out, "gradient") <- gradient
attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs"))
class(out) <- c("n_clusters_elbow", class(out))
@@ -75,9 +75,9 @@ n_clusters_gap <- function(x,
out <- rez[c("clusters", "gap", "SE.sim")]
names(out) <- c("n_Clusters", "Gap", "SE")
- optim <- cluster::maxSE(f = out$Gap, SE.f = out$SE, method = gap_method)
+ optimal <- cluster::maxSE(f = out$Gap, SE.f = out$SE, method = gap_method)
- attr(out, "n") <- optim
+ attr(out, "n") <- optimal
attr(out, "ymin") <- rez$ymin
attr(out, "ymax") <- rez$ymax
attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs"))
@@ -118,9 +118,9 @@ n_clusters_silhouette <- function(x,
)
names(out) <- c("n_Clusters", "Silhouette")
- optim <- which.max(out$Silhouette)
+ optimal <- which.max(out$Silhouette)
- attr(out, "n") <- optim
+ attr(out, "n") <- optimal
attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs"))
class(out) <- c("n_clusters_silhouette", class(out))
out
@@ -149,7 +149,14 @@ n_clusters_silhouette <- function(x,
#' }
#' }
#' @export
-n_clusters_dbscan <- function(x, standardize = TRUE, include_factors = FALSE, method = c("kNN", "SS"), min_size = 0.1, eps_n = 50, eps_range = c(0.1, 3), ...) {
+n_clusters_dbscan <- function(x,
+ standardize = TRUE,
+ include_factors = FALSE,
+ method = c("kNN", "SS"),
+ min_size = 0.1,
+ eps_n = 50,
+ eps_range = c(0.1, 3),
+ ...) {
method <- match.arg(method)
t0 <- Sys.time()
x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...)
@@ -250,7 +257,13 @@ n_clusters_hclust <- function(x,
#' @keywords internal
-.n_clusters_factoextra <- function(x, method = "wss", standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ...) {
+.n_clusters_factoextra <- function(x,
+ method = "wss",
+ standardize = TRUE,
+ include_factors = FALSE,
+ clustering_function = stats::kmeans,
+ n_max = 10,
+ ...) {
x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...)
insight::check_if_installed("factoextra")
@@ -265,31 +278,31 @@ n_clusters_hclust <- function(x,
#' @export
print.n_clusters_elbow <- function(x, ...) {
- insight::print_color(paste0("The Elbow method, that aims at minimizing the total intra-cluster variation (i.e., the total within-cluster sum of square), suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green")
+ insight::print_color(paste0("The Elbow method, that aims at minimizing the total intra-cluster variation (i.e., the total within-cluster sum of square), suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green") # nolint
invisible(x)
}
#' @export
print.n_clusters_gap <- function(x, ...) {
- insight::print_color(paste0("The Gap method, that compares the total intracluster variation of k clusters with their expected values under null reference distribution of the data, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green")
+ insight::print_color(paste0("The Gap method, that compares the total intracluster variation of k clusters with their expected values under null reference distribution of the data, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green") # nolint
invisible(x)
}
#' @export
print.n_clusters_silhouette <- function(x, ...) {
- insight::print_color(paste0("The Silhouette method, based on the average quality of clustering, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green")
+ insight::print_color(paste0("The Silhouette method, based on the average quality of clustering, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green") # nolint
invisible(x)
}
#' @export
print.n_clusters_dbscan <- function(x, ...) {
- insight::print_color(paste0("The DBSCAN method, based on the total clusters sum of squares, suggests that the optimal eps = ", attributes(x)$eps, " (with min. cluster size set to ", attributes(x)$min_size, "), which corresponds to ", attributes(x)$n, " clusters."), "green")
+ insight::print_color(paste0("The DBSCAN method, based on the total clusters sum of squares, suggests that the optimal eps = ", attributes(x)$eps, " (with min. cluster size set to ", attributes(x)$min_size, "), which corresponds to ", attributes(x)$n, " clusters."), "green") # nolint
invisible(x)
}
#' @export
print.n_clusters_hclust <- function(x, ...) {
- insight::print_color(paste0("The bootstrap analysis of hierachical clustering highlighted ", attributes(x)$n, " significant clusters."), "green")
+ insight::print_color(paste0("The bootstrap analysis of hierachical clustering highlighted ", attributes(x)$n, " significant clusters."), "green") # nolint
invisible(x)
}
@@ -297,25 +310,28 @@ print.n_clusters_hclust <- function(x, ...) {
#' @export
visualisation_recipe.n_clusters_elbow <- function(x, ...) {
- data <- as.data.frame(x)
- data$Gradient <- datawizard::rescale(attributes(x)$gradient, c(min(data$WSS, max(data$WSS))))
+ input_df <- as.data.frame(x)
+ input_df$Gradient <- datawizard::rescale(
+ attributes(x)$gradient,
+ c(min(input_df$WSS), max(input_df$WSS))
+ )
layers <- list()
# Layers -----------------------
layers[["l1"]] <- list(
geom = "line",
- data = data,
+ data = input_df,
aes = list(x = "n_Clusters", y = "WSS", group = 1),
size = 1
)
layers[["l2"]] <- list(
geom = "point",
- data = data,
+ data = input_df,
aes = list(x = "n_Clusters", y = "WSS")
)
layers[["l3"]] <- list(
geom = "line",
- data = data,
+ data = input_df,
aes = list(x = "n_Clusters", y = "Gradient", group = 1),
size = 0.5,
color = "red",
@@ -323,7 +339,7 @@ visualisation_recipe.n_clusters_elbow <- function(x, ...) {
)
layers[["l4"]] <- list(
geom = "vline",
- data = data,
+ data = input_df,
xintercept = attributes(x)$n,
linetype = "dotted"
)
@@ -336,32 +352,32 @@ visualisation_recipe.n_clusters_elbow <- function(x, ...) {
# Out
class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
- attr(layers, "data") <- data
+ attr(layers, "data") <- input_df
layers
}
#' @export
visualisation_recipe.n_clusters_gap <- function(x, ...) {
- data <- as.data.frame(x)
- data$ymin <- attributes(x)$ymin
- data$ymax <- attributes(x)$ymax
+ dataset <- as.data.frame(x)
+ dataset$ymin <- attributes(x)$ymin
+ dataset$ymax <- attributes(x)$ymax
layers <- list()
# Layers -----------------------
layers[["l1"]] <- list(
geom = "line",
- data = data,
+ data = dataset,
aes = list(x = "n_Clusters", y = "Gap", group = 1)
)
layers[["l2"]] <- list(
geom = "pointrange",
- data = data,
+ data = dataset,
aes = list(x = "n_Clusters", y = "Gap", ymin = "ymin", ymax = "ymax")
)
layers[["l4"]] <- list(
geom = "vline",
- data = data,
+ data = dataset,
xintercept = attributes(x)$n,
linetype = "dotted"
)
@@ -374,30 +390,30 @@ visualisation_recipe.n_clusters_gap <- function(x, ...) {
# Out
class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
- attr(layers, "data") <- data
+ attr(layers, "data") <- dataset
layers
}
#' @export
visualisation_recipe.n_clusters_silhouette <- function(x, ...) {
- data <- as.data.frame(x)
+ dataset <- as.data.frame(x)
layers <- list()
# Layers -----------------------
layers[["l1"]] <- list(
geom = "line",
- data = data,
+ data = dataset,
aes = list(x = "n_Clusters", y = "Silhouette", group = 1)
)
layers[["l2"]] <- list(
geom = "point",
- data = data,
+ data = dataset,
aes = list(x = "n_Clusters", y = "Silhouette")
)
layers[["l4"]] <- list(
geom = "vline",
- data = data,
+ data = dataset,
xintercept = attributes(x)$n,
linetype = "dotted"
)
@@ -410,38 +426,41 @@ visualisation_recipe.n_clusters_silhouette <- function(x, ...) {
# Out
class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
- attr(layers, "data") <- data
+ attr(layers, "data") <- dataset
layers
}
#' @export
visualisation_recipe.n_clusters_dbscan <- function(x, ...) {
- data <- as.data.frame(x)
+ dataset <- as.data.frame(x)
layers <- list()
# Layers -----------------------
if ("gradient" %in% names(attributes(x))) {
- data$gradient <- datawizard::rescale(attributes(x)$gradient, c(min(data$eps), max(data$eps)))
+ dataset$gradient <- datawizard::rescale(
+ attributes(x)$gradient,
+ c(min(dataset$eps), max(dataset$eps))
+ )
layers[["l1"]] <- list(
geom = "line",
- data = data,
+ data = dataset,
aes = list(x = "n_Obs", y = "eps"),
size = 1
)
layers[["l2"]] <- list(
geom = "line",
- data = data,
+ data = dataset,
aes = list(x = "n_Obs", y = "gradient"),
color = "red",
linetype = "dashed"
)
layers[["l3"]] <- list(
geom = "hline",
- data = data,
+ data = dataset,
yintercept = attributes(x)$eps,
linetype = "dotted"
)
@@ -452,24 +471,27 @@ visualisation_recipe.n_clusters_dbscan <- function(x, ...) {
title = "DBSCAN Method"
)
} else {
- data$y <- datawizard::rescale(data$total_SS, c(min(data$n_Clusters), max(data$n_Clusters)))
+ dataset$y <- datawizard::rescale(
+ dataset$total_SS,
+ c(min(dataset$n_Clusters), max(dataset$n_Clusters))
+ )
layers[["l1"]] <- list(
geom = "line",
- data = data,
+ data = dataset,
aes = list(x = "eps", y = "n_Clusters"),
size = 1
)
layers[["l2"]] <- list(
geom = "line",
- data = data,
+ data = dataset,
aes = list(x = "eps", y = "y"),
color = "red",
linetype = "dashed"
)
layers[["l3"]] <- list(
geom = "vline",
- data = data,
+ data = dataset,
xintercept = attributes(x)$eps,
linetype = "dotted"
)
@@ -483,7 +505,7 @@ visualisation_recipe.n_clusters_dbscan <- function(x, ...) {
# Out
class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
- attr(layers, "data") <- data
+ attr(layers, "data") <- dataset
layers
}
@@ -505,6 +527,6 @@ plot.n_clusters_dbscan <- plot.n_clusters_elbow
#' @export
plot.n_clusters_hclust <- function(x, ...) {
insight::check_if_installed("pvclust")
- graphics::plot(attributes(x)$model)
- pvclust::pvrect(attributes(x)$model, alpha = attributes(x)$ci, pv = "si")
+ graphics::plot(attributes(x)[["model"]])
+ pvclust::pvrect(attributes(x)[["model"]], alpha = attributes(x)$ci, pv = "si")
}
diff --git a/R/p_direction.R b/R/p_direction.R
new file mode 100644
index 000000000..313051669
--- /dev/null
+++ b/R/p_direction.R
@@ -0,0 +1,178 @@
+#' @importFrom bayestestR p_direction
+#' @export
+bayestestR::p_direction
+
+
+#' @title Probability of Direction (pd)
+#' @name p_direction.lm
+#'
+#' @description Compute the **Probability of Direction** (*pd*, also known as
+#' the Maximum Probability of Effect - *MPE*). This can be interpreted as the
+#' probability that a parameter (described by its full confidence, or
+#' "compatibility" interval) is strictly positive or negative (whichever is the
+#' most probable). Although differently expressed, this index is fairly similar
+#' (i.e., is strongly correlated) to the frequentist *p-value* (see 'Details').
+#'
+#' @param x A statistical model.
+#' @inheritParams bayestestR::p_direction
+#' @inheritParams model_parameters.default
+#' @param ... Arguments passed to other methods, e.g. `ci()`. Arguments like
+#' `vcov` or `vcov_args` can be used to compute confidence intervals using a
+#' specific variance-covariance matrix for the standard errors.
+#'
+#' @seealso See also [`equivalence_test()`], [`p_function()`] and
+#' [`p_significance()`] for functions related to checking effect existence and
+#' significance.
+#'
+#' @inheritSection bayestestR::p_direction What is the *pd*?
+#'
+#' @inheritSection bayestestR::p_direction Relationship with the p-value
+#'
+#' @inheritSection bayestestR::p_direction Possible Range of Values
+#'
+#' @inheritSection model_parameters Statistical inference - how to quantify evidence
+#'
+#' @references
+#'
+#' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is
+#' flat (p > 0.05): Significance thresholds and the crisis of unreplicable
+#' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544}
+#'
+#' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference,
+#' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022)
+#' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
+#'
+#' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1).
+#' Retrieved from https://lakens.github.io/statistical_inferences/.
+#' \doi{10.5281/ZENODO.6409077}
+#'
+#' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing
+#' for Psychological Research: A Tutorial. Advances in Methods and Practices
+#' in Psychological Science, 1(2), 259–269. \doi{10.1177/2515245918770963}
+#'
+#' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019).
+#' Indices of Effect Existence and Significance in the Bayesian Framework.
+#' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767}
+#'
+#' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical
+#' science: replace confidence and significance by compatibility and surprise.
+#' BMC Medical Research Methodology (2020) 20:244.
+#'
+#' - Schweder T. Confidence is epistemic probability for empirical science.
+#' Journal of Statistical Planning and Inference (2018) 195:116–125.
+#' \doi{10.1016/j.jspi.2017.09.016}
+#'
+#' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors.
+#' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory
+#' Data Confrontation in Economics, pp. 285-217. Princeton University Press,
+#' Princeton, NJ, 2003
+#'
+#' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling.
+#' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
+#'
+#' @return A data frame.
+#'
+#' @examplesIf requireNamespace("bayestestR") && require("see", quietly = TRUE) && requireNamespace("sandwich")
+#' data(qol_cancer)
+#' model <- lm(QoL ~ time + age + education, data = qol_cancer)
+#' p_direction(model)
+#'
+#' # based on heteroscedasticity-robust standard errors
+#' p_direction(model, vcov = "HC3")
+#'
+#' result <- p_direction(model)
+#' plot(result)
+#' @export
+p_direction.lm <- function(x,
+ ci = 0.95,
+ method = "direct",
+ null = 0,
+ vcov = NULL,
+ vcov_args = NULL,
+ ...) {
+ # generate normal distribution based on CI range
+ result <- .posterior_ci(x, ci, vcov = vcov, vcov_args = vcov_args, ...)
+
+ # copy
+ out <- result$out
+ posterior <- result$posterior
+
+ # add pd
+ out$pd <- as.numeric(bayestestR::p_direction(
+ posterior,
+ method = method,
+ null = null,
+ ...
+ ))
+
+ # reorder
+ out <- out[intersect(c("Parameter", "CI", "CI_low", "CI_high", "pd", "Effects", "Component"), colnames(out))]
+
+ attr(out, "data") <- posterior
+ attr(out, "null") <- null
+ class(out) <- c("p_direction_lm", "p_direction", "see_p_direction", "data.frame")
+ out
+}
+
+
+# methods ---------------------------------------------------------------------
+
+#' @export
+print.p_direction_lm <- function(x, digits = 2, p_digits = 3, ...) {
+ null <- attributes(x)$null
+ caption <- sprintf(
+ "Probability of Direction (null: %s)",
+ insight::format_value(null, digits = digits, protect_integer = TRUE)
+ )
+
+ x <- insight::format_table(x, digits = digits, p_digits = p_digits)
+ cat(insight::export_table(x, title = caption, ...))
+}
+
+
+# other classes --------------------------------------------------------------
+
+#' @export
+p_direction.glm <- p_direction.lm
+
+#' @export
+p_direction.coxph <- p_direction.lm
+
+#' @export
+p_direction.svyglm <- p_direction.lm
+
+#' @export
+p_direction.glmmTMB <- p_direction.lm
+
+#' @export
+p_direction.merMod <- p_direction.lm
+
+#' @export
+p_direction.wbm <- p_direction.lm
+
+#' @export
+p_direction.lme <- p_direction.lm
+
+#' @export
+p_direction.gee <- p_direction.lm
+
+#' @export
+p_direction.gls <- p_direction.lm
+
+#' @export
+p_direction.feis <- p_direction.lm
+
+#' @export
+p_direction.felm <- p_direction.lm
+
+#' @export
+p_direction.mixed <- p_direction.lm
+
+#' @export
+p_direction.hurdle <- p_direction.lm
+
+#' @export
+p_direction.zeroinfl <- p_direction.lm
+
+#' @export
+p_direction.rma <- p_direction.lm
diff --git a/R/p_function.R b/R/p_function.R
index d60afed34..14ee7c94a 100644
--- a/R/p_function.R
+++ b/R/p_function.R
@@ -18,12 +18,16 @@
#' @inheritParams model_parameters
#' @inheritParams model_parameters.default
#' @inheritParams model_parameters.glmmTMB
+#' @inheritParams standard_error
#'
#' @note
#' Curently, `p_function()` computes intervals based on Wald t- or z-statistic.
#' For certain models (like mixed models), profiled intervals may be more
#' accurate, however, this is currently not supported.
#'
+#' @seealso See also [`equivalence_test()`] and [`p_significance()`] for
+#' functions related to checking effect existence and significance.
+#'
#' @details
#' ## Compatibility intervals and continuous _p_-values for different estimate values
#'
@@ -75,7 +79,7 @@
#' _p_<0.05 can arise from assumption violations even if the effect under
#' study is null" (_Gelman/Greenland 2019_).
#'
-#' ## Probabilistic interpretation of compatibility intervals
+#' ## Probabilistic interpretation of p-values and compatibility intervals
#'
#' Schweder (2018) resp. Schweder and Hjort (2016) (and others) argue that
#' confidence curves (as produced by `p_function()`) have a valid probabilistic
@@ -106,21 +110,55 @@
#' "The realized confidence distribution is clearly an epistemic probability
#' distribution" (_Schweder 2018_). In Bayesian words, compatibility intervals
#' (or confidence distributons, or consonance curves) are "posteriors without
-#' priors" (_Schweder, Hjort, 2003_). In this regard, interpretation of _p_-values
-#' might be guided using [`bayestestR::p_to_pd()`].
-#'
-#' ## Compatibility intervals - is their interpretation conditional or not?
-#'
-#' The fact that the term "conditional" is used in different meanings, is
-#' confusing and unfortunate. Thus, we would summarize the probabilistic
-#' interpretation of compatibility intervals as follows: The intervals are built
-#' from the data _and_ our modeling assumptions. The accuracy of the intervals
-#' depends on our model assumptions. If a value is outside the interval, that
-#' might be because (1) that parameter value isn't supported by the data, or
-#' (2) the modeling assumptions are a poor fit for the situation. When we make
-#' bad assumptions, the compatibility interval might be too wide or (more
-#' commonly and seriously) too narrow, making us think we know more about the
-#' parameter than is warranted.
+#' priors" (_Schweder, Hjort, 2003_).
+#'
+#' The _p_-value indicates the degree of compatibility of the endpoints of the
+#' interval at a given confidence level with (1) the observed data and (2) model
+#' assumptions. The observed point estimate (_p_-value = 1) is the value
+#' estimated to be _most compatible_ with the data and model assumptions,
+#' whereas values far from the observed point estimate (where _p_
+#' approaches 0) are least compatible with the data and model assumptions
+#' (_Schweder and Hjort 2016, pp. 60-61; Amrhein and Greenland 2022_). In this
+#' regards, _p_-values are statements about _confidence_ or _compatibility_:
+#' The p-value is not an absolute measure of evidence for a model (such as the
+#' null/alternative model), it is a continuous measure of the compatibility of
+#' the observed data with the model used to compute it (_Greenland et al. 2016_,
+#' _Greenland 2023_). Going one step further, and following _Schweder_, p-values
+#' can be considered as _epistemic probability_ - "not necessarily of the
+#' hypothesis being true, but of it _possibly_ being true" (_Schweder 2018_).
+#' Hence, the interpretation of _p_-values might be guided using
+#' [`bayestestR::p_to_pd()`].
+#'
+#' ## Probability or compatibility?
+#'
+#' We here presented the discussion of p-values and confidence intervals from the
+#' perspective of two paradigms, one saying that probability statements can be
+#' made, one saying that interpretation is guided in terms of "compatibility".
+#' Cox and Hinkley say, "interval estimates cannot be taken as probability
+#' statements" (_Cox and Hinkley 1979: 208_), which conflicts with the Schweder
+#' and Hjort confidence distribution school. However, if you view interval
+#' estimates as being intervals of values being consistent with the data,
+#' this comes close to the idea of (epistemic) probability. We do not believe that
+#' these two paradigms contradict or exclude each other. Rather, the aim is to
+#' emphasize one point of view or the other, i.e. to place the linguistic
+#' nuances either on 'compatibility' or 'probability'.
+#'
+#' The main take-away is *not* to interpret p-values as dichotomous decisions
+#' that distinguish between "we found an effect" (statistically significant) vs.
+#' "we found no effect" (statistically not significant) (_Altman and Bland 1995_).
+#'
+#' ## Compatibility intervals - is their interpretation "conditional" or not?
+#'
+#' The fact that the term "conditional" is used in different meanings in
+#' statistics, is confusing and unfortunate. Thus, we would summarize the
+#' (probabilistic) interpretation of compatibility intervals as follows: The
+#' intervals are built from the data _and_ our modeling assumptions. The
+#' accuracy of the intervals depends on our model assumptions. If a value is
+#' outside the interval, that might be because (1) that parameter value isn't
+#' supported by the data, or (2) the modeling assumptions are a poor fit for the
+#' situation. When we make bad assumptions, the compatibility interval might be
+#' too wide or (more commonly and seriously) too narrow, making us think we know
+#' more about the parameter than is warranted.
#'
#' When we say "there is a 95% chance the true value is in the interval", that is
#' a statement of _epistemic probability_ (i.e. description of uncertainty related
@@ -130,14 +168,42 @@
#' probability properties, from which we can draw _epistemic_ probabilistic
#' statements of uncertainty (_Schweder and Hjort 2016_).
#'
+#' ## Functions in the parameters package to check for effect existence and significance
+#'
+#' The **parameters** package provides several options or functions to aid
+#' statistical inference. Beyond `p_function()`, there are, for example:
+#' - [`equivalence_test()`][equivalence_test.lm], to compute the (conditional)
+#' equivalence test for frequentist models
+#' - [`p_significance()`][p_significance.lm], to compute the probability of
+#' *practical significance*, which can be conceptualized as a unidirectional
+#' equivalence test
+#' - the `pd` argument (setting `pd = TRUE`) in `model_parameters()` includes
+#' a column with the *probability of direction*, i.e. the probability that a
+#' parameter is strictly positive or negative. See [`bayestestR::p_direction()`]
+#' for details. If plotting is desired, the [`p_direction()`][p_direction.lm]
+#' function can be used, together with `plot()`.
+#' - the `s_value` argument (setting `s_value = TRUE`) in `model_parameters()`
+#' replaces the p-values with their related _S_-values (*Rafi and Greenland 2020*)
+#' - finally, it is possible to generate distributions of model coefficients by
+#' generating bootstrap-samples (setting `bootstrap = TRUE`) or simulating
+#' draws from model coefficients using [`simulate_model()`]. These samples
+#' can then be treated as "posterior samples" and used in many functions from
+#' the **bayestestR** package.
+#'
#' @return A data frame with p-values and compatibility intervals.
#'
#' @references
+#' - Altman DG, Bland JM. Absence of evidence is not evidence of absence. BMJ.
+#' 1995;311(7003):485. \doi{10.1136/bmj.311.7003.485}
+#'
#' - Amrhein V, Greenland S. Discuss practical importance of results based on
#' interval estimates and p-value functions, not only on point estimates and
#' null p-values. Journal of Information Technology 2022;37:316–20.
#' \doi{10.1177/02683962221105904}
#'
+#' - Cox DR, Hinkley DV. 1979. Theoretical Statistics. 6th edition.
+#' Chapman and Hall/CRC
+#'
#' - Fraser DAS. The P-value function and statistical inference. The American
#' Statistician. 2019;73(sup1):135-147. \doi{10.1080/00031305.2018.1556735}
#'
@@ -148,6 +214,16 @@
#' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022)
#' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
#'
+#' - Greenland S, Senn SJ, Rothman KJ, Carlin JB, Poole C, Goodman SN, et al.
+#' (2016). Statistical tests, P values, confidence intervals, and power: A
+#' guide to misinterpretations. European Journal of Epidemiology. 31:337-350.
+#' \doi{10.1007/s10654-016-0149-3}
+#'
+#' - Greenland S (2023). Divergence versus decision P-values: A distinction
+#' worth making in theory and keeping in practice: Or, how divergence P-values
+#' measure evidence even when decision P-values do not. Scand J Statist, 50(1),
+#' 54-88. \doi{10.1111/sjos.12625}
+#'
#' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical
#' science: Replace confidence and significance by compatibility and surprise.
#' BMC Medical Research Methodology. 2020;20(1):244. \doi{10.1186/s12874-020-01105-9}
@@ -188,6 +264,8 @@ p_function <- function(model,
exponentiate = FALSE,
effects = "fixed",
component = "all",
+ vcov = NULL,
+ vcov_args = NULL,
keep = NULL,
drop = NULL,
verbose = TRUE,
@@ -199,7 +277,9 @@ p_function <- function(model,
se <- standard_error(
model,
effects = effects,
- component = component
+ component = component,
+ vcov = vcov,
+ vcov_args = vcov_args
)$SE
if (is.null(dof) || length(dof) == 0 || .is_chi2_model(model, dof)) {
diff --git a/R/p_significance.R b/R/p_significance.R
new file mode 100644
index 000000000..c8f8eeb33
--- /dev/null
+++ b/R/p_significance.R
@@ -0,0 +1,337 @@
+#' @importFrom bayestestR p_significance
+#' @export
+bayestestR::p_significance
+
+
+
+#' @title Practical Significance (ps)
+#'
+#' @description Compute the probability of **Practical Significance** (*ps*),
+#' which can be conceptualized as a unidirectional equivalence test. It returns
+#' the probability that an effect is above a given threshold corresponding to a
+#' negligible effect in the median's direction, considering a parameter's _full_
+#' confidence interval. In other words, it returns the probability of a clear
+#' direction of an effect, which is larger than the smallest effect size of
+#' interest (e.g., a minimal important difference). Its theoretical range is
+#' from zero to one, but the *ps* is typically larger than 0.5 (to indicate
+#' practical significance).
+#'
+#' In comparison to the [`equivalence_test()`] function, where the *SGPV*
+#' (second generation p-value) describes the proportion of the _full_ confidence
+#' interval that is _inside_ the ROPE, the value returned by `p_significance()`
+#' describes the _larger_ proportion of the _full_ confidence interval that is
+#' _outside_ the ROPE. This makes `p_significance()` comparable to
+#' [`bayestestR::p_direction()`], however, while `p_direction()` compares to a
+#' point-null by default, `p_significance()` compares to a range-null.
+#'
+#' @param x A statistical model.
+#' @inheritParams bayestestR::p_significance
+#' @inheritParams model_parameters.default
+#' @param verbose Toggle warnings and messages.
+#' @param ... Arguments passed to other methods.
+#'
+#' @seealso For more details, see [`bayestestR::p_significance()`]. See also
+#' [`equivalence_test()`], [`p_function()`] and [`bayestestR::p_direction()`]
+#' for functions related to checking effect existence and significance.
+#'
+#' @details `p_significance()` returns the proportion of the _full_ confidence
+#' interval range (assuming a normally or t-distributed, equal-tailed interval,
+#' based on the model) that is outside a certain range (the negligible effect,
+#' or ROPE, see argument `threshold`). If there are values of the distribution
+#' both below and above the ROPE, `p_significance()` returns the higher
+#' probability of a value being outside the ROPE. Typically, this value should
+#' be larger than 0.5 to indicate practical significance. However, if the range
+#' of the negligible effect is rather large compared to the range of the
+#' confidence interval, `p_significance()` will be less than 0.5, which
+#' indicates no clear practical significance.
+#'
+#' Note that the assumed interval, which is used to calculate the practical
+#' significance, is an estimation of the _full interval_ based on the chosen
+#' confidence level. For example, if the 95% confidence interval of a
+#' coefficient ranges from -1 to 1, the underlying _full (normally or
+#' t-distributed) interval_ approximately ranges from -1.9 to 1.9, see also
+#' following code:
+#'
+#' ```
+#' # simulate full normal distribution
+#' out <- bayestestR::distribution_normal(10000, 0, 0.5)
+#' # range of "full" distribution
+#' range(out)
+#' # range of 95% CI
+#' round(quantile(out, probs = c(0.025, 0.975)), 2)
+#' ```
+#'
+#' This ensures that the practical significance always refers to the general
+#' compatible parameter space of coefficients. Therefore, the _full interval_ is
+#' similar to a Bayesian posterior distribution of an equivalent Bayesian model,
+#' see following code:
+#'
+#' ```
+#' library(bayestestR)
+#' library(brms)
+#' m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
+#' m2 <- brm(mpg ~ gear + wt + cyl + hp, data = mtcars)
+#' # probability of significance (ps) for frequentist model
+#' p_significance(m)
+#' # similar to ps of Bayesian models
+#' p_significance(m2)
+#' # similar to ps of simulated draws / bootstrap samples
+#' p_significance(simulate_model(m))
+#' ```
+#'
+#' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html)
+#' implemented in the [**see**-package](https://easystats.github.io/see/).
+#'
+#' @inheritSection model_parameters Statistical inference - how to quantify evidence
+#'
+#' @references
+#'
+#' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is
+#' flat (p > 0.05): Significance thresholds and the crisis of unreplicable
+#' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544}
+#'
+#' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference,
+#' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022)
+#' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
+#'
+#' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1).
+#' Retrieved from https://lakens.github.io/statistical_inferences/.
+#' \doi{10.5281/ZENODO.6409077}
+#'
+#' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing
+#' for Psychological Research: A Tutorial. Advances in Methods and Practices
+#' in Psychological Science, 1(2), 259–269. \doi{10.1177/2515245918770963}
+#'
+#' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019).
+#' Indices of Effect Existence and Significance in the Bayesian Framework.
+#' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767}
+#'
+#' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical
+#' science: replace confidence and significance by compatibility and surprise.
+#' BMC Medical Research Methodology (2020) 20:244.
+#'
+#' - Schweder T. Confidence is epistemic probability for empirical science.
+#' Journal of Statistical Planning and Inference (2018) 195:116–125.
+#' \doi{10.1016/j.jspi.2017.09.016}
+#'
+#' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors.
+#' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory
+#' Data Confrontation in Economics, pp. 285-217. Princeton University Press,
+#' Princeton, NJ, 2003
+#'
+#' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling.
+#' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
+#'
+#' @return A data frame with columns for the parameter names, the confidence
+#' intervals and the values for practical significance. Higher values indicate
+#' more practical significance (upper bound is one).
+#'
+#' @examplesIf requireNamespace("bayestestR") && packageVersion("bayestestR") > "0.14.0" && requireNamespace("sandwich")
+#' data(qol_cancer)
+#' model <- lm(QoL ~ time + age + education, data = qol_cancer)
+#'
+#' p_significance(model)
+#' p_significance(model, threshold = c(-0.5, 1.5))
+#'
+#' # based on heteroscedasticity-robust standard errors
+#' p_significance(model, vcov = "HC3")
+#'
+#' if (require("see", quietly = TRUE)) {
+#' result <- p_significance(model)
+#' plot(result)
+#' }
+#' @export
+p_significance.lm <- function(x,
+ threshold = "default",
+ ci = 0.95,
+ vcov = NULL,
+ vcov_args = NULL,
+ verbose = TRUE,
+ ...) {
+ # generate normal distribution based on CI range
+ result <- .posterior_ci(x, ci, vcov = vcov, vcov_args = vcov_args, ...)
+
+ # copy
+ out <- result$out
+ posterior <- result$posterior
+
+ # calculate the ROPE range - for multiple thresholds, we have to check
+ # each list element for "default", to replace it with the appropriate range
+ if (is.list(threshold)) {
+ threshold <- lapply(threshold, function(i) {
+ if (all(i == "default")) {
+ i <- bayestestR::rope_range(x, verbose = verbose)
+ }
+ i
+ })
+ } else if (all(threshold == "default")) {
+ threshold <- bayestestR::rope_range(x, verbose = verbose)
+ }
+
+ # add ps
+ result_ps <- bayestestR::p_significance(
+ posterior,
+ threshold = threshold,
+ verbose = verbose
+ )
+ out$ps <- as.numeric(result_ps)
+
+ # for list-thresholds, we have the list as attribute and need to save it as
+ # data.frame
+ if (is.list(threshold)) {
+ # save for later
+ threshold_data <- stats::setNames(
+ as.data.frame(do.call(rbind, attributes(result_ps)$threshold)),
+ c("ROPE_low", "ROPE_high")
+ )
+ out <- cbind(out, threshold_data)
+ keep <- c("Parameter", "CI", "CI_low", "CI_high", "ROPE_low", "ROPE_high", "ps", "Effects", "Component")
+ } else {
+ keep <- c("Parameter", "CI", "CI_low", "CI_high", "ps", "Effects", "Component")
+ }
+
+ # for plot, we need to have it numeric
+ if (!is.numeric(threshold) && !is.list(threshold)) {
+ threshold <- 0.1
+ }
+
+ # Reorder columns of 'out' to keep only the relevant ones
+ out <- out[intersect(keep, colnames(out))]
+
+ attr(out, "data") <- posterior
+ attr(out, "threshold") <- threshold
+ class(out) <- c("p_significance_lm", "p_significance", "see_p_significance", "data.frame")
+ out
+}
+
+
+# helper ----------------------------------------------------------------------
+
+.posterior_ci <- function(x, ci, vcov = NULL, vcov_args = NULL, ...) {
+ # first, we need CIs
+ if (inherits(x, "parameters_model")) {
+ # for model_parameters objects, directly extract CIs
+ out <- as.data.frame(x)[intersect(
+ c("Parameter", "CI_low", "CI_high", "Component", "Effects"),
+ colnames(x)
+ )]
+ ci <- attributes(x)$ci
+ # and extract degrees of freedom
+ df_column <- grep("(df|df_error)", colnames(x))
+ if (length(df_column) > 0) {
+ dof <- unique(x[[df_column]])
+ if (length(dof) > 1) {
+ dof <- Inf
+ }
+ } else {
+ dof <- Inf
+ }
+ } else {
+ out <- ci(x, ci = ci, vcov = vcov, vcov_args = vcov_args, ...)
+ dof <- .safe(insight::get_df(x, type = "wald"), Inf)
+ }
+ # we now iterate all confidence intervals and create an approximate normal
+ # distribution that covers the CI-range.
+ posterior <- as.data.frame(lapply(seq_len(nrow(out)), function(i) {
+ ci_range <- as.numeric(out[i, c("CI_low", "CI_high")])
+ .generate_posterior_from_ci(ci, ci_range, dof = dof)
+ }))
+ colnames(posterior) <- out$Parameter
+
+ # deal with Effects and Component columns
+ if ("Effects" %in% colnames(out) && insight::n_unique(out$Effects) == 1) {
+ out$Effects <- NULL
+ }
+ if ("Component" %in% colnames(out) && insight::n_unique(out$Component) == 1) {
+ out$Component <- NULL
+ }
+
+ # check we don't have duplicated columns in "posterior" we need this for
+ # plotting
+ if (anyDuplicated(colnames(posterior)) > 0 && !is.null(out$Component)) {
+ comps <- .rename_values(out$Component, "zero_inflated", "zi")
+ comps <- .rename_values(comps, "conditional", "cond")
+ colnames(posterior) <- paste0(out$Parameter, "_", comps)
+ out$Parameter <- paste0(out$Parameter, "_", comps)
+ }
+ list(out = out, posterior = posterior)
+}
+
+
+# methods ---------------------------------------------------------------------
+
+#' @export
+print.p_significance_lm <- function(x, digits = 2, ...) {
+ threshold <- attributes(x)$threshold
+ # Check if threshold is a list, which indicates multiple thresholds
+ if (is.list(threshold)) {
+ caption <- "Practical Significance"
+ } else {
+ # make sure it's numeric
+ if (!is.numeric(threshold)) {
+ threshold <- 0.1
+ }
+ # make sure we have both bounds for the range
+ if (length(threshold) == 1) {
+ threshold <- c(threshold * -1, threshold)
+ }
+ caption <- sprintf(
+ "Practical Significance (threshold: %s)",
+ toString(insight::format_value(threshold, digits = 2))
+ )
+ }
+ x$ps <- insight::format_pd(x$ps, name = NULL)
+ x <- insight::format_table(x, digits = digits)
+ cat(insight::export_table(x, title = caption, ...))
+}
+
+
+# other classes --------------------------------------------------------------
+
+#' @export
+p_significance.glm <- p_significance.lm
+
+#' @export
+p_significance.coxph <- p_significance.lm
+
+#' @export
+p_significance.svyglm <- p_significance.lm
+
+#' @export
+p_significance.glmmTMB <- p_significance.lm
+
+#' @export
+p_significance.merMod <- p_significance.lm
+
+#' @export
+p_significance.wbm <- p_significance.lm
+
+#' @export
+p_significance.lme <- p_significance.lm
+
+#' @export
+p_significance.gee <- p_significance.lm
+
+#' @export
+p_significance.gls <- p_significance.lm
+
+#' @export
+p_significance.feis <- p_significance.lm
+
+#' @export
+p_significance.felm <- p_significance.lm
+
+#' @export
+p_significance.mixed <- p_significance.lm
+
+#' @export
+p_significance.hurdle <- p_significance.lm
+
+#' @export
+p_significance.zeroinfl <- p_significance.lm
+
+#' @export
+p_significance.rma <- p_significance.lm
+
+#' @export
+p_significance.parameters_model <- p_significance.lm
diff --git a/R/pool_parameters.R b/R/pool_parameters.R
index f985a84af..8693641af 100644
--- a/R/pool_parameters.R
+++ b/R/pool_parameters.R
@@ -17,12 +17,10 @@
#'
#' @note
#' Models with multiple components, (for instance, models with zero-inflation,
-#' where predictors appear in the count and zero-inflation part) may fail in
-#' case of identical names for coefficients in the different model components,
-#' since the coefficient table is grouped by coefficient names for pooling. In
-#' such cases, coefficients of count and zero-inflation model parts would be
-#' combined. Therefore, the `component` argument defaults to
-#' `"conditional"` to avoid this.
+#' where predictors appear in the count and zero-inflation part, or models with
+#' dispersion component) may fail in rare situations. In this case, compute
+#' the pooled parameters for components separately, using the `component`
+#' argument.
#'
#' Some model objects do not return standard errors (e.g. objects of class
#' `htest`). For these models, no pooled confidence intervals nor p-values
@@ -68,7 +66,7 @@
pool_parameters <- function(x,
exponentiate = FALSE,
effects = "fixed",
- component = "conditional",
+ component = "all",
verbose = TRUE,
...) {
# check input, save original model -----
@@ -163,75 +161,92 @@ pool_parameters <- function(x,
params <- params[params$Effects != "random", ]
parameter_values <- x[[1]]$Parameter[x[[1]]$Effects != "random"]
}
- estimates <- split(params, factor(params$Parameter, levels = unique(parameter_values)))
+ # split by component
+ if (!is.null(params$Component) && insight::n_unique(params$Component) > 1) {
+ component_values <- x[[1]]$Component
+ estimates <- split(
+ params,
+ list(
+ factor(params$Parameter, levels = unique(parameter_values)),
+ factor(params$Component, levels = unique(component_values))
+ )
+ )
+ } else {
+ component_values <- NULL
+ estimates <- split(
+ params,
+ factor(params$Parameter, levels = unique(parameter_values))
+ )
+ }
# pool estimates etc. -----
pooled_params <- do.call(rbind, lapply(estimates, function(i) {
- # pooled estimate
- pooled_estimate <- mean(i$Coefficient)
+ # if we split by "component", some of the data frames might be empty
+ # in this case, just skip...
+ if (nrow(i) > 0) {
+ # pooled estimate
+ pooled_estimate <- mean(i$Coefficient)
+
+ # special models that have no standard errors (like "htest" objects)
+ if (is.null(i$SE) || all(is.na(i$SE))) {
+ out <- data.frame(
+ Coefficient = pooled_estimate,
+ SE = NA,
+ CI_low = NA,
+ CI_high = NA,
+ Statistic = NA,
+ df_error = NA,
+ p = NA,
+ stringsAsFactors = FALSE
+ )
+
+ if (verbose) {
+ insight::format_alert("Model objects had no standard errors. Cannot compute pooled confidence intervals and p-values.")
+ }
- # special models that have no standard errors (like "htest" objects)
- if (is.null(i$SE) || all(is.na(i$SE))) {
- out <- data.frame(
- Coefficient = pooled_estimate,
- SE = NA,
- CI_low = NA,
- CI_high = NA,
- Statistic = NA,
- df_error = NA,
- p = NA,
- stringsAsFactors = FALSE
- )
+ # regular models that have coefficients and standard errors
+ } else {
+ # pooled standard error
+ ubar <- mean(i$SE^2)
+ tmp <- ubar + (1 + 1 / len) * stats::var(i$Coefficient)
+ pooled_se <- sqrt(tmp)
+
+ # pooled degrees of freedom, Barnard-Rubin adjustment for small samples
+ df_column <- grep("(\\bdf\\b|\\bdf_error\\b)", colnames(i), value = TRUE)[1]
+ if (length(df_column)) {
+ pooled_df <- .barnad_rubin(m = nrow(i), b = stats::var(i$Coefficient), t = tmp, dfcom = unique(i[[df_column]]))
+ # validation check length
+ if (length(pooled_df) > 1 && length(pooled_se) == 1) {
+ pooled_df <- round(mean(pooled_df, na.rm = TRUE))
+ }
+ } else {
+ pooled_df <- Inf
+ }
- if (verbose) {
- insight::format_alert("Model objects had no standard errors. Cannot compute pooled confidence intervals and p-values.")
+ # pooled statistic
+ pooled_statistic <- pooled_estimate / pooled_se
+
+ # confidence intervals
+ alpha <- (1 + ci) / 2
+ fac <- suppressWarnings(stats::qt(alpha, df = pooled_df))
+
+ out <- data.frame(
+ Coefficient = pooled_estimate,
+ SE = pooled_se,
+ CI_low = pooled_estimate - pooled_se * fac,
+ CI_high = pooled_estimate + pooled_se * fac,
+ Statistic = pooled_statistic,
+ df_error = pooled_df,
+ p = 2 * stats::pt(abs(pooled_statistic), df = pooled_df, lower.tail = FALSE),
+ stringsAsFactors = FALSE
+ )
}
-
- # regular models that have coefficients and standard errors
+ out
} else {
- # pooled standard error
- ubar <- mean(i$SE^2)
- tmp <- ubar + (1 + 1 / len) * stats::var(i$Coefficient)
- pooled_se <- sqrt(tmp)
-
- # pooled degrees of freedom, Barnard-Rubin adjustment for small samples
- df_column <- grep("(\\bdf\\b|\\bdf_error\\b)", colnames(i), value = TRUE)[1]
- if (length(df_column)) {
- pooled_df <- .barnad_rubin(m = nrow(i), b = stats::var(i$Coefficient), t = tmp, dfcom = unique(i[[df_column]]))
- # validation check length
- if (length(pooled_df) > 1 && length(pooled_se) == 1) {
- pooled_df <- round(mean(pooled_df, na.rm = TRUE))
- }
- } else {
- pooled_df <- Inf
- }
-
- # pooled statistic
- pooled_statistic <- pooled_estimate / pooled_se
-
- # confidence intervals
- alpha <- (1 + ci) / 2
- fac <- suppressWarnings(stats::qt(alpha, df = pooled_df))
-
- out <- data.frame(
- Coefficient = pooled_estimate,
- SE = pooled_se,
- CI_low = pooled_estimate - pooled_se * fac,
- CI_high = pooled_estimate + pooled_se * fac,
- Statistic = pooled_statistic,
- df_error = pooled_df,
- p = 2 * stats::pt(abs(pooled_statistic), df = pooled_df, lower.tail = FALSE),
- stringsAsFactors = FALSE
- )
- }
-
- # add component, when pooling for all components
- if (identical(component, "all") && "Component" %in% colnames(i)) {
- out$Component <- i$Component[1]
+ NULL
}
- out
}))
@@ -249,13 +264,13 @@ pool_parameters <- function(x,
stringsAsFactors = FALSE
)
}))
+ pooled_params$Effects <- "fixed"
}
-
# reorder ------
pooled_params$Parameter <- parameter_values
- columns <- c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Component")
+ columns <- c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Effects", "Component")
pooled_params <- pooled_params[intersect(columns, colnames(pooled_params))]
@@ -268,6 +283,11 @@ pool_parameters <- function(x,
pooled_params <- merge(pooled_params, pooled_random, all = TRUE, sort = FALSE)
}
+ # add back component column
+ if (!is.null(component_values)) {
+ pooled_params$Component <- component_values
+ }
+
# this needs to be done extra here, cannot call ".add_model_parameters_attributes()"
pooled_params <- .add_pooled_params_attributes(
pooled_params,
diff --git a/R/principal_components.R b/R/principal_components.R
index 2166507eb..9f3e74f8e 100644
--- a/R/principal_components.R
+++ b/R/principal_components.R
@@ -42,7 +42,10 @@
#' predicted data and original data is equal.
#' @param ... Arguments passed to or from other methods.
#' @param pca_results The output of the `principal_components()` function.
-#' @param digits,labels Arguments for `print()`.
+#' @param digits Argument for `print()`, indicates the number of digits
+#' (rounding) to be used.
+#' @param labels Argument for `print()`, character vector of same length as
+#' columns in `x`. If provided, adds an additional column with the labels.
#' @param verbose Toggle warnings.
#' @inheritParams n_factors
#'
@@ -140,6 +143,17 @@
#' # Automated number of components
#' principal_components(mtcars[, 1:4], n = "auto")
#'
+#' # labels can be useful if variable names are not self-explanatory
+#' print(
+#' principal_components(mtcars[, 1:4], n = "auto"),
+#' labels = c(
+#' "Miles/(US) gallon",
+#' "Number of cylinders",
+#' "Displacement (cu.in.)",
+#' "Gross horsepower"
+#' )
+#' )
+#'
#' # Sparse PCA
#' principal_components(mtcars[, 1:7], n = 4, sparse = TRUE)
#' principal_components(mtcars[, 1:7], n = 4, sparse = "robust")
@@ -214,24 +228,26 @@ rotated_data <- function(pca_results, verbose = TRUE) {
rotated_matrix <- insight::get_predicted(attributes(pca_results)$model)
out <- NULL
- if (!is.null(original_data) && !is.null(rotated_matrix)) {
- compl_cases <- attributes(pca_results)$complete_cases
- if (is.null(compl_cases) && nrow(original_data) != nrow(rotated_matrix)) {
- if (verbose) {
- insight::format_warning("Could not retrieve information about missing data.")
- }
- return(NULL)
- }
- original_data$.parameters_merge_id <- seq_len(nrow(original_data))
- rotated_matrix$.parameters_merge_id <- (seq_len(nrow(original_data)))[compl_cases]
- out <- merge(original_data, rotated_matrix, by = ".parameters_merge_id", all = TRUE, sort = FALSE)
- out$.parameters_merge_id <- NULL
- } else {
+ if (is.null(original_data) || is.null(rotated_matrix)) {
if (verbose) {
insight::format_warning("Either the original or the rotated data could not be retrieved.")
}
return(NULL)
}
+
+ compl_cases <- attributes(pca_results)$complete_cases
+ if (is.null(compl_cases) && nrow(original_data) != nrow(rotated_matrix)) {
+ if (verbose) {
+ insight::format_warning("Could not retrieve information about missing data.")
+ }
+ return(NULL)
+ }
+
+ original_data$.parameters_merge_id <- seq_len(nrow(original_data))
+ rotated_matrix$.parameters_merge_id <- (seq_len(nrow(original_data)))[compl_cases]
+ out <- merge(original_data, rotated_matrix, by = ".parameters_merge_id", all = TRUE, sort = FALSE)
+ out$.parameters_merge_id <- NULL
+
out
}
@@ -267,7 +283,7 @@ principal_components.data.frame <- function(x,
insight::format_error("Sparse PCA is currently incompatible with rotation. Use either `sparse=TRUE` or `rotation`.")
}
- loadings <- .pca_rotate(
+ pca_loadings <- .pca_rotate(
x,
n,
rotation = rotation,
@@ -277,8 +293,8 @@ principal_components.data.frame <- function(x,
...
)
- attr(loadings, "data") <- data_name
- return(loadings)
+ attr(pca_loadings, "data") <- data_name
+ return(pca_loadings)
}
# Compute PCA
@@ -350,60 +366,62 @@ principal_components.data.frame <- function(x,
# Compute loadings
if (length(model$sdev) > 1) {
- loadings <- as.data.frame(model$rotation %*% diag(model$sdev))
+ pca_loadings <- as.data.frame(model$rotation %*% diag(model$sdev))
} else {
- loadings <- as.data.frame(model$rotation %*% model$sdev)
+ pca_loadings <- as.data.frame(model$rotation %*% model$sdev)
}
- names(loadings) <- data_summary$Component
+ names(pca_loadings) <- data_summary$Component
# Format
- loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings)
- row.names(loadings) <- NULL
+ pca_loadings <- cbind(data.frame(Variable = row.names(pca_loadings)), pca_loadings)
+ row.names(pca_loadings) <- NULL
# Add information
loading_cols <- 2:(n + 1)
- loadings$Complexity <- (apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 /
- apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4))
+ pca_loadings$Complexity <- (apply(pca_loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 /
+ apply(pca_loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4))
# Add attributes
- attr(loadings, "summary") <- data_summary
- attr(loadings, "model") <- model
- attr(loadings, "rotation") <- "none"
- attr(loadings, "scores") <- model$x
- attr(loadings, "standardize") <- standardize
- attr(loadings, "additional_arguments") <- list(...)
- attr(loadings, "n") <- n
- attr(loadings, "type") <- "prcomp"
- attr(loadings, "loadings_columns") <- loading_cols
- attr(loadings, "complete_cases") <- stats::complete.cases(original_data)
+ attr(pca_loadings, "summary") <- data_summary
+ attr(pca_loadings, "model") <- model
+ attr(pca_loadings, "rotation") <- "none"
+ attr(pca_loadings, "scores") <- model$x
+ attr(pca_loadings, "standardize") <- standardize
+ attr(pca_loadings, "additional_arguments") <- list(...)
+ attr(pca_loadings, "n") <- n
+ attr(pca_loadings, "type") <- "prcomp"
+ attr(pca_loadings, "loadings_columns") <- loading_cols
+ attr(pca_loadings, "complete_cases") <- stats::complete.cases(original_data)
# Sorting
if (isTRUE(sort)) {
- loadings <- .sort_loadings(loadings)
+ pca_loadings <- .sort_loadings(pca_loadings)
}
# Replace by NA all cells below threshold
if (!is.null(threshold)) {
- loadings <- .filter_loadings(loadings, threshold = threshold)
+ pca_loadings <- .filter_loadings(pca_loadings, threshold = threshold)
}
# Add some more attributes
- attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold)
+ attr(pca_loadings, "loadings_long") <- .long_loadings(pca_loadings, threshold = threshold)
# here we match the original columns in the data set with the assigned components
# for each variable, so we know which column in the original data set belongs
# to which extracted component...
- attr(loadings, "closest_component") <-
- .closest_component(loadings, loadings_columns = loading_cols, variable_names = colnames(x))
- attr(loadings, "data") <- data_name
-
- attr(loadings, "dataset") <- original_data
+ attr(pca_loadings, "closest_component") <- .closest_component(
+ pca_loadings,
+ loadings_columns = loading_cols,
+ variable_names = colnames(x)
+ )
+ attr(pca_loadings, "data") <- data_name
+ attr(pca_loadings, "dataset") <- original_data
# add class-attribute for printing
- class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings)))
+ class(pca_loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(pca_loadings)))
- loadings
+ pca_loadings
}
diff --git a/R/print.compare_parameters.R b/R/print.compare_parameters.R
index 8845d0701..7d058267f 100644
--- a/R/print.compare_parameters.R
+++ b/R/print.compare_parameters.R
@@ -1,3 +1,35 @@
+#' @title Print comparisons of model parameters
+#' @name print.compare_parameters
+#'
+#' @description A `print()`-method for objects from [`compare_parameters()`].
+#'
+#' @param x An object returned by [`compare_parameters()`].
+#' @param engine Character string, naming the package or engine to be used for
+#' printing into HTML or markdown format. Currently supported `"gt"` (or
+#' `"default"`) to use the *gt* package to print to HTML and the default easystats
+#' engine to create markdown tables. If `engine = "tt"`, the *tinytable* package
+#' is used for printing to HTML or markdown. Not all `print()` methods support
+#' the `"tt"` engine yet. If a specific `print()` method has no `engine` argument,
+#' `insight::export_table()` is used, which uses *gt* for HTML printing.
+#' @inheritParams print.parameters_model
+#' @inheritSection print.parameters_model Global Options to Customize Messages and Tables when Printing
+#'
+#' @return Invisibly returns the original input object.
+#'
+#' @examplesIf require("gt", quietly = TRUE)
+#' \donttest{
+#' data(iris)
+#' lm1 <- lm(Sepal.Length ~ Species, data = iris)
+#' lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
+#'
+#' # custom style
+#' result <- compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})")
+#' print(result)
+#'
+#' # custom style, in HTML
+#' result <- compare_parameters(lm1, lm2, select = "{estimate}<br>({se})|{p}")
+#' print_html(result)
+#' }
#' @export
print.compare_parameters <- function(x,
split_components = TRUE,
@@ -46,7 +78,8 @@ print.compare_parameters <- function(x,
ci_brackets = ci_brackets,
format = "text",
groups = groups,
- zap_small = zap_small
+ zap_small = zap_small,
+ ...
)
# if we have multiple components, we can align colum width across components here
diff --git a/R/print.parameters_model.R b/R/print.parameters_model.R
index 422791258..e8f6d1206 100644
--- a/R/print.parameters_model.R
+++ b/R/print.parameters_model.R
@@ -92,8 +92,11 @@
#' categorical predictors. The coefficient for the reference level is always
#' `0` (except when `exponentiate = TRUE`, then the coefficient will be `1`),
#' so this is just for completeness.
+#' @param ... Arguments passed down to [`format.parameters_model()`],
+#' [`insight::format_table()`] and [`insight::export_table()`]
#' @inheritParams insight::format_table
#' @inheritParams compare_parameters
+#' @inheritParams display.parameters_model
#'
#' @inheritSection format_parameters Interpretation of Interaction Terms
#' @inheritSection model_parameters Labeling the Degrees of Freedom
@@ -104,13 +107,13 @@
#' some messages providing additional information can be displayed or suppressed
#' using `options()`:
#'
-#' - `parameters_summary`: `options(parameters_summary = TRUE)` will override the
-#' `summary` argument in `model_parameters()` and always show the model summary
-#' for non-mixed models.
+#' - `parameters_info`: `options(parameters_info = TRUE)` will override the
+#' `include_info` argument in `model_parameters()` and always show the model
+#' summary for non-mixed models.
#'
-#' - `parameters_mixed_summary`: `options(parameters_mixed_summary = TRUE)` will
-#' override the `summary` argument in `model_parameters()` for mixed models, and
-#' will then always show the model summary.
+#' - `parameters_mixed_info`: `options(parameters_mixed_info = TRUE)` will
+#' override the `include_info` argument in `model_parameters()` for mixed
+#' models, and will then always show the model summary.
#'
#' - `parameters_cimethod`: `options(parameters_cimethod = TRUE)` will show the
#' additional information about the approximation method used to calculate
@@ -141,14 +144,16 @@
#' the default HTML engine for tables to `gt`, i.e. the _gt_ package is used to
#' create HTML tables. If set to `tt`, the _tinytable_ package is used.
#'
+#' - `insight_use_symbols`: `options(insight_use_symbols = TRUE)` will try to
+#' print unicode-chars for symbols as column names, wherever possible (e.g.,
+#' \ifelse{html}{\out{ω}}{\eqn{\omega}} instead of `Omega`).
+#'
#' @details `summary()` is a convenient shortcut for
-#' `print(object, select = "minimal", show_sigma = TRUE, show_formula = TRUE)`.
+#' `print(object, select = "minimal", show_sigma = TRUE, show_formula = TRUE)`.
#'
#' @return Invisibly returns the original input object.
#'
-#' @seealso There is a dedicated method to use inside rmarkdown files,
-#' [`print_md()`][print_md.parameters_model]. See also
-#' [`display()`][display.parameters_model].
+#' @seealso See also [`display()`][display.parameters_model].
#'
#' @examplesIf require("gt", quietly = TRUE) && require("glmmTMB", quietly = TRUE)
#' \donttest{
@@ -386,7 +391,7 @@ print.parameters_random <- function(x, digits = 2, ...) {
ci_width = "auto",
ci_brackets = TRUE,
format = "text",
- group = NULL,
+ groups = NULL,
include_reference = FALSE,
...) {
format(
@@ -401,7 +406,7 @@ print.parameters_random <- function(x, digits = 2, ...) {
ci_brackets = ci_brackets,
zap_small = zap_small,
format = format,
- group = group,
+ groups = groups,
include_reference = include_reference,
...
)
@@ -425,6 +430,7 @@ print.parameters_random <- function(x, digits = 2, ...) {
show_sigma <- ifelse(show_summary, TRUE, show_sigma)
show_formula <- ifelse(show_summary, TRUE, show_formula)
show_r2 <- .additional_arguments(x, "show_summary", FALSE)
+ show_rmse <- .additional_arguments(x, "show_summary", FALSE)
# set defaults, if necessary
if (is.null(model_sigma)) {
@@ -438,6 +444,7 @@ print.parameters_random <- function(x, digits = 2, ...) {
show_sigma = show_sigma,
show_formula = show_formula,
show_r2 = show_r2,
+ show_rmse = show_rmse,
format = format
)
}
diff --git a/R/print_html.R b/R/print_html.R
index d71aa194b..5c37b831e 100644
--- a/R/print_html.R
+++ b/R/print_html.R
@@ -1,6 +1,6 @@
# normal print ----------------------------
-#' @rdname display.parameters_model
+#' @rdname print.parameters_model
#' @export
print_html.parameters_model <- function(x,
pretty_names = TRUE,
@@ -138,6 +138,7 @@ print_html.parameters_simulate <- print_html.parameters_model
#' @export
print_html.parameters_sem <- print_html.parameters_model
+#' @rdname print.compare_parameters
#' @export
print_html.compare_parameters <- function(x,
caption = NULL,
diff --git a/R/print_md.R b/R/print_md.R
index 5a4990f54..f7bc40e64 100644
--- a/R/print_md.R
+++ b/R/print_md.R
@@ -1,6 +1,6 @@
# normal print ----------------------------
-#' @rdname display.parameters_model
+#' @rdname print.parameters_model
#' @export
print_md.parameters_model <- function(x,
pretty_names = TRUE,
@@ -123,6 +123,7 @@ print_md.parameters_simulate <- print_md.parameters_model
# compare parameters -------------------------
+#' @rdname print.compare_parameters
#' @export
print_md.compare_parameters <- function(x,
digits = 2,
@@ -251,9 +252,9 @@ print_md.parameters_efa_summary <- function(x, digits = 3, ...) {
table_caption <- "(Explained) Variance of Components"
if ("Parameter" %in% names(x)) {
- x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)")
+ x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") # nolint
} else if ("Component" %in% names(x)) {
- names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)")
+ names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") # nolint
}
insight::export_table(x, digits = digits, format = "markdown", caption = table_caption, align = "firstleft")
}
@@ -326,7 +327,7 @@ print_md.equivalence_test_lm <- function(x, digits = 2, ci_brackets = c("(", ")"
}
if (!is.null(rope)) {
- names(formatted_table)[names(formatted_table) == "% in ROPE"] <- sprintf("%% in ROPE (%.*f, %.*f)", digits, rope[1], digits, rope[2])
+ names(formatted_table)[names(formatted_table) == "% in ROPE"] <- sprintf("%% in ROPE (%.*f, %.*f)", digits, rope[1], digits, rope[2]) # nolint
}
insight::export_table(formatted_table, format = "markdown", caption = table_caption, align = "firstleft")
diff --git a/R/reduce_parameters.R b/R/reduce_parameters.R
index 8e12c852c..92bdb0aa5 100644
--- a/R/reduce_parameters.R
+++ b/R/reduce_parameters.R
@@ -86,7 +86,7 @@ reduce_data <- function(x, method = "PCA", n = "max", distance = "euclidean", ..
#' @export
reduce_parameters.data.frame <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) {
- x <- datawizard::to_numeric(x)
+ x <- datawizard::to_numeric(x, dummy_factors = TRUE)
# N factors
if (n == "max") {
@@ -144,7 +144,7 @@ reduce_parameters.data.frame <- function(x, method = "PCA", n = "max", distance
#' @export
reduce_parameters.lm <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) {
model_data <- reduce_parameters(
- datawizard::to_numeric(insight::get_predictors(x, ...), ...),
+ datawizard::to_numeric(insight::get_predictors(x, ...), ..., dummy_factors = TRUE),
method = method,
n = n,
distance = distance
diff --git a/R/select_parameters.R b/R/select_parameters.R
index 3588d74e4..93c697017 100644
--- a/R/select_parameters.R
+++ b/R/select_parameters.R
@@ -6,21 +6,17 @@
#' @param model A statistical model (of class `lm`, `glm`, or `merMod`).
#' @param ... Arguments passed to or from other methods.
#'
-#' @details
-#' \subsection{Classical lm and glm}{
-#' For frequentist GLMs, `select_parameters()` performs an AIC-based
-#' stepwise selection.
-#' }
+#' @section Classical lm and glm:
+#' For frequentist GLMs, `select_parameters()` performs an AIC-based stepwise
+#' selection.
#'
-#' \subsection{Mixed models}{
-#' For mixed-effects models of class `merMod`, stepwise selection is
-#' based on [cAIC4::stepcAIC()]. This step function
-#' only searches the "best" model based on the random-effects structure,
-#' i.e. `select_parameters()` adds or excludes random-effects until
-#' the cAIC can't be improved further.
-#' }
+#' @section Mixed models:
+#' For mixed-effects models of class `merMod`, stepwise selection is based on
+#' [`cAIC4::stepcAIC()`]. This step function only searches the "best" model
+#' based on the random-effects structure, i.e. `select_parameters()` adds or
+#' excludes random-effects until the cAIC can't be improved further.
#'
-#' @examples
+#' @examplesIf requireNamespace("lme4")
#' model <- lm(mpg ~ ., data = mtcars)
#' select_parameters(model)
#'
@@ -28,13 +24,11 @@
#' select_parameters(model)
#' \donttest{
#' # lme4 -------------------------------------------
-#' if (require("lme4")) {
-#' model <- lmer(
-#' Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species),
-#' data = iris
-#' )
-#' select_parameters(model)
-#' }
+#' model <- lme4::lmer(
+#' Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species),
+#' data = iris
+#' )
+#' select_parameters(model)
#' }
#'
#' @return The model refitted with optimal number of parameters.
@@ -45,6 +39,9 @@ select_parameters <- function(model, ...) {
#' @rdname select_parameters
+#' @param k The multiple of the number of degrees of freedom used for the penalty.
+#' Only `k = 2` gives the genuine AIC: `k = log(n)` is sometimes referred to as
+#' BIC or SBC.
#' @inheritParams stats::step
#' @export
select_parameters.lm <- function(model,
@@ -108,10 +105,15 @@ select_parameters.merMod <- function(model,
)
- # Using MuMIn's dredge(): works nicely BUT throws unnecessary warnings and requires to set global options for na.action even tho no NaNs.
+ # Using MuMIn's dredge(): works nicely BUT throws unnecessary warnings and
+ # requires to set global options for na.action even though there are no NaNs.
# The code is here: https://github.com/cran/MuMIn/blob/master/R/dredge.R Maybe it could be reimplemented?
# insight::check_if_installed("MuMIn")
- # model <- lmer(Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris, na.action = na.fail)
+ # model <- lmer(
+ # Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species),
+ # data = iris,
+ # na.action = na.fail
+ # )
# summary(MuMIn::get.models(MuMIn::dredge(model), 1)[[1]])
best
diff --git a/R/standardize_info.R b/R/standardize_info.R
index 0d6fe5b9f..dfb300b77 100644
--- a/R/standardize_info.R
+++ b/R/standardize_info.R
@@ -17,7 +17,7 @@
#'
#' @family standardize
#'
-#' @examples
+#' @examplesIf insight::check_if_installed("datawizard", minimum_version = "0.12.0", quietly = TRUE)
#' model <- lm(mpg ~ ., data = mtcars)
#' standardize_info(model)
#' standardize_info(model, robust = TRUE)
@@ -52,8 +52,8 @@ standardize_info.default <- function(model,
types <- parameters_type(model)
# model_matrix <- as.data.frame(stats::model.matrix(model))
model_matrix <- as.data.frame(insight::get_modelmatrix(model))
- data <- insight::get_data(model, source = "mf", verbose = FALSE)
- wgts <- insight::get_weights(model, na_rm = TRUE)
+ model_data <- insight::get_data(model, source = "mf", verbose = FALSE)
+ wgts <- insight::get_weights(model, remove_na = TRUE)
# validation check for ZI
if (mi$is_zero_inflated && verbose) {
@@ -94,7 +94,7 @@ standardize_info.default <- function(model,
# Response - Smart
out <- merge(
out,
- .std_info_response_smart(model, mi, data, model_matrix, types, robust = robust, w = wgts),
+ .std_info_response_smart(model, mi, data = model_data, model_matrix, types, robust = robust, w = wgts),
by = "Parameter", all = TRUE
)
@@ -109,7 +109,7 @@ standardize_info.default <- function(model,
out <- merge(
out,
.std_info_predictors_smart(model,
- data,
+ data = model_data,
params,
types,
robust = robust,
@@ -134,7 +134,7 @@ standardize_info.default <- function(model,
model, mi,
params,
model_matrix,
- data,
+ data = model_data,
types = types$Type,
robust = robust,
two_sd = two_sd,
@@ -181,11 +181,11 @@ standardize_info.default <- function(model,
# Get deviations for all parameters
means <- deviations <- rep(NA_real_, times = length(params))
for (i in seq_along(params)) {
- var <- params[i]
+ variable <- params[i]
info <- .std_info_predictor_smart(
data = data,
- variable = types[types$Parameter == var, "Variable"],
- type = types[types$Parameter == var, "Type"],
+ variable = types[types$Parameter == variable, "Variable"],
+ type = types[types$Parameter == variable, "Type"],
robust = robust,
two_sd = two_sd,
weights = w
@@ -213,7 +213,7 @@ standardize_info.default <- function(model,
two_sd = FALSE,
weights = NULL,
...) {
- if (type == "intercept") {
+ if (type == "intercept") { # nolint
info <- list(sd = 0, mean = 0)
} else if (type == "numeric") {
info <- .compute_std_info(
@@ -272,12 +272,12 @@ standardize_info.default <- function(model,
# Get deviations for all parameters
means <- deviations <- rep(NA_real_, length = length(names(model_matrix)))
for (i in seq_along(names(model_matrix))) {
- var <- names(model_matrix)[i]
+ variable <- names(model_matrix)[i]
if (types[i, "Type"] == "intercept") {
means[i] <- deviations[i] <- 0
} else {
std_info <- .compute_std_info(
- data = model_matrix, variable = var,
+ data = model_matrix, variable = variable,
robust = robust, two_sd = two_sd, weights = w
)
deviations[i] <- std_info$sd
@@ -337,9 +337,9 @@ standardize_info.default <- function(model,
}
means <- deviations <- rep(NA_real_, length = length(names(model_matrix)))
for (i in seq_along(names(model_matrix))) {
- var <- names(model_matrix)[i]
- if (any(types$Parameter == var) && types$Link[types$Parameter == var] == "Difference") {
- parent_var <- types$Variable[types$Parameter == var]
+ variable <- names(model_matrix)[i]
+ if (any(types$Parameter == variable) && types$Link[types$Parameter == variable] == "Difference") {
+ parent_var <- types$Variable[types$Parameter == variable]
intercept <- unique(data[[parent_var]])[1]
response_at_intercept <- response[data[[parent_var]] == intercept]
weights_at_intercept <- if (length(w)) w[data[[parent_var]] == intercept] else NULL
@@ -422,18 +422,19 @@ standardize_info.default <- function(model,
}
insight::check_if_installed("performance")
+ insight::check_if_installed("datawizard", minimum_version = "0.12.0")
f <- if (two_sd) 2 else 1
within_vars <- unclass(performance::check_heterogeneity_bias(model))
id <- insight::get_random(model)[[1]]
- w <- insight::get_weights(model, na_rm = TRUE)
+ w <- insight::get_weights(model, remove_na = TRUE)
## Find which parameters vary on level 1 ("within")
is_within <- logical(length = length(params))
is_within[] <- NA
for (i in seq_along(params)) {
- if (types[i] == "intercept") {
+ if (types[i] == "intercept") { # nolint
is_within[i] <- FALSE
} else if (types[i] == "numeric") {
is_within[i] <- insight::clean_names(params[i]) %in% within_vars
@@ -459,7 +460,7 @@ standardize_info.default <- function(model,
dm <- datawizard::demean(cbind(id, temp_d),
select = colnames(temp_d),
- group = "id"
+ by = "id"
)
dm <- dm[, paste0(colnames(temp_d), "_between"), drop = FALSE]
diff --git a/R/standardize_parameters.R b/R/standardize_parameters.R
index 1c3885271..8a2d2b3fc 100644
--- a/R/standardize_parameters.R
+++ b/R/standardize_parameters.R
@@ -13,7 +13,7 @@
#' @inheritParams datawizard::standardize.default
#' @inheritParams effectsize::chisq_to_phi
#' @param ... For `standardize_parameters()`, arguments passed to
-#' [model_parameters()], such as:
+#' [`model_parameters()`], such as:
#' - `ci_method`, `centrality` for Mixed models and Bayesian models...
#' - `exponentiate`, ...
#' - etc.
@@ -30,7 +30,7 @@
#' include interactions or transformations (e.g., polynomial or spline terms).
#' The `robust` (default to `FALSE`) argument enables a robust standardization
#' of data, i.e., based on the `median` and `MAD` instead of the `mean` and
-#' `SD`. **See [standardize()] for more details.**
+#' `SD`. **See [`datawizard::standardize()`] for more details.**
#' - **Note** that `standardize_parameters(method = "refit")` may not return
#' the same results as fitting a model on data that has been standardized with
#' `standardize()`; `standardize_parameters()` used the data used by the model
@@ -52,7 +52,7 @@
#' deviation of model's matrix' parameter of factors levels (transformed to
#' integers) or binary predictors. Although being inappropriate for these cases,
#' this method is the one implemented by default in other software packages,
-#' such as [lm.beta::lm.beta()].
+#' such as [`lm.beta::lm.beta()`].
#' - **smart** (Standardization of Model's parameters with Adjustment,
#' Reconnaissance and Transformation - *experimental*): Similar to `method =
#' "posthoc"` in that it does not involve model refitting. The difference is
@@ -64,9 +64,9 @@
#' delta.
#' - **pseudo** (*for 2-level (G)LMMs only*): In this (post-hoc) method, the
#' response and the predictor are standardized based on the level of prediction
-#' (levels are detected with [performance::check_heterogeneity_bias()]): Predictors
+#' (levels are detected with [`performance::check_heterogeneity_bias()`]): Predictors
#' are standardized based on their SD at level of prediction (see also
-#' [datawizard::demean()]); The outcome (in linear LMMs) is standardized based
+#' [`datawizard::demean()`]); The outcome (in linear LMMs) is standardized based
#' on a fitted random-intercept-model, where `sqrt(random-intercept-variance)`
#' is used for level 2 predictors, and `sqrt(residual-variance)` is used for
#' level 1 predictors (Hoffman 2015, page 342). A warning is given when a
@@ -91,9 +91,9 @@
#' equivalent to `exp(scale(X))`), the `"basic"` method standardizes the
#' transformed data (e.g. equivalent to `scale(exp(X))`).
#' \cr\cr
-#' See the *Transformed Variables* section in [standardize.default()] for more
-#' details on how different transformations are dealt with when `method =
-#' "refit"`.
+#' See the *Transformed Variables* section in [`datawizard::standardize.default()`]
+#' for more details on how different transformations are dealt with when
+#' `method = "refit"`.
#'
#' ## Confidence Intervals
#' The returned confidence intervals are re-scaled versions of the
@@ -225,6 +225,8 @@ standardize_parameters.default <- function(model,
class(model) <- class(model)[class(model) != "aov"]
}
pars <- model_parameters(model, ci = ci, standardize = NULL, effects = "fixed", as_draws = TRUE, ...)
+ # save attributes for later, these are lost in between
+ att <- attributes(pars)
# should post hoc exponentiate?
exponentiate <- isTRUE(eval(match.call()[["exponentiate"]], envir = parent.frame()))
@@ -267,6 +269,11 @@ standardize_parameters.default <- function(model,
pars$SE <- NULL
}
+ # add those attributes back here...
+ if (!is.null(att)) {
+ attributes(pars) <- utils::modifyList(att, attributes(pars))
+ }
+
## attributes
attr(pars, "std_method") <- method
attr(pars, "two_sd") <- two_sd
diff --git a/R/standardize_posteriors.R b/R/standardize_posteriors.R
index cccab54ca..27a9f5baf 100644
--- a/R/standardize_posteriors.R
+++ b/R/standardize_posteriors.R
@@ -72,7 +72,7 @@ standardise_posteriors <- standardize_posteriors
i <- match(deviations$Parameter, colnames(pars))
pars <- pars[, i]
- if (method == "basic") {
+ if (method == "basic") { # nolint
col_dev_resp <- "Deviation_Response_Basic"
col_dev_pred <- "Deviation_Basic"
} else if (method == "posthoc") {
diff --git a/R/utils.R b/R/utils.R
index 0e5e3c186..4725b710e 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -1,6 +1,6 @@
# small wrapper around this commonly used try-catch
.safe <- function(code, on_error = NULL) {
- if (getOption("easystats_erros", FALSE) && is.null(on_error)) {
+ if (isTRUE(getOption("easystats_errors", FALSE)) && is.null(on_error)) {
code
} else {
tryCatch(code, error = function(e) on_error)
@@ -219,3 +219,14 @@
}
ifnotfound
}
+
+.deprecated_warning <- function(old, new, verbose = TRUE) {
+ if (verbose) {
+ insight::format_warning(paste0(
+ "Argument `", old,
+ "` is deprecated and will be removed in the future. Please use `",
+ new,
+ "` instead."
+ ))
+ }
+}
diff --git a/R/utils_format.R b/R/utils_format.R
index 2b7e6c513..4b2a58d73 100644
--- a/R/utils_format.R
+++ b/R/utils_format.R
@@ -295,15 +295,15 @@
}
# fix coefficient column name for random effects
- if (!is.null(x$Effects) && all(x$Effects == "random") && any(colnames(x) %in% .all_coefficient_types())) {
- colnames(x)[colnames(x) %in% .all_coefficient_types()] <- "Coefficient"
+ if (!is.null(x$Effects) && all(x$Effects == "random") && any(colnames(x) %in% .all_coefficient_types)) {
+ colnames(x)[colnames(x) %in% .all_coefficient_types] <- "Coefficient"
}
# fix coefficient column name for mixed count and zi pars
if (!is.null(x$Component) &&
sum(c("conditional", "zero_inflated", "dispersion") %in% x$Component) >= 2 &&
- any(colnames(x) %in% .all_coefficient_types())) {
- colnames(x)[colnames(x) %in% .all_coefficient_types()] <- "Coefficient"
+ any(colnames(x) %in% .all_coefficient_types)) {
+ colnames(x)[colnames(x) %in% .all_coefficient_types] <- "Coefficient"
}
# random pars with level? combine into parameter column
@@ -476,13 +476,14 @@
# components into different tables, we change the column name for those "tables"
# that contain the random effects or zero-inflation parameters
-.all_coefficient_types <- function() {
- c(
- "Odds Ratio", "Risk Ratio", "Prevalence Ratio", "IRR", "Log-Odds",
- "Log-Mean", "Log-Ratio", "Log-Prevalence", "Probability", "Marginal Means",
- "Estimated Counts", "Ratio"
- )
-}
+.all_coefficient_types <- c(
+ "Odds Ratio", "Risk Ratio", "Prevalence Ratio", "IRR", "Log-Odds",
+ "Log-Mean", "Log-Ratio", "Log-Prevalence", "Probability", "Marginal Means",
+ "Estimated Counts", "Ratio"
+)
+
+
+.all_coefficient_names <- c("Coefficient", "Std_Coefficient", "Estimate", "Median", "Mean", "MAP")
.format_stan_parameters <- function(out) {
@@ -962,16 +963,16 @@
}
# fix column output
- if (inherits(attributes(x)$model, c("lavaan", "blavaan")) && "Label" %in% colnames(x)) {
+ if (inherits(attributes(x)[["model"]], c("lavaan", "blavaan")) && "Label" %in% colnames(x)) {
x$From <- ifelse(!nzchar(as.character(x$Label), keepNA = TRUE) | x$Label == x$To, x$From, paste0(x$From, " (", x$Label, ")")) # nolint
x$Label <- NULL
}
- if (inherits(attributes(x)$model, c("lavaan", "blavaan")) && !"Parameter" %in% colnames(x)) {
+ if (inherits(attributes(x)[["model"]], c("lavaan", "blavaan")) && !"Parameter" %in% colnames(x)) {
parameter_column <- colnames(x)[1]
}
- if (inherits(attributes(x)$model, c("lavaan", "blavaan")) && "Defined" %in% x$Component) {
+ if (inherits(attributes(x)[["model"]], c("lavaan", "blavaan")) && "Defined" %in% x$Component) {
x$From[x$Component == "Defined"] <- ""
x$Operator[x$Component == "Defined"] <- ""
x$To <- ifelse(x$Component == "Defined", paste0("(", x$To, ")"), x$To)
@@ -1095,8 +1096,8 @@
}
# rename columns for random part
- if (grepl("random", type, fixed = TRUE) && any(colnames(tables[[type]]) %in% .all_coefficient_types())) {
- colnames(tables[[type]])[colnames(tables[[type]]) %in% .all_coefficient_types()] <- "Coefficient"
+ if (grepl("random", type, fixed = TRUE) && any(colnames(tables[[type]]) %in% .all_coefficient_types)) {
+ colnames(tables[[type]])[colnames(tables[[type]]) %in% .all_coefficient_types] <- "Coefficient"
}
if (grepl("random", type, fixed = TRUE) && isTRUE(ran_pars)) {
@@ -1183,7 +1184,7 @@
# fix non-equal length of columns
final_table <- .fix_nonmatching_columns(
final_table,
- is_lavaan = inherits(attributes(x)$model, c("lavaan", "blavaan"))
+ is_lavaan = inherits(attributes(x)[["model"]], c("lavaan", "blavaan"))
)
do.call(rbind, final_table)
} else {
diff --git a/R/utils_model_parameters.R b/R/utils_model_parameters.R
index 07037d0dc..eaa1373ec 100644
--- a/R/utils_model_parameters.R
+++ b/R/utils_model_parameters.R
@@ -10,7 +10,7 @@
iterations = 1000,
ci_method = NULL,
p_adjust = NULL,
- summary = FALSE,
+ include_info = FALSE,
verbose = TRUE,
group_level = FALSE,
wb_component = FALSE,
@@ -54,7 +54,7 @@
attr(params, "robust_vcov") <- isTRUE(list(...)$robust) || "vcov" %in% names(list(...))
attr(params, "ignore_group") <- isFALSE(group_level)
attr(params, "ran_pars") <- isFALSE(group_level)
- attr(params, "show_summary") <- isTRUE(summary)
+ attr(params, "show_summary") <- isTRUE(include_info)
attr(params, "log_link") <- isTRUE(grepl("log", info$link_function, fixed = TRUE))
attr(params, "logit_link") <- isTRUE(identical(info$link_function, "logit"))
@@ -77,10 +77,12 @@
}
- # for summaries, add R2
- if (isTRUE(summary) && requireNamespace("performance", quietly = TRUE)) {
+ # for additional infos, add R2, RMSE
+ if (isTRUE(include_info) && requireNamespace("performance", quietly = TRUE)) {
rsq <- .safe(suppressWarnings(performance::r2(model)))
attr(params, "r2") <- rsq
+ rmse <- .safe(performance::performance_rmse(model))
+ attr(params, "rmse") <- rmse
}
@@ -98,7 +100,7 @@
# weighted nobs
weighted_nobs <- .safe({
- w <- insight::get_weights(model, na_rm = TRUE, null_as_ones = TRUE)
+ w <- insight::get_weights(model, remove_na = TRUE, null_as_ones = TRUE)
round(sum(w))
})
attr(params, "weighted_nobs") <- weighted_nobs
diff --git a/R/utils_pca_efa.R b/R/utils_pca_efa.R
index 440f74939..68eed7452 100644
--- a/R/utils_pca_efa.R
+++ b/R/utils_pca_efa.R
@@ -178,7 +178,7 @@ predict.parameters_efa <- function(object,
out <- as.matrix(newdata) %*% as.matrix(attri$model$loadings)
out <- stats::setNames(as.data.frame(out), paste0("Component", seq_len(ncol(out))))
} else if (inherits(attri$model, c("psych", "fa", "principal"))) {
- out <- as.data.frame(stats::predict(attri$model, data = newdata, ...))
+ out <- as.data.frame(stats::predict(attri$model, data = newdata[rownames(attri$model$weights)], ...))
} else {
out <- as.data.frame(stats::predict(attri$model, newdata = newdata, ...))
}
@@ -352,13 +352,20 @@ print.parameters_omega_summary <- function(x, ...) {
footer <- c(.text_components_variance(x, sep = ifelse(format == "markdown", "", "\n")), "yellow")
}
+ # alignment?
+ if (is.null(labels)) {
+ alignment <- NULL
+ } else {
+ alignment <- paste(c("ll", rep("r", ncol(x) - 2)), collapse = "")
+ }
+
insight::export_table(
x,
digits = digits,
format = format,
caption = table_caption,
footer = footer,
- align = "firstleft",
+ align = alignment,
...
)
}
diff --git a/README.Rmd b/README.Rmd
index 29b32bebb..6cde9fb81 100644
--- a/README.Rmd
+++ b/README.Rmd
@@ -1,5 +1,7 @@
---
output: github_document
+bibliography: paper/paper.bib
+csl: paper/apa.csl
---
```{r, warning=FALSE, message=FALSE, echo = FALSE}
@@ -21,8 +23,7 @@ library(parameters)
[![DOI](https://joss.theoj.org/papers/10.21105/joss.02445/status.svg)](https://doi.org/10.21105/joss.02445)
[![downloads](https://cranlogs.r-pkg.org/badges/parameters)](https://cran.r-project.org/package=parameters)
-[![total](https://cranlogs.r-pkg.org/badges/grand-total/parameters)](https://cranlogs.r-pkg.org/)
-[![status](https://tinyverse.netlify.com/badge/parameters)](https://CRAN.R-project.org/package=parameters)
+[![total](https://cranlogs.r-pkg.org/badges/grand-total/parameters)](https://cranlogs.r-pkg.org/)
***Describe and understand your model's parameters!***
@@ -50,7 +51,7 @@ Development | GitHub | `remotes::install_github("easystats/parameters")`
[![Documentation](https://img.shields.io/badge/documentation-parameters-orange.svg?colorB=E91E63)](https://easystats.github.io/parameters/)
[![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/)
-[![Features](https://img.shields.io/badge/features-parameters-orange.svg?colorB=2196F3)](https://easystats.github.io/parameters/reference/index.html)
+[![Features](https://img.shields.io/badge/features-parameters-orange.svg?colorB=2196F3)](https://easystats.github.io/parameters/reference/index.html)
Click on the buttons above to access the package [documentation](https://easystats.github.io/parameters/) and the [easystats blog](https://easystats.github.io/blog/posts/), and check-out these vignettes:
@@ -70,14 +71,14 @@ In case you want to file an issue or contribute in another way to the package, p
# Features
-
+
## Model's parameters description
```{r echo=FALSE, fig.align='center', dpi=96}
knitr::include_graphics("man/figures/figure1.png")
```
-The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (that can be accessed via the `parameters()` shortcut) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered as a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences:
+The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (that can be accessed via the `parameters()` shortcut) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered as a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences:
- The column names of the returned data frame are *specific* to their content. For instance, the column containing the statistic is named following the statistic name, i.e., *t*, *z*, etc., instead of a generic name such as *statistic* (however, you can get standardized (generic) column names using [`standardize_names()`](https://easystats.github.io/insight/reference/standardize_names.html)).
- It is able to compute or extract indices not available by default, such as *p-values*, *CIs*, etc.
@@ -139,6 +140,23 @@ lm(disp ~ ., data = mtcars) |>
model_parameters()
```
+
+## Statistical inference - how to quantify evidence
+There is no standardized approach to drawing conclusions based on the available data and statistical models. A frequently chosen but also much criticized approach is to evaluate results based on their statistical significance [@amrhein_earth_2017].
+
+A more sophisticated way would be to test whether estimated effects exceed the "smallest effect size of interest", to avoid even the smallest effects being considered relevant simply because they are statistically significant, but clinically or practically irrelevant [@lakens2020equivalence;@lakens_improving_2022]. A rather unconventional approach, which is nevertheless advocated by various authors, is to interpret results from classical regression models in terms of probabilities, similar to the usual approach in Bayesian statistics [@greenland_aid_2022;@rafi_semantic_2020;@schweder_confidence_2018;@schweder_frequentist_2003;@vos_frequentist_2022].
+
+The _parameters_ package provides several options or functions to aid statistical inference. These are, for example:
+
+- [`equivalence_test()`](https://easystats.github.io/parameters/reference/equivalence_test.lm.html), to compute the (conditional) equivalence test for frequentist models
+- [`p_significance()`](https://easystats.github.io/parameters/reference/p_significance.lm.html), to compute the probability of *practical significance*, which can be conceptualized as a unidirectional equivalence test
+- [`p_function()`](https://easystats.github.io/parameters/reference/p_function.html), or _consonance function_, to compute p-values and compatibility (confidence) intervals for statistical models
+- the `pd` argument (setting `pd = TRUE`) in `model_parameters()` includes a column with the *probability of direction*, i.e. the probability that a parameter is strictly positive or negative. See [`bayestestR::p_direction()`](https://easystats.github.io/bayestestR/reference/p_direction.html) for details.
+- the `s_value` argument (setting `s_value = TRUE`) in `model_parameters()` replaces the p-values with their related _S_-values [@rafi_semantic_2020]
+- finally, it is possible to generate distributions of model coefficients by generating bootstrap-samples (setting `bootstrap = TRUE`) or simulating draws from model coefficients using [`simulate_model()`](https://easystats.github.io/parameters/reference/simulate_model.html). These samples can then be treated as "posterior samples" and used in many functions from the **bayestestR** package.
+
+Most of the above shown options or functions derive from methods originally implemented for Bayesian models [@makowski_indices_2019]. However, assuming that model assumptions are met (which means, the model fits well to the data, the correct model is chosen that reflects the data generating process (distributional model family) etc.), it seems appropriate to interpret results from classical frequentist models in a "Bayesian way" (more details: documentation in [`p_function()`](https://easystats.github.io/parameters/reference/p_function.html)).
+
## Citation
In order to cite this package, please use the following command:
@@ -148,5 +166,7 @@ citation("parameters")
```
## Code of Conduct
-
+
Please note that the parameters project is released with a [Contributor Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/). By contributing to this project, you agree to abide by its terms.
+
+## References
diff --git a/README.md b/README.md
index 3b150d4aa..9e2ca989e 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,6 @@
[![DOI](https://joss.theoj.org/papers/10.21105/joss.02445/status.svg)](https://doi.org/10.21105/joss.02445)
[![downloads](https://cranlogs.r-pkg.org/badges/parameters)](https://cran.r-project.org/package=parameters)
[![total](https://cranlogs.r-pkg.org/badges/grand-total/parameters)](https://cranlogs.r-pkg.org/)
-[![status](https://tinyverse.netlify.com/badge/parameters)](https://CRAN.R-project.org/package=parameters)
***Describe and understand your model’s parameters!***
@@ -32,11 +31,11 @@ of (model) objects from many different packages.
badge](https://easystats.r-universe.dev/badges/parameters)](https://easystats.r-universe.dev)
[![R-CMD-check](https://github.com/easystats/parameters/workflows/R-CMD-check/badge.svg?branch=main)](https://github.com/easystats/parameters/actions)
-| Type | Source | Command |
-|-------------|--------------|------------------------------------------------------------------------------|
-| Release | CRAN | `install.packages("parameters")` |
+| Type | Source | Command |
+|----|----|----|
+| Release | CRAN | `install.packages("parameters")` |
| Development | r - universe | `install.packages("parameters", repos = "https://easystats.r-universe.dev")` |
-| Development | GitHub | `remotes::install_github("easystats/parameters")` |
+| Development | GitHub | `remotes::install_github("easystats/parameters")` |
> **Tip**
>
@@ -196,15 +195,15 @@ model <- psych::fa(attitude, nfactors = 3)
model_parameters(model)
#> # Rotated loadings from Factor Analysis (oblimin-rotation)
#>
-#> Variable | MR1 | MR2 | MR3 | Complexity | Uniqueness
+#> Variable | MR1 | MR2 | MR3 | Complexity | Uniqueness
#> ------------------------------------------------------------
-#> rating | 0.90 | -0.07 | -0.05 | 1.02 | 0.23
-#> complaints | 0.97 | -0.06 | 0.04 | 1.01 | 0.10
-#> privileges | 0.44 | 0.25 | -0.05 | 1.64 | 0.65
-#> learning | 0.47 | 0.54 | -0.28 | 2.51 | 0.24
-#> raises | 0.55 | 0.43 | 0.25 | 2.35 | 0.23
-#> critical | 0.16 | 0.17 | 0.48 | 1.46 | 0.67
-#> advance | -0.11 | 0.91 | 0.07 | 1.04 | 0.22
+#> rating | 0.90 | -0.07 | -0.05 | 1.02 | 0.23
+#> complaints | 0.97 | -0.06 | 0.04 | 1.01 | 0.10
+#> privileges | 0.44 | 0.25 | -0.05 | 1.64 | 0.65
+#> learning | 0.47 | 0.54 | -0.28 | 2.51 | 0.24
+#> raises | 0.55 | 0.43 | 0.25 | 2.35 | 0.23
+#> critical | 0.16 | 0.17 | 0.48 | 1.46 | 0.67
+#> advance | -0.11 | 0.91 | 0.07 | 1.04 | 0.22
#>
#> The 3 latent factors (oblimin rotation) accounted for 66.60% of the total variance of the original data (MR1 = 38.19%, MR2 = 22.69%, MR3 = 5.72%).
```
@@ -231,6 +230,59 @@ lm(disp ~ ., data = mtcars) |>
#> carb | -28.75 | 5.60 | [ -40.28, -17.23] | -5.13 | < .001
```
+## Statistical inference - how to quantify evidence
+
+There is no standardized approach to drawing conclusions based on the
+available data and statistical models. A frequently chosen but also much
+criticized approach is to evaluate results based on their statistical
+significance (Amrhein, Korner-Nievergelt, & Roth, 2017).
+
+A more sophisticated way would be to test whether estimated effects
+exceed the “smallest effect size of interest”, to avoid even the
+smallest effects being considered relevant simply because they are
+statistically significant, but clinically or practically irrelevant
+(Lakens, 2024; Lakens, Scheel, & Isager, 2018). A rather unconventional
+approach, which is nevertheless advocated by various authors, is to
+interpret results from classical regression models in terms of
+probabilities, similar to the usual approach in Bayesian statistics
+(Greenland, Rafi, Matthews, & Higgs, 2022; Rafi & Greenland, 2020;
+Schweder, 2018; Schweder & Hjort, 2003; Vos & Holbert, 2022).
+
+The *parameters* package provides several options or functions to aid
+statistical inference. These are, for example:
+
+- [`equivalence_test()`](https://easystats.github.io/parameters/reference/equivalence_test.lm.html),
+ to compute the (conditional) equivalence test for frequentist models
+- [`p_significance()`](https://easystats.github.io/parameters/reference/p_significance.lm.html),
+ to compute the probability of *practical significance*, which can be
+ conceptualized as a unidirectional equivalence test
+- [`p_function()`](https://easystats.github.io/parameters/reference/p_function.html),
+ or *consonance function*, to compute p-values and compatibility
+ (confidence) intervals for statistical models
+- the `pd` argument (setting `pd = TRUE`) in `model_parameters()`
+ includes a column with the *probability of direction*, i.e. the
+ probability that a parameter is strictly positive or negative. See
+ [`bayestestR::p_direction()`](https://easystats.github.io/bayestestR/reference/p_direction.html)
+ for details.
+- the `s_value` argument (setting `s_value = TRUE`) in
+ `model_parameters()` replaces the p-values with their related
+  *S*-values (Rafi & Greenland, 2020)
+- finally, it is possible to generate distributions of model
+  coefficients by generating bootstrap samples (setting
+ `bootstrap = TRUE`) or simulating draws from model coefficients using
+ [`simulate_model()`](https://easystats.github.io/parameters/reference/simulate_model.html).
+ These samples can then be treated as “posterior samples” and used in
+ many functions from the **bayestestR** package.
+
+Most of the options and functions shown above derive from methods
+originally implemented for Bayesian models (Makowski, Ben-Shachar, Chen,
+& Lüdecke, 2019). However, assuming that model assumptions are met
+(which means, the model fits well to the data, the correct model is
+chosen that reflects the data generating process (distributional model
+family) etc.), it seems appropriate to interpret results from classical
+frequentist models in a “Bayesian way” (more details: documentation in
+[`p_function()`](https://easystats.github.io/parameters/reference/p_function.html)).
+
## Citation
In order to cite this package, please use the following command:
@@ -264,3 +316,85 @@ Please note that the parameters project is released with a [Contributor
Code of
Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/).
By contributing to this project, you agree to abide by its terms.
+
+## References
+
+