## GDINA R Package (version 2.9.3; 2022-08-13)
## For tutorials, see https://wenchao-ma.github.io/GDINA
library(GDINA) # load the package
# A simulated dataset from the GDINA package
dat <- sim10GDINA$simdat
Q <- sim10GDINA$simQ
# Fit the LCDM (G-DINA with a logit link) - see also Example 13a on the GDINA help page
est <- GDINA(dat = dat, Q = Q, model = "logitGDINA")
##
## Iter = 1 Max. abs. change = 0.40691 Deviance = 12821.16
## Iter = 2 Max. abs. change = 0.05266 Deviance = 11947.64
## Iter = 3 Max. abs. change = 0.02836 Deviance = 11890.21
## Iter = 4 Max. abs. change = 0.01736 Deviance = 11866.22
## Iter = 5 Max. abs. change = 0.01173 Deviance = 11854.67
## Iter = 6 Max. abs. change = 0.00886 Deviance = 11848.42
## Iter = 7 Max. abs. change = 0.00763 Deviance = 11844.68
## Iter = 8 Max. abs. change = 0.00643 Deviance = 11842.26
## Iter = 9 Max. abs. change = 0.00557 Deviance = 11840.64
## Iter = 10 Max. abs. change = 0.00503 Deviance = 11839.51
## Iter = 11 Max. abs. change = 0.00449 Deviance = 11838.71
## Iter = 12 Max. abs. change = 0.00397 Deviance = 11838.14
## Iter = 13 Max. abs. change = 0.00351 Deviance = 11837.72
## Iter = 14 Max. abs. change = 0.00308 Deviance = 11837.42
## Iter = 15 Max. abs. change = 0.00270 Deviance = 11837.19
## Iter = 16 Max. abs. change = 0.00237 Deviance = 11837.02
## Iter = 17 Max. abs. change = 0.00208 Deviance = 11836.89
## Iter = 18 Max. abs. change = 0.00183 Deviance = 11836.79
## Iter = 19 Max. abs. change = 0.00161 Deviance = 11836.72
## Iter = 20 Max. abs. change = 0.00142 Deviance = 11836.66
## Iter = 21 Max. abs. change = 0.00125 Deviance = 11836.61
## Iter = 22 Max. abs. change = 0.00110 Deviance = 11836.57
## Iter = 23 Max. abs. change = 0.00097 Deviance = 11836.54
## Iter = 24 Max. abs. change = 0.00086 Deviance = 11836.52
## Iter = 25 Max. abs. change = 0.00077 Deviance = 11836.50
## Iter = 26 Max. abs. change = 0.00069 Deviance = 11836.49
## Iter = 27 Max. abs. change = 0.00062 Deviance = 11836.47
## Iter = 28 Max. abs. change = 0.00055 Deviance = 11836.47
## Iter = 29 Max. abs. change = 0.00049 Deviance = 11836.46
## Iter = 30 Max. abs. change = 0.00044 Deviance = 11836.45
## Iter = 31 Max. abs. change = 0.00040 Deviance = 11836.45
## Iter = 32 Max. abs. change = 0.00036 Deviance = 11836.44
## Iter = 33 Max. abs. change = 0.00032 Deviance = 11836.44
## Iter = 34 Max. abs. change = 0.00029 Deviance = 11836.44
## Iter = 35 Max. abs. change = 0.00026 Deviance = 11836.43
## Iter = 36 Max. abs. change = 0.00024 Deviance = 11836.43
## Iter = 37 Max. abs. change = 0.00021 Deviance = 11836.43
## Iter = 38 Max. abs. change = 0.00019 Deviance = 11836.43
## Iter = 39 Max. abs. change = 0.00017 Deviance = 11836.43
## Iter = 40 Max. abs. change = 0.00016 Deviance = 11836.43
## Iter = 41 Max. abs. change = 0.00014 Deviance = 11836.43
## Iter = 42 Max. abs. change = 0.00013 Deviance = 11836.42
## Iter = 43 Max. abs. change = 0.00012 Deviance = 11836.42
## Iter = 44 Max. abs. change = 0.00011 Deviance = 11836.42
## Iter = 45 Max. abs. change = 0.00007 Deviance = 11836.42
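# The per-iteration progress above can be suppressed when refitting; a
# minimal sketch assuming the verbose argument of GDINA() (0 = silent,
# see ?GDINA). est_quiet is an illustrative name; output not shown.
est_quiet <- GDINA(dat = dat, Q = Q, model = "logitGDINA", verbose = 0)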
#####################################
#
# Summary Information
#
#####################################
# print estimation information
est
## Call:
## GDINA(dat = dat, Q = Q, model = "logitGDINA")
##
## GDINA version 2.9.3 (2022-08-13)
## ===============================================
## Data
## -----------------------------------------------
## # of individuals groups items
## 1000 1 10
## ===============================================
## Model
## -----------------------------------------------
## Fitted model(s) = LOGITGDINA
## Attribute structure = saturated
## Attribute level = Dichotomous
## ===============================================
## Estimation
## -----------------------------------------------
## Number of iterations = 45
##
## For the final iteration:
## Max abs change in item success prob. = 0.0001
## Max abs change in mixing proportions = 0.0000
## Change in -2 log-likelihood = 0.0004
## Converged? = TRUE
##
## Time used = 0.7941 secs
##
## Test Fit Statistics
##
## Loglik = -5918.21
##
## AIC = 11926.42 | penalty [2 * p] = 90.00
## BIC = 12147.27 | penalty [log(n) * p] = 310.85
## CAIC = 12192.27 | penalty [(log(n) + 1) * p] = 355.85
## SABIC = 12004.35 | penalty [log((n + 2)/24) * p] = 167.93
##
## No. of parameters (p) = 45
## No. of estimated item parameters = 38
## No. of fixed item parameters = 0
## No. of distribution parameters = 7
##
## Attribute Prevalence
##
## Level0 Level1
## A1 0.5028 0.4972
## A2 0.4974 0.5026
## A3 0.4784 0.5216
AIC(est) # Akaike information criterion
## [1] 11926.42
BIC(est) # Bayesian information criterion
## [1] 12147.27
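# These information criteria follow directly from the deviance, the number
# of parameters (p = 45) and the sample size (n = 1000) reported above;
# a quick arithmetic check (values as in the summary, up to rounding):
deviance(est) + 2 * 45          # AIC = deviance + 2 * p      -> ~11926.42
deviance(est) + log(1000) * 45  # BIC = deviance + log(n) * p -> ~12147.27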
logLik(est) # log-likelihood value
## 'log Lik.' -5918.211 (df=45)
deviance(est) # deviance: -2 log-likelihood
## [1] 11836.42
npar(est) # number of parameters
## No. of total parameters = 45
## No. of population parameters = 7
## No. of free item parameters = 38
## No. of fixed item parameters = 0
nobs(est) # number of observations
## [1] 1000
# discrimination indices
extract(est, "discrim")
## P(1)-P(0) GDI
## Item 1 0.6934000 0.12019719
## Item 2 0.6405404 0.10257018
## Item 3 0.8199023 0.16774545
## Item 4 0.7797420 0.08454996
## Item 5 0.7173777 0.10214834
## Item 6 0.7406439 0.10203372
## Item 7 0.7169010 0.06523459
## Item 8 0.7893212 0.09124838
## Item 9 0.7062054 0.06232610
## Item 10 0.7254147 0.05496863
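# The P(1)-P(0) column is the success probability for examinees mastering
# all required attributes minus that for examinees mastering none, i.e.
# 1 - slip - guessing; a quick cross-check against the gs parameterisation
# shown further below (assumes the printed "guessing"/"slip" column names):
gs <- coef(est, what = "gs")
head(1 - gs[, "slip"] - gs[, "guessing"])  # ~0.6934 0.6405 0.8199 ...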
#####################################
#
# structural parameters
#
#####################################
coef(est) # item probabilities of success for each reduced latent class
## $`Item 1`
## P(0) P(1)
## 0.2052 0.8986
##
## $`Item 2`
## P(0) P(1)
## 0.1391 0.7796
##
## $`Item 3`
## P(0) P(1)
## 0.0893 0.9092
##
## $`Item 4`
## P(00) P(10) P(01) P(11)
## 0.1159 0.2951 0.4741 0.8956
##
## $`Item 5`
## P(00) P(10) P(01) P(11)
## 0.1057 0.0791 0.0904 0.8231
##
## $`Item 6`
## P(00) P(10) P(01) P(11)
## 0.1764 0.9029 0.9302 0.9170
##
## $`Item 7`
## P(00) P(10) P(01) P(11)
## 0.0543 0.4713 0.3923 0.7712
##
## $`Item 8`
## P(00) P(10) P(01) P(11)
## 0.1107 0.2585 0.2730 0.9001
##
## $`Item 9`
## P(00) P(10) P(01) P(11)
## 0.0995 0.3746 0.4189 0.8057
##
## $`Item 10`
## P(000) P(100) P(010) P(001) P(110) P(101) P(011) P(111)
## 0.1757 0.1327 0.2846 0.3823 0.4927 0.4996 0.6734 0.9011
coef(est, withSE = TRUE) # item probabilities of success & standard errors
## $`Item 1`
## P(0) P(1)
## Est. 0.2052 0.8986
## S.E. 0.0258 0.0224
##
## $`Item 2`
## P(0) P(1)
## Est. 0.1391 0.7796
## S.E. 0.0221 0.0247
##
## $`Item 3`
## P(0) P(1)
## Est. 0.0893 0.9092
## S.E. 0.0213 0.0199
##
## $`Item 4`
## P(00) P(10) P(01) P(11)
## Est. 0.1159 0.2951 0.4741 0.8956
## S.E. 0.0279 0.0369 0.0379 0.0282
##
## $`Item 5`
## P(00) P(10) P(01) P(11)
## Est. 0.1057 0.0791 0.0904 0.8231
## S.E. 0.0249 0.0252 0.0256 0.0335
##
## $`Item 6`
## P(00) P(10) P(01) P(11)
## Est. 0.1764 0.9029 0.9302 0.917
## S.E. 0.0412 0.0314 0.0280 0.023
##
## $`Item 7`
## P(00) P(10) P(01) P(11)
## Est. 0.0543 0.4713 0.3923 0.7712
## S.E. 0.0238 0.0393 0.0369 0.0322
##
## $`Item 8`
## P(00) P(10) P(01) P(11)
## Est. 0.1107 0.2585 0.2730 0.9001
## S.E. 0.0262 0.0382 0.0386 0.0349
##
## $`Item 9`
## P(00) P(10) P(01) P(11)
## Est. 0.0995 0.3746 0.4189 0.8057
## S.E. 0.0294 0.0372 0.0362 0.0288
##
## $`Item 10`
## P(000) P(100) P(010) P(001) P(110) P(101) P(011) P(111)
## Est. 0.1757 0.1327 0.2846 0.3823 0.4927 0.4996 0.6734 0.9011
## S.E. 0.0449 0.0570 0.0563 0.0554 0.0567 0.0517 0.0479 0.0391
coef(est, what = "delta") # delta parameters
## $`Item 1`
## d0 d1
## -1.3541 3.5359
##
## $`Item 2`
## d0 d1
## -1.8228 3.0864
##
## $`Item 3`
## d0 d1
## -2.3218 4.6261
##
## $`Item 4`
## d0 d1 d2 d12
## -2.0318 1.1608 1.9281 1.0926
##
## $`Item 5`
## d0 d1 d2 d12
## -2.1351 -0.3202 -0.1733 4.1661
##
## $`Item 6`
## d0 d1 d2 d12
## -1.5409 3.7711 4.1314 -3.9587
##
## $`Item 7`
## d0 d1 d2 d12
## -2.8580 2.7430 2.4202 -1.0903
##
## $`Item 8`
## d0 d1 d2 d12
## -2.0833 1.0295 1.1038 2.1478
##
## $`Item 9`
## d0 d1 d2 d12
## -2.2024 1.6898 1.8750 0.0602
##
## $`Item 10`
## d0 d1 d2 d3 d12 d13 d23 d123
## -1.5459 -0.3318 0.6242 1.0662 1.2243 0.8100 0.5790 -0.2165
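# Under the logit link, the delta parameters are intercepts and (interaction)
# effects on the logit scale, so the success probabilities above are their
# inverse-logit transforms. A minimal check for Item 1 (positional indexing
# is used in case the returned element is not a plain named vector):
d1 <- coef(est, what = "delta")[["Item 1"]]
plogis(d1[1])          # d0 alone -> ~0.2052 = P(0)
plogis(d1[1] + d1[2])  # d0 + d1  -> ~0.8986 = P(1)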
coef(est, what = "delta", withSE = TRUE) # delta parameters
## $`Item 1`
## d0 d1
## Est. -1.3541 3.5359
## S.E. 0.1582 0.3250
##
## $`Item 2`
## d0 d1
## Est. -1.8228 3.0864
## S.E. 0.1847 0.2566
##
## $`Item 3`
## d0 d1
## Est. -2.3218 4.6261
## S.E. 0.2622 0.3866
##
## $`Item 4`
## d0 d1 d2 d12
## Est. -2.0318 1.1608 1.9281 1.0926
## S.E. 0.2722 0.3446 0.3259 0.5189
##
## $`Item 5`
## d0 d1 d2 d12
## Est. -2.1351 -0.3202 -0.1733 4.1661
## S.E. 0.2629 0.4876 0.4253 0.6576
##
## $`Item 6`
## d0 d1 d2 d12
## Est. -1.5409 3.7711 4.1314 -3.9587
## S.E. 0.2836 0.4974 0.5614 0.8120
##
## $`Item 7`
## d0 d1 d2 d12
## Est. -2.8580 2.7430 2.4202 -1.0903
## S.E. 0.4638 0.5127 0.5037 0.5958
##
## $`Item 8`
## d0 d1 d2 d12
## Est. -2.0833 1.0295 1.1038 2.1478
## S.E. 0.2665 0.3501 0.3510 0.6170
##
## $`Item 9`
## d0 d1 d2 d12
## Est. -2.2024 1.6898 1.8750 0.0602
## S.E. 0.3276 0.3895 0.3781 0.4815
##
## $`Item 10`
## d0 d1 d2 d3 d12 d13 d23 d123
## Est. -1.5459 -0.3318 0.6242 1.0662 1.2243 0.8100 0.5790 -0.2165
## S.E. 0.3098 0.6140 0.4381 0.4178 0.7581 0.7325 0.5741 1.0221
coef(est, what = "gs") # guessing and slip parameters
## guessing slip
## Item 1 0.2052 0.1014
## Item 2 0.1391 0.2204
## Item 3 0.0893 0.0908
## Item 4 0.1159 0.1044
## Item 5 0.1057 0.1769
## Item 6 0.1764 0.0830
## Item 7 0.0543 0.2288
## Item 8 0.1107 0.0999
## Item 9 0.0995 0.1943
## Item 10 0.1757 0.0989
coef(est, what = "gs", withSE = TRUE) # guessing and slip parameters & standard errors
## guessing slip SE[guessing] SE[slip]
## Item 1 0.2052 0.1014 0.0258 0.0224
## Item 2 0.1391 0.2204 0.0221 0.0247
## Item 3 0.0893 0.0908 0.0213 0.0199
## Item 4 0.1159 0.1044 0.0279 0.0282
## Item 5 0.1057 0.1769 0.0249 0.0335
## Item 6 0.1764 0.0830 0.0412 0.0230
## Item 7 0.0543 0.2288 0.0238 0.0322
## Item 8 0.1107 0.0999 0.0262 0.0349
## Item 9 0.0995 0.1943 0.0294 0.0288
## Item 10 0.1757 0.0989 0.0449 0.0391
# Estimated proportions of latent classes
coef(est,"lambda")
## p(000) p(100) p(010) p(001) p(110) p(101) p(011) p(111)
## 0.1268 0.1054 0.1149 0.1201 0.1313 0.1452 0.1409 0.1155
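# The attribute prevalence in the summary output is recovered by summing
# these class proportions; e.g. P(A1 = 1) sums the classes 100, 110, 101
# and 111 (positions 2, 5, 6 and 8 in the printed ordering) and agrees
# with the reported 0.4972 up to rounding of the displayed values:
lam <- coef(est, what = "lambda")
sum(lam[c(2, 5, 6, 8)])  # ~0.497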
# success probabilities for each latent class
coef(est,"LCprob")
## 000 100 010 001 110 101 011 111
## Item 1 0.2052 0.8986 0.2052 0.2052 0.8986 0.8986 0.2052 0.8986
## Item 2 0.1391 0.1391 0.7796 0.1391 0.7796 0.1391 0.7796 0.7796
## Item 3 0.0893 0.0893 0.0893 0.9092 0.0893 0.9092 0.9092 0.9092
## Item 4 0.1159 0.2951 0.1159 0.4741 0.2951 0.8956 0.4741 0.8956
## Item 5 0.1057 0.1057 0.0791 0.0904 0.0791 0.0904 0.8231 0.8231
## Item 6 0.1764 0.9029 0.9302 0.1764 0.9170 0.9029 0.9302 0.9170
## Item 7 0.0543 0.4713 0.0543 0.3923 0.4713 0.7712 0.3923 0.7712
## Item 8 0.1107 0.2585 0.2730 0.1107 0.9001 0.2585 0.2730 0.9001
## Item 9 0.0995 0.0995 0.3746 0.4189 0.3746 0.4189 0.8057 0.8057
## Item 10 0.1757 0.1327 0.2846 0.3823 0.4927 0.4996 0.6734 0.9011
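# An item's row varies only over the attributes it measures. Item 1 requires
# just A1 (first row of Q), so its row above takes only two distinct values,
# one for non-masters and one for masters of A1:
Q[1, ]
unique(round(coef(est, what = "LCprob")[1, ], 4))  # ~0.2052 0.8986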
#####################################
#
# person parameters
#
#####################################
head(personparm(est)) # EAP estimates of attribute profiles
## A1 A2 A3
## [1,] 1 0 1
## [2,] 1 1 1
## [3,] 0 1 1
## [4,] 1 1 1
## [5,] 0 0 1
## [6,] 1 0 0
head(personparm(est, what = "MAP")) # MAP estimates of attribute profiles
## A1 A2 A3 multimodes
## 1 1 0 1 FALSE
## 2 1 1 1 FALSE
## 3 0 1 1 FALSE
## 4 1 1 1 FALSE
## 5 0 0 1 FALSE
## 6 0 0 0 FALSE
head(personparm(est, what = "MLE")) # MLE estimates of attribute profiles
## A1 A2 A3 multimodes
## 1 1 0 1 FALSE
## 2 1 1 1 FALSE
## 3 0 1 1 FALSE
## 4 1 1 1 FALSE
## 5 0 0 1 FALSE
## 6 0 0 0 FALSE
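# Marginal attribute mastery probabilities (the quantities plotted for
# selected persons below) can also be extracted directly, assuming the
# "mp" option of personparm(); output not shown:
head(personparm(est, what = "mp"))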
#####################################
#
# Plots
#
#####################################
# plot item response functions for item 10
plot(est, item = 10)
plot(est, item = 10, withSE = TRUE) # with error bars
# plot mastery probabilities for individuals 1, 20 and 50
plot(est, what = "mp", person = c(1, 20, 50))
#####################################
#
# Advanced elements
#
#####################################
head(indlogLik(est)) # individual log-likelihood
## 000 100 010 001 110 101
## [1,] -15.109056 -8.336573 -13.759433 -9.045536 -7.459181 -3.508357
## [2,] -19.066936 -12.294453 -14.951120 -13.176737 -8.650868 -7.639558
## [3,] -15.608337 -10.694426 -11.530667 -10.837410 -10.461132 -9.061423
## [4,] -17.900662 -10.796388 -15.805574 -11.201655 -10.397871 -6.142663
## [5,] -8.584884 -9.817112 -13.094556 -3.587564 -14.212489 -6.865073
## [6,] -6.356466 -7.258860 -9.138207 -9.267417 -6.554160 -11.407484
## 011 111
## [1,] -10.010443 -5.751469
## [2,] -7.209309 -2.950335
## [3,] -5.426950 -8.084817
## [4,] -7.773777 -5.000833
## [5,] -10.990742 -14.743433
## [6,] -14.363688 -13.823287
head(indlogPost(est)) # individual log-posterior
##              000        100        010         001        110        101
## [1,] -11.8422798 -5.254934 -10.591004 -5.83308506 -4.1579459 -0.1064956
## [2,] -16.0550238 -9.467678 -12.037554 -10.21914958 -5.6044962 -4.4925602
## [3,] -10.3821328 -5.653359 -6.402810 -5.66553128 -5.2004692 -3.7001329
## [4,] -13.2032253 -6.284089 -11.206484 -6.55854361 -5.6659748 -1.3101405
## [5,] -4.9967983 -6.414164 -9.604817 -0.05380293 -10.5899437 -3.1419018
## [6,] -0.8339241 -1.921455 -3.714011 -3.79919974 -0.9971581 -5.7498553
## 011 111
## [1,] -6.63844710 -2.57848004
## [2,] -4.09217612 -0.03220906
## [3,] -0.09552615 -2.95239923
## [4,] -2.97111983 -0.39718292
## [5,] -7.29743593 -11.24913440
## [6,] -8.73592504 -8.39453115
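# indlogLik() returns log P(X_i | class), whereas indlogPost() returns the
# normalised log posterior log P(class | X_i); exponentiating each row of
# the latter therefore sums to (approximately) 1:
rowSums(exp(head(indlogPost(est))))  # ~1 for every person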
extract(est,"designmatrix") #design matrix
## [[1]]
## [,1] [,2]
## [1,] 1 0
## [2,] 1 1
##
## [[2]]
## [,1] [,2]
## [1,] 1 0
## [2,] 1 1
##
## [[3]]
## [,1] [,2]
## [1,] 1 0
## [2,] 1 1
##
## [[4]]
## [,1] [,2] [,3] [,4]
## [1,] 1 0 0 0
## [2,] 1 1 0 0
## [3,] 1 0 1 0
## [4,] 1 1 1 1
##
## [[5]]
## [,1] [,2] [,3] [,4]
## [1,] 1 0 0 0
## [2,] 1 1 0 0
## [3,] 1 0 1 0
## [4,] 1 1 1 1
##
## [[6]]
## [,1] [,2] [,3] [,4]
## [1,] 1 0 0 0
## [2,] 1 1 0 0
## [3,] 1 0 1 0
## [4,] 1 1 1 1
##
## [[7]]
## [,1] [,2] [,3] [,4]
## [1,] 1 0 0 0
## [2,] 1 1 0 0
## [3,] 1 0 1 0
## [4,] 1 1 1 1
##
## [[8]]
## [,1] [,2] [,3] [,4]
## [1,] 1 0 0 0
## [2,] 1 1 0 0
## [3,] 1 0 1 0
## [4,] 1 1 1 1
##
## [[9]]
## [,1] [,2] [,3] [,4]
## [1,] 1 0 0 0
## [2,] 1 1 0 0
## [3,] 1 0 1 0
## [4,] 1 1 1 1
##
## [[10]]
## [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
## [1,] 1 0 0 0 0 0 0 0
## [2,] 1 1 0 0 0 0 0 0
## [3,] 1 0 1 0 0 0 0 0
## [4,] 1 0 0 1 0 0 0 0
## [5,] 1 1 1 0 1 0 0 0
## [6,] 1 1 0 1 0 1 0 0
## [7,] 1 0 1 1 0 0 1 0
## [8,] 1 1 1 1 1 1 1 1
extract(est,"linkfunc") #link functions
## [1] "logit" "logit" "logit" "logit" "logit" "logit" "logit" "logit" "logit"
## [10] "logit"
#####################################
#
# Session information
#
#####################################
sessionInfo()
## R version 4.2.0 (2022-04-22 ucrt)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 22000)
##
## Matrix products: default
##
## locale:
## [1] LC_COLLATE=English_United States.utf8
## [2] LC_CTYPE=English_United States.utf8
## [3] LC_MONETARY=English_United States.utf8
## [4] LC_NUMERIC=C
## [5] LC_TIME=English_United States.utf8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] GDINA_2.9.3
##
## loaded via a namespace (and not attached):
## [1] Rcpp_1.0.9 rprojroot_2.0.3 digest_0.6.29
## [4] utf8_1.2.2 mime_0.12 truncnorm_1.0-8
## [7] R6_2.5.1 alabama_2022.4-1 evaluate_0.15
## [10] ggplot2_3.3.6 pillar_1.8.0 rlang_1.0.4
## [13] rstudioapi_0.13 jquerylib_0.1.4 nloptr_2.0.3
## [16] rmarkdown_2.14 pkgdown_2.0.6 textshaping_0.3.6
## [19] desc_1.4.1 labeling_0.4.2 stringr_1.4.0
## [22] munsell_0.5.0 shiny_1.7.2 compiler_4.2.0
## [25] numDeriv_2016.8-1.1 httpuv_1.6.5 xfun_0.31
## [28] pkgconfig_2.0.3 systemfonts_1.0.4 htmltools_0.5.3
## [31] Rsolnp_1.16 tidyselect_1.1.2 tibble_3.1.8
## [34] fansi_1.0.3 dplyr_1.0.9 later_1.3.0
## [37] MASS_7.3-56 grid_4.2.0 jsonlite_1.8.0
## [40] xtable_1.8-4 gtable_0.3.0 lifecycle_1.0.1
## [43] magrittr_2.0.3 scales_1.2.0 cli_3.3.0
## [46] stringi_1.7.8 cachem_1.0.6 farver_2.1.1
## [49] fs_1.5.2 promises_1.2.0.1 bslib_0.4.0
## [52] ellipsis_0.3.2 ragg_1.2.2 vctrs_0.4.1
## [55] generics_0.1.3 tools_4.2.0 glue_1.6.2
## [58] purrr_0.3.4 parallel_4.2.0 fastmap_1.1.0
## [61] yaml_2.3.5 colorspace_2.0-3 shinydashboard_0.7.2
## [64] memoise_2.0.1 knitr_1.39 sass_0.4.2