@article{2730,
  title = {A Blocked-CAT Procedure for CD-CAT},
  journal = {Applied Psychological Measurement},
  volume = {44},
  number = {1},
  year = {2020},
  pages = {49-64},
  abstract = {This article introduces a blocked-design procedure for cognitive diagnosis computerized adaptive testing (CD-CAT), which allows examinees to review items and change their answers during test administration. Four blocking versions of the new procedure were proposed. In addition, the impact of several factors, namely, item quality, generating model, block size, and test length, on the classification rates was investigated. Three popular item selection indices in CD-CAT were used and their efficiency compared using the new procedure. An additional study was carried out to examine the potential benefit of item review. The results showed that the new procedure is promising in that allowing item review resulted only in a small loss in attribute classification accuracy under some conditions. Moreover, using a blocked-design CD-CAT is beneficial to the extent that it alleviates the negative impact of test anxiety on examinees' true performance.},
  doi = {10.1177/0146621619835500},
  url = {https://doi.org/10.1177/0146621619835500},
  author = {Mehmet Kaplan and Jimmy de la Torre}
}

@article{2723,
  title = {Computerized Adaptive Testing for Cognitively Based Multiple-Choice Data},
  journal = {Applied Psychological Measurement},
  volume = {43},
  number = {5},
  year = {2019},
  pages = {388-401},
  abstract = {Cognitive diagnosis models (CDMs) are latent class models that hold great promise for providing diagnostic information about student knowledge profiles. The increasing use of computers in classrooms enhances the advantages of CDMs for more efficient diagnostic testing by using adaptive algorithms, referred to as cognitive diagnosis computerized adaptive testing (CD-CAT). When multiple-choice items are involved, CD-CAT can be further improved by using polytomous scoring (i.e., considering the specific options students choose), instead of dichotomous scoring (i.e., marking answers as either right or wrong). In this study, the authors propose and evaluate the performance of the Jensen--Shannon divergence (JSD) index as an item selection method for the multiple-choice deterministic inputs, noisy ``and'' gate (MC-DINA) model. Attribute classification accuracy and item usage are evaluated under different conditions of item quality and test termination rule. The proposed approach is compared with the random selection method and an approximate approach based on dichotomized responses. The results show that under the MC-DINA model, JSD improves the attribute classification accuracy significantly by considering the information from distractors, even with a very short test length. This result has important implications in practical classroom settings as it can allow for dramatically reduced testing times, thus resulting in more targeted learning opportunities.},
  doi = {10.1177/0146621618798665},
  url = {https://doi.org/10.1177/0146621618798665},
  author = {Hulya D. Yigit and Miguel A. Sorrel and Jimmy de la Torre}
}