@article{508,
  title   = {Two-phase item selection procedure for flexible content balancing in {CAT}},
  journal = {Applied Psychological Measurement},
  volume  = {31},
  year    = {2007},
  pages   = {467--482},
  author  = {Cheng, Y. and Chang, Hua-Hua and Yi, Q.}
}

@inproceedings{1285,
  title     = {Identifying practical indices for enhancing item pool security},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education (NCME)},
  year      = {2005},
  address   = {Montreal, Canada},
  author    = {Yi, Q. and Zhang, J. and Chang, Hua-Hua}
}

@article{2133,
  title   = {{a-Stratified} multistage {CAT} design with content-blocking},
  journal = {British Journal of Mathematical and Statistical Psychology},
  volume  = {56},
  year    = {2003},
  pages   = {359--378},
  author  = {Yi, Q. and Chang, Hua-Hua}
}

@inproceedings{1280,
  title     = {Effects of test administration mode on item parameter estimates},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education},
  year      = {2003},
  note      = {{PDF file, 233 KB}},
  address   = {Chicago, IL},
  author    = {Yi, Q. and Harris, D. J. and Wang, T. and Ban, J.-C.}
}

@inproceedings{1283,
  title     = {Implementing the a-stratified method with b blocking in computerized adaptive testing with the generalized partial credit model},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year      = {2003},
  note      = {$\#$YI03-01 {PDF file, 496 KB}},
  address   = {Chicago, IL},
  author    = {Yi, Q. and Wang, T. and Wang, S.}
}

@article{14,
  title    = {Data sparseness and on-line pretest item calibration-scaling methods in {CAT}},
  journal  = {Journal of Educational Measurement},
  volume   = {39},
  number   = {3},
  year     = {2002},
  pages    = {207--218},
  abstract = {Compared and evaluated 3 on-line pretest item calibration-scaling methods (the marginal maximum likelihood estimate with 1 expectation maximization [EM] cycle [OEM] method, the marginal maximum likelihood estimate with multiple EM cycles [MEM] method, and M. L. Stocking{\textquoteright}s Method B) in terms of item parameter recovery when the item responses to the pretest items in the pool are sparse. Simulations of computerized adaptive tests were used to evaluate the results yielded by the three methods. The MEM method produced the smallest average total error in parameter estimation, and the OEM method yielded the largest total error (PsycINFO Database Record (c) 2005 APA )},
  keywords = {Computer Assisted Testing, Educational Measurement, Item Response Theory, Maximum Likelihood, Methodology, Scaling (Testing), Statistical Data},
  author   = {Ban, J.-C. and Hanson, B. A. and Yi, Q. and Harris, D. J.}
}

@inproceedings{1277,
  title     = {Incorporating the {Sympson-Hetter} exposure control method into the a-stratified method with content blocking},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year      = {2002},
  note      = {PDF file, 387 K},
  address   = {New Orleans, LA},
  author    = {Yi, Q.}
}

@inproceedings{1278,
  title     = {{a-stratified} {CAT} design with content-blocking},
  booktitle = {Paper presented at the Annual Meeting of the Psychometric Society},
  year      = {2001},
  note      = {{PDF file, 410 KB}},
  address   = {King of Prussia, PA},
  author    = {Yi, Q. and Chang, Hua-Hua}
}

@article{13,
  title    = {A comparative study of on line pretest item{\textemdash}{Calibration}/scaling methods in computerized adaptive testing},
  journal  = {Journal of Educational Measurement},
  volume   = {38},
  number   = {3},
  year     = {2001},
  pages    = {191--212},
  abstract = {The purpose of this study was to compare and evaluate five on-line pretest item-calibration/scaling methods in computerized adaptive testing (CAT): marginal maximum likelihood estimate with one EM cycle (OEM), marginal maximum likelihood estimate with multiple EM cycles (MEM), Stocking{\textquoteright}s Method A, Stocking{\textquoteright}s Method B, and BILOG/Prior. The five methods were evaluated in terms of item-parameter recovery, using three different sample sizes (300, 1000 and 3000). The MEM method appeared to be the best choice among these, because it produced the smallest parameter-estimation errors for all sample size conditions. MEM and OEM are mathematically similar, although the OEM method produced larger errors. MEM also was preferable to OEM, unless the amount of time involved in iterative computation is a concern. Stocking{\textquoteright}s Method B also worked very well, but it required anchor items that either would increase test lengths or require larger sample sizes depending on test administration design. Until more appropriate ways of handling sparse data are devised, the BILOG/Prior method may not be a reasonable choice for small sample sizes. Stocking{\textquoteright}s Method A had the largest weighted total error, as well as a theoretical weakness (i.e., treating estimated ability as true ability); thus, there appeared to be little reason to use it},
  author   = {Ban, J.-C. and Hanson, B. A. and Wang, T. and Yi, Q. and Harris, D. J.}
}

@inproceedings{1279,
  title     = {Comparison of the {SPRT} and {CMT} procedures in computerized adaptive testing},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year      = {2001},
  address   = {Seattle, WA},
  author    = {Yi, Q. and Hanson, B. A. and Widiatmo, H. and Harris, D. J.}
}

@inproceedings{835,
  title     = {Data sparseness and online pretest calibration/scaling methods in {CAT}},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year      = {2001},
  note      = {Also ACT Research Report 2002-1},
  address   = {Seattle, WA},
  author    = {Ban, J.-C. and Hanson, B. A. and Yi, Q. and Harris, D. J.}
}

@inproceedings{1284,
  title     = {Impact of scoring options for not reached items in {CAT}},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education},
  year      = {2001},
  note      = {PDF file, 232 K},
  address   = {Seattle, WA},
  author    = {Yi, Q. and Widiatmo, H. and Ban, J.-C. and Harris, D. J.}
}

@inproceedings{837,
  title     = {Effects of nonequivalence of item pools on ability estimates in {CAT}},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education},
  year      = {2000},
  note      = {PDF file, 657 K},
  address   = {New Orleans, LA},
  author    = {Ban, J.-C. and Wang, T. and Yi, Q. and Harris, D. J.}
}

@booklet{1584,
  title        = {Multiple stratification {CAT} designs with content control},
  year         = {2000},
  howpublished = {Unpublished manuscript},
  author       = {Yi, Q. and Chang, Hua-Hua}
}

@inproceedings{1244,
  title     = {Adjusting "scores" from a {CAT} following successful item challenges},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year      = {1999},
  note      = {$\#$WA99-01 {PDF file, 150 KB}},
  address   = {Montreal, Canada},
  author    = {Wang, T. and Yi, Q. and Ban, J.-C. and Harris, D. J. and Hanson, B. A.}
}

@inproceedings{836,
  title     = {Comparison of the a-stratified method, the {Sympson-Hetter} method, and the matched difficulty method in {CAT} administration},
  booktitle = {Paper presented at the annual meeting of the Psychometric Society},
  year      = {1999},
  note      = {$\#$BA99-01},
  address   = {Lawrence, KS},
  author    = {Ban, J.-C. and Wang, T. and Yi, Q.}
}

@booklet{1585,
  title        = {Simulating nonmodel-fitting responses in a {CAT} environment},
  year         = {1998},
  howpublished = {Research Report 98-10, Iowa City, IA: ACT, Inc.},
  note         = {$\#$YI-98-10. Also presented at National Council on Measurement in Education, 1999: ERIC No. ED 427 042},
  author       = {Yi, Q. and Nering, M. L.}
}