@article{198,
  title    = {A randomized experiment to compare conventional, computerized, and computerized adaptive administration of ordinal polytomous attitude items},
  journal  = {Applied Psychological Measurement},
  volume   = {29},
  number   = {3},
  year     = {2005},
  pages    = {159--183},
  abstract = {A total of 520 high school students were randomly assigned to a paper-and-pencil test (PPT), a computerized standard test (CST), or a computerized adaptive test (CAT) version of the Dutch School Attitude Questionnaire (SAQ), consisting of ordinal polytomous items. The CST administered items in the same order as the PPT. The CAT administered all items of three SAQ subscales in adaptive order using Samejima{\textquoteright}s graded response model, so that six different stopping rule settings could be applied afterwards. School marks were used as external criteria. Results showed significant but small multivariate administration mode effects on conventional raw scores and small to medium effects on maximum likelihood latent trait estimates. When the precision of CAT latent trait estimates decreased, correlations with grade point average in general decreased. However, the magnitude of the decrease was not very large as compared to the PPT, the CST, and the CAT without the stopping rule. (PsycINFO Database Record (c) 2005 APA ) (journal abstract)},
  keywords = {Computer Assisted Testing, Test Administration, Test Items},
  author   = {Hol, A. M. and Vorst, H. C. M. and Mellenbergh, G. J.}
}