@article{302,
  title = {The impact of receiving the same items on consecutive computer adaptive test administrations},
  journal = {Journal of Applied Measurement},
  volume = {1},
  number = {2},
  year = {2000},
  pages = {131-151},
  abstract = {Addresses item exposure in a computerized adaptive test (CAT) when the item selection algorithm is permitted to present examinees with questions that they have already been asked in a previous test administration. The data were from a national certification examination in medical technology. The responses of 178 repeat examinees were compared. The results indicate that the combined use of an adaptive algorithm to select items and latent trait theory to estimate person ability provides substantial protection from score contamination. The implications for constraints that prohibit examinees from seeing an item twice are discussed.},
  author = {O{\textquoteright}Neill, T. and Lunz, M. E. and Thiede, K.}
}

@inbook{26,
  title = {CAT for certification and licensure},
  booktitle = {Innovations in computerized assessment},
  year = {1999},
  pages = {67-91},
  publisher = {Lawrence Erlbaum Associates},
  address = {Mahwah, NJ},
  abstract = {This chapter discusses implementing computerized adaptive testing (CAT) for high-stakes examinations that determine whether or not a particular candidate will be certified or licensed. The experience of several boards that have chosen to administer their licensure or certification examinations using the principles of CAT illustrates the process of moving into this mode of administration. Examples of the variety of options that can be utilized within a CAT administration are presented, the decisions that boards must make to implement CAT are discussed, and a timetable for completing the tasks that need to be accomplished is provided. In addition to the theoretical aspects of CAT, practical issues and problems are reviewed.},
  keywords = {computerized adaptive testing},
  author = {Bergstrom, Betty A. and Lunz, M. E.}
}

@conference{1037,
  title = {Patterns of item exposure using a randomized CAT algorithm},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education},
  year = {1998},
  address = {San Diego, CA},
  author = {Lunz, M. E. and Stahl, J. A.}
}

@article{267,
  title = {Validity of item selection: A comparison of automated computerized adaptive and manual paper and pencil examinations},
  journal = {Teaching and Learning in Medicine},
  volume = {8},
  number = {3},
  year = {1996},
  pages = {152-157},
  author = {Lunz, M. E. and Deville, C. W.}
}

@article{263,
  title = {Computerized adaptive testing: Tracking candidate response patterns},
  journal = {Journal of Educational Computing Research},
  volume = {13},
  number = {2},
  year = {1995},
  pages = {151-162},
  abstract = {Tracked the effect of candidate response patterns on a computerized adaptive test. Data were from a certification examination in laboratory science administered in 1992 to 155 candidates, using a computerized adaptive algorithm. The 90-item certification examination was divided into 9 units of 10 items each to track the pattern of initial responses and response alterations on ability estimates and test precision across the 9 test units. The precision of the test was affected most by response alterations during early segments of the test. While candidates generally benefited from altering responses, individual candidates showed different patterns of response alterations across test segments. Test precision was minimally affected, suggesting that the tailoring of computerized adaptive testing is robust to response alterations.},
  author = {Lunz, M. E. and Bergstrom, Betty A.}
}
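The two abstracts above (O'Neill et al.; Lunz and Bergstrom) both lean on Rasch latent trait estimation to argue that ability estimates are robust to repeated items and to altered responses. As a rough illustration only, not the authors' code, the following Python sketch estimates Rasch ability by Newton-Raphson and re-estimates after one response is altered; the function name and item values are invented for the example.

    import math

    def rasch_mle(responses, difficulties, theta=0.0, iters=25):
        """Maximum-likelihood Rasch ability estimate and its standard error.
        responses: 0/1 scores; difficulties: matching item difficulties in logits.
        (Assumes a mixed score vector; a perfect or zero score has no finite MLE.)"""
        for _ in range(iters):
            p = [1 / (1 + math.exp(b - theta)) for b in difficulties]
            info = sum(q * (1 - q) for q in p)          # test information at theta
            theta += (sum(responses) - sum(p)) / info   # Newton-Raphson step
        p = [1 / (1 + math.exp(b - theta)) for b in difficulties]
        sem = 1 / math.sqrt(sum(q * (1 - q) for q in p))
        return theta, sem

    # Hypothetical 10-item segment: alter one early response and re-estimate.
    b = [-1.5, -1.0, -0.5, 0.0, 0.0, 0.5, 0.5, 1.0, 1.5, 2.0]
    x = [1, 1, 1, 0, 1, 1, 0, 0, 1, 0]
    before = rasch_mle(x, b)
    x[3] = 1                                            # candidate changes an answer on review
    after = rasch_mle(x, b)
    print(before, after)                                # modest shift in theta, similar SEM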
@conference{1036,
  title = {Equating computerized adaptive certification examinations: The Board of Registry series of studies},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education},
  year = {1995},
  address = {San Francisco, CA},
  author = {Lunz, M. E. and Bergstrom, Betty A.}
}

@article{629,
  title = {Computer adaptive testing},
  journal = {International Journal of Educational Research},
  volume = {6},
  year = {1994},
  pages = {623-634},
  author = {Lunz, M. E. and Bergstrom, Betty A. and Gershon, R. C.}
}

@article{376,
  title = {The effect of review on the psychometric characteristics of computerized adaptive tests},
  journal = {Applied Measurement in Education},
  volume = {7},
  number = {3},
  year = {1994},
  pages = {211-222},
  abstract = {Explored the effect of reviewing items and altering responses on examinee ability estimates, test precision, test information, decision confidence, and pass/fail status for computerized adaptive tests. Two different populations of examinees took different computerized certification examinations. For purposes of analysis, each population was divided into 3 ability groups (high, medium, and low). Ability measures before and after review were highly correlated, but slightly lower decision confidence was found after review. Pass/fail status was most affected for examinees with estimates close to the pass point. Decisions remained the same for 94\% of the examinees. Test precision is only slightly affected by review, and the average information loss can be recovered by the addition of one item.},
  author = {Stone, G. E. and Lunz, M. E.}
}

@article{262,
  title = {An empirical study of computerized adaptive test administration conditions},
  journal = {Journal of Educational Measurement},
  volume = {31},
  number = {3},
  year = {1994},
  month = {Fall},
  pages = {251-263},
  author = {Lunz, M. E. and Bergstrom, Betty A.}
}
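The precision claims in the Stone and Lunz abstract above, in particular that "the average information loss can be recovered by the addition of one item", follow from the Rasch test information function. As a sketch (the notation is supplied here, not taken from the entry):

\[
p_i(\theta) = \frac{1}{1 + e^{-(\theta - b_i)}}, \qquad
I(\theta) = \sum_{i=1}^{n} p_i(\theta)\bigl(1 - p_i(\theta)\bigr), \qquad
\mathrm{SEM}(\theta) = \frac{1}{\sqrt{I(\theta)}} .
\]

Since a single on-target item (\(p_i \approx .5\)) contributes at most 0.25 to \(I(\theta)\), a review-induced information loss of that order is offset by administering one additional item.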
@inbook{25,
  title = {The equivalence of Rasch item calibrations and ability estimates across modes of administration},
  booktitle = {Objective measurement: Theory into practice},
  volume = {2},
  year = {1994},
  pages = {122-128},
  publisher = {Ablex Publishing Co.},
  address = {Norwood, NJ},
  keywords = {computerized adaptive testing},
  author = {Bergstrom, Betty A. and Lunz, M. E.}
}

@conference{1185,
  title = {Item calibration considerations: A comparison of item calibrations on written and computerized adaptive examinations},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year = {1994},
  address = {New Orleans, LA},
  author = {Stone, G. E. and Lunz, M. E.}
}

@inbook{266,
  title = {Reliability of alternate computer adaptive tests},
  booktitle = {Objective measurement: Theory into practice},
  volume = {2},
  year = {1994},
  publisher = {Ablex Publishing Co.},
  address = {Norwood, NJ},
  author = {Lunz, M. E. and Bergstrom, Betty A. and Wright, B. D.}
}

@conference{1038,
  title = {Test targeting and precision before and after review on computer-adaptive tests},
  booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education},
  year = {1993},
  address = {Atlanta, GA},
  author = {Lunz, M. E. and Stahl, J. A. and Bergstrom, Betty A.}
}

@article{27,
  title = {Altering the level of difficulty in computer adaptive testing},
  journal = {Applied Measurement in Education},
  volume = {5},
  number = {2},
  year = {1992},
  pages = {137-149},
  abstract = {Examines the effect of altering test difficulty on examinee ability measures and test length in a computer adaptive test. The 225 subjects were randomly assigned to 3 test difficulty conditions and given a variable-length computer adaptive test. Examinees in the hard, medium, and easy test conditions took a test targeted at the 50\%, 60\%, or 70\% probability of a correct response. The results show that altering the probability of a correct response does not affect estimation of examinee ability and that taking an easier computer adaptive test only slightly increases the number of items necessary to reach specified levels of precision.},
  keywords = {computerized adaptive testing},
  author = {Bergstrom, Betty A. and Lunz, M. E. and Gershon, R. C.}
}

@article{24,
  title = {Confidence in pass/fail decisions for computer adaptive and paper and pencil examinations},
  journal = {Evaluation and the Health Professions},
  volume = {15},
  number = {4},
  year = {1992},
  pages = {453-464},
  abstract = {Compared the level of confidence in pass/fail decisions obtained with computer adaptive tests (CADTs) and pencil-and-paper tests (PPTs). 600 medical technology students took a variable-length CADT and 2 fixed-length PPTs. The CADT was stopped when the examinee ability estimate was either 1.3 times the standard error of measurement above or below the pass/fail point or when a maximum test length was reached. Results show that greater confidence in the accuracy of the pass/fail decisions was obtained for more examinees when the CADT implemented a 90\% confidence stopping rule than with PPTs of comparable test length.},
  author = {Bergstrom, Betty A. and Lunz, M. E.}
}
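The two preceding abstracts describe the operational machinery of these examinations: items targeted at a chosen probability of a correct response (Bergstrom, Lunz, and Gershon) and a variable-length test stopped once the ability estimate sits 1.3 standard errors from the pass point (Bergstrom and Lunz). Below is a minimal Python sketch of such a pass/fail CAT loop built from those stated rules; it is an illustration, not the boards' algorithm, and every function name, the clamped estimator, and the simulated examinee are assumptions added here.

    import math
    import random

    def rasch_theta(xs, bs):
        """Newton-Raphson Rasch ability estimate; theta is clamped to [-4, 4]
        so the early all-correct/all-wrong score vectors a CAT produces stay
        finite (a common expedient, assumed here rather than taken from the papers)."""
        theta = 0.0
        for _ in range(20):
            p = [1 / (1 + math.exp(b - theta)) for b in bs]
            info = max(sum(q * (1 - q) for q in p), 1e-6)
            theta = max(-4.0, min(4.0, theta + (sum(xs) - sum(p)) / info))
        p = [1 / (1 + math.exp(b - theta)) for b in bs]
        return theta, 1 / math.sqrt(max(sum(q * (1 - q) for q in p), 1e-6))

    def select_item(theta, pool, target_p=0.6):
        """Under the Rasch model, an item answered correctly with probability
        target_p by a person at theta has difficulty theta - ln(target_p/(1-target_p));
        this is how the 50%/60%/70% targeting conditions translate into logits."""
        want = theta - math.log(target_p / (1 - target_p))
        return min(pool, key=lambda b: abs(b - want))

    def passfail_cat(answer, pool, cut=0.0, z=1.3, max_items=50):
        """Variable-length pass/fail CAT: stop once the estimate is z standard
        errors above or below the cut score (a 90% confidence band at z = 1.3),
        or when the maximum test length is reached."""
        pool, xs, bs = list(pool), [], []
        theta, sem = cut, float("inf")       # start the test at the pass point
        while pool and len(bs) < max_items:
            b = select_item(theta, pool)
            pool.remove(b)
            bs.append(b)
            xs.append(answer(b))             # 0/1 response for the administered item
            theta, sem = rasch_theta(xs, bs) # re-estimate after every item
            if abs(theta - cut) >= z * sem:
                break
        return ("pass" if theta >= cut else "fail"), theta, sem

    # Hypothetical examinee with true ability 1.0 answering probabilistically.
    examinee = lambda b: int(random.random() < 1 / (1 + math.exp(b - 1.0)))
    print(passfail_cat(examinee, [i / 10 for i in range(-30, 31)]))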
@article{265,
  title = {The effect of review on student ability and test efficiency for computerized adaptive tests},
  journal = {Applied Psychological Measurement},
  volume = {16},
  number = {1},
  year = {1992},
  pages = {33-40},
  abstract = {220 students were randomly assigned to a review condition for a medical technology test; their test instructions indicated that each item must be answered when presented, but that the responses could be reviewed and altered at the end of the test. A sample of 492 students did not have the opportunity to review and alter responses. Within the review condition, examinee ability estimates before and after review were correlated .98. The average efficiency of the test was decreased by 1\% after review. Approximately 32\% of the examinees improved their ability estimates after review but did not change their pass/fail status. Disallowing review on adaptive tests administered under these rules is not supported by these data.},
  author = {Lunz, M. E. and Bergstrom, Betty A. and Wright, B. D.}
}

@article{628,
  title = {Comparability of decisions for computer adaptive and written examinations},
  journal = {Journal of Allied Health},
  volume = {20},
  year = {1991},
  pages = {15-23},
  author = {Lunz, M. E. and Bergstrom, Betty A.}
}

@booklet{1329,
  title = {Comparisons of computer adaptive and pencil and paper tests},
  year = {1991},
  note = {Unpublished manuscript},
  address = {Chicago, IL: American Society of Clinical Pathologists},
  author = {Bergstrom, Betty A. and Lunz, M. E.}
}

@conference{848,
  title = {Confidence in pass/fail decisions for computer adaptive and paper and pencil examinations},
  booktitle = {Paper presented at the annual meeting of the American Educational Research Association},
  year = {1991},
  address = {Chicago, IL},
  author = {Bergstrom, Betty A. and Lunz, M. E.}
}

@conference{847,
  title = {The stability of Rasch pencil and paper item calibrations on computer adaptive tests},
  booktitle = {Paper presented at the Midwest Objective Measurement Seminar},
  year = {1990},
  address = {Chicago, IL},
  author = {Bergstrom, Betty A. and Lunz, M. E.}
}

@proceedings{264,
  title = {Test-retest consistency of computer adaptive tests},
  booktitle = {Annual meeting of the National Council on Measurement in Education},
  year = {1990},
  month = {April},
  address = {Boston, MA},
  author = {Lunz, M. E. and Bergstrom, Betty A. and Gershon, R. C.}
}