@article {2751, title = {The (non)Impact of Misfitting Items in Computerized Adaptive Testing}, journal = {Journal of Computerized Adaptive Testing}, volume = {9}, year = {2022}, keywords = {computerized adaptive testing, item fit, three-parameter logistic model}, doi = {10.7333/2211-0902008}, url = {https://jcatpub.net/index.php/jcat/issue/view/26}, author = {Christine E. DeMars} } @article {2728, title = {New Efficient and Practicable Adaptive Designs for Calibrating Items Online}, journal = {Applied Psychological Measurement}, volume = {44}, number = {1}, year = {2020}, pages = {3-16}, abstract = {When calibrating new items online, it is practicable to first compare all new items according to some criterion and then assign the most suitable one to the current examinee who reaches a seeding location. The modified D-optimal design proposed by van der Linden and Ren (denoted as the D-VR design) works within this practicable framework with the aim of directly optimizing the estimation of item parameters. However, the optimal design point for a given new item should be obtained by comparing all examinees in a static examinee pool. Thus, the D-VR design still has room for improvement in calibration efficiency from the perspective of traditional optimal design. To this end, this article incorporates the idea of traditional optimal design into the D-VR design and proposes a new online calibration design criterion, namely, the excellence degree (ED) criterion. Four different schemes are developed to measure the information provided by the current examinee when implementing this new criterion, and four new ED designs equipped with them are put forward accordingly. Simulation studies were conducted under a variety of conditions to compare the D-VR design and the four proposed ED designs in terms of calibration efficiency. Results showed that the four ED designs outperformed the D-VR design in almost all simulation conditions.}, doi = {10.1177/0146621618824854}, url = {https://doi.org/10.1177/0146621618824854}, author = {Yinhong He and Ping Chen and Yong Li} } @article {2726, title = {Nonparametric CAT for CD in Educational Settings With Small Samples}, journal = {Applied Psychological Measurement}, volume = {43}, number = {7}, year = {2019}, pages = {543-561}, abstract = {Cognitive diagnostic computerized adaptive testing (CD-CAT) has been suggested by researchers as a diagnostic tool for assessment and evaluation. Although model-based CD-CAT is relatively well researched in the context of large-scale assessment systems, this type of system has not received the same degree of research and development in small-scale settings, such as at the course-based level, where this system would be the most useful. The main obstacle is that the statistical estimation techniques that are successfully applied within the context of a large-scale assessment require large samples to guarantee reliable calibration of the item parameters and an accurate estimation of the examinees{\textquoteright} proficiency class membership. Such samples are simply not obtainable in course-based settings. Therefore, the nonparametric item selection (NPS) method, which does not require any parameter calibration and thus can be used in small educational programs, is proposed in this study.
The proposed nonparametric CD-CAT uses the nonparametric classification (NPC) method to estimate an examinee{\textquoteright}s attribute profile from the examinee{\textquoteright}s item responses; the item that best discriminates between the estimated attribute profile and the other attribute profiles is then selected. The simulation results show that the NPS method outperformed the parametric CD-CAT algorithms with which it was compared, and the differences were substantial when the calibration samples were small.}, doi = {10.1177/0146621618813113}, url = {https://doi.org/10.1177/0146621618813113}, author = {Yuan-Pei Chang and Chia-Yi Chiu and Rung-Ching Tsai} } @conference {2648, title = {New Challenges (With Solutions) and Innovative Applications of CAT}, booktitle = {IACAT 2017 Conference}, year = {2017}, month = {08/2017}, publisher = {Niigata Seiryo University}, organization = {Niigata Seiryo University}, address = {Niigata, Japan}, abstract = {

Over the past several decades, computerized adaptive testing (CAT) has profoundly changed the administration of large-scale aptitude tests, state-wide achievement tests, professional licensure exams, and health outcome measures. While many challenges of CAT have been successfully addressed through the continual efforts of researchers in the field, many longstanding challenges have yet to be resolved. This symposium will begin with three presentations, each of which provides a sound solution to one of these unresolved challenges: (1) item calibration when responses are {\textquotedblleft}missing not at random{\textquotedblright} from CAT administration; (2) online calibration of new items when person traits have non-ignorable measurement error; and (3) establishing consistency and asymptotic normality of latent trait estimation when item response revision is allowed in CAT. In addition, this symposium features innovative applications of CAT. In particular, there is emerging interest in using cognitive diagnostic CAT to monitor and detect learning progress (4th presentation). Last but not least, the 5th presentation illustrates the power of multidimensional polytomous CAT that permits rapid identification of hospitalized patients{\textquoteright} rehabilitative care needs in health outcomes measurement. We believe this symposium covers a wide range of interesting and important topics in CAT.


}, keywords = {CAT, challenges, innovative applications}, url = {https://drive.google.com/open?id=1Wvgxw7in_QCq_F7kzID6zCZuVXWcFDPa}, author = {Chun Wang and David J. Weiss and Xue Zhang and Jian Tao and Yinhong He and Ping Chen and Shiyu Wang and Susu Zhang and Haiyan Lin and Xiaohong Gao and Hua-Hua Chang and Zhuoran Shang} } @conference {2638, title = {A New Cognitive Diagnostic Computerized Adaptive Testing for Simultaneously Diagnosing Skills and Misconceptions}, booktitle = {IACAT 2017 Conference}, year = {2017}, month = {08/2017}, publisher = {Niigata Seiryo University}, organization = {Niigata Seiryo University}, address = {Niigata, Japan}, abstract = {

In educational diagnosis, diagnosing misconceptions is as important as diagnosing skills. However, traditional cognitive diagnostic computerized adaptive testing (CD-CAT) is usually developed to diagnose skills only. This study proposes a new CD-CAT that can simultaneously diagnose skills and misconceptions. The proposed CD-CAT is based on a recently published cognitive diagnosis model, the simultaneously identifying skills and misconceptions (SISM) model (Kuo, Chen, \& de la Torre, in press). A new item selection algorithm is also developed for the proposed CD-CAT to achieve high adaptive testing performance. In simulation studies, we compare our new item selection algorithm with three existing item selection methods: the Kullback{\textendash}Leibler (KL) and posterior-weighted KL (PWKL) indices proposed by Cheng (2009) and the modified PWKL (MPWKL) proposed by Kaplan, de la Torre, and Barrada (2015). The results show that the proposed CD-CAT can efficiently diagnose skills and misconceptions; that the accuracy of the new item selection algorithm is close to that of the MPWKL with less computational burden; and that the new algorithm outperforms the KL and PWKL methods in diagnosing skills and misconceptions.

References

Cheng, Y. (2009). When cognitive diagnosis meets computerized adaptive testing: CD-CAT. Psychometrika, 74(4), 619{\textendash}632. doi:10.1007/s11336-009-9123-2

Kaplan, M., de la Torre, J., \& Barrada, J. R. (2015). New item selection methods for cognitive diagnosis computerized adaptive testing. Applied Psychological Measurement, 39(3), 167{\textendash}188. doi:10.1177/0146621614554650

Kuo, B.-C., Chen, C.-H., \& de la Torre, J. (in press). A cognitive diagnosis model for identifying coexisting skills and misconceptions. Applied Psychological Measurement.


}, keywords = {CD-CAT, Misconceptions, Simultaneous diagnosis}, author = {Bor-Chen Kuo and Chun-Hua Chen} } @conference {2636, title = {New Results on Bias in Estimates due to Discontinue Rules in Intelligence Testing}, booktitle = {IACAT 2017 Conference}, year = {2017}, month = {08/2017}, publisher = {Niigata Seiryo University}, organization = {Niigata Seiryo University}, address = {Niigata, Japan}, abstract = {

The presentation provides new results on a form of adaptive testing that is used frequently in intelligence testing. In these tests, items are presented in order of increasing difficulty, and the presentation of items is adaptive in the sense that each subtest session is discontinued once a test taker produces a certain number of incorrect responses in sequence. The subsequent (not observed) responses are commonly scored as wrong for that subtest, even though the test taker has not seen these items. Discontinuation rules allow a certain form of adaptiveness in both paper-based and computer-based testing, and help reduce testing time.

Two relevant lines of research are studies that directly assess the impact of discontinuation rules, and studies that more broadly look at the impact of scoring rules on test results with a large number of not-administered or not-reached items. He \& Wolfe (2012) compared different ability estimation methods for this type of discontinuation-rule adaptation of test length in a simulation study. However, to our knowledge there has been no rigorous analytical study of the underlying distributional changes of the response variables under discontinuation rules. It is important to point out that the results obtained by He \& Wolfe (2012) agree with results presented by, for example, DeAyala, Plake, \& Impara (2001) as well as Rose, von Davier, \& Xu (2010) and Rose, von Davier, \& Nagengast (2016) in that ability estimates are biased most when the not-observed responses are scored as wrong. Discontinuation rules combined with scoring the non-administered items as wrong are used operationally in several major intelligence tests, so more research is needed to improve this particular type of adaptiveness in testing practice.

The presentation extends existing research on adaptiveness via discontinue rules in intelligence tests in multiple ways. First, a rigorous analytical study of the distributional properties of discontinue-rule-scored items is presented. Second, an extended simulation is presented that includes additional alternative scoring rules as well as bias-corrected ability estimators that may be suitable for improving results for discontinue-rule-scored intelligence tests.

References: DeAyala, R. J., Plake, B. S., \& Impara, J. C. (2001). The impact of omitted responses on the accuracy of ability estimation in item response theory. Journal of Educational Measurement, 38, 213-234.

He, W., \& Wolfe, E. W. (2012). Treatment of not-administered items on individually administered intelligence tests. Educational and Psychological Measurement, 72(5), 808{\textendash}826. doi:10.1177/0013164412441937

Rose, N., von Davier, M., \& Xu, X. (2010). Modeling non-ignorable missing data with item response theory (IRT; ETS RR-10-11). Princeton, NJ: Educational Testing Service.

Rose, N., von Davier, M., \& Nagengast, B. (2016). Modeling omitted and not-reached items in IRT models. Psychometrika. doi:10.1007/s11336-016-9544-7


}, keywords = {Bias, CAT, Intelligence Testing}, author = {Matthias von Davier and Youngmi Cho and Tianshu Pan} } @article {2393, title = {New Item Selection Methods for Cognitive Diagnosis Computerized Adaptive Testing}, journal = {Applied Psychological Measurement}, volume = {39}, number = {3}, year = {2015}, pages = {167-188}, abstract = {This article introduces two new item selection methods, the modified posterior-weighted Kullback{\textendash}Leibler index (MPWKL) and the generalized deterministic inputs, noisy {\textquotedblleft}and{\textquotedblright} gate (G-DINA) model discrimination index (GDI), that can be used in cognitive diagnosis computerized adaptive testing. The efficiency of the new methods is compared with the posterior-weighted Kullback{\textendash}Leibler (PWKL) item selection index using a simulation study in the context of the G-DINA model. The impact of item quality, generating models, and test termination rules on attribute classification accuracy or test length is also investigated. The results of the study show that the MPWKL and GDI perform very similarly, and have higher correct attribute classification rates or shorter mean test lengths compared with the PWKL. In addition, the GDI has the shortest implementation time among the three indices. The proportion of item usage with respect to the required attributes across the different conditions is also tracked and discussed.}, doi = {10.1177/0146621614554650}, url = {http://apm.sagepub.com/content/39/3/167.abstract}, author = {Kaplan, Mehmet and de la Torre, Jimmy and Barrada, Juan Ram{\'o}n} } @article {2333, title = {A Numerical Investigation of the Recovery of Point Patterns With Minimal Information}, journal = {Applied Psychological Measurement}, volume = {38}, number = {4}, year = {2014}, pages = {329-335}, abstract = {

A method has been proposed (Tsogo et al., 2001) to reconstruct the geometrical configuration of a large point set using minimal information. This paper employs numerical examples to investigate the proposed procedure. The suggested method has two great advantages: it reduces the volume of the data collection exercise and eases the computational effort involved in analyzing the data. It is suggested, however, that the method, while possibly providing a useful starting point for a solution, is not a panacea.

}, doi = {10.1177/0146621613516186}, url = {http://apm.sagepub.com/content/38/4/329.abstract}, author = {Cox, M. A. A.} } @article {2041, title = {National Tests in Denmark {\textendash} CAT as a Pedagogic Tool}, journal = {Journal of Applied Testing Technology}, volume = {12}, year = {2011}, abstract = {

Testing and test results can be used in different ways. They can be used for regulation and control, but they can also be a pedagogic tool for assessment of student proficiency in order to target teaching, improve learning, and facilitate local pedagogical leadership. To serve these purposes the test has to be used for low-stakes purposes, and to ensure this, the Danish national test results are made strictly confidential by law. The only test results that are made public are the overall national results. Because of the test design, test results are directly comparable, offering potential for monitoring added value and developing new ways of using test results in a pedagogical context. This article gives the background and status of the development of the Danish national tests, describes what is special about these tests (e.g., Information Technology [IT]-based, 3 tests in 1, adaptive), how the national tests are carried out, and what is tested. Furthermore, it describes strategies for disseminating the results to the pupil, parents, teacher, headmaster, and municipality, and how the results can be used by the teacher and headmaster.

}, author = {Wandall, J.} } @article {2246, title = {A new adaptive testing algorithm for shortening health literacy assessments}, journal = {BMC Medical Informatics and Decision Making}, volume = {11}, year = {2011}, url = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3178473/?tool=pmcentrez
}, doi = {10.1186/1472-6947-11-52}, author = {Kandula, S. and Ancker, J.S. and Kaufman, D.R. and Currie, L.M. and Qing, Z.-T.} } @article {2189, title = {A New Stopping Rule for Computerized Adaptive Testing}, journal = {Educational and Psychological Measurement}, volume = {71}, number = {1}, year = {2011}, pages = {37-53}, abstract = {

The goal of the current study was to introduce a new stopping rule for computerized adaptive testing (CAT). The predicted standard error reduction (PSER) stopping rule uses the predictive posterior variance to determine the reduction in standard error that would result from the administration of additional items. The performance of the PSER was compared with that of the minimum standard error stopping rule and a modified version of the minimum information stopping rule in a series of simulated adaptive tests, drawn from a number of item pools. Results indicate that the PSER makes efficient use of CAT item pools, administering fewer items when predictive gains in information are small and increasing measurement precision when information is abundant.

}, doi = {10.1177/0013164410387338}, url = {http://epm.sagepub.com/content/71/1/37.abstract}, author = {Choi, Seung W. and Grady, Matthew W. and Dodd, Barbara G.} } @article {80, title = {A new stopping rule for computerized adaptive testing}, journal = {Educational and Psychological Measurement}, volume = {70}, number = {6}, year = {2010}, note = {U01 AR052177-04/NIAMS NIH HHS/Educ Psychol Meas. 2010 Dec 1;70(6):1-17.}, month = {Dec 1}, pages = {1-17}, edition = {2011/02/01}, abstract = {The goal of the current study was to introduce a new stopping rule for computerized adaptive testing. The predicted standard error reduction stopping rule (PSER) uses the predictive posterior variance to determine the reduction in standard error that would result from the administration of additional items. The performance of the PSER was compared to that of the minimum standard error stopping rule and a modified version of the minimum information stopping rule in a series of simulated adaptive tests, drawn from a number of item pools. Results indicate that the PSER makes efficient use of CAT item pools, administering fewer items when predictive gains in information are small and increasing measurement precision when information is abundant.}, isbn = {0013-1644 (Print) 0013-1644 (Linking)}, author = {Choi, S. W. and Grady, M. W. and Dodd, B. G.} } @inbook {1868, title = {The nine lives of CAT-ASVAB: Innovations and revelations}, year = {2009}, note = {{PDF file, 169 KB}}, address = {In D. J. Weiss (Ed.), Proceedings of the 2009 GMAC Conference on Computerized Adaptive Testing.}, abstract = {The Armed Services Vocational Aptitude Battery (ASVAB) is administered annually to more than one million military applicants and high school students. ASVAB scores are used to determine enlistment eligibility, assign applicants to military occupational specialties, and aid students in career exploration. The ASVAB is administered as both a paper-and-pencil (P\&P) test and a computerized adaptive test (CAT). CAT-ASVAB holds the distinction of being the first large-scale adaptive test battery to be administered in a high-stakes setting. Approximately two-thirds of military applicants currently take CAT-ASVAB; long-term plans are to replace P\&P-ASVAB with CAT-ASVAB at all test sites. Given CAT-ASVAB{\textquoteright}s pedigree{\textemdash}approximately 20 years in development and 20 years in operational administration{\textemdash}much can be learned from revisiting some of the major highlights of CAT-ASVAB history. This paper traces the progression of CAT-ASVAB through nine major phases of development including: research and development of the CAT-ASVAB prototype, the initial development of psychometric procedures and item pools, initial and full-scale operational implementation, the introduction of new item pools, the introduction of Windows administration, the introduction of Internet administration, and research and development of the next-generation CAT-ASVAB. A background and history is provided for each phase, including discussions of major research and operational issues, innovative approaches and practices, and lessons learned.}, author = {Pommerich, M. and Segall, D. O. and Moreno, K.
E.} } @article {293, title = {The NAPLEX: evolution, purpose, scope, and educational implications}, journal = {American Journal of Pharmaceutical Education}, volume = {72}, number = {2}, year = {2008}, note = {Newton, David W; Boyle, Maria; Catizone, Carmen A. Historical Article. United States. Am J Pharm Educ. 2008 Apr 15;72(2):33.}, month = {Apr 15}, pages = {33}, edition = {2008/05/17}, abstract = {Since 2004, passing the North American Pharmacist Licensure Examination (NAPLEX) has been a requirement for earning initial pharmacy licensure in all 50 United States. The creation and evolution from 1952-2005 of the particular pharmacy competency testing areas and quantities of questions are described for the former paper-and-pencil National Association of Boards of Pharmacy Licensure Examination (NABPLEX) and the current candidate-specific computer adaptive NAPLEX pharmacy licensure examinations. A 40\% increase in the weighting of NAPLEX Blueprint Area 2 in May 2005, compared to that in the preceding 1997-2005 Blueprint, has implications for candidates{\textquoteright} NAPLEX performance and associated curricular content and instruction. New pharmacy graduates{\textquoteright} scores on the NAPLEX are neither intended nor validated to serve as a criterion for assessing or judging the quality or effectiveness of pharmacy curricula and instruction. The newest cycle of NAPLEX Blueprint revision, a continual process to ensure representation of nationwide contemporary practice, began in early 2008. It may take up to 2 years, including surveying several thousand national pharmacists, to complete.}, keywords = {*Educational Measurement, Education, Pharmacy/*standards, History, 20th Century, History, 21st Century, Humans, Licensure, Pharmacy/history/*legislation \& jurisprudence, North America, Pharmacists/*legislation \& jurisprudence, Software}, isbn = {1553-6467 (Electronic) 0002-9459 (Linking)}, author = {Newton, D. W. and Boyle, M. and Catizone, C. A.} } @article {568, title = {An NCME instructional module on multistage testing}, journal = {Educational Measurement: Issues and Practice}, volume = {26}, number = {2}, year = {2007}, note = {$\#$HE07044}, pages = {44-52}, author = {Hendrickson, A.} } @inbook {1865, title = {A new delivery system for CAT}, year = {2007}, note = {{PDF file, 248 KB}}, address = {D. J. Weiss (Ed.). Proceedings of the 2007 GMAC Conference on Computerized Adaptive Testing.}, author = {Park, J.} } @inbook {1881, title = {Nonparametric online item calibration}, year = {2007}, note = {{8 MB}}, address = {D. J. Weiss (Ed.). Proceedings of the 2007 GMAC Conference on Computerized Adaptive Testing.
}, author = {Samejima, F.} } @conference {1234, title = {New methods for CBT item pool evaluation}, booktitle = {Paper presented at the annual meeting of the American Educational Research Association}, year = {2004}, note = {$\#$WA04-02 {PDF file, 1.005 MB}}, address = {San Diego CA}, author = {Wang, L.} } @article {21, title = {NCLEX-RN performance: predicting success on the computerized examination}, journal = {Journal of Professional Nursing}, volume = {17}, number = {4}, year = {2001}, note = {8755-7223 Journal Article}, month = {Jul-Aug}, pages = {158-165}, abstract = {Since the adoption of the Computerized Adaptive Testing (CAT) format of the National Council Licensure Examination for Registered Nurses (NCLEX-RN), no studies have been reported in the literature on predictors of successful performance by baccalaureate nursing graduates on the licensure examination. In this study, a discriminant analysis was used to identify which of 21 variables can be significant predictors of success on the CAT NCLEX-RN. The convenience sample consisted of 289 individuals who graduated from a baccalaureate nursing program between 1995 and 1998. Seven significant predictor variables were identified. The total number of C+ or lower grades earned in nursing theory courses was the best predictor, followed by grades in several individual nursing courses. More than 93 per cent of graduates were correctly classified. Ninety-four per cent of NCLEX "passes" were correctly classified, as were 92 per cent of NCLEX failures. This degree of accuracy in classifying CAT NCLEX-RN failures represents a marked improvement over results reported in previous studies of licensure examinations, and suggests the discriminant function will be helpful in identifying future students in danger of failure. J Prof Nurs 17:158-165, 2001.}, keywords = {*Education, Nursing, Baccalaureate, *Educational Measurement, *Licensure, Adult, Female, Humans, Male, Predictive Value of Tests, Software}, author = {Beeman, P. B. and Waterhouse, J. K.} } @conference {1084, title = {Nearest neighbors, simple strata, and probabilistic parameters: An empirical comparison of methods for item exposure control in CATs}, booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education}, year = {2001}, address = {Seattle WA}, author = {Parshall, C. G. and Kromrey, J. D. and Harmes, J. C. and Sentovich, C.} } @conference {881, title = {A new approach to simulation studies in computerized adaptive testing}, booktitle = {Paper presented at the annual meeting of the American Educational Research Association}, year = {2001}, note = {{PDF file, 251 KB}}, address = {Seattle WA}, author = {Chen, S-Y.} } @article {818, title = {A new computer algorithm for simultaneous test construction of two-stage and multistage testing}, journal = {Journal of Educational and Behavioral Statistics}, volume = {26}, year = {2001}, pages = {180-198}, author = {Wu, I. L.} } @article {279, title = {Nouveaux d{\'e}veloppements dans le domaine du testing informatis{\'e} [New developments in the area of computerized testing]}, journal = {Psychologie Fran{\c c}aise}, volume = {46}, number = {3}, year = {2001}, pages = {221-230}, abstract = {The use of computer-assisted assessment has grown considerably since the first formulation of its basic principles in the 1960s and 1970s.
This article offers an introduction to the latest developments in the field of computer-assisted assessment, in particular computerized adaptive testing (CAT). Ability estimation, item selection, and item bank development for CAT are discussed. In addition, examples of innovative uses of the computer in integrated testing systems and in testing via the Internet are presented. The article closes with several illustrations of new applications of computerized testing and suggestions for future research. Discusses the latest developments in computerized psychological assessment, with emphasis on computerized adaptive testing (CAT). Ability estimation, item selection, and item pool development in CAT are described. Examples of some innovative approaches to CAT are presented. (PsycINFO Database Record (c) 2005 APA )}, keywords = {Adaptive Testing, Computer Applications, Computer Assisted, Diagnosis, Psychological Assessment computerized adaptive testing}, author = {Meijer, R. R. and Gr{\'e}goire, J.} } @conference {994, title = {A new item selection procedure for mixed item type in computerized classification testing}, booktitle = {Paper presented at the annual meeting of the American Educational Research Association}, year = {2000}, note = {{PDF file, 452 KB}}, address = {New Orleans}, author = {Lau, C. and Wang, T.} } @article {752, title = {The null distribution of person-fit statistics for conventional and adaptive tests}, journal = {Applied Psychological Measurement}, volume = {23}, year = {1999}, pages = {327-345}, author = {van Krimpen-Stoop, E. M. L. A. and Meijer, R. R.} } @conference {1062, title = {A new approach for the detection of item preknowledge in computerized adaptive testing}, booktitle = {Paper presented at the annual meeting of the Psychometric Society}, year = {1998}, address = {Urbana, IL}, author = {McLeod, L. D. and Lewis, C.} } @conference {1281, title = {Nonmodel-fitting responses and robust ability estimation in a realistic CAT environment}, booktitle = {Paper presented at the annual meeting of the National Council on Measurement in Education}, year = {1998}, address = {San Diego CA}, author = {Yi, Q. and Nering, M.} } @article {496, title = {Nonlinear sequential designs for logistic item response theory models with applications to computerized adaptive tests}, journal = {The Annals of Statistics}, year = {1997}, author = {Chang, Hua-Hua and Ying, Z.} } @conference {922, title = {New algorithms for item selection and exposure and proficiency estimation under 1- and 2-PL models}, booktitle = {Paper presented at the annual meeting of the American Educational Research Association}, year = {1996}, address = {New York}, author = {Featherman, C. M. and Subhiyah, R. G.
and Hadadi, A.} } @conference {892, title = {New algorithms for item selection and exposure control with computerized adaptive testing}, booktitle = {Paper presented at the annual meeting of the American Educational Research Association}, year = {1995}, address = {San Francisco CA}, author = {Davey, T. and Parshall, C. G.} } @conference {1198, title = {New item exposure control algorithms for computerized adaptive testing}, booktitle = {Paper presented at the annual meeting of the Psychometric Society}, year = {1995}, address = {Minneapolis MN}, author = {Thomasson, G. L.} } @booklet {1620, title = {A new method of controlling item exposure in computerized adaptive testing (Research Report 95-25)}, year = {1995}, address = {Princeton NJ: Educational Testing Service}, author = {Stocking, M. L. and Lewis, C.} } @article {578, title = {New computer technique seen producing a revolution in testing}, journal = {The Chronicle of Higher Education}, volume = {40}, year = {1993}, pages = {22-23, 26}, edition = {4}, author = {Jacobson, R. L.} } @article {525, title = {The nominal response model in computerized adaptive testing}, journal = {Applied Psychological Measurement}, volume = {16}, year = {1992}, pages = {327-343}, author = {De Ayala, R. J.} } @article {190, title = {National Council Computerized Adaptive Testing Project Review--committee perspective}, journal = {Issues}, volume = {11}, number = {4}, year = {1990}, note = {911613110885-0046Journal Article}, pages = {3}, keywords = {*Computers, *Licensure, Educational Measurement/*methods, Feasibility Studies, Societies, Nursing, United States}, author = {Haynes, B.} } @book {1720, title = {New horizons in testing: Latent trait test theory and computerized adaptive testing}, year = {1983}, address = {New York: Academic Press}, author = {Weiss, D. J.} } @inbook {1736, title = {New types of information and psychological implications}, year = {1975}, note = {{PDF file, 609 KB}}, address = {D. J. Weiss (Ed.), Computerized adaptive trait measurement: Problems and Prospects (Research Report 75-5), pp. 32-43. Minneapolis: University of Minnesota, Department of Psychology, Psychometric Methods Program.}, author = {Betz, N. E.} } @inbook {1750, title = {New light on test strategy from decision theory}, year = {1966}, address = {A. Anastasi (Ed.). Testing problems in perspective. Washington DC: American Council on Education.}, author = {Cronbach, L. J.} }