@article {35, title = {Expansion of a physical function item bank and development of an abbreviated form for clinical research}, journal = {Journal of Applied Measurement}, volume = {7}, number = {1}, year = {2006}, pages = {1-15}, publisher = {Richard M Smith: US}, abstract = {We expanded an existing 33-item physical function (PF) item bank with a sufficient number of items to enable computerized adaptive testing (CAT). Ten items were written to expand the bank and the new item pool was administered to 295 people with cancer. For this analysis of the new pool, seven poorly performing items were identified for further examination. This resulted in a bank with items that define an essentially unidimensional PF construct, cover a wide range of that construct, reliably measure the PF of persons with cancer, and distinguish differences in self-reported functional performance levels. We also developed a 5-item (static) assessment form ("BriefPF") that can be used in clinical research to express scores on the same metric as the overall bank. The BriefPF was compared to the PF-10 from the Medical Outcomes Study SF-36. Both short forms significantly differentiated persons across functional performance levels. While the entire bank was more precise across the PF continuum than either short form, there were differences in the area of the continuum in which each short form was more precise: the BriefPF was more precise than the PF-10 at the lower functional levels and the PF-10 was more precise than the BriefPF at the higher levels. Future research on this bank will include the development of a CAT version, the PF-CAT. (PsycINFO Database Record (c) 2007 APA, all rights reserved)}, keywords = {clinical research, computerized adaptive testing, performance levels, physical function item bank, Psychometrics, test reliability, Test Validity}, isbn = {1529-7713 (Print)}, author = {Bode, R. K. and Lai, J-S. and Dineen, K. and Heinemann, A. W. and Shevrin, D. and Von Roenn, J. and Cella, D.} }
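The precision comparison reported above (BriefPF vs. PF-10 across the PF continuum) comes down to comparing test information functions: wherever a form's summed item information is higher, its standard error of measurement is lower. A minimal sketch of that comparison, assuming a dichotomous 2PL model and invented item parameters; the actual PF items are polytomous and their calibrations are not reproduced here.

import numpy as np

def item_information(theta, a, b):
    # 2PL Fisher information for one item at trait level theta
    p = 1.0 / (1.0 + np.exp(-a * (theta - b)))
    return a**2 * p * (1.0 - p)

theta = np.linspace(-3, 3, 61)
# Hypothetical 5-item form targeting lower functioning (cf. BriefPF)
form_low = [(1.8, -2.0), (1.6, -1.2), (1.7, -0.5), (1.5, 0.2), (1.4, 0.8)]
# Hypothetical 5-item form targeting higher functioning (cf. PF-10 region)
form_high = [(1.5, -0.8), (1.4, 0.0), (1.6, 0.6), (1.3, 1.2), (1.5, 1.9)]

info_low = sum(item_information(theta, a, b) for a, b in form_low)
info_high = sum(item_information(theta, a, b) for a, b in form_high)
# SE = 1/sqrt(information): the form with more information in a region
# measures more precisely there, which is the pattern the abstract reports.
for t, il, ih in zip(theta[::10], info_low[::10], info_high[::10]):
    print(f"theta={t:+.1f}  SE_low={1/np.sqrt(il):.2f}  SE_high={1/np.sqrt(ih):.2f}")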
@article {296, title = {T{\'e}cnicas para detectar patrones de respuesta at{\'\i}picos [Aberrant patterns detection methods]}, journal = {Anales de Psicolog{\'\i}a}, volume = {22}, number = {1}, year = {2006}, note = {Spain: Universidad de Murcia}, pages = {143-154}, abstract = {Detecting aberrant response patterns is highly useful for building tests and item banks with sound psychometric properties, as well as for analyzing the validity of those tests and items. This review gathers the most relevant and recent person-fit methods developed within each of the main areas of psychometrics: Guttman{\textquoteright}s scalogram, Classical Test Theory (CTT), Generalizability Theory (GT), Item Response Theory (IRT), Non-parametric Response Models (NPRM), Order-Restricted Latent Class Models (OR-LCM) and Covariance Structure Analysis (CSA).}, keywords = {aberrant patterns detection, Classical Test Theory, Generalizability Theory, Item Response Theory, Mathematics, methods, person-fit, Psychometrics, Test Validity, test validity analysis}, isbn = {0212-9728}, author = {N{\'u}{\~n}ez, R. M. N. and Pina, J. A. L.} }
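As a concrete instance of the IRT branch of the person-fit methods reviewed above, the standardized log-likelihood statistic l_z flags response patterns that are unlikely under a fitted model. A minimal sketch, assuming a 2PL model with known item parameters and trait level, all invented for illustration:

import numpy as np

def lz_person_fit(u, theta, a, b):
    # Standardized log-likelihood person-fit statistic for 2PL responses:
    # l_z = (l0 - E[l0]) / sqrt(Var[l0]); large negative values signal misfit.
    p = 1.0 / (1.0 + np.exp(-a * (theta - b)))
    l0 = np.sum(u * np.log(p) + (1 - u) * np.log(1 - p))
    e = np.sum(p * np.log(p) + (1 - p) * np.log(1 - p))
    v = np.sum(p * (1 - p) * np.log(p / (1 - p)) ** 2)
    return (l0 - e) / np.sqrt(v)

# Hypothetical 10-item calibration and one examinee
a = np.full(10, 1.2)
b = np.linspace(-2, 2, 10)
u = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0])            # Guttman-like pattern
print(lz_person_fit(u, theta=0.0, a=a, b=b))             # near 0: fits
print(lz_person_fit(1 - u, theta=0.0, a=a, b=b))         # reversed: strongly aberrant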
@article {289, title = {La Validez desde una {\'o}ptica psicom{\'e}trica [Validity from a psychometric perspective]}, journal = {Acta Comportamentalia}, volume = {13}, number = {1}, year = {2005}, pages = {9-20}, abstract = {The study of validity constitutes the central axis of psychometric analyses of measurement instruments. This paper presents a brief historical sketch of the different ways validity has been conceived over time, comments on current views, and attempts to anticipate future lines of research by considering the impact that new computer technologies are having on measurement instruments in psychology and education. Issues such as new multimedia item formats, distance assessment, the intercultural use of tests, the consequences of test use, and computerized adaptive testing demand new ways of conceptualizing and evaluating validity. Some recent proposals about the concept of validity are also critically analyzed. (PsycINFO Database Record (c) 2005 APA) (journal abstract)}, keywords = {Factor Analysis, Measurement, Psychometrics, Scaling (Testing), Statistical, Technology, Test Validity}, author = {Mu{\~n}iz, J.} } @article {4, title = {Propiedades psicom{\'e}tricas de un test Adaptativo Informatizado para la medici{\'o}n del ajuste emocional [Psychometric properties of an Emotional Adjustment Computerized Adaptive Test]}, journal = {Psicothema}, volume = {17}, number = {3}, year = {2005}, pages = {484-491}, abstract = {This paper describes the psychometric properties of a computerized adaptive test (CAT) for measuring emotional adjustment. A review of the item response theory (IRT) literature shows that IRT has been applied far more often to ability and achievement variables than to personality measurement, although several studies have demonstrated its effectiveness for the psychometric description of personality variables. Even so, few studies have explored the characteristics of an IRT-based computerized adaptive test for measuring a personality variable such as emotional adjustment. Our results show that the CAT assesses emotional adjustment efficiently, providing valid and precise measurement while using fewer items than the emotional adjustment scales of well-established instruments.}, keywords = {Computer Assisted Testing, Emotional Adjustment, Item Response Theory, Personality Measures, Psychometrics, Test Validity}, author = {Aguado, D. and Rubio, V. J. and Hontangas, P. M. and Hern{\'a}ndez, J. M.} }
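The efficiency claim in the abstract above (valid, precise measurement from fewer items) is what a CAT achieves by re-estimating the trait after each response and always administering the most informative remaining item. A minimal sketch of such a loop, with an invented 2PL bank, maximum-information selection, and EAP scoring on a quadrature grid; none of this reproduces the authors' instrument:

import numpy as np

rng = np.random.default_rng(1)
a = rng.uniform(0.8, 2.0, 200)                 # hypothetical 200-item bank
b = rng.normal(0.0, 1.0, 200)

grid = np.linspace(-4, 4, 121)                 # quadrature grid for EAP
prior = np.exp(-0.5 * grid**2)                 # standard normal prior
prior /= prior.sum()

def prob(theta, j):
    return 1.0 / (1.0 + np.exp(-a[j] * (theta - b[j])))

def simulate_cat(true_theta, se_target=0.3, max_items=30):
    post, used = prior.copy(), []
    theta_hat, se = 0.0, 1.0
    while len(used) < max_items and se > se_target:
        # pick the unused item with maximum Fisher information at theta_hat
        p = prob(theta_hat, np.arange(len(a)))
        info = a**2 * p * (1 - p)
        info[used] = -np.inf
        j = int(np.argmax(info))
        used.append(j)
        u = rng.random() < prob(true_theta, j)  # simulate the response
        pg = prob(grid, j)                      # Bayesian posterior update
        post *= pg if u else (1 - pg)
        post /= post.sum()
        theta_hat = float(np.sum(grid * post))
        se = float(np.sqrt(np.sum((grid - theta_hat)**2 * post)))
    return theta_hat, se, len(used)

print(simulate_cat(true_theta=0.7))            # typically stops well under max_items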
@inbook {119, title = {Generating abstract reasoning items with cognitive theory}, booktitle = {Item generation for test development}, year = {2002}, note = {Item generation for test development (pp. 219-250). Mahwah, NJ: Lawrence Erlbaum Associates, Publishers. xxxii, 412 pp}, pages = {219-250}, publisher = {Lawrence Erlbaum Associates, Inc.}, organization = {Lawrence Erlbaum Associates, Inc.}, address = {Mahwah, N.J. USA}, abstract = {(From the chapter) Developed and evaluated a generative system for abstract reasoning items based on cognitive theory. The cognitive design system approach was applied to generate matrix completion problems. Study 1 involved developing the cognitive theory with 191 college students who were administered Set I and Set II of the Advanced Progressive Matrices. Study 2 examined item generation by cognitive theory. Study 3 explored the psychometric properties and construct representation of abstract reasoning test items with 728 young adults. Five structurally equivalent forms of Abstract Reasoning Test (ART) items were prepared from the generated item bank and administered to the Ss. In Study 4, the nomothetic span (construct validity) of the generated items was examined with 728 young adults who were administered ART items, and 217 young adults who were administered ART items and the Advanced Progressive Matrices. Results indicate that the matrix completion items were effectively generated by the cognitive design system approach. (PsycINFO Database Record (c) 2005 APA )}, keywords = {Cognitive Processes, Measurement, Reasoning, Test Construction, Test Items, Test Validity, Theories}, author = {Embretson, S. E.}, editor = {P. Kyllonen} }
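A toy sketch of the generative idea behind the chapter above: an item "structure" is a sampled set of rules, each cell of a 3x3 matrix is produced by applying those rules, and items sharing a structure are by design parallel, with the number and kind of rules driving difficulty. The rules and objects here are invented placeholders, not Embretson's actual cognitive design system:

import random

SHAPES = ["triangle", "square", "circle", "diamond"]
RULES = {
    "rotate":  lambda obj, step: {**obj, "angle": (obj["angle"] + 90 * step) % 360},
    "grow":    lambda obj, step: {**obj, "size": obj["size"] + step},
    "recolor": lambda obj, step: {**obj, "shade": step % 3},
}

def generate_item(n_rules=2, seed=None):
    # Sample a rule set (the structure), then render a 3x3 matrix where
    # rows vary the base shape and columns apply the rules incrementally.
    rng = random.Random(seed)
    rule_names = rng.sample(sorted(RULES), n_rules)
    base = {"shape": rng.choice(SHAPES), "angle": 0, "size": 1, "shade": 0}
    matrix = []
    for row in range(3):
        cells = []
        for col in range(3):
            obj = dict(base, shape=SHAPES[(SHAPES.index(base["shape"]) + row) % 4])
            for name in rule_names:
                obj = RULES[name](obj, col)
            cells.append(obj)
        matrix.append(cells)
    matrix[2][2] = None        # the cell the examinee must complete
    return rule_names, matrix

rules, item = generate_item(seed=7)
print("structure:", rules)     # more rules -> (by the theory) a harder item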
@article {378, title = {The development of a computerized version of Vandenberg{\textquoteright}s mental rotation test and the effect of visuo-spatial working memory loading}, journal = {Dissertation Abstracts International Section A: Humanities and Social Sciences}, volume = {60}, number = {11-A}, year = {2000}, pages = {3938}, abstract = {This dissertation focused on the generation and evaluation of web-based versions of Vandenberg{\textquoteright}s Mental Rotation Test (MRT). Memory and spatial visualization theory were explored in relation to the addition of a visuo-spatial working memory component. Analysis of the data determined that there was a significant difference between scores on the MRT Computer and MRT Memory tests. The addition of a visuo-spatial working memory component did significantly affect results at the .05 alpha level. Reliability and discrimination estimates were higher on the MRT Memory version. The computerization of the paper-and-pencil version of the MRT did not significantly affect scores but did affect the time required to complete the test. The population utilized in the quasi-experiment consisted of 107 university students from eight institutions in engineering graphics related courses. The subjects completed two researcher-developed, Web-based versions of Vandenberg{\textquoteright}s Mental Rotation Test and the original paper-and-pencil version. One version of the test included a visuo-spatial working memory loading. Significant contributions of this study included developing and evaluating computerized versions of Vandenberg{\textquoteright}s Mental Rotation Test. Previous versions did not take advantage of the computer{\textquoteright}s ability to incorporate an interaction factor, such as a visuo-spatial working memory loading, into the test. The addition of an interaction factor results in a more discriminating test, which lends itself well to computerized adaptive testing practices. Educators in engineering graphics related disciplines should strongly consider using spatial visualization tests to help establish the effects of modern computer systems on fundamental design/drafting skills. Regular testing of spatial visualization skills will assist in the creation of a more relevant curriculum. Computerized tests that are valid and reliable will make this task feasible. (PsycINFO Database Record (c) 2005 APA )}, keywords = {Computer Assisted Testing, Mental Rotation, Short Term Memory, computerized adaptive testing, Test Construction, Test Validity, Visuospatial Memory}, author = {Strong, S. D.} } @article {41, title = {An examination of the reliability and validity of performance ratings made using computerized adaptive rating scales}, journal = {Dissertation Abstracts International: Section B: The Sciences and Engineering}, volume = {61}, number = {1-B}, year = {2000}, pages = {570}, abstract = {This study compared the psychometric properties of performance ratings made using recently-developed computerized adaptive rating scales (CARS) to the psychometric properties of ratings made using more traditional paper-and-pencil rating formats, i.e., behaviorally-anchored and graphic rating scales. Specifically, the reliability, validity and accuracy of the performance ratings from each format were examined. One hundred twelve participants viewed six 5-minute videotapes of office situations and rated the performance of a target person in each videotape on three contextual performance dimensions (Personal Support, Organizational Support, and Conscientious Initiative) using CARS and either behaviorally-anchored or graphic rating scales. Performance rating properties were measured using Shrout and Fleiss{\textquoteright}s intraclass correlation (2, 1), Borman{\textquoteright}s differential accuracy measure, and Cronbach{\textquoteright}s accuracy components as indexes of rating reliability, validity, and accuracy, respectively. Results found that performance ratings made using the CARS were significantly more reliable and valid than performance ratings made using either of the other formats. Additionally, CARS yielded more accurate performance ratings than the paper-and-pencil formats. The nature of the CARS system (i.e., its adaptive nature and scaling methodology) and its paired comparison judgment task are offered as possible reasons for the differences found in the psychometric properties of the performance ratings made using the various rating formats. (PsycINFO Database Record (c) 2005 APA )}, keywords = {Adaptive Testing, Computer Assisted Testing, Performance Tests, Rating Scales, Test Reliability, Test Validity}, author = {Buck, D. E.} }
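The reliability index named in the abstract above, Shrout and Fleiss's ICC(2,1), treats both targets and raters as random effects and is computed from the two-way ANOVA mean squares. A small sketch with invented ratings (six performances by two raters, echoing the study's six videotapes):

import numpy as np

def icc_2_1(x):
    # Shrout & Fleiss (1979) ICC(2,1) for an n targets x k raters matrix:
    # (MS_rows - MS_err) / (MS_rows + (k-1)MS_err + k(MS_cols - MS_err)/n)
    n, k = x.shape
    grand = x.mean()
    row_means = x.mean(axis=1)
    col_means = x.mean(axis=0)
    ms_r = k * np.sum((row_means - grand) ** 2) / (n - 1)   # targets
    ms_c = n * np.sum((col_means - grand) ** 2) / (k - 1)   # raters
    sse = np.sum((x - row_means[:, None] - col_means[None, :] + grand) ** 2)
    ms_e = sse / ((n - 1) * (k - 1))                        # residual
    return (ms_r - ms_e) / (ms_r + (k - 1) * ms_e + k * (ms_c - ms_e) / n)

ratings = np.array([[6.0, 6.5], [4.0, 4.5], [7.0, 6.5],
                    [3.0, 3.5], [5.0, 5.5], [6.5, 7.0]])
print(round(icc_2_1(ratings), 3))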