@misc{10481/84794,
  year     = {2023},
  month    = {2},
  url      = {https://hdl.handle.net/10481/84794},
  abstract = {When two raters independently classify n objects into K nominal categories, the level of agreement between them is usually assessed by means of Cohen's kappa coefficient. However, the kappa coefficient has been the subject of several criticisms. Additionally, when a more detailed analysis is needed, the degree of agreement must be evaluated class by class, and traditionally, indexes that are not chance-corrected are used for this purpose. The Delta model does not have the aforementioned limitations of kappa, and it allows chance-corrected measures of agreement to be defined class by class.},
  keywords = {Agreement, Multiple-choice tests, software},
  title    = {Delta. Comparison of agreement in nominal scale between two raters and assessment of the degree of knowledge in multiple-choice tests},
  author   = {Femia Marzo, Pedro and Martín Andrés, Antonio},
}