Datasets:
- tokenizer_robustness_completion_stem_canonical
- tokenizer_robustness_completion_stem_character_deletion
- tokenizer_robustness_completion_stem_colloquial
- tokenizer_robustness_completion_stem_compounds
- tokenizer_robustness_completion_stem_diacriticized_styling
- tokenizer_robustness_completion_stem_double_struck
- tokenizer_robustness_completion_stem_enclosed_characters
- tokenizer_robustness_completion_stem_equivalent_expressions
- tokenizer_robustness_completion_stem_fullwidth_characters
- tokenizer_robustness_completion_stem_latex
- tokenizer_robustness_completion_stem_morpheme_separation
- tokenizer_robustness_completion_stem_scripted_text
- tokenizer_robustness_completion_stem_space_removal
- tokenizer_robustness_completion_stem_spelled_out
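
The names above look like per-perturbation configurations of a single tokenizer-robustness benchmark. As a minimal sketch, assuming they are exposed as Hugging Face dataset configurations under one repository, a variant could be loaded as follows; the repository ID used here is a placeholder, not the real one:

```python
# Minimal sketch: load the canonical variant and one perturbed variant
# for comparison. Assumptions: the entries listed above are configuration
# names of a single Hugging Face dataset repo; REPO_ID is a placeholder.
from datasets import load_dataset

REPO_ID = "your-org/tokenizer-robustness"  # placeholder, not the real repo

canonical = load_dataset(REPO_ID, "tokenizer_robustness_completion_stem_canonical")
fullwidth = load_dataset(REPO_ID, "tokenizer_robustness_completion_stem_fullwidth_characters")

print(canonical)
print(fullwidth)
```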