Uploading tokenizer_robustness_completion_math_latex subset
README.md CHANGED
@@ -628,6 +628,130 @@ dataset_info:
     num_examples: 21
   download_size: 34631
   dataset_size: 11219
+- config_name: tokenizer_robustness_completion_math_latex
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 11494
+    num_examples: 21
+  download_size: 34230
+  dataset_size: 11494
 configs:
 - config_name: tokenizer_robustness_completion_math_canonical
   data_files:
@@ -649,6 +773,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_math_italian/test-*
+- config_name: tokenizer_robustness_completion_math_latex
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_math_latex/test-*
 ---
 
 # Dataset Card for Tokenization Robustness Math
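Once this commit lands, the new subset is addressable by the config name declared above. The snippet below is a minimal loading sketch using the `datasets` library; the repository id is a placeholder (it is not shown in this diff), while the config name, split, and column names are taken directly from the YAML added here.

```python
from datasets import load_dataset

# "<namespace>/<dataset-repo>" is a placeholder: substitute the repository
# that hosts this dataset card.
ds = load_dataset(
    "<namespace>/<dataset-repo>",
    "tokenizer_robustness_completion_math_latex",  # config added in this commit
    split="test",                                  # the only split declared above
)

print(ds.num_rows)  # expected: 21, per the num_examples field

example = ds[0]
print(example["question"], example["choices"], example["answer_label"])

# The per-tokenizer measurements are struct columns; their keys are the
# tokenizer names listed in the features block above.
print(example["token_counts"]["gpt2"])
print(example["vanilla_cos_sim_to_canonical"]["google/byt5-small"])
```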
tokenizer_robustness_completion_math_latex/test-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d75d4f699bd6eadc09bdc16404a11ee511d824f8175fec689baaf6dae09fd640
+size 34230
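The `oid sha256` and `size` fields in the LFS pointer describe the parquet payload itself, so a downloaded copy can be checked against them. A minimal sketch, assuming the file has been pulled to a local path mirroring the name in this commit:

```python
import hashlib
import os

PARQUET_PATH = "tokenizer_robustness_completion_math_latex/test-00000-of-00001.parquet"
EXPECTED_OID = "d75d4f699bd6eadc09bdc16404a11ee511d824f8175fec689baaf6dae09fd640"
EXPECTED_SIZE = 34230  # bytes, from the pointer above

# Hash the file in chunks to avoid loading it all into memory.
sha = hashlib.sha256()
with open(PARQUET_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(PARQUET_PATH) == EXPECTED_SIZE, "size mismatch"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("parquet file matches the LFS pointer")
```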