Uploading tokenizer_robustness_completion_english_word_reordering subset
README.md
CHANGED
@@ -3480,6 +3480,130 @@ dataset_info:
     num_examples: 37
   download_size: 38294
   dataset_size: 18991
+- config_name: tokenizer_robustness_completion_english_word_reordering
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 21498
+    num_examples: 40
+  download_size: 39384
+  dataset_size: 21498
 configs:
 - config_name: tokenizer_robustness_completion_english_abbreviations
   data_files:
@@ -3593,6 +3717,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_english_web_search_query/test-*
+- config_name: tokenizer_robustness_completion_english_word_reordering
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_english_word_reordering/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
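For reference, a minimal sketch of loading the config added above with the `datasets` library. The repo id is a placeholder (the commit page does not name the repository); the config name, split, and column names are taken from the YAML schema above.

```python
from datasets import load_dataset

# Placeholder repo id: the commit page does not name the repository.
ds = load_dataset(
    "your-org/tokenization-robustness",
    "tokenizer_robustness_completion_english_word_reordering",
    split="test",
)

assert len(ds) == 40  # num_examples declared in the card above

row = ds[0]
# `answer` is an int64 and appears to index into `choices`;
# `answer_label` carries the same answer as a string label.
print(row["question"], row["choices"][row["answer"]])

# The struct columns decode to dicts keyed by tokenizer name.
print(row["token_counts"]["gpt2"])
print(row["vanilla_cos_sim_to_canonical"]["google/gemma-2-2b"])
```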
tokenizer_robustness_completion_english_word_reordering/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b35d2f928e6e12dcbc04cf54b9950405c6cda15e56ea99b33996f7d37df61dd5
+size 39384
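The parquet itself is stored in Git LFS; the three lines above are only the pointer. A small sketch, assuming a local checkout after `git lfs pull`, of checking the materialized file against the pointer's size and sha256:

```python
import hashlib
from pathlib import Path

# Path relative to the repo root of a local checkout (after `git lfs pull`).
p = Path(
    "tokenizer_robustness_completion_english_word_reordering/"
    "test-00000-of-00001.parquet"
)
data = p.read_bytes()

# A Git LFS pointer records exactly the object's byte size and sha256 oid.
assert len(data) == 39384
assert hashlib.sha256(data).hexdigest() == (
    "b35d2f928e6e12dcbc04cf54b9950405c6cda15e56ea99b33996f7d37df61dd5"
)
```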