gsaltintas committed · Commit 4e219f4 · verified · 1 Parent(s): f3393e8

Uploading tokenizer_robustness_completion_math_italian subset

README.md CHANGED
@@ -504,6 +504,130 @@ dataset_info:
     num_examples: 21
   download_size: 34859
   dataset_size: 12034
+- config_name: tokenizer_robustness_completion_math_italian
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 11219
+    num_examples: 21
+  download_size: 34631
+  dataset_size: 11219
 configs:
 - config_name: tokenizer_robustness_completion_math_canonical
   data_files:
@@ -521,6 +645,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_math_farsi/test-*
+- config_name: tokenizer_robustness_completion_math_italian
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_math_italian/test-*
 ---
 
 # Dataset Card for Tokenization Robustness Math
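Once this commit lands, the new Italian subset is addressable by its config name like the existing ones. Below is a minimal loading sketch with the `datasets` library; the repository id is a placeholder, since the commit shows only the owner `gsaltintas`, not the repository name.

```python
from datasets import load_dataset

# Placeholder repo id: the commit shows the owner but not the repository name.
REPO_ID = "gsaltintas/tokenizer-robustness-math"

# The config added by this commit; per the card metadata it has a single
# "test" split with 21 multiple-choice examples.
ds = load_dataset(REPO_ID, "tokenizer_robustness_completion_math_italian", split="test")

example = ds[0]
print(example["question"], example["choices"], example["answer_label"])

# Per-tokenizer diagnostics are structs keyed by tokenizer name:
print(example["token_counts"]["gpt2"])                  # int64 token count
print(example["vanilla_cos_sim_to_canonical"]["gpt2"])  # float64 similarity
```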
tokenizer_robustness_completion_math_italian/test-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:912f2b9e68cb3e23e61b7ded5ae4046c047b0717764c96897295ab5949ec0061
-size 34633
+oid sha256:c75e99571039ad2d8e06bebe4e0ce7bd664c52b1546b802851ea4b1957e47d44
+size 34631
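Both sides of this hunk are Git LFS pointer metadata: `oid` is the SHA-256 of the parquet file's contents and `size` is its byte length, so a downloaded shard can be checked against the updated pointer. A minimal sketch, with the local path assumed:

```python
import hashlib
from pathlib import Path

# Assumed local path to the shard this commit replaces.
path = Path("tokenizer_robustness_completion_math_italian/test-00000-of-00001.parquet")

data = path.read_bytes()
print("size  :", len(data))                         # expect 34631 per the new pointer
print("sha256:", hashlib.sha256(data).hexdigest())  # expect oid c75e9957...
```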