Uploading tokenizer_robustness_completion_english_historical_spelling subset
README.md CHANGED
@@ -1372,6 +1372,130 @@ dataset_info:
     num_examples: 40
   download_size: 39734
   dataset_size: 21411
+- config_name: tokenizer_robustness_completion_english_historical_spelling
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 21962
+    num_examples: 40
+  download_size: 40045
+  dataset_size: 21962
 configs:
 - config_name: tokenizer_robustness_completion_english_abbreviations
   data_files:
@@ -1417,6 +1541,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_english_grammatical_errors/test-*
+- config_name: tokenizer_robustness_completion_english_historical_spelling
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_english_historical_spelling/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
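Once this commit lands, the new subset is loadable through the datasets library. A minimal sketch, assuming a placeholder repository ID (the commit does not name the repo); the config name and the 40-example test split come straight from the dataset_info block above:

# Minimal sketch: load the newly added config and check it against the
# dataset_info above. "<org>/<dataset>" is a placeholder repo ID, not
# taken from this commit.
from datasets import load_dataset

ds = load_dataset(
    "<org>/<dataset>",
    "tokenizer_robustness_completion_english_historical_spelling",
    split="test",
)
print(ds.num_rows)                  # expected 40, per num_examples above
print(ds.features["token_counts"])  # struct of per-tokenizer int64 counts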
tokenizer_robustness_completion_english_historical_spelling/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d5f850665777f8ca6897d1fe4350ee6f7a0fda92e5164de6ff88be20af7bafc
+size 40045
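The three lines above are a Git LFS pointer, not the parquet bytes themselves. A minimal sketch of inspecting the split locally, assuming the repo has been cloned and git lfs pull has materialized the real 40045-byte file:

# Minimal sketch: read the split directly from the parquet file with pandas.
# Assumes git lfs pull has already replaced the pointer with the actual file.
import pandas as pd

df = pd.read_parquet(
    "tokenizer_robustness_completion_english_historical_spelling/test-00000-of-00001.parquet"
)
print(len(df))           # expected 40 rows, one per example
print(list(df.columns))  # question, choices, answer, answer_label, ...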