Uploading tokenizer_robustness_completion_english_grammatical_errors subset
README.md CHANGED
@@ -1248,6 +1248,130 @@ dataset_info:
     num_examples: 44
   download_size: 40562
   dataset_size: 23967
+- config_name: tokenizer_robustness_completion_english_grammatical_errors
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 21411
+    num_examples: 40
+  download_size: 39734
+  dataset_size: 21411
 configs:
 - config_name: tokenizer_robustness_completion_english_abbreviations
   data_files:
@@ -1289,6 +1413,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_english_emoji_substitution/test-*
+- config_name: tokenizer_robustness_completion_english_grammatical_errors
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_english_grammatical_errors/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
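
Once this commit lands, the new config is loadable like any other subset. A minimal sketch, assuming the `datasets` library; the repo ID below is a placeholder, not taken from this diff:

from datasets import load_dataset

# REPO_ID is a placeholder -- substitute the Hub dataset repository this
# README belongs to. The config name comes from the diff above.
REPO_ID = "your-org/tokenization-robustness"

ds = load_dataset(
    REPO_ID,
    "tokenizer_robustness_completion_english_grammatical_errors",
    split="test",
)

print(len(ds))  # 40 examples, matching num_examples in the split metadata
row = ds[0]
print(row["question"], row["choices"], row["answer_label"])

# Per-tokenizer measurements are structs keyed by tokenizer name:
print(row["token_counts"]["gpt2"])                  # int64
print(row["vanilla_cos_sim_to_canonical"]["gpt2"])  # float64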

tokenizer_robustness_completion_english_grammatical_errors/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f24c9dcc97ee778cc0d00d5352e8db5411b4753f65f2026607ca35d4d4e338f
+size 39734
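
The parquet file is stored through Git LFS, so the three lines above are the pointer, not the data: the spec version, the SHA-256 of the real file contents, and its size in bytes. A minimal sketch of checking a downloaded copy against the pointer (the local path is an assumption for illustration):

import hashlib

# Path to the downloaded parquet file; assumed to mirror the repo layout.
path = "tokenizer_robustness_completion_english_grammatical_errors/test-00000-of-00001.parquet"

with open(path, "rb") as f:
    data = f.read()

# "size" is the byte length; "oid sha256:..." is the digest of the contents.
assert len(data) == 39734
assert hashlib.sha256(data).hexdigest() == (
    "0f24c9dcc97ee778cc0d00d5352e8db5411b4753f65f2026607ca35d4d4e338f"
)
print("pointer matches file contents")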