orionweller committed on
Commit a8d425c · verified · 1 Parent(s): 0695eb1

Add files using upload-large-folder tool
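The commit message refers to the huggingface_hub upload-large-folder tool. As a rough illustration of that workflow (not the exact command used for this commit; the repo ID and local path below are placeholders), a large dataset folder can be pushed with HfApi.upload_large_folder:

```python
# Hypothetical sketch of the upload-large-folder workflow; repo_id and
# folder_path are placeholders, not values taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="some-org/some-dataset",  # placeholder repo ID
    folder_path="./train",            # placeholder local folder
    repo_type="dataset",
)
```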

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  2. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds +3 -0
  3. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds +3 -0
  4. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds +3 -0
  5. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  6. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  7. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  8. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds +3 -0
  9. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds +3 -0
  10. train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00011.mds +3 -0
  11. train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  12. train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  13. train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  14. train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  15. train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  16. train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  17. train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  18. train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  19. train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  20. train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  21. train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  22. train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  23. train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  24. train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  25. train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  26. train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  27. train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  28. train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  29. train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  30. train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  31. train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  32. train/math-sampled/split_260-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  33. train/math-sampled/split_262-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  34. train/math-sampled/split_3-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  35. train/math-sampled/split_419-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  36. train/math-sampled/split_419-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  37. train/math-sampled/split_419-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  38. train/math-sampled/split_422-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  39. train/math-sampled/split_422-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  40. train/math-sampled/split_422-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  41. train/math-sampled/split_48-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  42. train/math-sampled/split_48-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  43. train/math-sampled/split_48-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  44. train/math-sampled/split_496-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  45. train/math-sampled/split_496-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  46. train/math-sampled/split_496-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  47. train/math-sampled/split_52-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  48. train/math-sampled/split_52-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  49. train/math-sampled/split_52-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  50. train/math-sampled/split_534-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7798f88e71e7733471ba849e036d29607bb472efe69513337f72a7fce2d49d3
+ size 67085530
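Because the .mds shards are tracked with Git LFS, the diff shows only the three-line pointer file (spec version, sha256 OID, and size in bytes) rather than the binary shard itself. A minimal sketch of parsing such a pointer, assuming a local checkout where the pointer text has not been replaced by the real file:

```python
# Minimal sketch: split a Git LFS pointer file into its key/value fields.
# The path below is a placeholder for any pointer file in this commit.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("shard.00000.mds")  # placeholder path
print(pointer["oid"], int(pointer["size"]))     # e.g. sha256:b7798f... 67085530
```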
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9ddf05d69f8a8620b9e977bc208730c12d642b19b598d481ea8cf12dd642196
+ size 67100483
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8d9fb56a94c59fef45ddcaa6806169fdee84f60ec755b9f0b2af30bf8efefe7
+ size 67079242
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76f7240959058fe2a1a26c841a6da39e8d08d2f0b001d318f1495bf0cdb0b95c
+ size 67093123
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:180f8960dcec4f166f75688960705fcd102cb4db06804fa3143c56ba69336ee0
+ size 67076529
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a8f91c048e0f6326ac29d2af4d87517382987c703b2c899f283da219cdca2e7
+ size 67107476
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb968e65cd388a98ff977b12f44a8b2844e13619fcb9c4a1affcb176b85a8ec3
+ size 67097165
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be3d875c43235482cdb1a99c118da73230ab50d74b7aef1717783c648396dc85
+ size 67099336
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb101496aea573abf5aafbc97b45414a055b942b3a1471fac3a59c37f52d85fc
+ size 67103625
train/books-gutenberg-dup-sampled/shard_00005-tokenized-chunked-8192-512-32-backfill-nodups/shard.00011.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:468b20f6b7cadaf6c3414f6e17d849d073efccbc184a727373c4a3936f06f0b3
+ size 62119207
train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 58033568, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12485075, "hashes": {}}}], "version": 2}
train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14223794, "total_tokens_skipped": 0, "percentiles": {"0th": 130, "10th": 280, "20th": 327, "30th": 370, "40th": 410, "50th": 449, "60th": 490, "70th": 534, "80th": 589, "90th": 683, "95th": 799, "99th": 1083, "100th": 1219}}
train/math-sampled/split_126-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57812910, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12464719, "hashes": {}}}], "version": 2}
train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14168637, "total_tokens_skipped": 0, "percentiles": {"0th": 134, "10th": 279, "20th": 324, "30th": 368, "40th": 409, "50th": 449, "60th": 488, "70th": 532, "80th": 589, "90th": 682, "95th": 795, "99th": 1078, "100th": 1215}}
train/math-sampled/split_132-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57671689, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12435486, "hashes": {}}}], "version": 2}
train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14133332, "total_tokens_skipped": 0, "percentiles": {"0th": 123, "10th": 277, "20th": 324, "30th": 368, "40th": 408, "50th": 447, "60th": 486, "70th": 531, "80th": 588, "90th": 681, "95th": 787, "99th": 1077, "100th": 1232}}
train/math-sampled/split_143-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54772644, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11736274, "hashes": {}}}], "version": 2}
train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13408806, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 259, "20th": 303, "30th": 346, "40th": 385, "50th": 423, "60th": 461, "70th": 504, "80th": 558, "90th": 650, "95th": 756, "99th": 1074, "100th": 1225}}
train/math-sampled/split_171-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54249169, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11621692, "hashes": {}}}], "version": 2}
train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13277957, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 257, "20th": 300, "30th": 341, "40th": 380, "50th": 418, "60th": 456, "70th": 499, "80th": 552, "90th": 643, "95th": 749, "99th": 1074, "100th": 1201}}
train/math-sampled/split_189-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106379, "hashes": {}}, "samples": 15977, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20872802, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 62239961, "hashes": {}}, "samples": 14023, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 18951573, "hashes": {}}}], "version": 2}
train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 32023398, "total_tokens_skipped": 0, "percentiles": {"0th": 130, "10th": 734, "20th": 827, "30th": 894, "40th": 953, "50th": 1016, "60th": 1084, "70th": 1165, "80th": 1279, "90th": 1467, "95th": 1663, "99th": 2134, "100th": 2934}}
train/math-sampled/split_205-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 53287424, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11483806, "hashes": {}}}], "version": 2}
train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13037529, "total_tokens_skipped": 0, "percentiles": {"0th": 111, "10th": 257, "20th": 298, "30th": 334, "40th": 372, "50th": 409, "60th": 448, "70th": 490, "80th": 543, "90th": 628, "95th": 726, "99th": 1071, "100th": 1237}}
train/math-sampled/split_256-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_260-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c8a2841157983338ebed637d6f509027b4677d8af1afea7a5105f2e8c969385
+ size 21422463
train/math-sampled/split_262-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64429971a82c0a1b5169386c887e759f8514a8996c357d2bf347236b9332440b
+ size 18244248
train/math-sampled/split_3-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a0d11694ac7d384ed2b1b1775bdf6d87a70683e4e2c0ed6dcb1aaa13470e71f
+ size 17769572
train/math-sampled/split_419-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54451832, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11650853, "hashes": {}}}], "version": 2}
train/math-sampled/split_419-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13328612, "total_tokens_skipped": 0, "percentiles": {"0th": 101, "10th": 258, "20th": 303, "30th": 343, "40th": 382, "50th": 420, "60th": 458, "70th": 500, "80th": 552, "90th": 643, "95th": 751, "99th": 1076, "100th": 1243}}
train/math-sampled/split_419-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_422-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 49492767, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 10818981, "hashes": {}}}], "version": 2}
train/math-sampled/split_422-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 12088598, "total_tokens_skipped": 0, "percentiles": {"0th": 117, "10th": 270, "20th": 301, "30th": 326, "40th": 351, "50th": 374, "60th": 400, "70th": 434, "80th": 480, "90th": 561, "95th": 652, "99th": 1003, "100th": 1334}}
train/math-sampled/split_422-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_48-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107564, "hashes": {}}, "samples": 15110, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20450798, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 66186624, "hashes": {}}, "samples": 14890, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20197040, "hashes": {}}}], "version": 2}
train/math-sampled/split_48-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 33015940, "total_tokens_skipped": 0, "percentiles": {"0th": 272, "10th": 785, "20th": 859, "30th": 918, "40th": 975, "50th": 1037, "60th": 1106, "70th": 1186, "80th": 1302, "90th": 1504, "95th": 1707, "99th": 2153, "100th": 3024}}
train/math-sampled/split_48-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_496-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 48254502, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 10359932, "hashes": {}}}], "version": 2}
train/math-sampled/split_496-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 11779062, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 268, "20th": 296, "30th": 319, "40th": 342, "50th": 365, "60th": 388, "70th": 419, "80th": 460, "90th": 543, "95th": 636, "99th": 979, "100th": 1338}}
train/math-sampled/split_496-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_52-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67101099, "hashes": {}}, "samples": 15122, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20459662, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 66124100, "hashes": {}}, "samples": 14878, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20159086, "hashes": {}}}], "version": 2}
train/math-sampled/split_52-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 32998693, "total_tokens_skipped": 0, "percentiles": {"0th": 235, "10th": 784, "20th": 858, "30th": 918, "40th": 976, "50th": 1037, "60th": 1105, "70th": 1190, "80th": 1305, "90th": 1495, "95th": 1701, "99th": 2143, "100th": 2989}}
train/math-sampled/split_52-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_534-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 48002287, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 10306803, "hashes": {}}}], "version": 2}