orionweller committed (verified)
Commit 6a6c123 · 1 Parent(s): 25b7311

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set of changes.
Files changed (50)
  1. train/math-sampled/split_103-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  2. train/math-sampled/split_103-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  3. train/math-sampled/split_103-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  4. train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  5. train/math-sampled/split_133-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  6. train/math-sampled/split_133-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  7. train/math-sampled/split_133-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  8. train/math-sampled/split_138-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  9. train/math-sampled/split_138-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  10. train/math-sampled/split_138-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  11. train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  12. train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  13. train/math-sampled/split_154-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  14. train/math-sampled/split_154-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  15. train/math-sampled/split_154-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  16. train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  17. train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  18. train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  19. train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  20. train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  21. train/math-sampled/split_216-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  22. train/math-sampled/split_216-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  23. train/math-sampled/split_216-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  24. train/math-sampled/split_224-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  25. train/math-sampled/split_224-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  26. train/math-sampled/split_224-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  27. train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  28. train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  29. train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  30. train/math-sampled/split_240-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  31. train/math-sampled/split_240-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  32. train/math-sampled/split_240-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  33. train/math-sampled/split_267-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  34. train/math-sampled/split_267-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  35. train/math-sampled/split_267-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  36. train/math-sampled/split_279-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  37. train/math-sampled/split_279-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  38. train/math-sampled/split_279-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  39. train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  40. train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  41. train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  42. train/math-sampled/split_369-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  43. train/math-sampled/split_369-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  44. train/math-sampled/split_369-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  45. train/math-sampled/split_384-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  46. train/math-sampled/split_384-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  47. train/math-sampled/split_384-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  48. train/math-sampled/split_391-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  49. train/math-sampled/split_4-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  50. train/math-sampled/split_407-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
train/math-sampled/split_103-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 30818214, "hashes": {}}, "samples": 16000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 6633728, "hashes": {}}}], "version": 2}
train/math-sampled/split_103-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 7552743, "total_tokens_skipped": 0, "percentiles": {"0th": 137, "10th": 279, "20th": 326, "30th": 369, "40th": 408, "50th": 446, "60th": 486, "70th": 530, "80th": 587, "90th": 683, "95th": 802, "99th": 1083, "100th": 1229}}
train/math-sampled/split_103-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_112-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30a56b5d6acaa64b2a82f81ad7988ab47eb141cc4668dde1c1f0e283214889bc
+ size 57877774
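
Large .mds shards are stored through Git LFS, so the diff shows only a three-line pointer file: a spec version, the sha256 oid of the actual content, and its size in bytes. A minimal stdlib-only sketch of parsing such a pointer (the pointer text is copied from the shard above):

```python
# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(text: str) -> dict[str, str]:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:30a56b5d6acaa64b2a82f81ad7988ab47eb141cc4668dde1c1f0e283214889bc\n"
    "size 57877774\n"
)
print(parse_lfs_pointer(pointer)["size"])  # 57877774
```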
train/math-sampled/split_133-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57802264, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12448937, "hashes": {}}}], "version": 2}
train/math-sampled/split_133-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14165984, "total_tokens_skipped": 0, "percentiles": {"0th": 121, "10th": 277, "20th": 326, "30th": 369, "40th": 409, "50th": 448, "60th": 487, "70th": 531, "80th": 588, "90th": 679, "95th": 796, "99th": 1079, "100th": 1209}}
train/math-sampled/split_133-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_138-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57480086, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12407323, "hashes": {}}}], "version": 2}
train/math-sampled/split_138-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14085435, "total_tokens_skipped": 0, "percentiles": {"0th": 121, "10th": 277, "20th": 324, "30th": 367, "40th": 407, "50th": 446, "60th": 485, "70th": 528, "80th": 583, "90th": 677, "95th": 786, "99th": 1077, "100th": 1188}}
train/math-sampled/split_138-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_142-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59a73b1a9d7ca049bda64aba1c7e68e7defbc1e3fb7142abbf9e3993abf88749
+ size 57719610
train/math-sampled/split_149-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd0c8936ad6369a82292d69c050d47c42faf72c588deb9c2d30e7baed52ec659
+ size 67104766
train/math-sampled/split_154-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 56047507, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12050424, "hashes": {}}}], "version": 2}
train/math-sampled/split_154-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13727416, "total_tokens_skipped": 0, "percentiles": {"0th": 125, "10th": 266, "20th": 313, "30th": 355, "40th": 394, "50th": 434, "60th": 473, "70th": 517, "80th": 572, "90th": 663, "95th": 770, "99th": 1078, "100th": 1243}}
train/math-sampled/split_154-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_156-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ce74755d46846bfcbc911f7e13a25ccfbbf97bee67f88da573c8460c735e1b4
+ size 55871933
train/math-sampled/split_159-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d202a47095fc53cd70a6922cc879d6d4905c6ce72f253979258ee5d8b345432f
+ size 55900731
train/math-sampled/split_16-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6209e32ece2641794007007fcd3de0e3e52f2ccdf0ca01e491a15a4bab8a438
+ size 17431204
train/math-sampled/split_160-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc6947cc8594eddb21707a8a713ab6e8124c5f6ac6c5a390090518760b2f8951
+ size 55919197
train/math-sampled/split_169-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:301fac5ae7165298013f5e7d8f2ddbcbf0f5c3592fa8a93b984f0ff75e2b3f7f
+ size 54804760
train/math-sampled/split_216-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54305007, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11623165, "hashes": {}}}], "version": 2}
train/math-sampled/split_216-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13291956, "total_tokens_skipped": 0, "percentiles": {"0th": 110, "10th": 255, "20th": 300, "30th": 341, "40th": 380, "50th": 418, "60th": 456, "70th": 500, "80th": 554, "90th": 646, "95th": 758, "99th": 1079, "100th": 1244}}
train/math-sampled/split_216-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_224-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54152185, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11603629, "hashes": {}}}], "version": 2}
train/math-sampled/split_224-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13253718, "total_tokens_skipped": 0, "percentiles": {"0th": 110, "10th": 257, "20th": 301, "30th": 342, "40th": 382, "50th": 419, "60th": 457, "70th": 497, "80th": 550, "90th": 639, "95th": 739, "99th": 1072, "100th": 1195}}
train/math-sampled/split_224-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 15762309, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 3990314, "hashes": {}}}], "version": 2}
train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 3693249, "total_tokens_skipped": 0, "percentiles": {"0th": 9, "10th": 12, "20th": 13, "30th": 13, "40th": 16, "50th": 19, "60th": 25, "70th": 145, "80th": 217, "90th": 312, "95th": 423, "99th": 1218, "100th": 2821}}
train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_240-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 57781742, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12441980, "hashes": {}}}], "version": 2}
train/math-sampled/split_240-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 14160845, "total_tokens_skipped": 0, "percentiles": {"0th": 129, "10th": 279, "20th": 326, "30th": 370, "40th": 409, "50th": 447, "60th": 486, "70th": 531, "80th": 587, "90th": 680, "95th": 794, "99th": 1082, "100th": 1214}}
train/math-sampled/split_240-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_267-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105813, "hashes": {}}, "samples": 3831, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 21140497, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67108501, "hashes": {}}, "samples": 8496, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20153839, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 37562921, "hashes": {}}, "samples": 19557, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 8096176, "hashes": {}}}], "version": 2}
train/math-sampled/split_267-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 42631293, "total_tokens_skipped": 275, "percentiles": {"0th": 66, "10th": 269, "20th": 328, "30th": 382, "40th": 431, "50th": 481, "60th": 537, "70th": 618, "80th": 877, "90th": 4939, "95th": 8190, "99th": 8191, "100th": 8191}}
train/math-sampled/split_267-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_279-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54379312, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11632469, "hashes": {}}}], "version": 2}
train/math-sampled/split_279-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13310483, "total_tokens_skipped": 0, "percentiles": {"0th": 100, "10th": 258, "20th": 302, "30th": 342, "40th": 382, "50th": 420, "60th": 457, "70th": 501, "80th": 554, "90th": 645, "95th": 750, "99th": 1076, "100th": 1240}}
train/math-sampled/split_279-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_307-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c4fd5cab386cac692b91f68d03809ebea6d4929e35ca6813c4eac2efae04b35
+ size 54145098
train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d25102c2147ea04104c0eca4360671cb93a880f1fae46288b5c70ecdf7aed2c7
+ size 67106562
train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78b3fa5d50c7f2fdd778174e00eb2e91cb7ad094b10a336210f29b859a5c713a
+ size 66218018
train/math-sampled/split_369-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 53013198, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11591063, "hashes": {}}}], "version": 2}
train/math-sampled/split_369-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 12968723, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 272, "20th": 308, "30th": 341, "40th": 370, "50th": 399, "60th": 434, "70th": 474, "80th": 529, "90th": 619, "95th": 726, "99th": 1063, "100th": 1341}}
train/math-sampled/split_369-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_384-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 53076442, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11486635, "hashes": {}}}], "version": 2}
train/math-sampled/split_384-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 12984691, "total_tokens_skipped": 0, "percentiles": {"0th": 133, "10th": 262, "20th": 303, "30th": 338, "40th": 372, "50th": 405, "60th": 441, "70th": 482, "80th": 535, "90th": 624, "95th": 723, "99th": 1065, "100th": 1223}}
train/math-sampled/split_384-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_391-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f2bd2228ada084720587d4ec286453dccd0364c30329565b767c41fea7f797e
+ size 49508493
train/math-sampled/split_4-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b0924172007ff623b112cd86ef240665525b3d5ef2b16ce8e5c6b32364eabbf
+ size 67107211
train/math-sampled/split_407-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 50544831, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11189402, "hashes": {}}}], "version": 2}