Commit 5a28c22 (verified)
orionweller committed · 1 parent: e3adc5d

Add files using upload-large-folder tool
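The commit message indicates these files were pushed with the Hub's large-folder upload tool. As a minimal sketch (the repo id and local path below are placeholders, and this assumes huggingface_hub >= 0.24), an upload of this shape is typically done with HfApi.upload_large_folder, which splits the transfer into small, resumable jobs:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes a token is already stored via `huggingface-cli login`

# Placeholder repo id and folder path, not the real ones from this commit.
# upload_large_folder is meant for repos with many files / many GB: it hashes
# files, pre-uploads the LFS blobs, and commits in resumable batches.
api.upload_large_folder(
    repo_id="your-user/your-dataset",
    repo_type="dataset",
    folder_path="./train",
)
```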
This view is limited to 50 files because the commit contains too many changes; the raw diff has the full changeset.

Files changed (50):
  1. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  2. train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds +3 -0
  3. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  4. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  5. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  6. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds +3 -0
  7. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds +3 -0
  8. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds +3 -0
  9. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds +3 -0
  10. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds +3 -0
  11. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds +3 -0
  12. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds +3 -0
  13. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds +3 -0
  14. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00011.mds +3 -0
  15. train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00012.mds +3 -0
  16. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  17. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds +3 -0
  18. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds +3 -0
  19. train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds +3 -0
  20. train/books-gutenberg-dup-sampled/shard_00014-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  21. train/books-gutenberg-dup-sampled/shard_00014-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  22. train/books-gutenberg-dup-sampled/shard_00014-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds +3 -0
  23. train/math-sampled/split_106-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  24. train/math-sampled/split_147-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  25. train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  26. train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  27. train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  28. train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  29. train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  30. train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  31. train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  32. train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  33. train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  34. train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  35. train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds +3 -0
  36. train/math-sampled/split_230-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  37. train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  38. train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  39. train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  40. train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  41. train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  42. train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  43. train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  44. train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  45. train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  46. train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  47. train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  48. train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  49. train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  50. train/math-sampled/split_366-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea46535b515a12d22dc5db32d467ad1167d64c9d78547f73af5cc99dd25635e2
+ size 67076330
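Each binary .mds shard is checked in through Git LFS, so the diff shows only the three-line pointer file (spec version, SHA-256 of the payload, payload size in bytes) rather than the shard contents. A minimal sketch of parsing such a pointer, using the values from the shard above (the helper name is hypothetical):

```python
def parse_lfs_pointer(text: str) -> dict[str, str]:
    """Split a Git LFS pointer file into its space-separated key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:ea46535b515a12d22dc5db32d467ad1167d64c9d78547f73af5cc99dd25635e2\n"
    "size 67076330\n"
)
assert pointer["size"] == "67076330"  # the shard payload is ~67 MB
```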
train/books-gutenberg-dup-sampled/shard_00001-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23b8363caf68cb536f6b918aa43a9be6882e15d5594005e542bda6d369fa8b2e
+ size 67085190
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fa44e635b2651a1d031f4c01eff20211e2ad6adcea11715132861df8ef8c914
+ size 67091517
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9eae6050fc6ff91e747eeb8899e69be85e9f89f3320a2f93ef70ec2d141521a7
+ size 67083532
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da62c1a8157630ece5d45599b051343154dcaea3b8227c48c593608b83e7b68a
+ size 67085265
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0406053a23a06677f57ee8f115c7ee0e5e17372b1c91e9b863d3283fd143afb1
+ size 67103574
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10e340860872dc74c50849f1c73339ccf7687af8cc6744b1c76c301f2f2e9177
+ size 67105128
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bdb108eafbcab28c86c032f7fa253638d9649f533f3c5153de09f6cd648cdf1
+ size 67108767
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa6e301c13f0a43628a2b823c4a1be444d411ed2c24a2fa65d2cda88e0a46be0
+ size 67093542
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76f9e4a03e3a4519c7ba0d94b6501db68b4f1a29c69334e6f2c8adec7c7af973
+ size 67103398
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f941a7eee4d8dc6affb874cd029b9c8e2c4f21bfe51f41632aa001ba426ffed
+ size 67105123
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:909252e9c9bf812af652ee1bb52d88e65b6cdaa36d43876cc355acc450115a37
+ size 67095047
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:074690eb09140798db3548c1e8986f6f57a5b5d185867390cd3583429fff5cb8
+ size 67081267
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00011.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a0bf74033377b7f4b3d868bb2e56c01e7badc14a647b39b4af9872bce14d85e
+ size 67078068
train/books-gutenberg-dup-sampled/shard_00007-tokenized-chunked-8192-512-32-backfill-nodups/shard.00012.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91d704004e300a2866dd406538696f434957ef4fa7a547ce909137f52338de98
+ size 67102177
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27ecfba192c70e1b3eca9d6cc848078d67ecb659b6c2aa1d4385cf06cc8c7449
+ size 67104246
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0956b101ee9b81410be2a8f6d17d393a5a29ab11945db1f91aedf6e61002b68
+ size 67095160
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:346fe077b0dfcde5e36a917fd4a595a953fde00fdc94195b2932e3859dd43951
+ size 67105606
train/books-gutenberg-dup-sampled/shard_00012-tokenized-chunked-8192-512-32-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fa69a6f2513682955318ff49d7324f307a870e7330542ffb03299d8d276e367
+ size 67105660
train/books-gutenberg-dup-sampled/shard_00014-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:882bce3eae7dfa41c2350acee750fe452969869899ca5934e0e47737ea2a4740
+ size 67083909
train/books-gutenberg-dup-sampled/shard_00014-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39fad9053cfde3966159d81cff8750134b93fdcaa5bb12ba29515b92abfee961
+ size 67082946
train/books-gutenberg-dup-sampled/shard_00014-tokenized-chunked-8192-512-32-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acb858c71c6c3d76240963db2a205dc2708012977d406cbbf8d30c8d441e8187
+ size 66573819
train/math-sampled/split_106-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d12ccea3d3f37a0b0fa9d9b3292069bbf9a75cfd4944b6967bcf49ebb45bf59a
+ size 32659181
train/math-sampled/split_147-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bf3be6e1bd17c79f3beb6efd85be6291d6e0db0ad761b16485f04c4715dbebf
+ size 30089539
train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67105863, "hashes": {}}, "samples": 26298, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 19621712, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16495174, "hashes": {}}, "samples": 3704, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5023429, "hashes": {}}}], "version": 2}
train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7eec801aa3361516c2f43f76a726f62ffaa1227f2e31ee86a37c9bb503b6d09
+ size 16495174
train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 20621550, "total_tokens_skipped": 0, "percentiles": {"0th": 36, "10th": 42, "20th": 42, "30th": 42, "40th": 451, "50th": 824, "60th": 931, "70th": 1031, "80th": 1156, "90th": 1356, "95th": 1548, "99th": 2009, "100th": 8191}}
train/math-sampled/split_15-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54259689, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11626637, "hashes": {}}}], "version": 2}
train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13280593, "total_tokens_skipped": 0, "percentiles": {"0th": 117, "10th": 257, "20th": 301, "30th": 341, "40th": 380, "50th": 418, "60th": 456, "70th": 499, "80th": 553, "90th": 647, "95th": 750, "99th": 1076, "100th": 1221}}
train/math-sampled/split_155-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 56450994, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12141439, "hashes": {}}}], "version": 2}
train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13828237, "total_tokens_skipped": 0, "percentiles": {"0th": 129, "10th": 271, "20th": 316, "30th": 358, "40th": 399, "50th": 437, "60th": 476, "70th": 521, "80th": 574, "90th": 667, "95th": 776, "99th": 1074, "100th": 1220}}
train/math-sampled/split_174-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_2-tokenized-chunked-8192-512-32-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06accfd8ed8509af234a7b0cb86bd0f4cf594286ed294043d8b75eb1d0a72847
+ size 16527748
train/math-sampled/split_230-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6d2f2bd7a909f3deb1accd868a25a14bb4839aa20ced7e907cefb7377d5e5a8
+ size 54336787
train/math-sampled/split_25-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ace31895a2fa5ef1623390dff0e6441f7ca7b793c48d87fcca16dd0f6aba967a
+ size 8938575
train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54365996, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11640266, "hashes": {}}}], "version": 2}
train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13307169, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 257, "20th": 301, "30th": 342, "40th": 382, "50th": 419, "60th": 459, "70th": 501, "80th": 553, "90th": 646, "95th": 744, "99th": 1074, "100th": 1185}}
train/math-sampled/split_312-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54270591, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11616248, "hashes": {}}}], "version": 2}
train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13283294, "total_tokens_skipped": 0, "percentiles": {"0th": 114, "10th": 259, "20th": 302, "30th": 343, "40th": 381, "50th": 419, "60th": 457, "70th": 498, "80th": 551, "90th": 640, "95th": 745, "99th": 1076, "100th": 1230}}
train/math-sampled/split_314-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106562, "hashes": {}}, "samples": 15092, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20427091, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 66218018, "hashes": {}}, "samples": 14908, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20194225, "hashes": {}}}], "version": 2}
train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 33023538, "total_tokens_skipped": 0, "percentiles": {"0th": 300, "10th": 788, "20th": 861, "30th": 919, "40th": 977, "50th": 1037, "60th": 1107, "70th": 1190, "80th": 1305, "90th": 1496, "95th": 1698, "99th": 2158, "100th": 3051}}
train/math-sampled/split_32-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54185605, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11606407, "hashes": {}}}], "version": 2}
train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13262093, "total_tokens_skipped": 0, "percentiles": {"0th": 98, "10th": 256, "20th": 301, "30th": 343, "40th": 381, "50th": 419, "60th": 456, "70th": 498, "80th": 551, "90th": 641, "95th": 747, "99th": 1077, "100th": 1217}}
train/math-sampled/split_340-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render.
train/math-sampled/split_366-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 51010338, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11236035, "hashes": {}}}], "version": 2}