Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- train/math/split_103-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_103-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_103-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_109-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_109-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_109-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_123-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_123-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_123-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_130-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_130-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_130-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_152-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_159-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_159-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_159-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_188-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_200-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_243-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_253-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_253-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_253-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_256-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_294-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_320-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_320-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_320-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_33-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_33-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_33-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_359-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_359-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_359-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_408-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_423-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_456-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_456-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_456-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_468-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/math/split_486-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_486-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_486-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_500-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_500-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_500-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_505-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_505-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
- train/math/split_505-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json +0 -0
- train/math/split_514-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/math/split_514-tokenized-chunked-1024-512-128-backfill-nodups/stats.json +1 -0
train/math/split_103-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 15670513, "hashes": {}}, "samples": 16011, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 5102027, "hashes": {}}}], "version": 2}
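
The index.json manifests in this commit match the format written by MosaicML's `streaming` library (MDSWriter): column names/encodings, zstd compression, and a 64 MiB shard size limit. As a minimal sketch of how such a shard could be written and read back, assuming that library is installed and using a hypothetical local directory:

```python
import numpy as np
from streaming import MDSWriter, StreamingDataset

# Columns/encodings mirror the index.json above: an "id" string and uint16 token ids.
columns = {"id": "str", "input_ids": "ndarray:uint16"}
out_dir = "train/math/split_example"  # hypothetical path, for illustration only

with MDSWriter(out=out_dir, columns=columns, compression="zstd",
               size_limit=67108864) as writer:
    writer.write({"id": "sample-0",
                  "input_ids": np.array([101, 2023, 102], dtype=np.uint16)})

# Reading back: StreamingDataset locates shards via the generated index.json.
ds = StreamingDataset(local=out_dir, shuffle=False)
print(ds[0]["id"], ds[0]["input_ids"][:8])
```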
train/math/split_103-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 7531428, "total_tokens_skipped": 21326, "percentiles": {"0th": 137, "10th": 278, "20th": 326, "30th": 369, "40th": 408, "50th": 446, "60th": 486, "70th": 530, "80th": 587, "90th": 683, "95th": 801, "99th": 1022, "100th": 1022}}
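
Each stats.json records the total tokens written and skipped for its split, plus a percentile summary of per-chunk token counts (capped near the 1024-token chunk length). The exact computation is not part of this diff; a rough sketch of how such a summary could be derived, with the percentile interpolation mode as an assumption:

```python
import json
import numpy as np

def summarize_token_counts(chunk_lengths, skipped=0, duplicated=0):
    """Build a stats.json-style dict from per-chunk token counts (assumed semantics)."""
    pts = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99, 100]
    vals = np.percentile(chunk_lengths, pts, method="lower").astype(int)
    return {
        "total_duplicated_tokens": duplicated,
        "total_tokens_written": int(np.sum(chunk_lengths)),
        "total_tokens_skipped": skipped,
        "percentiles": {f"{p}th": int(v) for p, v in zip(pts, vals)},
    }

# Example with a handful of illustrative chunk lengths
print(json.dumps(summarize_token_counts([137, 278, 446, 801, 1022])))
```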
train/math/split_103-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_109-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 29327682, "hashes": {}}, "samples": 30020, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 9541930, "hashes": {}}}], "version": 2}
train/math/split_109-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14094285, "total_tokens_skipped": 38465, "percentiles": {"0th": 116, "10th": 277, "20th": 324, "30th": 367, "40th": 407, "50th": 446, "60th": 486, "70th": 530, "80th": 585, "90th": 682, "95th": 795, "99th": 1022, "100th": 1022}}
train/math/split_109-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_123-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 29332738, "hashes": {}}, "samples": 30029, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 9548064, "hashes": {}}}], "version": 2}
train/math/split_123-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14096651, "total_tokens_skipped": 40161, "percentiles": {"0th": 130, "10th": 277, "20th": 325, "30th": 369, "40th": 407, "50th": 446, "60th": 486, "70th": 530, "80th": 586, "90th": 677, "95th": 789, "99th": 1022, "100th": 1022}}
train/math/split_123-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_130-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 29366113, "hashes": {}}, "samples": 30028, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 9564566, "hashes": {}}}], "version": 2}
train/math/split_130-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 14113341, "total_tokens_skipped": 36288, "percentiles": {"0th": 105, "10th": 278, "20th": 325, "30th": 368, "40th": 407, "50th": 447, "60th": 487, "70th": 531, "80th": 588, "90th": 682, "95th": 792, "99th": 1022, "100th": 1022}}
train/math/split_130-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_152-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53bd3e3ab3c2f546dff15e4f53316c571b2f38337c632759a1573a92a6e4a0ad
+size 28498612
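
The shard.00000.mds entries that show only `version`/`oid`/`size` lines are Git LFS pointer files; the actual shard bytes live in LFS storage. A small sketch for checking a downloaded shard against its pointer (paths are illustrative, not part of this repo):

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    """Return True if blob_path has the sha256 oid and byte size recorded in the pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Example usage after fetching the real blob with `git lfs pull`:
# pointer = "version https://git-lfs.github.com/spec/v1\noid sha256:...\nsize 28498612"
# print(matches_lfs_pointer(pointer, "path/to/shard.00000.mds"))
```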
train/math/split_159-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 28449966, "hashes": {}}, "samples": 30033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 9202417, "hashes": {}}}], "version": 2}
train/math/split_159-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13655459, "total_tokens_skipped": 35306, "percentiles": {"0th": 112, "10th": 265, "20th": 311, "30th": 355, "40th": 393, "50th": 433, "60th": 472, "70th": 516, "80th": 569, "90th": 659, "95th": 764, "99th": 1022, "100th": 1022}}
train/math/split_159-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_188-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13209953, "total_tokens_skipped": 33456, "percentiles": {"0th": 114, "10th": 256, "20th": 301, "30th": 341, "40th": 381, "50th": 417, "60th": 455, "70th": 498, "80th": 549, "90th": 637, "95th": 746, "99th": 1022, "100th": 1022}}
train/math/split_200-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c507381e46f42563fcfcc4723b94fc863774b9d9a0448b78bebd722217f68a9d
+size 27701455
train/math/split_243-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be177fb4bf606ccf42fea4eec63ef9c15ab11d050cd1b2b2b8b17eecac8cc166
+size 27708871
train/math/split_253-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 27438623, "hashes": {}}, "samples": 30026, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8829180, "hashes": {}}}], "version": 2}
train/math/split_253-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13150169, "total_tokens_skipped": 35614, "percentiles": {"0th": 122, "10th": 257, "20th": 300, "30th": 339, "40th": 377, "50th": 416, "60th": 453, "70th": 496, "80th": 546, "90th": 634, "95th": 735, "99th": 1022, "100th": 1022}}
train/math/split_253-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_256-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9759f38029646917ed5541752dcab3d82f166a92e4ed3358bf601983e03f3e55
+size 27151692
train/math/split_294-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd9b00799d2f30774280cab45bd51f44101b63c826e162e420d27eb440997bb1
+size 27022838
train/math/split_320-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 27658218, "hashes": {}}, "samples": 30022, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8876218, "hashes": {}}}], "version": 2}
train/math/split_320-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13260075, "total_tokens_skipped": 34297, "percentiles": {"0th": 114, "10th": 256, "20th": 300, "30th": 342, "40th": 381, "50th": 419, "60th": 457, "70th": 500, "80th": 552, "90th": 644, "95th": 748, "99th": 1022, "100th": 1022}}
train/math/split_320-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_33-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 66774036, "hashes": {}}, "samples": 40519, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 35634755, "hashes": {}}}], "version": 2}
train/math/split_33-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 32556572, "total_tokens_skipped": 329714, "percentiles": {"0th": 133, "10th": 476, "20th": 613, "30th": 720, "40th": 799, "50th": 861, "60th": 915, "70th": 969, "80th": 1022, "90th": 1022, "95th": 1022, "99th": 1022, "100th": 1023}}
train/math/split_33-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_359-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 27766790, "hashes": {}}, "samples": 30023, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8921439, "hashes": {}}}], "version": 2}
train/math/split_359-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 13314241, "total_tokens_skipped": 33638, "percentiles": {"0th": 112, "10th": 259, "20th": 302, "30th": 343, "40th": 383, "50th": 421, "60th": 459, "70th": 501, "80th": 553, "90th": 644, "95th": 754, "99th": 1022, "100th": 1022}}
train/math/split_359-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_408-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39d75587d4838c4267da24be47abe6b6216cfecbb55bb80d8678483f4d8a14a9
+size 25711920
train/math/split_423-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c11d7dd436e26607315931549e2bff42e4da5ec6aac944d54a8c6f109880c62
+size 25666123
train/math/split_456-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 24756970, "hashes": {}}, "samples": 30010, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8294683, "hashes": {}}}], "version": 2}
train/math/split_456-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 11809165, "total_tokens_skipped": 9505, "percentiles": {"0th": 120, "10th": 267, "20th": 296, "30th": 320, "40th": 344, "50th": 367, "60th": 391, "70th": 421, "80th": 464, "90th": 546, "95th": 634, "99th": 952, "100th": 1022}}
train/math/split_456-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_468-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6e7a72d36b9021b0805a80eaa9da6625a9757f3e530d56aa887f90a823a5c7d
+size 24612273
train/math/split_486-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 24568755, "hashes": {}}, "samples": 30008, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8172539, "hashes": {}}}], "version": 2}
train/math/split_486-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 11715113, "total_tokens_skipped": 9455, "percentiles": {"0th": 139, "10th": 267, "20th": 296, "30th": 318, "40th": 341, "50th": 364, "60th": 387, "70th": 417, "80th": 458, "90th": 538, "95th": 629, "99th": 948, "100th": 1022}}
train/math/split_486-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_500-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 24603726, "hashes": {}}, "samples": 30008, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8188470, "hashes": {}}}], "version": 2}
train/math/split_500-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 11732566, "total_tokens_skipped": 9868, "percentiles": {"0th": 159, "10th": 268, "20th": 296, "30th": 319, "40th": 343, "50th": 365, "60th": 388, "70th": 418, "80th": 458, "90th": 539, "95th": 628, "99th": 960, "100th": 1022}}
train/math/split_500-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_505-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 15866824, "hashes": {}}, "samples": 30015, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 6444177, "hashes": {}}}], "version": 2}
train/math/split_505-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 7237670, "total_tokens_skipped": 1997, "percentiles": {"0th": 8, "10th": 122, "20th": 145, "30th": 167, "40th": 189, "50th": 212, "60th": 240, "70th": 275, "80th": 321, "90th": 395, "95th": 467, "99th": 666, "100th": 1022}}
train/math/split_505-tokenized-chunked-1024-512-128-backfill-nodups/token_decile.json
ADDED
The diff for this file is too large to render.
train/math/split_514-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 24618082, "hashes": {}}, "samples": 30008, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 8203179, "hashes": {}}}], "version": 2}
train/math/split_514-tokenized-chunked-1024-512-128-backfill-nodups/stats.json
ADDED
@@ -0,0 +1 @@
+{"total_duplicated_tokens": 0, "total_tokens_written": 11739762, "total_tokens_skipped": 8965, "percentiles": {"0th": 133, "10th": 268, "20th": 296, "30th": 319, "40th": 343, "50th": 365, "60th": 389, "70th": 419, "80th": 461, "90th": 538, "95th": 628, "99th": 934, "100th": 1022}}