Don't use datasets.data_files.* functions

It was overkill to use them, and, because of a breaking change, it was no longer working.
github-code.py (+6, −15) — CHANGED
|
@@ -20,8 +20,6 @@ import pyarrow as pa
 import pyarrow.parquet as pq

 import datasets
-from huggingface_hub import HfApi, HfFolder
-from datasets.data_files import DataFilesDict

 _REPO_NAME = "codeparrot/github-code"

@@ -160,19 +158,12 @@ class GithubCode(datasets.GeneratorBasedBuilder):
         )

     def _split_generators(self, dl_manager):
-
-
-
-
-
-
-        patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info)
-        data_files = datasets.data_files.DataFilesDict.from_hf_repo(
-            patterns,
-            dataset_info=hfh_dataset_info,
-        )
-
-        files = dl_manager.download_and_extract(data_files["train"])
+        num_shards = 1126
+        data_files = [
+            f"data/train-{_index:05d}-of-{num_shards:05d}-{shard._fingerprint}.parquet"
+            for _index in range(num_shards)
+        ]
+        files = dl_manager.download(data_files)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
|
|
|
After (excerpt of github-code.py):

 20  import pyarrow.parquet as pq
 21
 22  import datasets
 23
 24  _REPO_NAME = "codeparrot/github-code"
 25

...

158      )
159
160      def _split_generators(self, dl_manager):
161          num_shards = 1126
162          data_files = [
163              f"data/train-{_index:05d}-of-{num_shards:05d}-{shard._fingerprint}.parquet"
164              for _index in range(num_shards)
165          ]
166          files = dl_manager.download(data_files)
167          return [
168              datasets.SplitGenerator(
169                  name=datasets.Split.TRAIN,