BeeGass committed on
Commit
d7643ae
·
verified ·
1 Parent(s): 3f1d630

Upload permutation-groups.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. permutation-groups.py +52 -17
permutation-groups.py CHANGED
@@ -1,6 +1,7 @@
1
  import datasets
2
  import json
3
  import os
 
4
  from sympy.combinatorics import Permutation
5
  from sympy.combinatorics.named_groups import AlternatingGroup, SymmetricGroup
6
 
@@ -75,6 +76,13 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
75
  group_degree=7,
76
  group_order=2520,
77
  ),
 
 
 
 
 
 
 
78
  ]
79
 
80
  DEFAULT_CONFIG_NAME = "s5_data"
@@ -91,37 +99,64 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
91
  )
92
 
93
  def _split_generators(self, dl_manager):
94
- # Construct the full URL for the data folder on the Hugging Face Hub
95
- remote_data_url = f"{_HOMEPAGE}/resolve/main/data/{self.config.name}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
- # Download the data folder
98
- downloaded_path = dl_manager.download_and_extract(remote_data_url)
99
 
100
  return [
101
  datasets.SplitGenerator(
102
  name=datasets.Split.TRAIN,
103
  gen_kwargs={
104
- "filepath": os.path.join(downloaded_path, "train", "data-*.arrow"),
105
  "split": "train",
106
  },
107
  ),
108
  datasets.SplitGenerator(
109
  name=datasets.Split.TEST,
110
  gen_kwargs={
111
- "filepath": os.path.join(downloaded_path, "test", "data-*.arrow"),
112
  "split": "test",
113
  },
114
  ),
115
  ]
116
 
117
- def _generate_examples(self, filepath, split):
118
- # Load the Arrow files directly
119
- for i, file in enumerate(dl_manager.iter_files(filepath)):
120
- # Assuming each .arrow file contains a table that can be converted to a list of dicts
121
- # For simplicity, we'll load it as a Dataset and iterate
122
- dataset = datasets.Dataset.from_file(file)
123
- for id_, row in enumerate(dataset):
124
- yield f"{split}_{i}_{id_}", {
125
- "input_sequence": row["input_sequence"],
126
- "target": row["target"],
127
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import datasets
2
  import json
3
  import os
4
+ import glob
5
  from sympy.combinatorics import Permutation
6
  from sympy.combinatorics.named_groups import AlternatingGroup, SymmetricGroup
7
 
 
76
  group_degree=7,
77
  group_order=2520,
78
  ),
79
+ PermutationGroupsConfig(
80
+ name="all",
81
+ description="All Permutation Composition Datasets (S3-S7 and A5-A7).",
82
+ group_name="All",
83
+ group_degree=None,
84
+ group_order=None,
85
+ ),
86
  ]
87
 
88
  DEFAULT_CONFIG_NAME = "s5_data"
 
99
  )
100
 
101
  def _split_generators(self, dl_manager):
102
+ # Handle the "all" configuration specially
103
+ if self.config.name == "all":
104
+ # Get all individual dataset configurations
105
+ all_configs = ["s3_data", "s4_data", "s5_data", "s6_data", "s7_data",
106
+ "a5_data", "a6_data", "a7_data"]
107
+
108
+ data_files = {
109
+ "train": [f"data/{config}/train/*.arrow" for config in all_configs],
110
+ "test": [f"data/{config}/test/*.arrow" for config in all_configs],
111
+ }
112
+ else:
113
+ # The data is stored in nested folders: data/{config_name}/train and data/{config_name}/test
114
+ data_files = {
115
+ "train": f"data/{self.config.name}/train/*.arrow",
116
+ "test": f"data/{self.config.name}/test/*.arrow",
117
+ }
118
 
119
+ # Download the files
120
+ downloaded_files = dl_manager.download(data_files)
121
 
122
  return [
123
  datasets.SplitGenerator(
124
  name=datasets.Split.TRAIN,
125
  gen_kwargs={
126
+ "filepaths": downloaded_files["train"],
127
  "split": "train",
128
  },
129
  ),
130
  datasets.SplitGenerator(
131
  name=datasets.Split.TEST,
132
  gen_kwargs={
133
+ "filepaths": downloaded_files["test"],
134
  "split": "test",
135
  },
136
  ),
137
  ]
138
 
139
+ def _generate_examples(self, filepaths, split):
140
+ # Handle both single file path and list of file paths
141
+ if isinstance(filepaths, str):
142
+ filepaths = [filepaths]
143
+
144
+ # Generate examples from all arrow files
145
+ example_id = 0
146
+ for filepath in filepaths:
147
+ # Handle glob patterns
148
+ if "*" in filepath:
149
+ files = glob.glob(filepath)
150
+ else:
151
+ files = [filepath]
152
+
153
+ for file in files:
154
+ if os.path.exists(file):
155
+ # Load the Arrow file
156
+ dataset = datasets.Dataset.from_file(file)
157
+ for row in dataset:
158
+ yield example_id, {
159
+ "input_sequence": row["input_sequence"],
160
+ "target": row["target"],
161
+ }
162
+ example_id += 1