Datasets: BeeGass/permutation-groups

Update loading script to v5.0.0 with all 10 groups and dynamic filtering

permutation-groups.py (CHANGED, +96 -207)
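For context on what "dynamic filtering" means for downstream users: extra keyword arguments passed to `datasets.load_dataset` are forwarded into `PermutationGroupsConfig`. A minimal usage sketch, assuming the keyword names match the config arguments documented in the diff below (`min_degree`, `max_degree`, `min_order`, `max_order`, `min_len`, `max_len`):

```python
from datasets import load_dataset

# Hypothetical usage sketch: select symmetric groups of degree 3 through 6.
# Extra kwargs are passed through to PermutationGroupsConfig by the datasets
# library; trust_remote_code is required by recent versions of datasets for
# script-based loaders like this one.
ds = load_dataset(
    "BeeGass/permutation-groups",
    "symmetric",
    min_degree=3,
    max_degree=6,
    trust_remote_code=True,
)
print(ds["train"][0])
```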
```diff
@@ -1,41 +1,12 @@
 import datasets
 import json
 import os
-import …
+import pandas as pd
 
 _DESCRIPTION = "Permutation composition datasets with dynamic filtering by group degree, order, and sequence length."
 _HOMEPAGE = "https://huggingface.co/datasets/BeeGass/permutation-groups"
 _LICENSE = "MIT"
 
-# TEMPORARY: Define the actual file structure explicitly
-# TODO: Revert to wildcard patterns once datasets library supports them properly
-_DATA_FILES = {
-    "symmetric_superset": {
-        "train": ["data-00000-of-00003.arrow", "data-00001-of-00003.arrow", "data-00002-of-00003.arrow"],
-        "test": ["data-00000-of-00001.arrow"]
-    },
-    "alternating_superset": {
-        "train": ["data-00000-of-00003.arrow", "data-00001-of-00003.arrow", "data-00002-of-00003.arrow"],
-        "test": ["data-00000-of-00001.arrow"]
-    },
-    "cyclic_superset": {
-        "train": ["data-00000-of-00001.arrow"],
-        "test": ["data-00000-of-00001.arrow"]
-    },
-    "dihedral_superset": {
-        "train": ["data-00000-of-00001.arrow"],
-        "test": ["data-00000-of-00001.arrow"]
-    },
-    "psl25_data": {
-        "train": ["data-00000-of-00001.arrow"],
-        "test": ["data-00000-of-00001.arrow"]
-    },
-    "f20_data": {
-        "train": ["data-00000-of-00001.arrow"],
-        "test": ["data-00000-of-00001.arrow"]
-    }
-}
-
 class PermutationGroupsConfig(datasets.BuilderConfig):
     def __init__(
         self,
```
```diff
@@ -52,7 +23,8 @@ class PermutationGroupsConfig(datasets.BuilderConfig):
         Configuration for loading permutation groups.
 
         Args:
-            group_type: Type of group (symmetric, alternating, cyclic, dihedral, …
+            group_type: Type of group (symmetric, alternating, cyclic, dihedral, klein,
+                quaternion, elementary_abelian, psl, frobenius, mathieu)
             min_degree: Minimum group degree to include
             max_degree: Maximum group degree to include
             min_order: Minimum group order to include
```
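The constructor body itself is elided from the diff. A minimal sketch of what it presumably stores, inferred from the documented arguments and the `self.config.*` attributes used later in `_split_generators` and `_generate_examples` (a reconstruction, not the author's verbatim code):

```python
import datasets

# Hypothetical reconstruction of the elided constructor body.
class PermutationGroupsConfig(datasets.BuilderConfig):
    def __init__(self, group_type=None, min_degree=None, max_degree=None,
                 min_order=None, max_order=None, min_len=None, max_len=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.group_type = group_type    # one of GROUP_TYPES, or None for all
        self.min_degree = min_degree    # degree bounds (inclusive)
        self.max_degree = max_degree
        self.min_order = min_order      # order bounds (inclusive)
        self.max_order = max_order
        self.min_len = min_len          # sequence-length bounds (inclusive)
        self.max_len = max_len
```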
```diff
@@ -79,10 +51,14 @@ class PermutationGroupsConfig(datasets.BuilderConfig):
 class PermutationGroups(datasets.GeneratorBasedBuilder):
     """Permutation groups dataset with dynamic filtering."""
 
-    VERSION = datasets.Version("…
+    VERSION = datasets.Version("5.0.0")
 
-    # Define available group types
-    GROUP_TYPES = […
+    # Define all available group types
+    GROUP_TYPES = [
+        "symmetric", "alternating", "cyclic", "dihedral",
+        "klein", "quaternion", "elementary_abelian", "psl",
+        "frobenius", "mathieu"
+    ]
 
     BUILDER_CONFIGS = []
 
```
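Between this hunk and the next, the diff skips the unchanged loop that fills `BUILDER_CONFIGS` (its two closing parentheses are the context lines that open the next hunk). A hypothetical sketch of that loop, registering one config per entry in `GROUP_TYPES`, consistent with the default config name "symmetric":

```python
# Hypothetical sketch of the elided, unchanged registration loop.
for group_type in GROUP_TYPES:
    BUILDER_CONFIGS.append(
        PermutationGroupsConfig(
            name=group_type,
            description=f"All {group_type} groups with dynamic filtering",
            group_type=group_type,
        )
    )
```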
```diff
@@ -105,66 +81,6 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
             )
         )
 
-    # Keep backwards compatibility configs
-    LEGACY_GROUPS = {
-        "s3": ("symmetric", 3, 3), "s4": ("symmetric", 4, 4), "s5": ("symmetric", 5, 5),
-        "s6": ("symmetric", 6, 6), "s7": ("symmetric", 7, 7), "s8": ("symmetric", 8, 8),
-        "s9": ("symmetric", 9, 9), "s10": ("symmetric", 10, 10),
-        "a3": ("alternating", 3, 3), "a4": ("alternating", 4, 4), "a5": ("alternating", 5, 5),
-        "a6": ("alternating", 6, 6), "a7": ("alternating", 7, 7), "a8": ("alternating", 8, 8),
-        "a9": ("alternating", 9, 9), "a10": ("alternating", 10, 10),
-        "c3": ("cyclic", 3, 3), "c4": ("cyclic", 4, 4), "c5": ("cyclic", 5, 5),
-        "c6": ("cyclic", 6, 6), "c7": ("cyclic", 7, 7), "c8": ("cyclic", 8, 8),
-        "c9": ("cyclic", 9, 9), "c10": ("cyclic", 10, 10), "c12": ("cyclic", 12, 12),
-        "c15": ("cyclic", 15, 15), "c20": ("cyclic", 20, 20), "c25": ("cyclic", 25, 25),
-        "c30": ("cyclic", 30, 30),
-        "z3": ("cyclic", 3, 3), "z4": ("cyclic", 4, 4), "z5": ("cyclic", 5, 5), "z6": ("cyclic", 6, 6),
-        "d3": ("dihedral", 3, 3), "d4": ("dihedral", 4, 4), "d5": ("dihedral", 5, 5),
-        "d6": ("dihedral", 6, 6), "d7": ("dihedral", 7, 7), "d8": ("dihedral", 8, 8),
-        "d9": ("dihedral", 9, 9), "d10": ("dihedral", 10, 10), "d12": ("dihedral", 12, 12),
-        "d15": ("dihedral", 15, 15), "d20": ("dihedral", 20, 20),
-    }
-
-    for name, (group_type, min_deg, max_deg) in LEGACY_GROUPS.items():
-        # Simple name (e.g., "s5")
-        BUILDER_CONFIGS.append(
-            PermutationGroupsConfig(
-                name=name,
-                description=f"Legacy config for {name.upper()}",
-                group_type=group_type,
-                min_degree=min_deg,
-                max_degree=max_deg,
-            )
-        )
-        # Old style name (e.g., "s5_data")
-        BUILDER_CONFIGS.append(
-            PermutationGroupsConfig(
-                name=f"{name}_data",
-                description=f"Legacy config for {name.upper()}",
-                group_type=group_type,
-                min_degree=min_deg,
-                max_degree=max_deg,
-            )
-        )
-
-    # Add legacy configs for special groups
-    BUILDER_CONFIGS.extend([
-        PermutationGroupsConfig(
-            name="psl25_data",
-            description="Legacy config for PSL(2,5)",
-            group_type="psl25",
-            min_degree=6,
-            max_degree=6,
-        ),
-        PermutationGroupsConfig(
-            name="f20_data",
-            description="Legacy config for F20",
-            group_type="f20",
-            min_degree=5,
-            max_degree=5,
-        ),
-    ])
-
     DEFAULT_CONFIG_NAME = "symmetric"
 
     def _info(self):
```
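Since every legacy config name ("s3" through "d20", their "_data" variants, "psl25_data", "f20_data") is removed here, loads that used those names must switch to a group-family config plus degree filters. A hypothetical equivalent of the removed "d12" config:

```python
from datasets import load_dataset

# Hypothetical migration example: the removed "d12" legacy config becomes
# the "dihedral" family restricted to degree 12.
d12 = load_dataset(
    "BeeGass/permutation-groups",
    "dihedral",
    min_degree=12,
    max_degree=12,
    trust_remote_code=True,
)
```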
```diff
@@ -185,137 +101,110 @@ class PermutationGroups(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         # Determine which datasets to load
         if self.config.group_type:
-            …
-            datasets_to_load = [f"{self.config.group_type}_data"]
-        else:
-            # Load the superset for this group type
-            datasets_to_load = [f"{self.config.group_type}_superset"]
+            # Load the superset for this group type
+            datasets_to_load = [f"{self.config.group_type}_superset"]
         else:
             # Load all supersets
-            datasets_to_load = [
-                …
+            datasets_to_load = [
+                "symmetric_superset", "alternating_superset",
+                "cyclic_superset", "dihedral_superset",
+                "klein_superset", "quaternion_superset",
+                "elementary_abelian_superset", "psl_superset",
+                "frobenius_superset", "mathieu_superset"
+            ]
 
-        # …
-        # TODO: Revert to wildcard pattern once supported:
-        # data_urls = {
-        #     "train": f"data/{dataset_name}/train/data-*-of-*.arrow",
-        #     "test": f"data/{dataset_name}/test/data-*-of-*.arrow",
-        # }
+        # Build file URLs using wildcards
         train_urls = []
         test_urls = []
 
         for dataset_name in datasets_to_load:
-            …
-            for filename in _DATA_FILES[dataset_name]["train"]:
-                train_urls.append(f"data/{dataset_name}/train/{filename}")
-            for filename in _DATA_FILES[dataset_name]["test"]:
-                test_urls.append(f"data/{dataset_name}/test/{filename}")
+            train_urls.append(f"data/{dataset_name}/train/data-*.arrow")
+            test_urls.append(f"data/{dataset_name}/test/data-*.arrow")
 
         # Download files
-        …
-        datasets.…
-        …
-        datasets.…
-        …
-        ]
+        downloaded_files = dl_manager.download({
+            "train": train_urls,
+            "test": test_urls
+        })
+
+        # Flatten the lists of files
+        train_files = []
+        test_files = []
+
+        for file_list in downloaded_files["train"]:
+            if isinstance(file_list, list):
+                train_files.extend(file_list)
+            else:
+                train_files.append(file_list)
+
+        for file_list in downloaded_files["test"]:
+            if isinstance(file_list, list):
+                test_files.extend(file_list)
+            else:
+                test_files.append(file_list)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "files": train_files,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "files": test_files,
+                },
+            ),
+        ]
 
     def _generate_examples(self, files):
         """Yield examples with filtering."""
         idx = 0
-        total_examined = 0
-        total_filtered_out = 0
 
         for file_path in files:
-            # Load the …
-            …
+            # Load the Arrow file
+            table = datasets.table.read_table(file_path)
 
-            # Convert to pandas for easier …
-            df = …
+            # Convert to pandas for easier filtering
+            df = table.to_pandas()
 
-            # …
-            …
-                continue
-
-            # Yield the example
+            # Apply filters
+            mask = pd.Series([True] * len(df))
+
+            # Filter by group type (if specified in config)
+            if self.config.group_type:
+                mask &= (df["group_type"] == self.config.group_type)
+
+            # Filter by degree
+            if self.config.min_degree is not None:
+                mask &= (df["group_degree"] >= self.config.min_degree)
+            if self.config.max_degree is not None:
+                mask &= (df["group_degree"] <= self.config.max_degree)
+
+            # Filter by order
+            if self.config.min_order is not None:
+                mask &= (df["group_order"] >= self.config.min_order)
+            if self.config.max_order is not None:
+                mask &= (df["group_order"] <= self.config.max_order)
+
+            # Filter by sequence length
+            if self.config.min_len is not None:
+                mask &= (df["sequence_length"] >= self.config.min_len)
+            if self.config.max_len is not None:
+                mask &= (df["sequence_length"] <= self.config.max_len)
+
+            # Apply mask
+            filtered_df = df[mask]
+
+            # Yield filtered examples
+            for _, row in filtered_df.iterrows():
                 yield idx, {
                     "input_sequence": row["input_sequence"],
                     "target": row["target"],
-                    "group_type": row…
-                    "group_degree": int(row…
-                    "group_order": int(row…
-                    "sequence_length": int(row…
+                    "group_type": row["group_type"],
+                    "group_degree": int(row["group_degree"]),
+                    "group_order": int(row["group_order"]),
+                    "sequence_length": int(row["sequence_length"]),
                 }
-            idx += 1
-
-        # Log a warning if all examples were filtered out
-        if idx == 0 and total_examined > 0:
-            import warnings
-            warnings.warn(
-                f"All {total_examined} examples were filtered out with the current configuration:\n"
-                f"  group_type={self.config.group_type}\n"
-                f"  degree_range=[{self.config.min_degree}, {self.config.max_degree}]\n"
-                f"  order_range=[{self.config.min_order}, {self.config.max_order}]\n"
-                f"  length_range=[{self.config.min_len}, {self.config.max_len}]\n"
-                f"This might be expected if the requested configuration doesn't exist in the dataset."
-            )
+                idx += 1
```
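One design note on the new `_split_generators`: `dl_manager.download` is given a list of wildcard patterns, and each pattern may resolve to several Arrow shards, so the result can be a list of lists; the explicit loops above normalize it either way. A more compact, equivalent flattening (a sketch; `flatten` is a name introduced here, not part of the script):

```python
from itertools import chain

def flatten(items):
    # Each wildcard pattern may resolve to a list of files; single files
    # come back as plain strings. Normalize both cases into one flat list.
    return list(chain.from_iterable(
        x if isinstance(x, list) else [x] for x in items
    ))

# e.g. train_files = flatten(downloaded_files["train"])
```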
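Finally, a small self-contained illustration of the boolean-mask filtering that `_generate_examples` now performs (toy data, not drawn from the dataset):

```python
import pandas as pd

# Toy frame with the metadata columns the script filters on.
df = pd.DataFrame({
    "group_type": ["symmetric", "cyclic", "symmetric"],
    "group_degree": [5, 12, 9],
    "sequence_length": [4, 10, 6],
})

# Start with an all-True mask, then AND in each configured constraint.
mask = pd.Series([True] * len(df))
mask &= (df["group_type"] == "symmetric")
mask &= (df["group_degree"] >= 3) & (df["group_degree"] <= 8)

print(df[mask])  # only the symmetric, degree-5 row survives
```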