Dataset: imageomics/rare-species

Commit ef566ad
Parent(s): fe5f0d7

Update dataset structure and provide legacy format export instructions

Files changed:
- README.md +66 -1
- scripts/export_rare_species.py +58 -0
- scripts/requirements.txt +4 -0
README.md CHANGED
@@ -108,9 +108,74 @@ Baseline for Random guessing is 0.3.
English, Latin

## Dataset Structure

The repository consists of the following:
```
├── data
│   ├── train-00000-of-00009.parquet
│   ├── ...
│   └── train-00008-of-00009.parquet
├── metadata
│   ├── licenses.csv
│   └── rarespecies-catalog.csv
├── metadata.csv
├── README.md
└── visuals
    ├── phyla_ToL_tree.html
    ├── phyla_ToL_tree.pdf
    └── phyla_ToL_tree.png
```
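
The CSVs under `metadata/` can also be read straight from the Hub without cloning. For example, with `polars` (a minimal sketch; it assumes only the standard `resolve/main` URL pattern used elsewhere in this repository):

```python
import polars as pl

# Spot-check the rare-species catalog directly from the Hub (illustrative)
catalog = pl.read_csv(
    "https://huggingface.co/datasets/imageomics/rare-species/resolve/main/metadata/rarespecies-catalog.csv"
)
print(catalog.shape)
```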

To load the dataset, use the `datasets` library.
```python
from datasets import load_dataset

ds = load_dataset("imageomics/rare-species", split="train")
```

You will have:
```python
ds
Dataset({
    features: ['file_name', 'rarespecies_id', 'eol_content_id', 'eol_page_id', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'sciName', 'common'],
    num_rows: 11983
})
```
For example, the first entry, `ds[0]`, contains:
```python
{'file_name': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=400x400 at 0x14F5F54C9940>,
 'rarespecies_id': '75fd91cb-2881-41cd-88e6-de451e8b60e2',
 'eol_content_id': '12853737',
 'eol_page_id': '449393',
 'kingdom': 'Animalia',
 'phylum': 'Mollusca',
 'class': 'Bivalvia',
 'order': 'Unionida',
 'family': 'Unionidae',
 'genus': 'Cyclonaias',
 'species': 'tuberculata',
 'sciName': 'Cyclonaias tuberculata',
 'common': 'purple wartyback'}
```
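
Because `file_name` is decoded into a PIL image, entries can be used directly. For instance (an illustrative sketch; `Unionidae` is an arbitrary filter value):

```python
ds[0]["file_name"].show()  # display the first image

# Subset to a single family with the standard `datasets` filter API
unionids = ds.filter(lambda row: row["family"] == "Unionidae")
print(unionids.num_rows)
```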
For more information about Hugging Face datasets, see the [documentation](https://huggingface.co/docs/datasets/loading#hugging-face-hub).

To export the data into a format matching the previously used dataset structure, you may do the following without cloning the repository:

1) Create and activate a virtual environment.
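For example (one common approach; any environment manager works):
```bash
python3 -m venv .venv
source .venv/bin/activate
```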
2) Install dependencies.
```bash
pip install -r \
  https://huggingface.co/datasets/imageomics/rare-species/resolve/main/scripts/requirements.txt
```
3) Run the export script. You may customize the output directory by specifying the `--dataset-path` argument.
```bash
curl -s https://huggingface.co/datasets/imageomics/rare-species/resolve/main/scripts/export_rare_species.py \
  | python3 - --dataset-path ./exported_dataset
```
This will create a directory structure like the following:
```
/exported_dataset/dataset/
    <kingdom-phylum-class-order-family-genus-species-1>/
        <eol_content_id_1>_<eol_page_id>_eol_full-size-copy.jpg
        <eol_content_id_2>_<eol_page_id>_eol_full-size-copy.jpg
```
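
As a quick sanity check (a sketch; it assumes the default layout shown above), the number of exported images should match the 11,983 rows reported by `load_dataset`:

```bash
find ./exported_dataset/dataset -name '*.jpg' | wc -l  # expect 11983
```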
scripts/export_rare_species.py ADDED
@@ -0,0 +1,58 @@
import os
import argparse

import polars as pl
from datasets import load_dataset, Features, Value
from datasets.features import Image


def main():
    parser = argparse.ArgumentParser(
        description="Export the Rare Species dataset into a legacy on-disk folder structure"
    )
    parser.add_argument(
        "--dataset-path", default="dataset",
        help="Directory under which to write the `dataset/...` hierarchy"
    )
    parser.add_argument(
        "--revision", default="main",
        help="Hugging Face dataset revision (branch, tag, or commit SHA)"
    )
    args = parser.parse_args()

    # Read metadata.csv from the remote at the requested revision
    csv_url = (
        "https://huggingface.co/datasets/imageomics/rare-species/"
        f"resolve/{args.revision}/metadata.csv?download=true"
    )
    print(f"Loading metadata from {csv_url}")
    df_pl = pl.read_csv(csv_url)
    # Relative on-disk paths; the export assumes these rows align with the Parquet row order
    rel_paths = df_pl["file_name"].to_list()

    # Define schema: file_name as raw bytes (no PIL decode), others as strings
    features = Features({
        "file_name": Image(decode=False),
        **{c: Value("string") for c in df_pl.columns if c != "file_name"}
    })

    # Load the Parquet-backed dataset
    print(f"Loading dataset imageomics/rare-species @ {args.revision}")
    ds = load_dataset(
        "imageomics/rare-species",
        split="train",
        revision=args.revision,
        features=features
    )

    # Export each image's raw bytes under <path>/dataset/...
    print(f"Exporting {len(rel_paths)} images to {args.dataset_path}/")
    for idx, rel in enumerate(rel_paths):
        # With decode=False, the Image feature yields a {"bytes": ..., "path": ...} dict
        info = ds[idx]["file_name"]
        img_bytes = info["bytes"]
        dst = os.path.join(args.dataset_path, rel)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        with open(dst, "wb") as f:
            f.write(img_bytes)

    print(f"Export complete: images written under {args.dataset_path}/dataset")


if __name__ == "__main__":
    main()
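
A typical local invocation, if you save the script rather than piping it through `curl` (a sketch using only the script's own arguments):

```bash
python3 export_rare_species.py --dataset-path ./exported_dataset --revision main
```

Loading with `Image(decode=False)` hands back the stored JPEG bytes untouched, so the export avoids a lossy decode/re-encode round trip.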
scripts/requirements.txt ADDED
@@ -0,0 +1,4 @@
datasets
polars
pandas
huggingface_hub