SATIN / SATIN.py
jonathan-roberts1's picture
Rename satin_class.py to SATIN.py
b1618c1
raw
history blame
6.6 kB
"""
import datasets
import os
import pyarrow.parquet as pq
from PIL import Image
from io import BytesIO
import numpy as np
import pandas as pd
def load_data(data_dir):
parquet_file = [file for file in os.listdir(data_dir) if file.endswith('.parquet')][0]
print(parquet_file)
parquet_path = os.path.join(data_dir, parquet_file)
parquet_path = data_dir
table = pq.read_table(parquet_path)
for row in table.iterrecords():
image_bytes = row['image']
image = Image.open(BytesIO(image_bytes))
label = row['label']
yield image, label
class SATINConfig(datasets.BuilderConfig):
def __init__(self, name, description, data_url, class_names, **kwargs):
Args:
data_url: `string`, url to download the zip file from.
metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
**kwargs: keyword arguments forwarded to super.
super(SATINConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.name = name
self.data_url = data_url
self.description = description
self.class_names = class_names
class SATIN(datasets.GeneratorBasedBuilder):
SATIN Images dataset
_SAT_4_NAMES = ['barren land', 'grassland', 'other', 'trees']
_SAT_6_NAMES = ['barren land', 'building', 'grassland', 'road', 'trees', 'water']
BUILDER_CONFIGS = [
SATINConfig(
name="SAT_4",
description="SAT_4.",
data_url="https://huggingface.co/datasets/jonathan-roberts1/SAT-4/tree/main/data/",#train-00000-of-00001-e2dcb38bc165dfb0.parquet",
class_names = _SAT_4_NAMES
#metadata_urls={
# "train": "https://link-to-breakfast-foods-train.txt",
),
SATINConfig(
name="SAT_6",
description="SAT_6.",
data_url="https://huggingface.co/datasets/jonathan-roberts1/SAT-6/tree/main/data/",#train-00000-of-00001-c47ada2c92f814d2.parquet",
class_names = _SAT_6_NAMES
)
]
@property
def url_prefix(self):
return {
"SAT-4": "https://huggingface.co/datasets/jonathan-roberts1/SAT-4/tree/main/data/",#train-00000-of-00001-e2dcb38bc165dfb0.parquet",#train-00000-of-00001-e2dcb38bc165dfb0.parquet",
"SAT-6": "https://huggingface.co/datasets/jonathan-roberts1/SAT-6/tree/main/data/",
}
def _info(self):
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(
{
"image": datasets.Image(),
"label": datasets.ClassLabel(names=self.config.class_names),
}
),
supervised_keys=("image", "label"),
#homepage=_HOMEPAGE,
#citation=_CITATION,
#license=_LICENSE,
#task_templates=[ImageClassification(image_column="image", label_column="label")],
)
def _split_generators(self, dl_manager):
url = self.config.data_url
data_dir = dl_manager.download_and_extract(url)#, use_auth_token=True)
print(data_dir)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data_dir": data_dir},
),
]
def _generate_examples(self, data_dir):
#base_url = self.url_prefix[self.config.name]
file_url = self.config.data_url
use_auth_token = os.environ.get("HUGGINGFACE_TOKEN")
with NamedTemporaryFile() as file:
download(file_url, file.name, use_auth_token=use_auth_token)
df = pd.read_parquet(file.name)
for idx, row in df.iterrows():
example = {
"image": row["image"],
"label": row["label"],
}
yield idx, example
#def _generate_examples(self, data_dir):
# for idx, (image, label) in enumerate(load_data(data_dir)):
# image_array = np.array(image)
# yield idx, {"image": image_array, "label": label}
"""
from datasets.utils.download_manager import DownloadManager
import tempfile
import datasets
import os
import pyarrow.parquet as pq
from PIL import Image
from io import BytesIO
import numpy as np
import pandas as pd
class SATINConfig(datasets.BuilderConfig):
    """BuilderConfig for one SATIN sub-dataset (SAT-4 or SAT-6).

    Args:
        name: configuration name, e.g. ``"SAT_4"``.
        description: human-readable description of the sub-dataset.
        data_url: Hugging Face Hub dataset identifier the data is loaded from.
        class_names: ordered label names used for the ``ClassLabel`` feature.
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, name, description, data_url, class_names, **kwargs):
        # All SATIN configs share the same fixed version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.name = name
        self.description = description
        self.data_url = data_url
        self.class_names = class_names
class SATIN(datasets.GeneratorBasedBuilder):
    """SATIN Images dataset: image-classification builder for SAT-4 / SAT-6.

    Each config wraps an existing Hub dataset (see ``data_url``) and exposes
    a single TRAIN split of (image, label) pairs.
    """

    _SAT_4_NAMES = ['barren land', 'grassland', 'other', 'trees']
    _SAT_6_NAMES = ['barren land', 'building', 'grassland', 'road', 'trees', 'water']

    BUILDER_CONFIGS = [
        SATINConfig(
            name="SAT_4",
            description="SAT_4.",
            data_url="jonathan-roberts1/SAT-4",
            class_names=_SAT_4_NAMES,
        ),
        SATINConfig(
            name="SAT_6",
            description="SAT_6.",
            data_url="jonathan-roberts1/SAT-6",
            class_names=_SAT_6_NAMES,
        ),
    ]

    def _info(self):
        """Return dataset metadata: image + per-config class labels."""
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(names=self.config.class_names),
                }
            ),
            supervised_keys=("image", "label"),
        )

    def _split_generators(self, dl_manager):
        """Load the source Hub dataset and declare a single TRAIN split.

        NOTE(review): the data already lives on the Hub as a ready-made
        dataset, so we delegate to ``datasets.load_dataset`` instead of
        ``dl_manager.download_and_extract``; ``dl_manager`` is unused but
        kept because the framework passes it.
        """
        dataset = datasets.load_dataset(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Pass only the split we iterate, not the whole DatasetDict.
                gen_kwargs={"hf_split": dataset["train"]},
            ),
        ]

    def _generate_examples(self, hf_split):
        """Yield ``(index, {"image", "label"})`` pairs from the train split."""
        for idx, row in enumerate(hf_split):
            yield idx, {"image": row["image"], "label": row["label"]}