---
license: mit
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
dataset_info:
  features:
  - name: images
    sequence: string
  - name: problem
    dtype: string
  - name: answer
    dtype: string
  splits:
  - name: train
    num_bytes: 477238
    num_examples: 3000
  - name: test
    num_bytes: 93920
    num_examples: 600
  download_size: 287256
  dataset_size: 571158
language:
- en
size_categories:
- 1K<n<10K
---
Please download `images.tar` to your local disk and run `tar -xvf images.tar` to extract the image files.
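Alternatively, the archive can be fetched and unpacked programmatically. Below is a minimal sketch using `huggingface_hub`; it assumes `images.tar` sits at the root of the `hiyouga/rl-mixed-dataset` dataset repository:

```python
# Sketch: download images.tar from the Hub and unpack it into the working directory.
# Assumes the archive is stored at the root of the dataset repo.
import tarfile

from huggingface_hub import hf_hub_download

archive_path = hf_hub_download(
    repo_id="hiyouga/rl-mixed-dataset",
    filename="images.tar",
    repo_type="dataset",
)
with tarfile.open(archive_path) as tar:
    tar.extractall(".")  # creates images/train/ and images/test/
```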
This dataset was built by mixing hiyouga/geometry3k and hiyouga/math12k using the following script:
```python
import os
from functools import partial

from datasets import DatasetDict, Features, Sequence, Value, concatenate_datasets, load_dataset


def process_sample(example: dict, index: int, split: str):
    # Geometry samples carry a PIL image: save it to disk and keep the relative path.
    if "images" in example:
        image = example["images"][0]
        image_path = os.path.join("images", split, f"{index}.png")
        image.save(image_path)
        images = [image_path]
    else:
        # Text-only math samples have no image.
        images = []

    return {
        "images": images,
        "problem": example["problem"],
        "answer": example["answer"],
    }


def main():
    geo3k = load_dataset("hiyouga/geometry3k")
    math12k = load_dataset("hiyouga/math12k")
    os.makedirs(os.path.join("images", "train"), exist_ok=True)
    os.makedirs(os.path.join("images", "test"), exist_ok=True)

    # Cast every sample to a common schema: image paths (strings) instead of embedded images.
    map_kwargs = {
        "with_indices": True,
        "num_proc": 64,
        "features": Features(
            {"images": Sequence(Value("string")), "problem": Value("string"), "answer": Value("string")}
        ),
    }
    geo3k_train = geo3k["train"].select(range(1500)).map(partial(process_sample, split="train"), **map_kwargs)
    geo3k_test = geo3k["test"].select(range(300)).map(partial(process_sample, split="test"), **map_kwargs)
    math12k_train = math12k["train"].select(range(1500)).map(partial(process_sample, split="train"), **map_kwargs)
    math12k_test = math12k["test"].select(range(300)).map(partial(process_sample, split="test"), **map_kwargs)

    trainset = concatenate_datasets([geo3k_train, math12k_train])
    testset = concatenate_datasets([geo3k_test, math12k_test])
    dataset = DatasetDict({"train": trainset, "test": testset})
    dataset.push_to_hub("hiyouga/rl-mixed-dataset")


if __name__ == "__main__":
    main()
```
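After unpacking `images.tar`, the `images` field of each example holds a relative path that can be opened directly. A quick usage sketch (run it from the directory containing `images/`):

```python
from datasets import load_dataset
from PIL import Image

dataset = load_dataset("hiyouga/rl-mixed-dataset")
sample = dataset["train"][0]
print(sample["problem"], sample["answer"])

# Geometry samples carry one relative image path; math samples have an empty list.
if sample["images"]:
    image = Image.open(sample["images"][0])
    print(image.size)
```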