Failed to load dataset

#3 by joelb - opened

I tried to load this dataset with load_dataset("allenai/OLMoE-mix-0924", split="train[:2048]"). This fails with:

TypeError: Couldn't cast array of type
struct<paloma_paragraphs: list<item: null>, paloma_documents: list<item: null>>
to
{'paloma_paragraphs': Sequence(feature=Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), length=-1, id=None)}

There's some indication that this is an issue with the dataset itself.

Full error:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:2013, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
   2012 try:
-> 2013     writer.write_table(table)
   2014 except CastError as cast_error:

File /usr/local/lib/python3.10/dist-packages/datasets/arrow_writer.py:585, in ArrowWriter.write_table(self, pa_table, writer_batch_size)
    584 pa_table = pa_table.combine_chunks()
--> 585 pa_table = table_cast(pa_table, self._schema)
    586 if self.embed_local_files:

File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2281, in table_cast(table, schema)
   2280 if table.schema != schema:
-> 2281     return cast_table_to_schema(table, schema)
   2282 elif table.schema.metadata != schema.metadata:

File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2240, in cast_table_to_schema(table, schema)
   2235     raise CastError(
   2236         f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match",
   2237         table_column_names=table.column_names,
   2238         requested_column_names=list(features),
   2239     )
-> 2240 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
   2241 return pa.Table.from_arrays(arrays, schema=schema)

File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2240, in <listcomp>(.0)
   2235     raise CastError(
   2236         f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match",
   2237         table_column_names=table.column_names,
   2238         requested_column_names=list(features),
   2239     )
-> 2240 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
   2241 return pa.Table.from_arrays(arrays, schema=schema)

File /usr/local/lib/python3.10/dist-packages/datasets/table.py:1795, in _wrap_for_chunked_arrays.<locals>.wrapper(array, *args, **kwargs)
   1794 if isinstance(array, pa.ChunkedArray):
-> 1795     return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
   1796 else:

File /usr/local/lib/python3.10/dist-packages/datasets/table.py:1795, in <listcomp>(.0)
   1794 if isinstance(array, pa.ChunkedArray):
-> 1795     return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
   1796 else:

File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2104, in cast_array_to_feature(array, feature, allow_primitive_to_str, allow_decimal_to_str)
   2098     return array_cast(
   2099         array,
   2100         feature(),
   2101         allow_primitive_to_str=allow_primitive_to_str,
   2102         allow_decimal_to_str=allow_decimal_to_str,
   2103     )
-> 2104 raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}")

TypeError: Couldn't cast array of type
struct<paloma_paragraphs: list<item: null>, paloma_documents: list<item: null>>
to
{'paloma_paragraphs': Sequence(feature=Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), length=-1, id=None)}

The above exception was the direct cause of the following exception:

DatasetGenerationError                    Traceback (most recent call last)
Cell In[15], line 1
----> 1 load_dataset("allenai/OLMoE-mix-0924", split="train[:2048]")

File /usr/local/lib/python3.10/dist-packages/datasets/load.py:2628, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
   2625     return builder_instance.as_streaming_dataset(split=split)
   2627 # Download and prepare data
-> 2628 builder_instance.download_and_prepare(
   2629     download_config=download_config,
   2630     download_mode=download_mode,
   2631     verification_mode=verification_mode,
   2632     num_proc=num_proc,
   2633     storage_options=storage_options,
   2634 )
   2636 # Build dataset for splits
   2637 keep_in_memory = (
   2638     keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
   2639 )

File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1029, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
   1027     if num_proc is not None:
   1028         prepare_split_kwargs["num_proc"] = num_proc
-> 1029     self._download_and_prepare(
   1030         dl_manager=dl_manager,
   1031         verification_mode=verification_mode,
   1032         **prepare_split_kwargs,
   1033         **download_and_prepare_kwargs,
   1034     )
   1035 # Sync info
   1036 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1124, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
   1120 split_dict.add(split_generator.split_info)
   1122 try:
   1123     # Prepare split will record examples associated to the split
-> 1124     self._prepare_split(split_generator, **prepare_split_kwargs)
   1125 except OSError as e:
   1126     raise OSError(
   1127         "Cannot find data file. "
   1128         + (self.manual_download_instructions or "")
   1129         + "\nOriginal error:\n"
   1130         + str(e)
   1131     ) from None

File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1884, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size)
   1882 job_id = 0
   1883 with pbar:
-> 1884     for job_id, done, content in self._prepare_split_single(
   1885         gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
   1886     ):
   1887         if done:
   1888             result = content

File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:2040, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
   2038     if isinstance(e, DatasetGenerationError):
   2039         raise
-> 2040     raise DatasetGenerationError("An error occurred while generating the dataset") from e
   2042 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)

DatasetGenerationError: An error occurred while generating the dataset

In case it helps others, I seem to be able to stream the dataset instead with load_dataset("allenai/OLMoE-mix-0924", split="train", streaming=True).
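A minimal sketch of that streaming workaround (the .take() call is my addition; split slicing like "train[:2048]" isn't supported in streaming mode):

from datasets import load_dataset

# Streaming doesn't build and cast the full Arrow table up front,
# so the schema mismatch that breaks the eager load isn't triggered
# as a single up-front cast.
ds = load_dataset("allenai/OLMoE-mix-0924", split="train", streaming=True)

# An IterableDataset has no split slicing, so take the first 2048
# examples explicitly instead.
for example in ds.take(2048):
    ...  # each example is a plain dict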

@joelb Yeah, this is expected: the format we use for the data allows missing keys, while Hugging Face datasets does not.

Hi! Yes, datasets expects the fields of each example to have the same subfields and types, but in this dataset some samples have additional fields and subfields.
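The mismatch is easy to see with PyArrow's type inference directly (my own illustration, not from the thread): a field that only ever contains empty lists gives inference nothing to work with, so its type disagrees with the one inferred from populated shards.

import pyarrow as pa

# A field that only holds empty lists is inferred as list<item: null> ...
empty = pa.array([[], []])
print(empty.type)  # list<item: null>

# ... while a populated field is inferred as list<list<int64>>.
full = pa.array([[[1, 2]], [[3]]])
print(full.type)  # list<list<item: int64>>

# datasets fixes a single schema for the whole split, so a shard whose
# inferred type disagrees fails in cast_array_to_feature, as in the
# traceback above.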

I opened a PR to this dataset to fix the issue with a workaround: it sets the string type in the YAML for the fields that cause problems.
The PR is here: https://huggingface.co/datasets/allenai/OLMoE-mix-0924/discussions/4
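While the PR was still open, you could try the fix by loading its Hub ref directly; "refs/pr/4" is the Hub's branch name for discussion/PR #4 (a hedged sketch, not something posted in the thread):

from datasets import load_dataset

# Point load_dataset at the open PR's revision to test the fix
# before it is merged into main.
ds = load_dataset(
    "allenai/OLMoE-mix-0924",
    split="train[:2048]",
    revision="refs/pr/4",
)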

Merged!!

amanrangapur changed discussion status to closed
