---
license: apache-2.0
task_categories:
- text2text-generation
language:
- en
tags:
- code
size_categories:
- 1K<n<10K
libraries:
- croissant
- Datasets
language:
- {lang_0} # Example: fr
- {lang_1} # Example: en
license: {license} # Example: apache-2.0 or any license from https://hf.co/docs/hub/repositories-licenses
license_name: {license_name} # If license = other (license not in https://hf.co/docs/hub/repositories-licenses), specify an id for it here, like my-license-1.0
license_link: {license_link} # If license = other, specify "LICENSE" or "LICENSE.md" to link to a file of that name inside the repo, or a URL to a remote file
license_details: {license_details} # Legacy, textual description of a custom license
tags:
- {tag_0} # Example: audio
- {tag_1} # Example: bio
- {tag_2} # Example: natural-language-understanding
- {tag_3} # Example: birds-classification
annotations_creators:
- {creator} # Example: crowdsourced, found, expert-generated, machine-generated
language_creators:
- {creator} # Example: crowdsourced, ...
language_details:
- {bcp47_lang_0} # Example: fr-FR
- {bcp47_lang_1} # Example: en-US
pretty_name: {pretty_name} # Example: SQuAD
size_categories:
- {number_of_elements_in_dataset} # Example: n<1K, 100K<n<1M, …
source_datasets:
- {source_dataset_0} # Example: wikipedia
- {source_dataset_1} # Example: laion/laion-2b task_categories: # Full list at https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
- {task_0} # Example: question-answering
- {task_1} # Example: image-classification
task_ids:
- {subtask_0} # Example: extractive-qa
- {subtask_1} # Example: multi-class-image-classification
paperswithcode_id: {paperswithcode_id} # Dataset id on PapersWithCode (from the URL). Example for SQuAD: squad
configs: # Optional. This can be used to pass additional parameters to the dataset loader, such as data_files, data_dir, and any builder-specific parameters
- config_name: {config_name_0} # Name of the dataset subset, if applicable. Example: default
  data_files:
  - split: {split_name_0} # Example: train
    path: {file_path_0} # Example: data.csv
  - split: {split_name_1} # Example: test
    path: {file_path_1} # Example: holdout.csv
- config_name: {config_name_1} # Name of the dataset subset. Example: processed
  data_files:
  - split: {split_name_3} # Example: train
    path: {file_path_3} # Example: data_processed.csv