block.
+ Explicitly replacing all `\n` with their HTML equivalent to bypass this issue.
+ Also stripping the trailing `\n` first.
+ """
+ return t.strip("\n").replace("\n", "<br/>")
+
+ wrap = textwrap.fill(t, width=width, replace_whitespace=False)
+ out = highlight(wrap, DjangoLexer(), HtmlFormatter())
+ out = replace_linebreaks(out)
+ st.write(out, unsafe_allow_html=True)
+
+ def show_text(t, width=WIDTH, with_markdown=False):
+ wrap = [textwrap.fill(subt, width=width, replace_whitespace=False) for subt in t.split("\n")]
+ wrap = "\n".join(wrap)
+ if with_markdown:
+ st.write(wrap, unsafe_allow_html=True)
+ else:
+ st.text(wrap)
+
+ if mode == "Helicopter view":
+ st.title("High level metrics")
+ st.write("This will take a minute to collect.")
+ st.write(
+ "If you want to contribute, please refer to the instructions in "
+ + "[Contributing](https://github.com/bigscience-workshop/promptsource/blob/main/CONTRIBUTING.md)."
+ )
+
+ #
+ # Loads template data
+ #
+ try:
+ template_collection = TemplateCollection()
+ except FileNotFoundError:
+ st.error(
+ "Unable to find the prompt folder!\n\n"
+ "We expect the folder to be in the working directory. "
+ "You might need to restart the app in the root directory of the repo."
+ )
+ st.stop()
+
+ #
+ # Global metrics
+ #
+ counts = template_collection.get_templates_count()
+ nb_prompted_datasets = len(counts)
+ st.write(f"## Number of *prompted datasets*: `{nb_prompted_datasets}`")
+ nb_prompts = sum(counts.values())
+ st.write(f"## Number of *prompts*: `{nb_prompts}`")
+
+ #
+ # Metrics per dataset/subset
+ #
+ # Download dataset infos (multiprocessing download)
+ manager = Manager()
+ all_infos = manager.dict()
+ all_datasets = list(set([t[0] for t in template_collection.keys]))
+
+ pool = Pool(processes=multiprocessing.cpu_count())
+ pool.map(functools.partial(get_infos, all_infos), all_datasets)
+ pool.close()
+ pool.join()
+
+ results = []
+ for (dataset_name, subset_name) in template_collection.keys:
+ # Collect split sizes (train, validation and test)
+ if dataset_name not in all_infos:
+ infos = get_dataset_infos(dataset_name)
+ all_infos[dataset_name] = infos
+ else:
+ infos = all_infos[dataset_name]
+ if infos:
+ if subset_name is None:
+ subset_infos = infos[list(infos.keys())[0]]
+ else:
+ subset_infos = infos[subset_name]
+
+ try:
+ split_sizes = {k: v.num_examples for k, v in subset_infos.splits.items()}
+ except Exception:
+ # Works around a bug in some community datasets:
+ # for simplicity, leave `split_sizes` empty so the displayed split sizes default to 0.
+ split_sizes = {}
+ else:
+ split_sizes = {}
+
+ # Collect template counts, original task counts and names
+ dataset_templates = template_collection.get_dataset(dataset_name, subset_name)
+ results.append(
+ {
+ "Dataset name": dataset_name,
+ "Subset name": "∅" if subset_name is None else subset_name,
+ "Train size": split_sizes["train"] if "train" in split_sizes else 0,
+ "Validation size": split_sizes["validation"] if "validation" in split_sizes else 0,
+ "Test size": split_sizes["test"] if "test" in split_sizes else 0,
+ "Number of prompts": len(dataset_templates),
+ "Number of original task prompts": sum(
+ [bool(t.metadata.original_task) for t in dataset_templates.templates.values()]
+ ),
+ "Prompt names": [t.name for t in dataset_templates.templates.values()],
+ }
+ )
+ results_df = pd.DataFrame(results)
+ results_df.sort_values(["Number of prompts"], inplace=True, ascending=False)
+ results_df.reset_index(drop=True, inplace=True)
+
+ nb_training_instances = results_df["Train size"].sum()
+ st.write(f"## Number of *training instances*: `{nb_training_instances}`")
+
+ plot_df = results_df[["Dataset name", "Subset name", "Train size", "Number of prompts"]].copy()
+ plot_df["Name"] = plot_df["Dataset name"] + " - " + plot_df["Subset name"]
+ plot_df.sort_values(["Train size"], inplace=True, ascending=False)
+ fig = px.bar(
+ plot_df,
+ x="Name",
+ y="Train size",
+ hover_data=["Dataset name", "Subset name", "Number of prompts"],
+ log_y=True,
+ title="Number of training instances per data(sub)set - y-axis is in logscale",
+ )
+ fig.update_xaxes(visible=False, showticklabels=False)
+ st.plotly_chart(fig, use_container_width=True)
+ st.write(
+ f"- Top 3 training subsets account for `{100 * plot_df[:3]['Train size'].sum() / nb_training_instances:.2f}%` of the training instances."
+ )
+ biggest_training_subset = plot_df.iloc[0]
+ st.write(
+ f"- Biggest training subset is *{biggest_training_subset['Name']}* with `{biggest_training_subset['Train size']}` instances"
+ )
+ smallest_training_subset = plot_df[plot_df["Train size"] > 0].iloc[-1]
+ st.write(
+ f"- Smallest training subset is *{smallest_training_subset['Name']}* with `{smallest_training_subset['Train size']}` instances"
+ )
+
+ st.markdown("***")
+ st.write("Details per dataset")
+ st.table(results_df)
+
+ else:
+ # Combining modes `Prompted dataset viewer` and `Sourcing` since the
+ # backbone of the interfaces is the same
+ assert mode in ["Prompted dataset viewer", "Sourcing"], (
+ f"`mode` ({mode}) should be in `[Helicopter view, Prompted dataset viewer, Sourcing]`"
+ )
+
+ #
+ # Loads dataset information
+ #
+
+ dataset_list = list_datasets()
+ ag_news_index = dataset_list.index("ag_news")
+
+ #
+ # Select a dataset - starts with ag_news
+ #
+ dataset_key = st.sidebar.selectbox(
+ "Dataset",
+ dataset_list,
+ key="dataset_select",
+ index=ag_news_index,
+ help="Select the dataset to work on.",
+ )
+
+ #
+ # If a particular dataset is selected, loads dataset and template information
+ #
+ if dataset_key is not None:
+
+ #
+ # Check for subconfigurations (i.e. subsets)
+ #
+ configs = get_dataset_confs(dataset_key)
+ conf_option = None
+ if len(configs) > 0:
+ conf_option = st.sidebar.selectbox("Subset", configs, index=0, format_func=lambda a: a.name)
+
+ subset_name = str(conf_option.name) if conf_option else None
+ try:
+ dataset = get_dataset(dataset_key, subset_name)
+ except OSError as e:
+ st.error(
+ f"Some datasets are not handled automatically by `datasets` and require users to download the "
+ f"dataset manually. This applies to {dataset_key}{f'/{subset_name}' if subset_name is not None else ''}. "
+ f"\n\nPlease download the raw dataset to `~/.cache/promptsource/{dataset_key}{f'/{subset_name}' if subset_name is not None else ''}`. "
+ f"\n\nYou can choose another cache directory by overriding `PROMPTSOURCE_MANUAL_DATASET_DIR` environment "
+ f"variable and downloading raw dataset to `$PROMPTSOURCE_MANUAL_DATASET_DIR/{dataset_key}{f'/{subset_name}' if subset_name is not None else ''}`"
+ f"\n\nOriginal error:\n{str(e)}"
+ )
+ st.stop()
+
+ splits = list(dataset.keys())
+ index = 0
+ if "train" in splits:
+ index = splits.index("train")
+ split = st.sidebar.selectbox("Split", splits, key="split_select", index=index)
+ dataset = dataset[split]
+ dataset = renameDatasetColumn(dataset)
+
+ #
+ # Loads template data
+ #
+ try:
+ dataset_templates = DatasetTemplates(dataset_key, conf_option.name if conf_option else None)
+ except FileNotFoundError:
+ st.error(
+ "Unable to find the prompt folder!\n\n"
+ "We expect the folder to be in the working directory. "
+ "You might need to restart the app in the root directory of the repo."
+ )
+ st.stop()
+
+ template_list = dataset_templates.all_template_names
+ num_templates = len(template_list)
+ st.sidebar.write(
+ "No of prompts created for "
+ + f"`{dataset_key + (('/' + conf_option.name) if conf_option else '')}`"
+ + f": **{str(num_templates)}**"
+ )
+
+ if mode == "Prompted dataset viewer":
+ if num_templates > 0:
+ template_name = st.sidebar.selectbox(
+ "Prompt name",
+ template_list,
+ key="template_select",
+ index=0,
+ help="Select the prompt to visualize.",
+ )
+
+ step = 50
+ example_index = st.sidebar.number_input(
+ f"Select the example index (Size = {len(dataset)})",
+ min_value=0,
+ max_value=len(dataset) - step,
+ value=0,
+ step=step,
+ key="example_index_number_input",
+ help="Offset = 50.",
+ )
+ else: # mode = Sourcing
+ st.sidebar.subheader("Select Example")
+ example_index = st.sidebar.slider("Select the example index", 0, len(dataset) - 1)
+
+ example = dataset[example_index]
+ example = removeHyphen(example)
+
+ st.sidebar.write(example)
+
+ st.sidebar.subheader("Dataset Schema")
+ rendered_features = render_features(dataset.features)
+ st.sidebar.write(rendered_features)
+
+ #
+ # Display dataset information
+ #
+ st.header("Dataset: " + dataset_key + " " + (("/ " + conf_option.name) if conf_option else ""))
+
+ # If we have a custom dataset change the source link to the hub
+ split_dataset_key = dataset_key.split("/")
+ possible_user = split_dataset_key[0]
+ if len(split_dataset_key) > 1 and possible_user in INCLUDED_USERS:
+ source_link = "https://huggingface.co/datasets/%s/blob/main/%s.py" % (
+ dataset_key,
+ split_dataset_key[-1],
+ )
+ else:
+ source_link = "https://github.com/huggingface/datasets/blob/master/datasets/%s/%s.py" % (
+ dataset_key,
+ dataset_key,
+ )
+
+ st.markdown("*Homepage*: " + dataset.info.homepage + "\n\n*Dataset*: " + source_link)
+
+ md = """
+ %s
+ """ % (
+ dataset.info.description.replace("\\", "") if dataset_key else ""
+ )
+ st.markdown(md)
+
+ #
+ # Body of the app: display prompted examples in mode `Prompted dataset viewer`
+ # or text boxes to create new prompts in mode `Sourcing`
+ #
+ if mode == "Prompted dataset viewer":
+ #
+ # Display template information
+ #
+ if num_templates > 0:
+ template = dataset_templates[template_name]
+ st.subheader("Prompt")
+ st.markdown("##### Name")
+ st.text(template.name)
+ st.markdown("##### Reference")
+ st.text(template.reference)
+ st.markdown("##### Original Task? ")
+ st.text(template.metadata.original_task)
+ st.markdown("##### Choices in template? ")
+ st.text(template.metadata.choices_in_prompt)
+ st.markdown("##### Metrics")
+ st.text(", ".join(template.metadata.metrics) if template.metadata.metrics else None)
+ st.markdown("##### Prompt Languages")
+ if template.metadata.languages:
+ st.text(", ".join([format_language(tag) for tag in template.metadata.languages]))
+ else:
+ st.text(None)
+ st.markdown("##### Answer Choices")
+ if template.get_answer_choices_expr() is not None:
+ show_jinja(template.get_answer_choices_expr())
+ else:
+ st.text(None)
+ st.markdown("##### Jinja template")
+ splitted_template = template.jinja.split("|||")
+ st.markdown("###### Input template")
+ show_jinja(splitted_template[0].strip())
+ if len(splitted_template) > 1:
+ st.markdown("###### Target template")
+ show_jinja(splitted_template[1].strip())
+ st.markdown("***")
+
+ #
+ # Display a couple (steps) examples
+ #
+ for ex_idx in range(example_index, example_index + step):
+ if ex_idx >= len(dataset):
+ continue
+ example = dataset[ex_idx]
+ example = removeHyphen(example)
+ col1, _, col2 = st.beta_columns([12, 1, 12])
+ with col1:
+ st.write(example)
+ if num_templates > 0:
+ with col2:
+ prompt = template.apply(example, highlight_variables=False)
+ if prompt == [""]:
+ st.write("∅∅∅ *Blank result*")
+ else:
+ st.write("Input")
+ show_text(prompt[0])
+ if len(prompt) > 1:
+ st.write("Target")
+ show_text(prompt[1])
+ st.markdown("***")
+ else: # mode = Sourcing
+ st.markdown("## Prompt Creator")
+
+ #
+ # Create a new template or select an existing one
+ #
+ col1a, col1b, _, col2 = st.beta_columns([9, 9, 1, 6])
+
+ # current_templates_key and state.templates_key are keys for the templates object
+ current_templates_key = (dataset_key, conf_option.name if conf_option else None)
+
+ # Resets state if there has been a change in templates_key
+ if state.templates_key != current_templates_key:
+ state.templates_key = current_templates_key
+ reset_template_state()
+
+ with col1a, st.form("new_template_form"):
+ new_template_name = st.text_input(
+ "Create a New Prompt",
+ key="new_template",
+ value="",
+ help="Enter name and hit enter to create a new prompt.",
+ )
+ new_template_submitted = st.form_submit_button("Create")
+ if new_template_submitted:
+ if new_template_name in dataset_templates.all_template_names:
+ st.error(
+ f"A prompt with the name {new_template_name} already exists "
+ f"for dataset {state.templates_key}."
+ )
+ elif new_template_name == "":
+ st.error("Need to provide a prompt name.")
+ else:
+ template = Template(new_template_name, "", "")
+ dataset_templates.add_template(template)
+ reset_template_state()
+ state.template_name = new_template_name
+ else:
+ state.new_template_name = None
+
+ with col1b, st.beta_expander("or Select Prompt", expanded=True):
+ template_list = dataset_templates.all_template_names
+ if state.template_name:
+ index = template_list.index(state.template_name)
+ else:
+ index = 0
+ state.template_name = st.selectbox(
+ "", template_list, key="template_select", index=index, help="Select the prompt to work on."
+ )
+
+ if st.button("Delete Prompt", key="delete_prompt"):
+ dataset_templates.remove_template(state.template_name)
+ reset_template_state()
+
+ variety_guideline = """
+ :heavy_exclamation_mark::question:Creating a diverse set of prompts whose differences go beyond surface wording (i.e. marginally changing 2 or 3 words) is highly encouraged.
+ Ultimately, the hope is that exposing the model to such diversity will have a non-trivial impact on the model's robustness to the prompt formulation.
+ \r**To get varied prompts, you can try moving the cursor along these axes**:
+ \n- **Interrogative vs affirmative form**: Ask a question about an attribute of the inputs or tell the model to decide something about the input.
+ \n- **Task description localization**: where is the task description blended into the inputs? At the beginning, in the middle, or at the end?
+ \n- **Implicit situation or contextualization**: how explicit is the query? For instance, *Given this review, would you buy this product?* is an indirect way to ask whether the review is positive.
+ """
+
+ col1, _, _ = st.beta_columns([18, 1, 6])
+ with col1:
+ if state.template_name is not None:
+ show_text(variety_guideline, with_markdown=True)
+
+ #
+ # Edit the created or selected template
+ #
+ col1, _, col2 = st.beta_columns([18, 1, 6])
+ with col1:
+ if state.template_name is not None:
+ template = dataset_templates[state.template_name]
+ #
+ # If template is selected, displays template editor
+ #
+ with st.form("edit_template_form"):
+ updated_template_name = st.text_input("Name", value=template.name)
+ state.reference = st.text_input(
+ "Prompt Reference",
+ help="Short description of the prompt and/or paper reference for the prompt.",
+ value=template.reference,
+ )
+
+ # Metadata
+ state.metadata = template.metadata
+ state.metadata.original_task = st.checkbox(
+ "Original Task?",
+ value=template.metadata.original_task,
+ help="Prompt asks model to perform the original task designed for this dataset.",
+ )
+ state.metadata.choices_in_prompt = st.checkbox(
+ "Choices in Template?",
+ value=template.metadata.choices_in_prompt,
+ help="Prompt explicitly lists choices in the template for the output.",
+ )
+
+ state.metadata.metrics = st.multiselect(
+ "Metrics",
+ sorted(METRICS),
+ default=template.metadata.metrics,
+ help="Select all metrics that are commonly used (or should "
+ "be used if a new task) to evaluate this prompt.",
+ )
+
+ state.metadata.languages = st.multiselect(
+ "Prompt Languages",
+ sorted(LANGUAGES.keys()),
+ default=template.metadata.languages,
+ format_func=format_language,
+ help="Select all languages used in this prompt. "
+ "This annotation is independent from the language(s) "
+ "of the dataset.",
+ )
+
+ # Answer choices
+ if template.get_answer_choices_expr() is not None:
+ answer_choices = template.get_answer_choices_expr()
+ else:
+ answer_choices = ""
+ state.answer_choices = st.text_input(
+ "Answer Choices",
+ value=answer_choices,
+ help="A Jinja expression for computing answer choices. "
+ "Separate choices with a triple bar (|||).",
+ )
+
+ # Jinja
+ state.jinja = st.text_area("Template", height=40, value=template.jinja)
+
+ # Submit form
+ if st.form_submit_button("Save"):
+ if (
+ updated_template_name in dataset_templates.all_template_names
+ and updated_template_name != state.template_name
+ ):
+ st.error(
+ f"A prompt with the name {updated_template_name} already exists "
+ f"for dataset {state.templates_key}."
+ )
+ elif updated_template_name == "":
+ st.error("Need to provide a prompt name.")
+ else:
+ # Parses state.answer_choices
+ if state.answer_choices == "":
+ updated_answer_choices = None
+ else:
+ updated_answer_choices = state.answer_choices
+
+ dataset_templates.update_template(
+ state.template_name,
+ updated_template_name,
+ state.jinja,
+ state.reference,
+ state.metadata,
+ updated_answer_choices,
+ )
+ # Update the state as well
+ state.template_name = updated_template_name
+ #
+ # Displays template output on current example if a template is selected
+ # (in second column)
+ #
+ with col2:
+ if state.template_name is not None:
+ st.empty()
+ template = dataset_templates[state.template_name]
+ prompt = template.apply(example)
+ if prompt == [""]:
+ st.write("∅∅∅ *Blank result*")
+ else:
+ st.write("Input")
+ show_text(prompt[0], width=40)
+ if len(prompt) > 1:
+ st.write("Target")
+ show_text(prompt[1], width=40)
+
+ #
+ # Must sync state at end
+ #
+ state.sync()
+
+
+if __name__ == "__main__":
+ run_app()
diff --git a/promptsource/session.py b/promptsource/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..75d22656fe75e47c6a09e9f1f99f66e0853a8ef8
--- /dev/null
+++ b/promptsource/session.py
@@ -0,0 +1,89 @@
+#
+# Code for managing session state, which is needed for multi-input forms
+# See https://github.com/streamlit/streamlit/issues/1557
+#
+# This code is taken from
+# https://gist.github.com/okld/0aba4869ba6fdc8d49132e6974e2e662
+#
+from streamlit.hashing import _CodeHasher
+from streamlit.report_thread import get_report_ctx
+from streamlit.server.server import Server
+
+
+class _SessionState:
+ def __init__(self, session, hash_funcs):
+ """Initialize SessionState instance."""
+ self.__dict__["_state"] = {
+ "data": {},
+ "hash": None,
+ "hasher": _CodeHasher(hash_funcs),
+ "is_rerun": False,
+ "session": session,
+ }
+
+ def __call__(self, **kwargs):
+ """Initialize state data once."""
+ for item, value in kwargs.items():
+ if item not in self._state["data"]:
+ self._state["data"][item] = value
+
+ def __getitem__(self, item):
+ """Return a saved state value, None if item is undefined."""
+ return self._state["data"].get(item, None)
+
+ def __getattr__(self, item):
+ """Return a saved state value, None if item is undefined."""
+ return self._state["data"].get(item, None)
+
+ def __setitem__(self, item, value):
+ """Set state value."""
+ self._state["data"][item] = value
+
+ def __setattr__(self, item, value):
+ """Set state value."""
+ self._state["data"][item] = value
+
+ def clear(self):
+ """Clear session state and request a rerun."""
+ self._state["data"].clear()
+ self._state["session"].request_rerun(None)
+
+ def sync(self):
+ """
+ Rerun the app from the beginning with all state values up to date,
+ in order to fix rollbacks.
+ """
+ data_to_bytes = self._state["hasher"].to_bytes(self._state["data"], None)
+
+ # Ensure to rerun only once to avoid infinite loops
+ # caused by a constantly changing state value at each run.
+ #
+ # Example: state.value += 1
+ if self._state["is_rerun"]:
+ self._state["is_rerun"] = False
+
+ elif self._state["hash"] is not None:
+ if self._state["hash"] != data_to_bytes:
+ self._state["is_rerun"] = True
+ self._state["session"].request_rerun(None)
+
+ self._state["hash"] = data_to_bytes
+
+
+def _get_session():
+ session_id = get_report_ctx().session_id
+ session_info = Server.get_current()._get_session_info(session_id)
+
+ if session_info is None:
+ raise RuntimeError("Couldn't get your Streamlit Session object.")
+
+ return session_info.session
+
+
+def _get_state(hash_funcs=None):
+ session = _get_session()
+
+ if not hasattr(session, "_custom_session_state"):
+ session._custom_session_state = _SessionState(session, hash_funcs)
+
+ return session._custom_session_state
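+
+
+# Illustrative usage sketch (the `counter` attribute and the `st` import are
+# hypothetical, not part of this module):
+#
+#   state = _get_state()
+#   if state.counter is None:  # unset attributes read as None
+#       state.counter = 0
+#   st.write(state.counter)    # the value persists across Streamlit reruns
+#   state.sync()               # call once at the very end of the script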
diff --git a/promptsource/templates.py b/promptsource/templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a03407af543ae40d716b4e395e942337b427dcc
--- /dev/null
+++ b/promptsource/templates.py
@@ -0,0 +1,731 @@
+import logging
+import os
+import random
+import uuid
+from collections import Counter, defaultdict
+from shutil import rmtree
+from typing import Dict, List, Optional, Tuple
+
+import pandas as pd
+import pkg_resources
+import yaml
+from jinja2 import BaseLoader, Environment, meta
+
+
+# Truncation of jinja template variables
+# 1710 = 300 words x 4.7 avg characters per word + 300 spaces, rounded up here to 2048
+TEXT_VAR_LENGTH = 2048
+
+# Local path to the folder containing the templates
+TEMPLATES_FOLDER_PATH = pkg_resources.resource_filename(__name__, "templates")
+
+env = Environment(loader=BaseLoader)
+
+# Allow the python function zip()
+env.globals.update(zip=zip)
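+
+# Illustrative use inside a template (the variable names are hypothetical):
+#   {% for q, a in zip(questions, answers) %}Q: {{q}} A: {{a}} {% endfor %}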
+
+# These are users whose datasets should be included in the results returned by
+# filter_english_datasets (regardless of their metadata)
+INCLUDED_USERS = {"Zaid", "craffel"}
+
+# These are the metrics with which templates can be tagged
+METRICS = {
+ "BLEU",
+ "ROUGE",
+ "Squad",
+ "Trivia QA",
+ "Accuracy",
+ "Pearson Correlation",
+ "Spearman Correlation",
+ "MultiRC",
+ "AUC",
+ "COQA F1",
+ "Edit Distance",
+ "Mean Reciprocal Rank",
+ "Other",
+}
+
+# These are the languages with which templates can be tagged. Keys are ISO 639-1
+# tags, which are the actual tags we use. Values are English names shown in the
+# UI for convenience.
+LANGUAGES = {
+ "ab": "Abkhazian",
+ "aa": "Afar",
+ "af": "Afrikaans",
+ "ak": "Akan",
+ "sq": "Albanian",
+ "am": "Amharic",
+ "ar": "Arabic",
+ "an": "Aragonese",
+ "hy": "Armenian",
+ "as": "Assamese",
+ "av": "Avaric",
+ "ae": "Avestan",
+ "ay": "Aymara",
+ "az": "Azerbaijani",
+ "bm": "Bambara",
+ "ba": "Bashkir",
+ "eu": "Basque",
+ "be": "Belarusian",
+ "bn": "Bengali",
+ "bi": "Bislama",
+ "bs": "Bosnian",
+ "br": "Breton",
+ "bg": "Bulgarian",
+ "my": "Burmese",
+ "ca": "Catalan, Valencian",
+ "ch": "Chamorro",
+ "ce": "Chechen",
+ "ny": "Chichewa, Chewa, Nyanja",
+ "zh": "Chinese",
+ "cu": "Church Slavic, Old Slavonic, Church Slavonic, Old Bulgarian, Old Church Slavonic",
+ "cv": "Chuvash",
+ "kw": "Cornish",
+ "co": "Corsican",
+ "cr": "Cree",
+ "hr": "Croatian",
+ "cs": "Czech",
+ "da": "Danish",
+ "dv": "Divehi, Dhivehi, Maldivian",
+ "nl": "Dutch, Flemish",
+ "dz": "Dzongkha",
+ "en": "English",
+ "eo": "Esperanto",
+ "et": "Estonian",
+ "ee": "Ewe",
+ "fo": "Faroese",
+ "fj": "Fijian",
+ "fi": "Finnish",
+ "fr": "French",
+ "fy": "Western Frisian",
+ "ff": "Fulah",
+ "gd": "Gaelic, Scottish Gaelic",
+ "gl": "Galician",
+ "lg": "Ganda",
+ "ka": "Georgian",
+ "de": "German",
+ "el": "Greek, Modern (1453–)",
+ "kl": "Kalaallisut, Greenlandic",
+ "gn": "Guarani",
+ "gu": "Gujarati",
+ "ht": "Haitian, Haitian Creole",
+ "ha": "Hausa",
+ "he": "Hebrew",
+ "hz": "Herero",
+ "hi": "Hindi",
+ "ho": "Hiri Motu",
+ "hu": "Hungarian",
+ "is": "Icelandic",
+ "io": "Ido",
+ "ig": "Igbo",
+ "id": "Indonesian",
+ "ia": "Interlingua (International Auxiliary Language Association)",
+ "ie": "Interlingue, Occidental",
+ "iu": "Inuktitut",
+ "ik": "Inupiaq",
+ "ga": "Irish",
+ "it": "Italian",
+ "ja": "Japanese",
+ "jv": "Javanese",
+ "kn": "Kannada",
+ "kr": "Kanuri",
+ "ks": "Kashmiri",
+ "kk": "Kazakh",
+ "km": "Central Khmer",
+ "ki": "Kikuyu, Gikuyu",
+ "rw": "Kinyarwanda",
+ "ky": "Kirghiz, Kyrgyz",
+ "kv": "Komi",
+ "kg": "Kongo",
+ "ko": "Korean",
+ "kj": "Kuanyama, Kwanyama",
+ "ku": "Kurdish",
+ "lo": "Lao",
+ "la": "Latin",
+ "lv": "Latvian",
+ "li": "Limburgan, Limburger, Limburgish",
+ "ln": "Lingala",
+ "lt": "Lithuanian",
+ "lu": "Luba-Katanga",
+ "lb": "Luxembourgish, Letzeburgesch",
+ "mk": "Macedonian",
+ "mg": "Malagasy",
+ "ms": "Malay",
+ "ml": "Malayalam",
+ "mt": "Maltese",
+ "gv": "Manx",
+ "mi": "Maori",
+ "mr": "Marathi",
+ "mh": "Marshallese",
+ "mn": "Mongolian",
+ "na": "Nauru",
+ "nv": "Navajo, Navaho",
+ "nd": "North Ndebele",
+ "nr": "South Ndebele",
+ "ng": "Ndonga",
+ "ne": "Nepali",
+ "no": "Norwegian",
+ "nb": "Norwegian Bokmål",
+ "nn": "Norwegian Nynorsk",
+ "ii": "Sichuan Yi, Nuosu",
+ "oc": "Occitan",
+ "oj": "Ojibwa",
+ "or": "Oriya",
+ "om": "Oromo",
+ "os": "Ossetian, Ossetic",
+ "pi": "Pali",
+ "ps": "Pashto, Pushto",
+ "fa": "Persian",
+ "pl": "Polish",
+ "pt": "Portuguese",
+ "pa": "Punjabi, Panjabi",
+ "qu": "Quechua",
+ "ro": "Romanian, Moldavian, Moldovan",
+ "rm": "Romansh",
+ "rn": "Rundi",
+ "ru": "Russian",
+ "se": "Northern Sami",
+ "sm": "Samoan",
+ "sg": "Sango",
+ "sa": "Sanskrit",
+ "sc": "Sardinian",
+ "sr": "Serbian",
+ "sn": "Shona",
+ "sd": "Sindhi",
+ "si": "Sinhala, Sinhalese",
+ "sk": "Slovak",
+ "sl": "Slovenian",
+ "so": "Somali",
+ "st": "Southern Sotho",
+ "es": "Spanish, Castilian",
+ "su": "Sundanese",
+ "sw": "Swahili",
+ "ss": "Swati",
+ "sv": "Swedish",
+ "tl": "Tagalog",
+ "ty": "Tahitian",
+ "tg": "Tajik",
+ "ta": "Tamil",
+ "tt": "Tatar",
+ "te": "Telugu",
+ "th": "Thai",
+ "bo": "Tibetan",
+ "ti": "Tigrinya",
+ "to": "Tonga (Tonga Islands)",
+ "ts": "Tsonga",
+ "tn": "Tswana",
+ "tr": "Turkish",
+ "tk": "Turkmen",
+ "tw": "Twi",
+ "ug": "Uighur, Uyghur",
+ "uk": "Ukrainian",
+ "ur": "Urdu",
+ "uz": "Uzbek",
+ "ve": "Venda",
+ "vi": "Vietnamese",
+ "vo": "Volapük",
+ "wa": "Walloon",
+ "cy": "Welsh",
+ "wo": "Wolof",
+ "xh": "Xhosa",
+ "yi": "Yiddish",
+ "yo": "Yoruba",
+ "za": "Zhuang, Chuang",
+ "zu": "Zulu",
+}
+
+
+def highlight(input):
+ return "<span style='color: #F08080'>" + input + "</span>"
+
+
+def choice(choices):
+ return random.choice(choices)
+
+
+def most_frequent(items):
+ """Returns the set of items which appear most frequently in the input"""
+ if not items:
+ return
+ item_counts = Counter(items).most_common()
+ max_freq = item_counts[0][1]
+ most_frequent_items = [c[0] for c in item_counts if c[1] == max_freq]
+ return most_frequent_items
+
+
+env.filters["highlight"] = highlight
+env.filters["choice"] = choice
+env.filters["most_frequent"] = most_frequent
+
+
+class Template(yaml.YAMLObject):
+ """
+ A prompt template.
+ """
+
+ yaml_tag = "!Template"
+
+ def __init__(self, name, jinja, reference, metadata=None, answer_choices=None):
+ """
+ Creates a prompt template.
+
+ A prompt template is expressed in Jinja. It is rendered using an example
+ from the corresponding Hugging Face datasets library (a dictionary). The
+ separator ||| should appear once to divide the template into prompt and
+ output. Generally, the prompt should provide information on the desired
+ behavior, e.g., text passage and instructions, and the output should be
+ a desired response.
+
+ :param name: unique name (per dataset) for template
+ :param jinja: template expressed in Jinja
+ :param reference: string describing author or paper reference for template
+ :param metadata: a Metadata object with template annotations
+ :param answer_choices: Jinja expression for answer choices. Should produce
+ a ||| delimited string of choices that enumerates
+ the possible completions for templates that should
+ be evaluated as ranked completions. If None, then
+ the template is open-ended. This list is accessible
+ from within Jinja as the variable `answer_choices`.
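+
+ A minimal illustrative template (the field names are hypothetical):
+
+ Template("sentiment", "Review: {{text}} Positive? ||| {{label}}",
+ "some reference", answer_choices="yes ||| no")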
+ """
+ self.id = str(uuid.uuid4())
+ self.name = name
+ self.jinja = jinja
+ self.reference = reference
+ self.metadata = metadata if metadata is not None else Template.Metadata()
+ self.answer_choices = answer_choices
+
+ def get_id(self):
+ """
+ Returns the id of the template
+
+ :return: unique id for template
+ """
+ return self.id
+
+ def get_name(self):
+ """
+ Returns the name of the template
+
+ :return: unique (per dataset) name for template
+ """
+ return self.name
+
+ def get_reference(self):
+ """
+ Returns the bibliographic reference (or author) for the template
+
+ :return: reference as a string
+ """
+ return self.reference
+
+ def get_answer_choices_expr(self):
+ """
+ Returns a Jinja expression for computing the answer choices from an example.
+
+ :return: String, or None if no answer choices
+ """
+ return self.answer_choices
+
+ def get_answer_choices_list(self, example):
+ """
+ Returns a list of answer choices for a given example
+
+ :return: list of strings, or None if get_answer_choices_expr is None
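+
+ For instance (illustrative), with answer_choices set to "yes ||| no",
+ this returns ['yes', 'no'] for any example.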
+ """
+ jinja = self.get_answer_choices_expr()
+ if jinja is None:
+ return None
+
+ rtemplate = env.from_string(jinja)
+ protected_example = self._escape_pipe(example)
+ rendered_choices = rtemplate.render(**protected_example)
+ return [self._unescape_pipe(answer_choice.strip()) for answer_choice in rendered_choices.split("|||")]
+
+ def get_fixed_answer_choices_list(self):
+ """
+ Returns a list of answer choices that is static across examples, if possible
+ :return: list of strings, or None if no static list exists
+ """
+ jinja = self.get_answer_choices_expr()
+ if jinja is None:
+ return None
+
+ parse = env.parse(jinja)
+ variables = meta.find_undeclared_variables(parse)
+ if len(variables) == 0:
+ rtemplate = env.from_string(jinja)
+ rendered_choices = rtemplate.render()
+ return [answer_choice.strip() for answer_choice in rendered_choices.split("|||")]
+ else:
+ return None
+
+ def apply(self, example, truncate=True, highlight_variables=False):
+ """
+ Creates a prompt by applying this template to an example
+
+ :param example: the dataset example to create a prompt for
+ :param truncate: if True, example fields will be truncated to TEXT_VAR_LENGTH chars
+ :param highlight_variables: highlight the added variables
+ :return: tuple of 2 strings, for prompt and output
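+
+ A quick illustrative sketch (the example dict is hypothetical):
+
+ >>> t = Template("demo", "Q: {{question}} ||| {{answer}}", "")
+ >>> t.apply({"question": "2+2?", "answer": "4"})
+ ['Q: 2+2?', '4']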
+ """
+ jinja = self.jinja
+
+ # Truncates the prompt if needed
+ if truncate:
+ trunc_command = (
+ f" | string | truncate({TEXT_VAR_LENGTH}) }}}}" # Escaping curly braces requires doubling them
+ )
+ jinja = jinja.replace("}}", trunc_command)
+
+ # Highlights text that was substituted for variables, if requested
+ if highlight_variables:
+ jinja = jinja.replace("}}", " | highlight }}")
+ rtemplate = env.from_string(jinja)
+
+ protected_example = self._escape_pipe(example)
+
+ # Adds in answer_choices variable
+ if "answer_choices" in protected_example:
+ raise ValueError("Example contains the restricted key 'answer_choices'.")
+
+ protected_example["answer_choices"] = self.get_answer_choices_list(example)
+
+ # Renders the Jinja template
+ rendered_example = rtemplate.render(**protected_example)
+
+ # Splits on the separator, and then replaces back any occurrences of the
+ # separator in the original example
+ return [self._unescape_pipe(part).strip() for part in rendered_example.split("|||")]
+
+ pipe_protector = "3ed2dface8203c4c9dfb1a5dc58e41e0"
+
+ @classmethod
+ def _escape_pipe(cls, example):
+ # Replaces any occurrences of the "|||" separator in the example,
+ # which will be replaced back after splitting
+ protected_example = {
+ key: value.replace("|||", cls.pipe_protector) if isinstance(value, str) else value
+ for key, value in example.items()
+ }
+ return protected_example
+
+ @classmethod
+ def _unescape_pipe(cls, string):
+ # replaces back any occurrences of the separator in a string
+ return string.replace(cls.pipe_protector, "|||")
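+
+ # Illustrative round trip: _escape_pipe({"t": "a ||| b"}) swaps the "|||"
+ # that came from the data for the sentinel, so apply() can safely split the
+ # rendered string on "|||"; _unescape_pipe then restores the original text.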
+
+ class Metadata(yaml.YAMLObject):
+ """
+ Metadata for a prompt template.
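+
+ An illustrative, fully annotated instance (the values are hypothetical):
+ Template.Metadata(original_task=True, choices_in_prompt=False,
+ metrics=["Accuracy"], languages=["en"])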
+ """
+
+ yaml_tag = "!TemplateMetadata"
+
+ def __init__(
+ self,
+ original_task: Optional[bool] = None,
+ choices_in_prompt: Optional[bool] = None,
+ metrics: Optional[List[str]] = None,
+ languages: Optional[List[str]] = None,
+ ):
+ """
+ Initializes template metadata.
+
+ In the following, trivial choices are defined as Yes/No, True/False,
+ etc. and nontrivial choices are other types of choices denoted in
+ the answer_choices field.
+
+ :param original_task: If True, this prompt asks a model to perform the original task designed for
+ this dataset.
+ :param choices_in_prompt: If True, the answer choices are included in the templates such that models
+ see those choices in the input. Only applicable to classification tasks.
+ :param metrics: List of strings denoting metrics to use for evaluation
+ :param languages: List of strings denoting languages used in the prompt (not the associated dataset!)
+ """
+ self.original_task = original_task
+ self.choices_in_prompt = choices_in_prompt
+ self.metrics = metrics
+ self.languages = languages
+
+
+class TemplateCollection:
+ """
+ This helper class wraps the DatasetTemplates class
+ - Initializes a DatasetTemplates object for each existing template folder
+ - Gives access to each DatasetTemplates
+ - Provides aggregated counts over all DatasetTemplates
+ """
+
+ def __init__(self):
+
+ # Dict of all the DatasetTemplates, key is the tuple (dataset_name, subset_name)
+ self.datasets_templates: Dict[Tuple[str, Optional[str]], DatasetTemplates] = self._collect_datasets()
+
+ @property
+ def keys(self):
+ return list(self.datasets_templates.keys())
+
+ def __len__(self) -> int:
+ return len(self.datasets_templates)
+
+ def remove(self, dataset_name: str, subset_name: Optional[str] = None) -> None:
+ del self.datasets_templates[dataset_name, subset_name]
+
+ def _collect_datasets(self) -> Dict[Tuple[str, Optional[str]], "DatasetTemplates"]:
+ """
+ Initialize a DatasetTemplates object for each templates.yaml detected in the templates folder
+
+ Returns: a dict with key=(dataset_name, subset_name)
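+
+ Illustrative layout (the folder names are hypothetical):
+ templates/ag_news/templates.yaml -> key ("ag_news", None)
+ templates/glue/cola/templates.yaml -> key ("glue", "cola")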
+ """
+ dataset_folders = os.listdir(TEMPLATES_FOLDER_PATH)
+ dataset_folders = [folder for folder in dataset_folders if not folder.startswith(".")]
+
+ output = {} # format is {(dataset_name, subset_name): DatasetTemplates}
+ for dataset in dataset_folders:
+ if dataset in INCLUDED_USERS:
+ for filename in os.listdir(os.path.join(TEMPLATES_FOLDER_PATH, dataset)):
+ output = {**output, **self._collect_dataset(dataset + "/" + filename)}
+ else:
+ output = {**output, **self._collect_dataset(dataset)}
+ return output
+
+ def _collect_dataset(self, dataset):
+ output = {} # format is {(dataset_name, subset_name): DatasetTemplates}
+ for filename in os.listdir(os.path.join(TEMPLATES_FOLDER_PATH, dataset)):
+ if filename.endswith(".yaml"):
+ # If there is no sub-folder, there is no subset for this dataset
+ output[(dataset, None)] = DatasetTemplates(dataset)
+ else:
+ # This is a subfolder, and its name corresponds to the subset name
+ output[(dataset, filename)] = DatasetTemplates(dataset_name=dataset, subset_name=filename)
+ return output
+
+ def get_dataset(self, dataset_name: str, subset_name: Optional[str] = None) -> "DatasetTemplates":
+ """
+ Return the DatasetTemplates object corresponding to the dataset name
+
+ :param dataset_name: name of the dataset to get
+ :param subset_name: name of the subset
+ """
+ # if the dataset does not exist, we add it
+ if dataset_name not in self.keys:
+ self.datasets_templates[(dataset_name, subset_name)] = DatasetTemplates(dataset_name, subset_name)
+
+ return self.datasets_templates[(dataset_name, subset_name)]
+
+ def get_templates_count(self) -> Dict:
+ """
+ Return the number of templates per dataset, aggregated over all datasets
+
+ NB: we don't break down datasets into subsets for this count, i.e. subset counts
+ are included in the dataset count
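+
+ For instance (illustrative): if "glue" had subsets "cola" (3 prompts) and
+ "sst2" (2 prompts), the returned dict would contain {"glue": 5}.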
+ """
+
+ count_dict = defaultdict(int)
+ for k, v in self.datasets_templates.items():
+ # Subsets count towards dataset count
+ count_dict[k[0]] += len(v)
+ # converting to regular dict
+ return dict(count_dict)
+
+
+class DatasetTemplates:
+ """
+ Class that wraps all templates for a specific dataset/subset and implements all the helper
+ functions necessary to read/write to the yaml file
+ """
+
+ TEMPLATES_KEY = "templates"
+ DATASET_KEY = "dataset"
+ SUBSET_KEY = "subset"
+ TEMPLATE_FILENAME = "templates.yaml"
+
+ def __init__(self, dataset_name: str, subset_name: str = None):
+ self.dataset_name: str = dataset_name
+ self.subset_name: str = subset_name
+ # dictionary is keyed by template name.
+ self.templates: Dict = self.read_from_file()
+
+ # Mapping from template name to template id
+ self.name_to_id_mapping = {}
+ self.sync_mapping()
+
+ def sync_mapping(self) -> None:
+ """
+ Re-compute the name_to_id_mapping to ensure it is in sync with self.templates
+ """
+ self.name_to_id_mapping = {template.name: template.id for template in self.templates.values()}
+
+ @property
+ def all_template_names(self) -> List[str]:
+ """
+ Sorted list of all template names for this dataset
+ """
+ return sorted([template.name for template in self.templates.values()])
+
+ @property
+ def folder_path(self) -> str:
+ if self.subset_name:
+ return os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name, self.subset_name)
+ else:
+ return os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name)
+
+ @property
+ def yaml_path(self) -> str:
+ return os.path.join(self.folder_path, self.TEMPLATE_FILENAME)
+
+ def format_for_dump(self) -> Dict:
+ """
+ Create a formatted dictionary for the class attributes
+ """
+ formatted_dict = {self.DATASET_KEY: self.dataset_name, self.TEMPLATES_KEY: self.templates}
+ if self.subset_name:
+ formatted_dict[self.SUBSET_KEY] = self.subset_name
+ return formatted_dict
+
+ def read_from_file(self) -> Dict:
+ """
+ Reads a file containing a prompt collection.
+ """
+
+ if not os.path.exists(self.yaml_path):
+ dataset_name = f"{self.dataset_name} {self.subset_name}" if self.subset_name else self.dataset_name
+ logging.warning(
+ f"Tried instantiating `DatasetTemplates` for {dataset_name}, but no prompts found. "
+ "Please ignore this warning if you are creating new prompts for this dataset."
+ )
+ return {}
+ yaml_dict = yaml.load(open(self.yaml_path, "r"), Loader=yaml.FullLoader)
+ return yaml_dict[self.TEMPLATES_KEY]
+
+ def write_to_file(self) -> None:
+ """
+ Writes to a file with the current prompt collection.
+ """
+ # Sync the mapping
+ self.sync_mapping()
+
+ # We only create the folder if a template is written
+ if not os.path.exists(self.folder_path):
+ os.makedirs(self.folder_path)
+ yaml.dump(self.format_for_dump(), open(self.yaml_path, "w"))
+
+ def add_template(self, template: "Template") -> None:
+ """
+ Adds a new template for the dataset
+
+ :param template: template
+ """
+ self.templates[template.get_id()] = template
+
+ self.write_to_file()
+
+ def remove_template(self, template_name: str) -> None:
+ """
+ Deletes a template
+
+ :param template_name: name of template to remove
+ """
+
+ # Even if we have an ID, we want to check for duplicate names
+ if template_name not in self.all_template_names:
+ raise ValueError(f"No template with name {template_name} for dataset {self.dataset_name} exists.")
+
+ del self.templates[self.name_to_id_mapping[template_name]]
+
+ if len(self.templates) == 0:
+ # There is no remaining template, we can remove the entire folder
+ self.delete_folder()
+ else:
+ # We just update the file
+ self.write_to_file()
+
+ def update_template(
+ self,
+ current_template_name: str,
+ new_template_name: str,
+ jinja: str,
+ reference: str,
+ metadata: Template.Metadata,
+ answer_choices: str,
+ ) -> None:
+ """
+ Updates a pre-existing template and writes changes
+
+ :param current_template_name: current name of the template stored in self.templates
+ :param new_template_name: new name for the template
+ :param jinja: new jinja entry
+ :param reference: new reference entry
+ :param metadata: a Metadata object with template annotations
+ :param answer_choices: new answer_choices string
+ """
+ template_id = self.name_to_id_mapping[current_template_name]
+ self.templates[template_id].name = new_template_name
+ self.templates[template_id].jinja = jinja
+ self.templates[template_id].reference = reference
+ self.templates[template_id].metadata = metadata
+ self.templates[template_id].answer_choices = answer_choices
+
+ self.write_to_file()
+
+ def delete_folder(self) -> None:
+ """
+ Delete the folder corresponding to self.folder_path
+ """
+ self.sync_mapping()
+
+ rmtree(self.folder_path)
+
+ # If it is a subset, we have to check whether to remove the dataset folder
+ if self.subset_name:
+ # have to check for other folders
+ base_dataset_folder = os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name)
+ if len(os.listdir(base_dataset_folder)) == 0:
+ rmtree(base_dataset_folder)
+
+ def __getitem__(self, template_key: str) -> "Template":
+ return self.templates[self.name_to_id_mapping[template_key]]
+
+ def __len__(self) -> int:
+ return len(self.templates)
+
+
+def get_templates_data_frame():
+ """
+ Gathers all template information into a Pandas DataFrame.
+
+ :return: Pandas DataFrame
+ """
+ data = {
+ "id": [],
+ "dataset": [],
+ "subset": [],
+ "name": [],
+ "reference": [],
+ "original_task": [],
+ "choices_in_prompt": [],
+ "metrics": [],
+ "languages": [],
+ "answer_choices": [],
+ "jinja": [],
+ }
+
+ template_collection = TemplateCollection()
+
+ for key in template_collection.keys:
+ templates = template_collection.get_dataset(key[0], key[1])
+ for template_name in templates.all_template_names:
+ template = templates[template_name]
+ data["id"].append(template.get_id())
+ data["dataset"].append(key[0])
+ data["subset"].append(key[1])
+ data["name"].append(template.get_name())
+ data["reference"].append(template.get_reference())
+ data["original_task"].append(template.metadata.original_task)
+ data["choices_in_prompt"].append(template.metadata.choices_in_prompt)
+ data["metrics"].append(template.metadata.metrics)
+ data["languages"].append(template.metadata.languages)
+ data["answer_choices"].append(template.get_answer_choices_expr())
+ data["jinja"].append(template.jinja)
+
+ return pd.DataFrame(data)
diff --git a/promptsource/templates/Zaid/coqa_expanded/templates.yaml b/promptsource/templates/Zaid/coqa_expanded/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24d95aec15ec60ae8cf73e40c3cc14a3a2b149e7
--- /dev/null
+++ b/promptsource/templates/Zaid/coqa_expanded/templates.yaml
@@ -0,0 +1,130 @@
+dataset: Zaid/coqa_expanded
+templates:
+ 12ad4331-d063-4b56-b0f6-76f59c690717: !Template
+ answer_choices: null
+ id: 12ad4331-d063-4b56-b0f6-76f59c690717
+ jinja: "Below is a passage, followed by a series of questions and answers about\
+ \ the passage. Answer the last question based on the information contained in\
+ \ the passage. If there is no answer in the passage, say \"unknown\".\n\nPassage:\
+ \ {{story}}\n\nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1\
+ \ %}\n{{answer[\"input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Verbose instructions
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ 2f9fb20d-f4c9-4371-9cd4-db47607cb7a3: !Template
+ answer_choices: null
+ id: 2f9fb20d-f4c9-4371-9cd4-db47607cb7a3
+ jinja: "What is the answer to the last question in the dialogue below? If there\
+ \ is no answer in the passage, say \"unknown\".\n\nPassage: {{story}}\n\nQ:\
+ \ {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"\
+ input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: What is the answer
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ 9aff8967-d41c-4d79-8ef4-fc3650773735: !Template
+ answer_choices: null
+ id: 9aff8967-d41c-4d79-8ef4-fc3650773735
+ jinja: "Complete the dialogue based on the information contained in the passage.\
+ \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{story}}\n\
+ \nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"\
+ input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Complete the dialogue
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ 9bc32f2e-eee6-4006-bce3-74a79403d33e: !Template
+ answer_choices: null
+ id: 9bc32f2e-eee6-4006-bce3-74a79403d33e
+ jinja: "Answer the last question based on the information contained in the passage.\
+ \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{story}}\n\
+ \nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"\
+ input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Answer the last question
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ bacb6534-e607-4afc-a412-ccfcd9fe38e2: !Template
+ answer_choices: null
+ id: bacb6534-e607-4afc-a412-ccfcd9fe38e2
+ jinja: 'In the passage below, extract the part which answers the last question.
+ If there is no answer in the passage, say "unknown".
+
+
+ Passage: {{story}}
+
+
+ Q: {{question}}
+
+ A: |||
+
+ {% if answer["answer_start"] != -1 %}
+
+ {{story[answer["answer_start"] : answer["answer_end"] ]}}
+
+ {% else %}
+
+ unknown
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: extract_answer
+ reference: ''
+ be39974f-aa86-4076-b444-bd3c2732b17b: !Template
+ answer_choices: null
+ id: be39974f-aa86-4076-b444-bd3c2732b17b
+ jinja: "Help me complete the dialogue about this passage. If there is no answer\
+ \ in the passage, say \"unknown\".\n\nPassage: {{story}}\n\nQ: {{question}}\
+ \ \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"input_text\"\
+ ]}}\n{% else %}\nunknown\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Help me
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ d95440ce-d538-40f8-ae09-664e05852ca8: !Template
+ answer_choices: null
+ id: d95440ce-d538-40f8-ae09-664e05852ca8
+ jinja: "{{story}}\n\nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] !=\
+ \ -1 %}\n{{answer[\"input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: GPT-3 Style
+ reference: 'Brown et al. NeurIPS 2020. Metric: variant of SQuAD (Section 6.1 of
+ the paper)'
diff --git a/promptsource/templates/Zaid/quac_expanded/templates.yaml b/promptsource/templates/Zaid/quac_expanded/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d976e3a164027fd84248433f46cc7365bc305c2
--- /dev/null
+++ b/promptsource/templates/Zaid/quac_expanded/templates.yaml
@@ -0,0 +1,91 @@
+dataset: Zaid/quac_expanded
+templates:
+ 01d8c949-89a7-4a44-9a39-6cf2ac3e0a7b: !Template
+ answer_choices: null
+ id: 01d8c949-89a7-4a44-9a39-6cf2ac3e0a7b
+ jinja: "What is the answer to the last question in the dialogue below? If there\
+ \ is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\n\
+ Q: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: What is the answer
+ reference: 'Metric: F1'
+ 1484c6e6-bf42-47ca-9ea7-c3c552a24de1: !Template
+ answer_choices: null
+ id: 1484c6e6-bf42-47ca-9ea7-c3c552a24de1
+ jinja: "{{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: GPT-3 Style
+ reference: 'Brown et al. NeurIPS 2020. Metric: F1'
+ 2bca0532-01a3-4a64-a228-a57ae0965719: !Template
+ answer_choices: null
+ id: 2bca0532-01a3-4a64-a228-a57ae0965719
+ jinja: "Below is a passage, followed by a series of questions and answers about\
+ \ the passage. Answer the last question based on the information contained in\
+ \ the passage. If there is no answer in the passage, say \"unknown\".\n\nPassage:\
+ \ {{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Verbose instructions
+ reference: 'Metric: F1'
+ 4abd0379-dbc0-4f71-901b-dd0af3581157: !Template
+ answer_choices: null
+ id: 4abd0379-dbc0-4f71-901b-dd0af3581157
+ jinja: "Answer the last question based on the information contained in the passage.\
+ \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\
+ \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Answer the last question
+ reference: 'Metric: F1'
+ 8ebbd098-b40c-4e69-8cbb-0ffecf0fe2a6: !Template
+ answer_choices: null
+ id: 8ebbd098-b40c-4e69-8cbb-0ffecf0fe2a6
+ jinja: "Complete the dialogue based on the information contained in the passage.\
+ \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\
+ \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Complete the dialogue
+ reference: 'Metric: F1'
+ e624695b-5d26-47cc-bdb4-ac2bee4ddaea: !Template
+ answer_choices: null
+ id: e624695b-5d26-47cc-bdb4-ac2bee4ddaea
+ jinja: "Help me complete the dialogue about this passage. If there is no answer\
+ \ in the passage, say \"unknown\".\n\nPassage: {{context}}\n\nQ: {{question}}\
+ \ \nA: ||| {{answer[\"texts\"][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Help me
+ reference: 'Metric: F1'
diff --git a/promptsource/templates/acronym_identification/templates.yaml b/promptsource/templates/acronym_identification/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fae68d2c7fdff59a25c78b19be3d6b94288d4dd
--- /dev/null
+++ b/promptsource/templates/acronym_identification/templates.yaml
@@ -0,0 +1,248 @@
+dataset: acronym_identification
+templates:
+ 64f438f2-9968-459f-82d2-24bad632b358: !Template
+ answer_choices: null
+ id: 64f438f2-9968-459f-82d2-24bad632b358
+ jinja: "{% set random_abbr = '' %}\n{% set _dummy = none %}\n{% set abbr_exp_dict\
+ \ = namespace(value = {}) %}\n{% set abbr_string=namespace(value='') %}\n{%\
+ \ set exp_string=namespace(value='')%}\n \n{% for label_idx in range(labels|length)\
+ \ %}\n {% if labels[label_idx] == 0 %}{# Long Beginning #}\n {% set exp_string.value\
+ \ = tokens[label_idx] %}{# Create new long string #}\n {% elif labels[label_idx]\
+ \ == 1 %}{# Short Beginning #}\n {% if abbr_string.value!='' and abbr_string.value\
+ \ not in abbr_exp_dict.value.keys()%}{# Some string already present #}\n \
+ \ {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:''}) %}{#\
+ \ Discard this string as a new short string is coming #}\n {% endif %}\n\
+ \ {% set abbr_string.value = tokens[label_idx] %}{# Create new short string\
+ \ #}\n {% elif labels[label_idx] == 2 %}{# Long Intermediate #}\n {% set\
+ \ exp_string.value = exp_string.value+' '+tokens[label_idx] %}{# Update existing\
+ \ string #}\n {% elif labels[label_idx] == 3 %}{# Short Intermediate #}\n \
+ \ {% set abbr_string.value = abbr_string.value+tokens[label_idx] %}{# Update\
+ \ existing string #}\n {% else %}{# Other #}\n {# Both non-empty, and first\
+ \ characters match #}\n {% if abbr_string.value!='' and exp_string.value!=''\
+ \ and exp_string.value.split()[0][0]|lower in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower\
+ \ in abbr_string.value|lower%}\n {# Update both the dictionaries #}\n \
+ \ {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+ \ %}\n {# Empty both the strings #}\n {% set abbr_string.value= ''\
+ \ %}\n {% set exp_string.value= '' %}\n {% endif %}\n {% endif %}\n\
+ {% endfor %}\n{# Both non-empty, and first characters match #}\n{% if abbr_string.value!=''\
+ \ and exp_string.value!='' %}\n {% if exp_string.value.split()[0][0]|lower\
+ \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower\
+ \ %}\n {# Update both the dictionaries #}\n {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+ \ %}\n {% elif abbr_exp_dict.value.items()|length==0 %}\n {% set _dummy\
+ \ = abbr_exp_dict.value.update({abbr_string.value:exp_string.value}) %}\n {%\
+ \ endif %}\n{% else %}\n {% if abbr_string.value!=''%}\n {% if abbr_string.value\
+ \ not in abbr_exp_dict.value.keys() %}\n {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:''})\
+ \ %}\n {% endif %}\n {% endif %}\n{% endif %}\n{% if abbr_exp_dict.value\
+ \ %}\n{% set random_abbr = abbr_exp_dict.value.keys()|list|choice %}\nGiven\
+ \ the tokens below, find the expansion (acronym meaning) of \"{{random_abbr}}\"\
+ . Return {{\"\\\"Unclear\\\"\"}} if the expansion can't be found.\n \nTokens:\
+ \ {{tokens|join(' ')}}\nExpansion: |||\n{% if random_abbr in abbr_exp_dict.value.keys()\
+ \ and abbr_exp_dict.value[random_abbr]!='' %}\n{{abbr_exp_dict.value[random_abbr]}}\n\
+ {% else %}\nUnclear\n{% endif %}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_acronym_meaning
+ reference: 'Given the tokens, find the expansion of an abbreviation in the tokens.
+ Metrics: Precision, Recall, F1'
+ 81babc83-18cd-4eed-a343-8ede56b21df5: !Template
+ answer_choices: null
+ id: 81babc83-18cd-4eed-a343-8ede56b21df5
+ jinja: "Specification for BIO tags: \"{{\"B-short\"}}\" and \"{{\"I-short\"}}\"\
+ \ represent respectively the beginning and intermediate tokens for abbreviations\
+ \ (acronyms).\"{{\"B-long\"}}\" and \"{{\"I-long\"}}\" represent respectively\
+ \ the beginning and intermediate tokens for expansions of abbreviations (acronyms\
+ \ meaning). All other tokens are represented by \"{{\"O\"}}\". \n\nGiven the\
+ \ space-separated tokens below, write down for each token the corresponding\
+ \ BIO tag. Use a space to separate tags in the answer.\n\nTokens: {{tokens|join('\
+ \ ')}}\nBIO tags:|||{% for label in labels %}{{[\"B-long\", \"B-short\", \"\
+ I-long\", \"I-short\", \"O\"][label]}}{% if not loop.last %} {%endif %}{% endfor\
+ \ %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: acronyms_and_expansions_bio_encode
+ reference: 'Given the space-separated tokens, generate BIO encoding for abbreviations.
+ Metrics: Precision, Recall, F1'
+ 8832e5f7-7c45-46da-b85f-71fcb444f264: !Template
+ answer_choices: null
+ id: 8832e5f7-7c45-46da-b85f-71fcb444f264
+ jinja: 'List all the expansions (meanings) of the acronyms present in the following
+ space-separated tokens. Return {{"\"No expansions found\""}} if the expansions
+ can''t be found.
+
+
+ Tokens: {{tokens|join('' '')}}
+
+ |||
+
+ {% set abbr_string=namespace(value='''') %}
+
+ {% set answer_list=namespace(value=[]) %}
+
+ {% for label_idx in range(labels|length) %}
+
+ {% if labels[label_idx] == 0 %}
+
+ {% set abbr_string.value = tokens[label_idx] %}
+
+ {% elif abbr_string.value!='''' and labels[label_idx]==2%}
+
+ {% set abbr_string.value = abbr_string.value+'' ''+tokens[label_idx] %}
+
+ {% elif abbr_string.value!='''' and labels[label_idx]!=2%}
+
+ {% set answer_list.value = answer_list.value +[abbr_string.value] %}
+
+ {% set abbr_string.value = '''' %}
+
+ {% endif %}
+
+ {% if loop.last and abbr_string.value!='''' %}
+
+ {% set answer_list.value = answer_list.value +[abbr_string.value] %}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% if answer_list.value|length!=0 %}
+
+ {{ answer_list.value|join('', '') }}
+
+ {% else %}
+
+ No expansions found
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: list_expansions
+ reference: 'Given the tokens, list the expansion tokens. Metrics: Precision, Recall,
+ F1'
+ cae58242-cde9-472d-ae9e-56fc7e79c0d1: !Template
+ answer_choices: null
+ id: cae58242-cde9-472d-ae9e-56fc7e79c0d1
+ jinja: "List all the acryonyms in the following space-separated tokens: \n\n{{tokens|join('\
+ \ ')}}\n|||\n{% set abbr_string=namespace(value='') %}\n{% set answer_list=namespace(value=[])\
+ \ %}\n{% for label_idx in range(labels|length) %}\n{% if labels[label_idx] ==\
+ \ 1 %}\n{% set abbr_string.value = tokens[label_idx] %}\n{% elif abbr_string.value!=''\
+ \ and labels[label_idx]==3%}\n{% set abbr_string.value = abbr_string.value+tokens[label_idx]\
+ \ %}\n{% elif abbr_string.value!='' and labels[label_idx]!=3 %}\n{% set answer_list.value\
+ \ = answer_list.value +[abbr_string.value] %}\n{% set abbr_string.value = ''\
+ \ %}\n{% endif %}\n{% if loop.last and abbr_string.value!='' %}\n{% set answer_list.value\
+ \ = answer_list.value +[abbr_string.value] %}\n{% endif %}\n{% endfor %}\n{{\
+ \ answer_list.value|join(', ') }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: list_abbreviations
+ reference: 'Given the tokens, list the abbreviations. Metrics: Precision, Recall,
+ F1'
+ e4e42433-0e37-4aa5-bbce-7f336ecac6a3: !Template
+ answer_choices: null
+ id: e4e42433-0e37-4aa5-bbce-7f336ecac6a3
+ jinja: "{% set _dummy = none %}\n{% set abbr_exp_dict = namespace(value = {})\
+ \ %}\n{% set abbr_string=namespace(value='') %}\n{% set exp_string=namespace(value='')%}\n\
+ \ \n{% for label_idx in range(labels|length) %}\n {% if labels[label_idx] ==\
+ \ 0 %}{# Long Beginning #}\n {% set exp_string.value = tokens[label_idx]\
+ \ %}{# Create new long string #}\n {% elif labels[label_idx] == 1 %}{# Short\
+ \ Beginning #}\n {% if abbr_string.value!='' and abbr_string.value not in\
+ \ abbr_exp_dict.value.keys()%}{# Some string already present #}\n {% set\
+ \ _dummy = abbr_exp_dict.value.update({abbr_string.value:''}) %}{# Discard this\
+ \ string as a new short string is coming #}\n {% endif %}\n {% set abbr_string.value\
+ \ = tokens[label_idx] %}{# Create new short string #}\n {% elif labels[label_idx]\
+ \ == 2 %}{# Long Intermediate #}\n {% set exp_string.value = exp_string.value+'\
+ \ '+tokens[label_idx] %}{# Update existing string #}\n {% elif labels[label_idx]\
+ \ == 3 %}{# Short Intermediate #}\n {% set abbr_string.value = abbr_string.value+tokens[label_idx]\
+ \ %}{# Update existing string #}\n {% else %}{# Other #}\n {# Both non-empty,\
+ \ and first characters match #}\n {% if abbr_string.value!='' and exp_string.value!=''\
+ \ and exp_string.value.split()[0][0]|lower in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower\
+ \ in abbr_string.value|lower%}\n {# Update both the dictionaries #}\n \
+ \ {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+ \ %}\n {# Empty both the strings #}\n {% set abbr_string.value= ''\
+ \ %}\n {% set exp_string.value= '' %}\n {% endif %}\n {% endif %}\n\
+ {% endfor %}\n{# Both non-empty, and first characters match #}\n{% if abbr_string.value!=''\
+ \ and exp_string.value!='' %}\n {% if exp_string.value.split()[0][0]|lower\
+ \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower\
+ \ %}\n {# Update both the dictionaries #}\n {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+ \ %}\n {% elif abbr_exp_dict.value.items()|length==0 %}\n {% set _dummy\
+ \ = abbr_exp_dict.value.update({abbr_string.value:exp_string.value}) %}\n {%\
+ \ endif %}\n{% else %}\n {% if abbr_string.value!=''%}\n {% if abbr_string.value\
+ \ not in abbr_exp_dict.value.keys() %}\n {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:''})\
+ \ %}\n {% endif %}\n {% endif %}\n{% endif %}\n \nGiven the following tokens,\
+ \ find the abbreviations (acronyms) and their expansions (acronym meanings).\
+ \ Return {{\"\\\"Unclear\\\"\"}} if the expansion can't be found.\n \nTokens:\
+ \ {{tokens|join(' ')}}\n|||\n{% for item, value in abbr_exp_dict.value.items()\
+ \ %}\n{{item}} : {% if value!='' %}{{value}}{% else %}Unclear{% endif %}\n{%endfor%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_acronyms_and_expansions
+ reference: 'Given the tokens, find the abbreviation mapping. Metrics: Precision,
+ Recall, F1'
+ eed32ee4-ebc3-499f-ba61-e91461f56ccb: !Template
+ answer_choices: null
+ id: eed32ee4-ebc3-499f-ba61-e91461f56ccb
+ jinja: "{% set random_exp = '' %}{% set _dummy = none %}{% set exp_abbr_dict =\
+ \ namespace(value = {}) %}{% set abbr_string=namespace(value='') %}{% set exp_string=namespace(value='')%}{%\
+ \ for label_idx in range(labels|length) %}{% if labels[label_idx] == 0 %}{#\
+ \ Long Beginning #}{% if exp_string.value!='' and exp_string.value not in exp_abbr_dict.value.keys()\
+ \ %}{# Some string already present #}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:''})\
+ \ %}{# Discard this string as a new long string is coming #} {% endif %}{% set\
+ \ exp_string.value = tokens[label_idx] %}{# Create new long string #}{% elif\
+ \ labels[label_idx] == 1 %}{# Short Beginning #}{% set abbr_string.value = tokens[label_idx]\
+ \ %}{# Create new short string #}{% elif labels[label_idx] == 2 %}{# Long Intermediate\
+ \ #}{% set exp_string.value = exp_string.value+' '+tokens[label_idx] %}{# Update\
+ \ existing string #}{% elif labels[label_idx] == 3 %}{# Short Intermediate #}{%\
+ \ set abbr_string.value = abbr_string.value+tokens[label_idx] %}{# Update existing\
+ \ string #}{% else %}{# Other #}{# Both non-empty, and first characters match\
+ \ #}{% if abbr_string.value!='' and exp_string.value!='' and exp_string.value.split()[0][0]|lower\
+ \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower%}{#\
+ \ Update both the dictionaries #}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:abbr_string.value})\
+ \ %}{# Empty both the strings #}{% set abbr_string.value= '' %}{% set exp_string.value=\
+ \ '' %}{% endif %}{% endif %}{% endfor %}{# Both non-empty, and first characters\
+ \ match #}{% if abbr_string.value!='' and exp_string.value!='' %}{% if exp_string.value.split()[0][0]|lower\
+ \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower\
+ \ %}{# Update the dictionary #}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:abbr_string.value})\
+ \ %}{% elif exp_abbr_dict.value.items()|length==0 %}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:abbr_string.value})\
+ \ %}{% endif %}{% else %}{% if exp_string.value!='' %}{% if exp_string.value\
+ \ not in exp_abbr_dict.value.keys() %}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:''})\
+ \ %}{% endif %}{% endif %}{% endif %}{% if exp_abbr_dict.value.items()|length!=0\
+ \ %}{% set random_exp = exp_abbr_dict.value.keys()|list|choice %}Given the tokens\
+ \ below, find the abbreviation (acronym) for: \"{{random_exp}}\". Return {{\"\
+ \\\"Unclear\\\"\"}} if the abbreviation can't be found.\n \nTokens: {{tokens|join('\
+ \ ')}}\nAcronym: |||{% if random_exp in exp_abbr_dict.value.keys() and exp_abbr_dict.value[random_exp]!=''\
+ \ %}{{exp_abbr_dict.value[random_exp]}}{% else %}Unclear{% endif %}{% endif\
+ \ %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_acronym
+ reference: 'Given the tokens, find the abbreviation for an expansion. Metrics:
+ Precision, Recall, F1'
diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..930dbdb06d320024c556d8b5396c705c89323011
--- /dev/null
+++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml
@@ -0,0 +1,50 @@
+dataset: ade_corpus_v2
+subset: Ade_corpus_v2_classification
+templates:
+ 56bd12a8-b8ee-464e-98cc-5f586ba9f74d: !Template
+ answer_choices: No ||| Yes
+ id: 56bd12a8-b8ee-464e-98cc-5f586ba9f74d
+ jinja: 'Please answer the Yes/No question below.
+
+
+ Is "{{text}}" related to adverse drug effect (ADE)? ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: binary-classification
+ reference: ''
+ 78c4ce65-dd66-46ed-878d-11f4eca5e544: !Template
+ answer_choices: No ||| Yes
+ id: 78c4ce65-dd66-46ed-878d-11f4eca5e544
+ jinja: "Read the below text and answer the question.\n\nText: {{text}} \n\nQuestion:\
+ \ Is the above text related to adverse drug effect (ADE)? Your answer should\
+ \ be either \"Yes\" or \"No\".\n\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: verbose-binary-classification
+ reference: ''
+ dabc0337-5bd3-4150-98b3-794a15ce1a3a: !Template
+ answer_choices: null
+ id: dabc0337-5bd3-4150-98b3-794a15ce1a3a
+ jinja: "{% if label==1 %}\nPlease write a short medical report that is related\
+ \ to an adverse drug effect (ADE). \n{% else %}\nWrite a medical report that\
+ \ is not related to an adverse drug effect (ADE). \n{% endif %}\n|||\n{{text}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: label-to-text
+ reference: ''
diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ee815d774cbfccfb69accb7b2a3a0ccfa8d402c4
--- /dev/null
+++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml
@@ -0,0 +1,125 @@
+dataset: ade_corpus_v2
+subset: Ade_corpus_v2_drug_ade_relation
+templates:
+ 0ec35408-652d-4ebc-9478-5a0d330c24c8: !Template
+ answer_choices: null
+ id: 0ec35408-652d-4ebc-9478-5a0d330c24c8
+ jinja: 'Read the text below and answer the question.
+
+
+ Text: {{text}}
+
+
+ Question: What drug has an effect of {{effect}}?
+
+ |||
+
+ {{drug}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-drug
+ reference: ''
+ 2682a789-a435-4976-b34f-f376991c842a: !Template
+ answer_choices: null
+ id: 2682a789-a435-4976-b34f-f376991c842a
+ jinja: '{{drug}} has an effect of {{effect}}. Please write a short medical report
+ about this.
+
+ |||
+
+ {{text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: drug-and-effect-to-text
+ reference: ''
+ 61ba3622-72bc-4fd8-acfc-826bc2a93aa5: !Template
+ answer_choices: null
+ id: 61ba3622-72bc-4fd8-acfc-826bc2a93aa5
+ jinja: 'Read the text below and answer the question.
+
+
+ Text: {{text}}
+
+
+ Question: What effect does {{drug}} have?
+
+ |||
+
+ {{effect}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-effect
+ reference: ''
+ 6acf3588-baa1-4ff6-87c4-4c2356855464: !Template
+ answer_choices: null
+ id: 6acf3588-baa1-4ff6-87c4-4c2356855464
+ jinja: 'Read the text below and answer the question.
+
+
+ Text: {{text}}
+
+
+ Question: What are the drug and its effect in the above text?
+
+
+ You should answer in the "drug" and "effect" format (e.g., alcohol and high
+ blood pressure)
+
+ |||
+
+ {{drug}} and {{effect}}.'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-drug-and-effect
+ reference: ''
+ db68e609-ba92-40ae-b161-8b7710124142: !Template
+ answer_choices: null
+ id: db68e609-ba92-40ae-b161-8b7710124142
+ jinja: 'Read the text below and answer the following two questions.
+
+
+ Text: {{text}}
+
+
+ Question 1: What is the drug in the above text?
+
+
+ Question 2: What is its effect?
+
+
+ You should answer in the "drug" and "effect" format (e.g., alcohol and high
+ blood pressure)
+
+ |||
+
+ {{drug}} and {{effect}}.'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-drug-and-effect-two-questions
+ reference: ''
diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d39c973a141be4395068e09dca2d2cb5926411c4
--- /dev/null
+++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml
@@ -0,0 +1,114 @@
+dataset: ade_corpus_v2
+subset: Ade_corpus_v2_drug_dosage_relation
+templates:
+ 1de6d411-ed0a-4d48-806e-cad009f07a65: !Template
+ answer_choices: null
+ id: 1de6d411-ed0a-4d48-806e-cad009f07a65
+ jinja: 'Read the text below and answer the question.
+
+
+ Text: {{text}}
+
+
+ Question: What drug has a dosage of {{dosage}}?
+
+ |||
+
+ {{drug}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-drug
+ reference: ''
+ 1e719388-59c9-4b0a-9ed9-dd02b6ddd0a6: !Template
+ answer_choices: null
+ id: 1e719388-59c9-4b0a-9ed9-dd02b6ddd0a6
+ jinja: '{{dosage}} of {{drug}} was given to a patient. Please write a short medical
+ report about this.
+
+ |||
+
+ {{text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: drug-and-dosage-to-text
+ reference: ''
+ 2bed0f04-8249-4248-86ea-e3a1971b2e1b: !Template
+ answer_choices: null
+ id: 2bed0f04-8249-4248-86ea-e3a1971b2e1b
+ jinja: 'Read the text below and answer the following two questions.
+
+
+ Text: {{text}}
+
+
+
+ Question 1: What is the drug in the above text?
+
+
+ Question 2: What is its dosage?
+
+
+ You should answer in the "drug" and "dosage" format (e.g., Aspirin and 500mg)
+
+ |||
+
+ {{drug}} and {{dosage}}.'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-drug-and-dosage-two-questions
+ reference: ''
+ ca175bed-d046-40e7-9dbb-1e50fde7e603: !Template
+ answer_choices: null
+ id: ca175bed-d046-40e7-9dbb-1e50fde7e603
+ jinja: 'Read the text below and answer the question.
+
+
+ Text: {{text}}
+
+
+ Question: What is the dosage of {{drug}}?
+
+ |||
+
+ {{dosage}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-dosage
+ reference: ''
+ ce5208ac-6b4c-4a35-8738-e20232df1917: !Template
+ answer_choices: null
+ id: ce5208ac-6b4c-4a35-8738-e20232df1917
+ jinja: "Read the below text and answer the question.\n\nText: {{text}}\n\nQuestion:\
+ \ What are the drug and its dosage of the above text? \n\nYou should answer\
+ \ in the \"drug\" and \"dosage\" format (e.g., Aspirin and 500mg)\n|||\n{{drug}}\
+ \ and {{dosage}}."
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: find-drug-and-dosage
+ reference: ''
diff --git a/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml b/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..89b74eae86a72a06d9396f1c3a9b6e600ccf9755
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml
@@ -0,0 +1,120 @@
+dataset: adversarial_qa
+subset: adversarialQA
+templates:
+ 00755780-f3c0-44b4-b159-8f3873cdb16c: !Template
+ answer_choices: null
+ id: 00755780-f3c0-44b4-b159-8f3873cdb16c
+ jinja: 'I want to test the ability of students to read a passage and answer questions
+ about it. Could you please come up with a good question for the passage "{{context}}"?
+ |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: 'Input: Context, Output: Question (generate a question)'
+ 3b2459cc-6600-443c-abf8-8f60c34cd998: !Template
+ answer_choices: null
+ id: 3b2459cc-6600-443c-abf8-8f60c34cd998
+ jinja: '{% if metadata.split != "test" %}
+
+ I know that the answer to the question "{{question}}" is in "{{context}}". Can
+ you tell me what it is? |||
+
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: tell_what_it_is
+ reference: 'Input: QC, Output: A (rephrase)'
+ 5bdb1815-5c6f-49a3-ad1d-367344420701: !Template
+ answer_choices: null
+ id: 5bdb1815-5c6f-49a3-ad1d-367344420701
+ jinja: '{% if metadata.split != "test" %}
+
+ Question: "{{question}}"
+
+
+ Context: "{{context}}"
+
+
+ Answer:
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_context_answer
+ reference: 'Input: QC, Output: Answer (short form)'
+ a0872cde-2f19-4ae6-919a-868da47bfbcb: !Template
+ answer_choices: null
+ id: a0872cde-2f19-4ae6-919a-868da47bfbcb
+ jinja: '{% if metadata.split != "test" %}
+
+ Extract the answer to the question from the following context.
+
+ Question: {{question}}
+
+ Context: {{context}}|||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: based_on
+ reference: ''
+ a64d5a15-68e2-4d1c-b30a-ca8250c860f9: !Template
+ answer_choices: null
+ id: a64d5a15-68e2-4d1c-b30a-ca8250c860f9
+ jinja: '{% if metadata.split != "test" %}
+
+ Given the following passage
+
+
+ "{{context}}",
+
+
+ answer the following question. Note that the answer is present within the text.
+
+
+ Question: {{question}} |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_following_q
+ reference: 'Input: QC, Output: Answer'
diff --git a/promptsource/templates/adversarial_qa/dbert/templates.yaml b/promptsource/templates/adversarial_qa/dbert/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a2a33629c4560c5519dc93a8c5d9a8b39f503f3
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/dbert/templates.yaml
@@ -0,0 +1,120 @@
+dataset: adversarial_qa
+subset: dbert
+templates:
+ 00755780-f3c0-44b4-b159-8f3873cdb16a: !Template
+ answer_choices: null
+ id: 00755780-f3c0-44b4-b159-8f3873cdb16a
+ jinja: 'I want to test the ability of students to read a passage and answer questions
+ about it. Could you please come up with a good question for the passage "{{context}}"?
+ |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: 'Input: Context, Output: Question (generate a question)'
+ 3b2459cc-6600-443c-abf8-8f60c34cd99a: !Template
+ answer_choices: null
+ id: 3b2459cc-6600-443c-abf8-8f60c34cd99a
+ jinja: '{% if metadata.split != "test" %}
+
+ I know that the answer to the question "{{question}}" is in "{{context}}". Can
+ you tell me what it is? |||
+
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: tell_what_it_is
+ reference: 'Input: QC, Output: A (rephrase)'
+ 5bdb1815-5c6f-49a3-ad1d-36734442070a: !Template
+ answer_choices: null
+ id: 5bdb1815-5c6f-49a3-ad1d-36734442070a
+ jinja: '{% if metadata.split != "test" %}
+
+ Question: "{{question}}"
+
+
+ Context: "{{context}}"
+
+
+ Answer:
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_context_answer
+ reference: 'Input: QC, Output: Answer (short form)'
+ a0872cde-2f19-4ae6-919a-868da47bfbca: !Template
+ answer_choices: null
+ id: a0872cde-2f19-4ae6-919a-868da47bfbca
+ jinja: '{% if metadata.split != "test" %}
+
+ Extract the answer to the question from the following context.
+
+ Question: {{question}}
+
+ Context: {{context}}|||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: based_on
+ reference: ''
+ a64d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template
+ answer_choices: null
+ id: a64d5a15-68e2-4d1c-b30a-ca8250c860fa
+ jinja: '{% if metadata.split != "test" %}
+
+ Given the following passage
+
+
+ "{{context}}",
+
+
+ answer the following question. Note that the answer is present within the text.
+
+
+ Question: {{question}} |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_following_q
+ reference: 'Input: QC, Output: Answer'
diff --git a/promptsource/templates/adversarial_qa/dbidaf/templates.yaml b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..047946f71435dcca707550070c1d21f7cd459220
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml
@@ -0,0 +1,120 @@
+dataset: adversarial_qa
+subset: dbidaf
+templates:
+ 41f28b31-d0fc-4f20-a0a2-ff21813e298e: !Template
+ answer_choices: null
+ id: 41f28b31-d0fc-4f20-a0a2-ff21813e298e
+ jinja: '{% if metadata.split != "test" %}
+
+ Extract the answer to the question from the following context.
+
+ Question: {{question}}
+
+ Context: {{context}}|||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: based_on
+ reference: ''
+ a64d5a15-68e2-4d1c-b30a-ca8250c860d9: !Template
+ answer_choices: null
+ id: a64d5a15-68e2-4d1c-b30a-ca8250c860d9
+ jinja: '{% if metadata.split != "test" %}
+
+ Given the following passage
+
+
+ "{{context}}",
+
+
+ answer the following question. Note that the answer is present within the text.
+
+
+ Question: {{question}} |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_following_q
+ reference: 'Input: QC, Output: Answer'
+ c7a80603-d610-4999-98a7-815b2f84592d: !Template
+ answer_choices: null
+ id: c7a80603-d610-4999-98a7-815b2f84592d
+ jinja: 'I want to test the ability of students to read a passage and answer questions
+ about it. Could you please come up with a good question for the passage "{{context}}"?
+ |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: 'Input: Context, Output: Question (generate a question)'
+ ce9bc00a-567b-4c4e-aad7-df6f5d5d57bb: !Template
+ answer_choices: null
+ id: ce9bc00a-567b-4c4e-aad7-df6f5d5d57bb
+ jinja: '{% if metadata.split != "test" %}
+
+ I know that the answer to the question "{{question}}" is in "{{context}}". Can
+ you tell me what it is? |||
+
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: tell_what_it_is
+ reference: 'Input: QC, Output: A (rephrase)'
+ fa185424-6ebe-49b8-b4ed-7632ca33c361: !Template
+ answer_choices: null
+ id: fa185424-6ebe-49b8-b4ed-7632ca33c361
+ jinja: '{% if metadata.split != "test" %}
+
+ Question: "{{question}}"
+
+
+ Context: "{{context}}"
+
+
+ Answer:
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_context_answer
+ reference: 'Input: QC, Output: Answer (short form)'
diff --git a/promptsource/templates/adversarial_qa/droberta/templates.yaml b/promptsource/templates/adversarial_qa/droberta/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef3a2ef6f6382f6ad01dcfd5b3630e97400c0c58
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/droberta/templates.yaml
@@ -0,0 +1,120 @@
+dataset: adversarial_qa
+subset: droberta
+templates:
+ 00755780-f3c0-44b4-b159-8f3873cdb163: !Template
+ answer_choices: null
+ id: 00755780-f3c0-44b4-b159-8f3873cdb163
+ jinja: 'I want to test the ability of students to read a passage and answer questions
+ about it. Could you please come up with a good question for the passage "{{context}}"?
+ |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: 'Input: Context, Output: Question (generate a question)'
+ 3b2459cc-6600-443c-abf8-8f60c34cd993: !Template
+ answer_choices: null
+ id: 3b2459cc-6600-443c-abf8-8f60c34cd993
+ jinja: '{% if metadata.split != "test" %}
+
+ I know that the answer to the question "{{question}}" is in "{{context}}". Can
+ you tell me what it is? |||
+
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: tell_what_it_is
+ reference: 'Input: QC, Output: A (rephrase)'
+ 5bdb1815-5c6f-49a3-ad1d-367344420703: !Template
+ answer_choices: null
+ id: 5bdb1815-5c6f-49a3-ad1d-367344420703
+ jinja: '{% if metadata.split != "test" %}
+
+ Question: "{{question}}"
+
+
+ Context: "{{context}}"
+
+
+ Answer:
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_context_answer
+ reference: 'Input: QC, Output: Answer (short form)'
+ a0872cde-2f19-4ae6-919a-868da47bfbc3: !Template
+ answer_choices: null
+ id: a0872cde-2f19-4ae6-919a-868da47bfbc3
+ jinja: '{% if metadata.split != "test" %}
+
+ Extract the answer to the question from the following context.
+
+ Question: {{question}}
+
+ Context: {{context}}|||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: based_on
+ reference: ''
+ a64d5a15-68e2-4d1c-b30a-ca8250c860f3: !Template
+ answer_choices: null
+ id: a64d5a15-68e2-4d1c-b30a-ca8250c860f3
+ jinja: '{% if metadata.split != "test" %}
+
+ Given the following passage
+
+
+ "{{context}}",
+
+
+ answer the following question. Note that the answer is present within the text.
+
+
+ Question: {{question}} |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_following_q
+ reference: 'Input: QC, Output: Answer'
diff --git a/promptsource/templates/aeslc/templates.yaml b/promptsource/templates/aeslc/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..b42a26380096a69b6dd6d9f332e9383b99d6c8ca
--- /dev/null
+++ b/promptsource/templates/aeslc/templates.yaml
@@ -0,0 +1,163 @@
+dataset: aeslc
+templates:
+ 0bef38b8-6d0b-440b-8a3d-db034aaf5a15: !Template
+ answer_choices: null
+ id: 0bef38b8-6d0b-440b-8a3d-db034aaf5a15
+ jinja: '{{ email_body }}
+
+
+ What is this email about? |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: what_is_this_email_about
+ reference: Ask a question from a context
+ 11de8b2c-8016-4b98-b5f2-c1a7e5c0e433: !Template
+ answer_choices: null
+ id: 11de8b2c-8016-4b98-b5f2-c1a7e5c0e433
+ jinja: 'What is the subject of this email:
+
+
+ {{ email_body }} |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: what_is_the_subject_of_this_email
+ reference: Ask a question from a context
+ 12616e45-1d61-4924-8ce4-fe3efd061e7a: !Template
+ answer_choices: null
+ id: 12616e45-1d61-4924-8ce4-fe3efd061e7a
+ jinja: 'The text below is the content of an email. What is the topic of this email?
+
+
+ {{ email_body }} |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: the_text_below
+ reference: ''
+ 25179c66-5638-4de5-bdce-d6dccec64c65: !Template
+ answer_choices: null
+ id: 25179c66-5638-4de5-bdce-d6dccec64c65
+ jinja: 'Generate a subject line for the email body below:
+
+
+ {{ email_body }} |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: generate_subject_line
+ reference: Instruct to generate
+ 8917d7f0-5f72-418f-a2d9-98d4a8da13b0: !Template
+ answer_choices: null
+ id: 8917d7f0-5f72-418f-a2d9-98d4a8da13b0
+ jinja: 'What is this email about:
+
+
+ {{ email_body }} |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: what_about
+ reference: Ask a question from a context
+ d1c5da3f-f1e4-4891-abcb-79463b30a616: !Template
+ answer_choices: null
+ id: d1c5da3f-f1e4-4891-abcb-79463b30a616
+ jinja: '{{ email_body }}
+
+
+ What is the subject of this email? |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: what_subject_of_email
+ reference: Ask a question from a context
+ d9dd8e72-acb4-4aad-aeb7-a877bacbb402: !Template
+ answer_choices: null
+ id: d9dd8e72-acb4-4aad-aeb7-a877bacbb402
+ jinja: '{{ email_body }}
+
+
+ Generate a subject line for the email body above. |||
+
+
+ {{ subject_line }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: generate_subject
+ reference: Instruct to generate
+ dca29ebb-2372-423f-b93c-21d99eddf455: !Template
+ answer_choices: null
+ id: dca29ebb-2372-423f-b93c-21d99eddf455
+ jinja: '{{ email_body }}
+
+
+ The above text is the content of an email. What is the topic of this email?
+ |||
+
+
+ {{ subject_line }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - Other
+ original_task: true
+ name: what_topic
+ reference: ''
diff --git a/promptsource/templates/ag_news/templates.yaml b/promptsource/templates/ag_news/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cdfdf6acb2ee7a88f35a50f2156f6b3c234b8232
--- /dev/null
+++ b/promptsource/templates/ag_news/templates.yaml
@@ -0,0 +1,108 @@
+dataset: ag_news
+templates:
+ 24e44a81-a18a-42dd-a71c-5b31b2d2cb39: !Template
+ answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+ id: 24e44a81-a18a-42dd-a71c-5b31b2d2cb39
+ jinja: "What label best describes this news article?\n{{text}} ||| \n{{answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_question_first
+ reference: ''
+ 8fdc1056-1029-41a1-9c67-354fc2b8ceaf: !Template
+ answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+ id: 8fdc1056-1029-41a1-9c67-354fc2b8ceaf
+ jinja: "Is this a piece of news regarding {{\"world politics, sports, business,\
+ \ or science and technology\"}}?\n{{text}} \n||| \n{{answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_with_choices_question_first
+ reference: ''
+ 918267e0-af68-4117-892d-2dbe66a58ce9: !Template
+ answer_choices: Politician ||| Athlete ||| Business executive ||| Scientist
+ id: 918267e0-af68-4117-892d-2dbe66a58ce9
+ jinja: 'Would you recommend the following article to a {{"politician"}}, an {{"athlete"}},
+ a {{"business executive"}}, or a {{"scientist"}}?
+
+
+ {{ text }}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: recommend
+ reference: ''
+ 9345df33-4f23-4944-a33c-eef94e626862: !Template
+ answer_choices: World News ||| Sports ||| Business ||| Science and Technology
+ id: 9345df33-4f23-4944-a33c-eef94e626862
+ jinja: "{{text}} \n\nWhich of the following sections of a newspaper would this\
+ \ article likely appear in? {{\"World News\"}}, {{\"Sports\"}}, {{\"Business\"\
+ }}, or {{\"Science and Technology\"}}? ||| \n{{answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_section_choices
+ reference: ''
+ 98534347-fff7-4c39-a795-4e69a44791f7: !Template
+ answer_choices: World News ||| Sports ||| Business ||| Science and Technology
+ id: 98534347-fff7-4c39-a795-4e69a44791f7
+ jinja: "{{text}} \n\nWhich section of a newspaper would this article likely appear\
+ \ in? ||| \n{{answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_section
+ reference: ''
+ b401b0ee-6ffe-4a91-8e15-77ee073cd858: !Template
+ answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+ id: b401b0ee-6ffe-4a91-8e15-77ee073cd858
+ jinja: "{{text}} \nIs this a piece of news regarding {{\"world politics, sports,\
+ \ business, or science and technology\"}}? ||| \n{{answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_with_choices
+ reference: ''
+ cb355f33-7e8c-4455-a72b-48d315bd4f60: !Template
+ answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+ id: cb355f33-7e8c-4455-a72b-48d315bd4f60
+ jinja: "{{text}} \nWhat label best describes this news article? ||| \n{{answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify
+ reference: ''
diff --git a/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml b/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c4950d917974fc8261f2eab9193ea087e211c62
--- /dev/null
+++ b/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml
@@ -0,0 +1,142 @@
+dataset: ai2_arc
+subset: ARC-Challenge
+templates:
+ 32f7eb4d-dd38-4503-b67d-a8a96ab40449: !Template
+ answer_choices: null
+ id: 32f7eb4d-dd38-4503-b67d-a8a96ab40449
+ jinja: 'Pick and copy all the incorrect options for the following question:
+
+
+ {{question}}
+
+
+ Options:
+
+ - {{choices["text"] | join("\n- ")}}|||
+
+ {% for i in range(choices["label"]|length) %}
+
+ {% if i != choices["label"].index(answerKey) %}
+
+ - {{choices["text"][i]}}
+
+ {% endif %}
+
+ {% endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: false
+ name: pick_false_options
+ reference: ''
+ 540ebc31-2ea6-4feb-a6fd-67b6e71cf20a: !Template
+ answer_choices: '{{choices.label | join("|||")}}'
+ id: 540ebc31-2ea6-4feb-a6fd-67b6e71cf20a
+ jinja: "Here's a problem to solve: {{question}}\n\nAmong the 4 following options,\
+ \ which is the correct answer?\n{% for letter, t in zip(answer_choices, choices.text)\
+ \ %}\n- {{letter}}: {{t}}\n {% endfor %}|||{{answerKey}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: heres_a_problem
+ reference: ''
+ 5ec2b8ca-e4c0-444e-b097-89ccce811550: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 5ec2b8ca-e4c0-444e-b097-89ccce811550
+ jinja: '{{question}}
+
+
+ Options:
+
+ - {{answer_choices | join("\n- ")}}|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_options
+ reference: ''
+ 5ff84886-9d5f-40d1-80d7-2a39b7c16ec6: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 5ff84886-9d5f-40d1-80d7-2a39b7c16ec6
+ jinja: 'I am hesitating between 4 options to answer the following question. Which
+ option should I choose?
+
+ Question: {{question}}
+
+ Possibilities:
+
+ - {{answer_choices | join("\n- ")}}|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: i_am_hesitating
+ reference: ''
+ ced2b33b-b590-4522-b041-51d7dd669561: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: ced2b33b-b590-4522-b041-51d7dd669561
+ jinja: 'I gave my students this multiple choice question: {{question}}
+
+
+ Only one answer is correct among these 4 choices:
+
+ - {{answer_choices | join("\n- ")}}
+
+
+ Could you tell me which one is correct?|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: multiple_choice
+ reference: ''
+ e371fc1a-8edb-477b-b345-9d73e97ffade: !Template
+ answer_choices: '{{choices.label | join("|||")}}'
+ id: e371fc1a-8edb-477b-b345-9d73e97ffade
+ jinja: 'Pick the most correct option to answer the following question.
+
+
+ {{question}}
+
+
+ Options:
+
+ {% for letter, t in zip(answer_choices, choices.text) %}
+
+ - {{letter}}: {{t}}
+
+ {% endfor %} |||
+
+ {{answerKey}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_the_most_correct_option
+ reference: ''
diff --git a/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml b/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7c3a7b3c32204161eb7f58035d140b94272d5b8
--- /dev/null
+++ b/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml
@@ -0,0 +1,142 @@
+dataset: ai2_arc
+subset: ARC-Easy
+templates:
+ 033498ca-3d9a-47e3-b631-d881ab53b5ad: !Template
+ answer_choices: '{{choices.label | join("|||")}}'
+ id: 033498ca-3d9a-47e3-b631-d881ab53b5ad
+ jinja: 'Pick the most correct option to answer the following question.
+
+
+ {{question}}
+
+
+ Options:
+
+ {% for letter, t in zip(answer_choices, choices.text) %}
+
+ - {{letter}}: {{t}}
+
+ {% endfor %} |||
+
+ {{answerKey}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_the_most_correct_option
+ reference: ''
+ 252aa566-9482-4e81-aad9-664a9bebd8e8: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 252aa566-9482-4e81-aad9-664a9bebd8e8
+ jinja: '{{question}}
+
+
+ Options:
+
+ - {{answer_choices | join("\n- ")}}|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_options
+ reference: ''
+ 4fb13ac1-f770-45ea-b5d5-91ac50b0d609: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 4fb13ac1-f770-45ea-b5d5-91ac50b0d609
+ jinja: 'I am hesitating between 4 options to answer the following question. Which
+ option should I choose?
+
+ Question: {{question}}
+
+ Possibilities:
+
+ - {{answer_choices | join("\n- ")}}|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: i_am_hesitating
+ reference: ''
+ 8c689423-880d-402b-8c7d-a1a98c7589e8: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 8c689423-880d-402b-8c7d-a1a98c7589e8
+ jinja: 'I gave my students this multiple choice question: {{question}}
+
+
+ Only one answer is correct among these 4 choices:
+
+ - {{answer_choices | join("\n- ")}}
+
+
+ Could you tell me which one is correct?|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: multiple_choice
+ reference: ''
+ c988ee30-a523-457b-af21-87353349b543: !Template
+ answer_choices: null
+ id: c988ee30-a523-457b-af21-87353349b543
+ jinja: 'Pick and copy all the incorrect options for the following question:
+
+
+ {{question}}
+
+
+ Options:
+
+ - {{choices["text"] | join("\n- ")}}|||
+
+ {% for i in range(choices["label"]|length) %}
+
+ {% if i != choices["label"].index(answerKey) %}
+
+ - {{choices["text"][i]}}
+
+ {% endif %}
+
+ {% endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: false
+ name: pick_false_options
+ reference: ''
+ d90da519-0e2c-4f9b-a546-7cba82824eb2: !Template
+ answer_choices: '{{choices.label | join("|||")}}'
+ id: d90da519-0e2c-4f9b-a546-7cba82824eb2
+ jinja: "Here's a problem to solve: {{question}}\n\nAmong the 4 following options,\
+ \ which is the correct answer?\n{% for letter, t in zip(answer_choices, choices.text)\
+ \ %}\n- {{letter}}: {{t}}\n {% endfor %}|||{{answerKey}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: heres_a_problem
+ reference: ''
diff --git a/promptsource/templates/amazon_polarity/templates.yaml b/promptsource/templates/amazon_polarity/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4558071bea4a7b0343beb0cd4599070996e61542
--- /dev/null
+++ b/promptsource/templates/amazon_polarity/templates.yaml
@@ -0,0 +1,192 @@
+dataset: amazon_polarity
+templates:
+ 1e90a24a-1182-43dd-9445-22f2e56e5761: !Template
+ answer_choices: Negative ||| Positive
+ id: 1e90a24a-1182-43dd-9445-22f2e56e5761
+ jinja: 'Title: {{title}}
+
+ Review: {{content}}
+
+ Is the review positive or negative? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Is_this_review
+ reference: ''
+ 3a48f287-6a4b-4df0-ab2d-2eaf6cb8e53d: !Template
+ answer_choices: No ||| Yes
+ id: 3a48f287-6a4b-4df0-ab2d-2eaf6cb8e53d
+ jinja: 'Based on this review, would the user recommend this product?
+
+ ===
+
+ Review: {{content}}
+
+ Answer: |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: User_recommend_this_product
+ reference: 'Reformulation equivalent to sentiment analysis: would the user recommend
+ this product?'
+ 592caf8f-f8ff-426a-a61b-b7e95ed510b6: !Template
+ answer_choices: No ||| Yes
+ id: 592caf8f-f8ff-426a-a61b-b7e95ed510b6
+ jinja: 'Is this product review positive?
+
+ Title: {{title}}
+
+ Review: {{content}}
+
+ Answer: |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Is_this_product_review_positive
+ reference: ''
+ 745b9c05-10df-4a7e-81ad-1b88cefcb166: !Template
+ answer_choices: Yes ||| No
+ id: 745b9c05-10df-4a7e-81ad-1b88cefcb166
+ jinja: 'Title: {{title}}
+
+ Review: {{content}}
+
+ Is this product review negative?|||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Is_this_review_negative
+ reference: ''
+ 8abb5377-5dd3-4402-92a5-0d81adb6a325: !Template
+ answer_choices: Negative ||| Positive
+ id: 8abb5377-5dd3-4402-92a5-0d81adb6a325
+ jinja: 'Title: {{title}}
+
+ Review: {{content}}
+
+ Does this product review convey a negative or positive sentiment?|||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: convey_negative_or_positive_sentiment
+ reference: ''
+ 9df70cdf-f8ed-4e79-8e2f-b4668058d637: !Template
+ answer_choices: Negative ||| Positive
+ id: 9df70cdf-f8ed-4e79-8e2f-b4668058d637
+ jinja: 'Is there a negative or positive tone to this product review?
+
+ ===
+
+ Title: {{title}}
+
+ Review: {{content}}
+
+ Answer: |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: negative_or_positive_tone
+ reference: ''
+ b13369e8-0500-4e93-90d4-8e6814bfb97b: !Template
+ answer_choices: dissatisfied ||| satisfied
+ id: b13369e8-0500-4e93-90d4-8e6814bfb97b
+ jinja: 'Here is a review left by a customer on a product. Would you say he was
+ {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+ Title: {{title}}
+
+ Review: {{content}}
+
+ |||
+
+ {{answer_choices[label]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: user_satisfied
+ reference: ''
+ b13369e8-0500-4e93-90d4-8e6814bfb98b: !Template
+ answer_choices: decrease ||| increase
+ id: b13369e8-0500-4e93-90d4-8e6814bfb98b
+ jinja: 'You are considering whether to buy a product. You look at the reviews.
+ Would the following review {{answer_choices[0]}} or {{answer_choices[1]}} the
+ chances of you buying the product?
+
+ Review title: {{title}}
+
+ Product review: {{content}}
+
+ |||
+
+ {{answer_choices[label]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: would_you_buy
+ reference: ''
+ b13369e8-0500-4e93-90d4-8e6814bfb99b: !Template
+ answer_choices: unflattering ||| flattering
+ id: b13369e8-0500-4e93-90d4-8e6814bfb99b
+ jinja: 'Title: {{title}}
+
+ Product review: {{content}}
+
+ Would you say this review depicts the product in a {{answer_choices[1]}} or
+ {{answer_choices[0]}} light?
+
+ |||
+
+ {{answer_choices[label]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: flattering_or_not
+ reference: ''
diff --git a/promptsource/templates/amazon_reviews_multi/en/templates.yaml b/promptsource/templates/amazon_reviews_multi/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ede73254bcced344d199cd0866a469d6b7a3a29e
--- /dev/null
+++ b/promptsource/templates/amazon_reviews_multi/en/templates.yaml
@@ -0,0 +1,147 @@
+dataset: amazon_reviews_multi
+subset: en
+templates:
+ 073dfd34-5aef-461a-81d9-bdb8e00f12c9: !Template
+ answer_choices: null
+ id: 073dfd34-5aef-461a-81d9-bdb8e00f12c9
+ jinja: 'Write a title for the review below:
+
+ ===
+
+ {{review_body}} |||
+
+ {{review_title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_title
+ reference: Review Title based on Review body
+ 0f5b005b-c6bc-4fe0-bde4-0917cdba39e8: !Template
+ answer_choices: 1|||2|||3|||4|||5
+ id: 0f5b005b-c6bc-4fe0-bde4-0917cdba39e8
+ jinja: 'Rate the product by the number of stars based on the review title below:
+ (1 being the lowest and 5 the highest)
+
+ ===
+
+ {{review_title}} |||
+
+ {{answer_choices[stars-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: false
+ name: prompt_title_to_star
+ reference: Rating based on review title
+ 199ad6de-5bcc-421e-90e2-4b6edada6a01: !Template
+ answer_choices: 1|||2|||3|||4|||5
+ id: 199ad6de-5bcc-421e-90e2-4b6edada6a01
+ jinja: 'Rate the product by the number of stars based on the review body below:
+ (1 being the lowest and 5 the highest)
+
+ ===
+
+ {{review_body}} |||
+
+ {{answer_choices[stars-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: prompt_review_to_star
+ reference: Rating based on review body
+ 37806754-58f7-4383-961a-fe2c88109fcd: !Template
+ answer_choices: 1|||2|||3|||4|||5
+ id: 37806754-58f7-4383-961a-fe2c88109fcd
+ jinja: 'Rate the product by the number of stars based on the review below: (1
+ being the lowest and 5 the highest)
+
+ ===
+
+ {{review_title}}. {{review_body}} |||
+
+ {{answer_choices[stars-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: prompt_body_title_to_star
+ reference: Rating based on review body,title
+ 7ecaf718-c85d-47f4-83cb-f14c58f2911f: !Template
+ answer_choices: null
+ id: 7ecaf718-c85d-47f4-83cb-f14c58f2911f
+ jinja: 'Guess the product category from the following review:
+
+ ===
+
+ {{review_body}} |||
+
+ {{product_category}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: false
+ name: prompt_review_to_category
+ reference: Product category based on review body
+ 8e8973f6-431f-4e78-b83a-a86c04655882: !Template
+ answer_choices: 1|||2|||3|||4|||5
+ id: 8e8973f6-431f-4e78-b83a-a86c04655882
+ jinja: 'Rate the product by the number of stars based on the review below: (1
+ being the lowest and 5 the highest)
+
+ ===
+
+ {{review_title}}. {{review_body}} Product category: {{product_category}}|||
+
+ {{answer_choices[stars-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: prompt_body_title_category_to_star
+ reference: Rating based on review body, title, category
+ c4717e75-4d3e-4b79-9737-167155f51513: !Template
+ answer_choices: null
+ id: c4717e75-4d3e-4b79-9737-167155f51513
+ jinja: 'Guess the product category from the review title below:
+
+ ===
+
+ {{review_title}} |||
+
+ {{product_category}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: false
+ name: prompt_title_to_product_category
+ reference: Product category from review title
diff --git a/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml b/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3bc39ee9f11431922beff0ee95088bb1d6d7f03
--- /dev/null
+++ b/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml
@@ -0,0 +1,79 @@
+dataset: amazon_us_reviews
+subset: Wireless_v1_00
+templates:
+ 5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e: !Template
+ answer_choices: null
+ id: 5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e
+ jinja: "Give a short sentence describing the following product review:\n{{review_body}}\
+ \ \n|||\n{{review_headline}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Generate review headline based on review body
+ reference: Generate review headline based on review body
+ 9588a967-d698-4a33-9b96-a5254df9d260: !Template
+ answer_choices: null
+ id: 9588a967-d698-4a33-9b96-a5254df9d260
+ jinja: Generate a {{star_rating}}-star review (1 being lowest and 5 being highest)
+ about this product {{product_title}}. ||| {{review_body}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate review based on rating and product title
+ reference: Generate review based on rating and product title
+ 9a8b953d-2c68-4046-a7b7-8fd5f7469d10: !Template
+ answer_choices: '1 ||| 2 ||| 3 ||| 4 ||| 5'
+ id: 9a8b953d-2c68-4046-a7b7-8fd5f7469d10
+ jinja: "Given the following review headline \n{{review_headline}}\npredict the\
+ \ associated rating from the following choices\n- {{ answer_choices | join('\\\
+ n- ') }} \n(1 being lowest and 5 being highest)\n|||\n{{answer_choices[star_rating-1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Given the review headline return a categorical rating
+ reference: 'Given the review headline, return a categorical rating. '
+ e40e4a53-ca5d-4fc8-a7c3-be9adfe0dbec: !Template
+ answer_choices: null
+ id: e40e4a53-ca5d-4fc8-a7c3-be9adfe0dbec
+ jinja: "Generate a {{star_rating}}-star review headline (1 being lowest and 5\
+ \ being highest) about this product: \n{{product_title}} \n||| \
+ \ \n{{review_headline}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate review headline based on rating
+ reference: 'Generate review headline based on rating. '
+ e6a1bbde-715d-4dad-9178-e2bcfaf5c646: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: e6a1bbde-715d-4dad-9178-e2bcfaf5c646
+ jinja: "Given the following review:\n{{review_body}}\npredict the associated rating\
+ \ from the following choices (1 being lowest and 5 being highest)\n- {{ answer_choices\
+ \ | join('\\n- ') }} \n|||\n{{answer_choices[star_rating-1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Given the review body return a categorical rating
+ reference: 'Given the review body, return a categorical rating. '
diff --git a/promptsource/templates/ambig_qa/light/templates.yaml b/promptsource/templates/ambig_qa/light/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6b5b5543ff44ca0cf005975a9e85351aa13771ff
--- /dev/null
+++ b/promptsource/templates/ambig_qa/light/templates.yaml
@@ -0,0 +1,128 @@
+dataset: ambig_qa
+subset: light
+templates:
+ 050b1534-b53f-4341-b42c-6e689ef8911b: !Template
+ answer_choices: null
+ id: 050b1534-b53f-4341-b42c-6e689ef8911b
+ jinja: "{# Assignement in if clause breaks test, we need to declare variables\
+ \ in global scope first: https://github.com/pallets/jinja/issues/1314 #}\n{%\
+ \ set selected_question = \"\" %}\n{% set selected_answer = \"\" %}\n{% set\
+ \ random_question_id = -1 %}\n{% if annotations.type[0] == \"multipleQAs\" %}\n\
+ \ {% set random_question_id = range(0, annotations.qaPairs[0].question | length)\
+ \ | choice%}\n {% set selected_question = annotations.qaPairs[0].question[random_question_id]%}\n\
+ \ {% set selected_answer = annotations.qaPairs[0].answer[random_question_id]\
+ \ | choice%}\n{% else %}\n {% set selected_question = question %}\n {% set\
+ \ selected_answer = annotations.answer[0] | choice %}\n{% endif %}\n\nHere's\
+ \ a question-answer pair: {{question}} {{selected_answer}}.\nIs the question\
+ \ ambiguous? If so, generate a better question suitable for the answer. Otherwise,\
+ \ output the same question.\n|||\n{{selected_question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - Edit Distance
+ original_task: false
+ name: is_question_ambiguous
+ reference: ''
+ 09880e1a-0fcc-49dc-8462-b6603e15d691: !Template
+ answer_choices: null
+ id: 09880e1a-0fcc-49dc-8462-b6603e15d691
+ jinja: "What are the possible answers to the question \"{{question}}\"? Use semi-colons\
+ \ to separate your answers if you have multiple answers.\n\n|||\n\n{# Assignment\
+ \ in if clause breaks test, we need to declare variables in global scope first:\
+ \ https://github.com/pallets/jinja/issues/1314 #}\n{% set random_answer = \"\
+ \" %}\n{% set random_answer_form = \"\" %}\n{% if annotations.type[0] == \"\
+ singleAnswer\" %}\n {% set random_answer_form = [] %}\n {% for possible_answer\
+ \ in annotations.answer[0] %}\n {{ random_answer_form.append(possible_answer\
+ \ ) or \"\"}}\n {% endfor %}\n{% else %}\n {% set random_answer_form =\
+ \ [] %}\n {% for possible_answers in annotations.qaPairs[0].answer %}\n \
+ \ {% for possible_answer in possible_answers %}\n {{ random_answer_form.append(possible_answer\
+ \ ) or \"\"}}\n {% endfor %}\n {% endfor %}\n{% endif %}\n\n{{random_answer_form\
+ \ | join(\"; \")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: answer_prediction_all_answers_interrogative
+ reference: ''
+ 45b20de4-a3c1-4e76-ad79-06d7c8c66009: !Template
+ answer_choices: null
+ id: 45b20de4-a3c1-4e76-ad79-06d7c8c66009
+ jinja: "Given the question \"{{question}}\", generate all the possible answers,\
+ \ separated by semi-colons.\n\n|||\n\n{# Assignment in if clause breaks test,\
+ \ we need to declare variables in global scope first: https://github.com/pallets/jinja/issues/1314\
+ \ #}\n{% set random_answer = \"\" %}\n{% set random_answer_form = \"\" %}\n\
+ {% if annotations.type[0] == \"singleAnswer\" %}\n {% set random_answer_form\
+ \ = [] %}\n {% for possible_answer in annotations.answer[0] %}\n {{ random_answer_form.append(possible_answer\
+ \ ) or \"\"}}\n {% endfor %}\n{% else %}\n {% set random_answer_form =\
+ \ [] %}\n {% for possible_answers in annotations.qaPairs[0].answer %}\n \
+ \ {% for possible_answer in possible_answers %}\n {{ random_answer_form.append(possible_answer\
+ \ ) or \"\"}}\n {% endfor %}\n {% endfor %}\n{% endif %}\n\n{{random_answer_form\
+ \ | join(\"; \")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: answer_prediction_all_answers_affirmative
+ reference: ''
+ 72bf511b-44ce-4b9f-a2d0-5ed6334f0e07: !Template
+ answer_choices: Yes ||| No
+ id: 72bf511b-44ce-4b9f-a2d0-5ed6334f0e07
+ jinja: "{# Assignement in if clause breaks test, we need to declare variables\
+ \ in global scope first: https://github.com/pallets/jinja/issues/1314 #}\n{%\
+ \ set random_question_id = -1 %}\n{% set random_answer_id = -1 %}\n{% set selected_question\
+ \ = \"\" %}\n{% set selected_answer = \"\" %}\n{% if annotations.type[0] ==\
+ \ \"multipleQAs\" %}\n {% set random_question_id = range(0, annotations.qaPairs[0].question\
+ \ | length) | choice%}\n {% set random_answer_id = range(0, annotations.qaPairs[0].answer\
+ \ | length) | choice%}\n {% set selected_question = annotations.qaPairs[0].question[random_question_id]\
+ \ %}\n {% set selected_answer = annotations.qaPairs[0].answer[random_answer_id]\
+ \ | choice%}\n{% else %}\n {% set random_question_id = 0 %}\n {% set random_answer_id\
+ \ = 0 %}\n {% set selected_question = question %}\n {% set selected_answer\
+ \ = annotations.answer[0] | choice %}\n{% endif %}\n\nIs \"{{selected_answer}}\"\
+ \ an acceptable answer to \"{{selected_question}}\"? {{answer_choices[0]}} or\
+ \ {{answer_choices[1].lower()}}?\n\n|||\n\n{% if random_answer_id == random_question_id\
+ \ %} {{answer_choices[0]}} {% else %} {{answer_choices[1]}} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: answer_prediction_yes_or_no
+ reference: Classify whether the given answer is correct for the chosen question
+ bb089312-23cb-475d-93b5-952781bc6be4: !Template
+ answer_choices: null
+ id: bb089312-23cb-475d-93b5-952781bc6be4
+ jinja: "{# Assignement in if clause breaks test, we need to declare variables\
+ \ in global scope first: https://github.com/pallets/jinja/issues/1314 #}\n{%\
+ \ set selected_question = \"\" %}\n{% set selected_answer = \"\" %}\n{% set\
+ \ random_question_id = -1 %}\n{% if annotations.type[0] == \"multipleQAs\" %}\n\
+ \ {% set random_question_id = range(0, annotations.qaPairs[0].question | length)\
+ \ | choice%}\n {% set selected_question = annotations.qaPairs[0].question[random_question_id]%}\n\
+ \ {% set selected_answer = annotations.qaPairs[0].answer[random_question_id]\
+ \ | choice%}\n{% else %}\n {% set selected_question = question %}\n {% set\
+ \ selected_answer = annotations.answer[0] | choice %}\n{% endif %}\n\nQuestion:\
+ \ {{question}}\nAnswer: {{selected_answer}}\n\nKnowing that the question can\
+ \ be ambiguous, can you perform question disambiguation by generating a question\
+ \ such that \"{{selected_answer}}\" is a more suitable answer? If you deem that\
+ \ the question is not ambiguous, generate the same question given above.\n|||\n\
+ {{selected_question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: perform_question_disambiguation
+ reference: ''
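
A note on the pattern repeated throughout the ambig_qa templates above: every variable is declared with `{% set %}` at the template's top level before being reassigned inside `{% if %}` branches (the Jinja scoping issue linked in the comments), and the `choice` filter performs a random pick. Below is a minimal sketch of how such a template renders, assuming a Jinja environment whose `choice` filter is wired to `random.choice` (promptsource defines a similar filter; the record below is illustrative, not a real ambig_qa row):

```python
import random
from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice  # assumption: mirrors promptsource's custom filter

src = """
{% set selected_answer = "" %}
{% if annotations.type[0] == "multipleQAs" %}
  {% set selected_answer = annotations.qaPairs[0].answer[0] | choice %}
{% else %}
  {% set selected_answer = annotations.answer[0] | choice %}
{% endif %}
Here's a question-answer pair: {{ question }} {{ selected_answer }}.
"""

example = {  # illustrative example, not a real ambig_qa record
    "question": "When did the Simpsons first air on television?",
    "annotations": {"type": ["singleAnswer"], "answer": [["December 17, 1989"]]},
}
print(env.from_string(src).render(**example).strip())
```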
diff --git a/promptsource/templates/anli/templates.yaml b/promptsource/templates/anli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab6e2ee082e108ebb2b1a3165edc3ce92eda70a6
--- /dev/null
+++ b/promptsource/templates/anli/templates.yaml
@@ -0,0 +1,221 @@
+dataset: anli
+templates:
+ 0cc3ae39-3997-4686-8c93-5d51457efa1f: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: 0cc3ae39-3997-4686-8c93-5d51457efa1f
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ 179eb863-3ece-4e6f-af0f-fcb46d997306: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 179eb863-3ece-4e6f-af0f-fcb46d997306
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ 5459237b-97de-4340-bf7b-2939c3f7ca19: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 5459237b-97de-4340-bf7b-2939c3f7ca19
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 620aa3fc-d5eb-46f5-a1ee-4c754527aa97: !Template
+ answer_choices: True ||| Neither ||| False
+ id: 620aa3fc-d5eb-46f5-a1ee-4c754527aa97
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ are no task-identifying tokens like "anli R1: ".'
+ 9b613182-c6ab-4427-9221-3d68f6d62765: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 9b613182-c6ab-4427-9221-3d68f6d62765
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ a850110d-f1a3-49b4-949a-d3bfe9f81344: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: a850110d-f1a3-49b4-949a-d3bfe9f81344
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ bab86d5a-4f9c-40db-b619-a7b7d5cae681: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: bab86d5a-4f9c-40db-b619-a7b7d5cae681
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ bcd90047-3a2b-426b-b065-8a418f1317b8: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: bcd90047-3a2b-426b-b065-8a418f1317b8
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ c4ed37ae-d7d7-4197-a725-ef2152fa3b1f: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: c4ed37ae-d7d7-4197-a725-ef2152fa3b1f
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ ca24b93a-6265-462f-b140-e329c03d94fa: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: ca24b93a-6265-462f-b140-e329c03d94fa
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ dbc68425-5c42-43ae-9748-70ce8c5a167e: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: dbc68425-5c42-43ae-9748-70ce8c5a167e
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ e5b7fdd7-fdff-4630-889b-3c7a052e5da0: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: e5b7fdd7-fdff-4630-889b-3c7a052e5da0
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ e6f32b9c-7e0b-474a-a0d2-e84d20c22aba: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: e6f32b9c-7e0b-474a-a0d2-e84d20c22aba
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ ec249357-e672-4e7d-b8b6-d97ed7d090c5: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: ec249357-e672-4e7d-b8b6-d97ed7d090c5
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ ffa0a6f0-7186-4ccb-bb35-8b1affb747a0: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: ffa0a6f0-7186-4ccb-bb35-8b1affb747a0
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
diff --git a/promptsource/templates/app_reviews/templates.yaml b/promptsource/templates/app_reviews/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d92249d114cdcaa5436728803b728edbe3ba373a
--- /dev/null
+++ b/promptsource/templates/app_reviews/templates.yaml
@@ -0,0 +1,78 @@
+dataset: app_reviews
+templates:
+ 2da8f134-58db-4f9d-b3b0-8c6b50693ab5: !Template
+ answer_choices: Not at all ||| No ||| Maybe ||| Yes ||| Definitely
+ id: 2da8f134-58db-4f9d-b3b0-8c6b50693ab5
+ jinja: 'Given this review: "{{review}}"
+
+ Would you recommend this app to a friend? {{answer_choices[0]}}, {{answer_choices[1]}},
+ {{answer_choices[2]}}, {{answer_choices[3]}}, or {{answer_choices[4]}}?
+
+ |||
+
+ {{answer_choices[star-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Spearman Correlation
+ original_task: false
+ name: categorize_rating_using_review
+ reference: Given the review, return a categorical answer.
+ 8086b434-a75e-45a4-87fb-4364601e2e05: !Template
+ answer_choices: null
+ id: 8086b434-a75e-45a4-87fb-4364601e2e05
+ jinja: 'Generate a {{star}}-star review (1 being lowest and 5 being highest) about
+ an app with package {{package_name}}.
+
+ |||
+
+ {{review}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Spearman Correlation
+ original_task: false
+ name: generate_review
+ reference: Generate a review from the rating.
+ 9746ce4b-ac58-4dfb-9783-d77c95cb62cf: !Template
+ answer_choices: "\u2605 ||| \u2605\u2605 ||| \u2605\u2605\u2605 ||| \u2605\u2605\
+ \u2605\u2605 ||| \u2605\u2605\u2605\u2605\u2605"
+ id: 9746ce4b-ac58-4dfb-9783-d77c95cb62cf
+ jinja: "What would be the \u2605-rating of this review (\u2605 being the lowest\
+ \ and \u2605\u2605\u2605\u2605\u2605 being the highest)? \"{{review}}\"\n|||\n\
+ {{answer_choices[star-1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Spearman Correlation
+ original_task: false
+ name: convert_to_star_rating
+ reference: Given the review, generate a star rating.
+ d34e1413-2699-4701-baa2-05d931d012ba: !Template
+ answer_choices: null
+ id: d34e1413-2699-4701-baa2-05d931d012ba
+ jinja: 'On a scale of 1-5 (with 1 being least favorable and 5 being most favorable),
+ how would you rate this review? "{{review}}"
+
+ |||
+
+ {{star}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Spearman Correlation
+ original_task: false
+ name: convert_to_rating
+ reference: Convert review to rating
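
The two classification templates above lean on a small indexing convention: app_reviews stores `star` as 1-5, so `answer_choices[star-1]` shifts the rating to a 0-based list index. The same mapping in plain Python:

```python
answer_choices = ["Not at all", "No", "Maybe", "Yes", "Definitely"]

def categorize(star: int) -> str:
    # star is 1-5 in the dataset; lists are 0-indexed, hence the -1.
    assert 1 <= star <= 5, "app_reviews ratings are 1-5"
    return answer_choices[star - 1]

print(categorize(4))  # -> "Yes"
```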
diff --git a/promptsource/templates/aqua_rat/raw/templates.yaml b/promptsource/templates/aqua_rat/raw/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..19484d09f7e13055250b0d8a6ad028ec5b48ee52
--- /dev/null
+++ b/promptsource/templates/aqua_rat/raw/templates.yaml
@@ -0,0 +1,131 @@
+dataset: aqua_rat
+subset: raw
+templates:
+ 13bd5099-33fa-4383-a441-33a7d2e1746f: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: 13bd5099-33fa-4383-a441-33a7d2e1746f
+ jinja: "Given the problem:\n{{question}}\n\nand the options:\n{% for i in range(options|length)\
+ \ %}\n{{options[i].replace(')', ') ')}}\n{% endfor %}\n\nThe correct answer\
+ \ is\n |||\n{{correct}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: select_the_best_option
+ reference: ''
+ 58a6aa2b-ca26-473d-9bf8-385dd1a743cd: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: 58a6aa2b-ca26-473d-9bf8-385dd1a743cd
+ jinja: 'You will now be given a question and a set of options. Choose the correct
+ option and provide a rationale for your choice.
+
+
+ Question:
+
+ {{question}}
+
+
+ Options:
+
+ {% for i in range(options|length) %}
+
+ - {{options[i].replace('')'', '') '')}}
+
+ {% endfor %}
+
+
+ |||
+
+ {{correct}}
+
+
+ {{rationale}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: generate_rational_and_correct_choice
+ reference: ''
+ 5acfaa48-e1b6-44df-8e92-c58b94bff595: !Template
+ answer_choices: null
+ id: 5acfaa48-e1b6-44df-8e92-c58b94bff595
+ jinja: "Answer the given question by providing the correct rationale:\n\n{{question}}\n\
+ {% for i in range(options|length) %}\n {{options[i].replace(')', ') ')}}\n\
+ {%endfor%}\n|||\n{{rationale}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_rationale
+ reference: ''
+ 815acaf5-2e59-4f81-8190-ae75dc237cf1: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: 815acaf5-2e59-4f81-8190-ae75dc237cf1
+ jinja: '{{question}}
+
+
+ The above question was asked in a Math test. Given the following options, can
+ you choose the correct one?
+
+
+ {% for i in range(options|length) %}
+
+ - {{options[i].replace('')'', '') '')}}
+
+ {% endfor %}
+
+ |||
+
+ {{correct}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: answer_quiz
+ reference: ''
+ c0403841-68b0-4c08-8c3b-a00a81272d05: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: c0403841-68b0-4c08-8c3b-a00a81272d05
+ jinja: "Solve the following question and choose the correct option.\n\n{{question}}\
+ \ \n{% for i in range(options|length) %}\n- {{options[i].replace(')', ') ')}}\n\
+ {%endfor%}\n||| \n{{correct}}\n\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Answer questions from options
+ reference: ''
+ c9352c6c-074b-4beb-8489-c151adeeedcb: !Template
+ answer_choices: null
+ id: c9352c6c-074b-4beb-8489-c151adeeedcb
+ jinja: "Question: \n{{question}}\n\nOptions: \n{% for i in range(options|length)\
+ \ %}\n- {{options[i].replace(')', ') ')}}\n{% endfor %}\n\nThis is how I solved\
+ \ the above question:\n|||\n{{rationale}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer_question_with_rationale
+ reference: ''
diff --git a/promptsource/templates/art/templates.yaml b/promptsource/templates/art/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..067e13ddb313b86ab6268e020a2aed97800ae09c
--- /dev/null
+++ b/promptsource/templates/art/templates.yaml
@@ -0,0 +1,133 @@
+dataset: art
+templates:
+ 151d0e97-d7d2-47f2-86b4-6777587b16f2: !Template
+ answer_choices: '{{hypothesis_1 | trim(''.?!'') }} ||| {{hypothesis_2 | trim(''.?!'')
+ }}'
+ id: 151d0e97-d7d2-47f2-86b4-6777587b16f2
+ jinja: "We know that:\n\n{{ observation_1 }},\n\nand:\n\n{{ observation_2 }} \n\
+ \nWhich one is more likely?\n\nThe first option: \n\n{{ answer_choices[0] }},\
+ \ \n\nor the second option:\n\n{{ answer_choices[1] }}?\n|||\n{{ answer_choices[label-1]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_hypothesis_options
+ reference: ''
+ a090e019-1b98-4863-ab5d-ff9772f682d6: !Template
+ answer_choices: '{{hypothesis_1| trim(''.?!'') }} ||| {{hypothesis_2| trim(''.?!'')
+ }}'
+ id: a090e019-1b98-4863-ab5d-ff9772f682d6
+ jinja: 'You know the following:
+
+
+ {{ observation_1 }} {{ observation_2 }}
+
+
+ Which one is more believable?
+
+
+ - {{ answer_choices[0] }}
+
+ - {{ answer_choices[1] }}
+
+
+ |||
+
+
+ {{ answer_choices[label-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_hypothesis_believable
+ reference: ''
+ bf8a5b8a-70cb-4b27-82db-8ca4fbd2318d: !Template
+ answer_choices: '{{hypothesis_1| trim(''.?!'') }} ||| {{hypothesis_2| trim(''.?!'')
+ }}'
+ id: bf8a5b8a-70cb-4b27-82db-8ca4fbd2318d
+ jinja: '{{ observation_1 }} {{ observation_2 }}
+
+
+ Would you rather believe that:
+
+
+ {{ answer_choices[0] }},
+
+
+ or:
+
+
+ {{ answer_choices[1] }}?
+
+ |||
+
+ {{ answer_choices[label-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_hypothesis
+ reference: ''
+ d418b574-9d0a-4d29-a518-7d9a5f5a4a3d: !Template
+ answer_choices: '{{hypothesis_1| trim(''.?!'') }} ||| {{hypothesis_2| trim(''.?!'')
+ }}'
+ id: d418b574-9d0a-4d29-a518-7d9a5f5a4a3d
+ jinja: "Which of the following better fits the description?\n\nIs it that: \n\n\
+ {{ answer_choices[0] }},\n\nor rather: \n\n{{ answer_choices[1] }}?\n\nDescription:\
+ \ \n\n{{ observation_1 }} {{ observation_2 }}\n|||\n{{ answer_choices[label-1]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_hypothesis_desc
+ reference: ''
+ eb0baa43-3c79-4d1d-973a-37e0055bbfec: !Template
+ answer_choices: '{{hypothesis_1| trim(''.?!'') }} ||| {{hypothesis_2| trim(''.?!'')
+ }}'
+ id: eb0baa43-3c79-4d1d-973a-37e0055bbfec
+ jinja: 'Which version is more likely?
+
+
+ The first one:
+
+
+ {{ answer_choices[0] }},
+
+
+ or the second one:
+
+
+ {{ answer_choices[1] }}?
+
+
+ Assuming that:
+
+
+ {{ observation_1 }} {{ observation_2 }}
+
+ |||
+
+ {{ answer_choices[label-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_hypothesis_likely
+ reference: ''
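
Unlike most datasets in this diff, the art templates build `answer_choices` per example: the field is itself Jinja, trimming trailing punctuation off each hypothesis with `trim('.?!')` (the `chars` argument needs Jinja2 >= 2.11) so the choices read cleanly inside the prompt, and `label` is 1-indexed, hence `answer_choices[label-1]`. A sketch with an illustrative record:

```python
from jinja2 import Environment

env = Environment()
example = {  # illustrative ART-style record; label is 1 or 2
    "observation_1": "Ann wanted to learn piano.",
    "observation_2": "She now plays in a band.",
    "hypothesis_1": "Ann gave up after one lesson.",
    "hypothesis_2": "Ann practiced every day.",
    "label": 2,
}
choices_src = "{{hypothesis_1 | trim('.?!') }} ||| {{hypothesis_2 | trim('.?!') }}"
answer_choices = [c.strip()
                  for c in env.from_string(choices_src).render(**example).split("|||")]
print(answer_choices[example["label"] - 1])  # -> "Ann practiced every day"
```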
diff --git a/promptsource/templates/asnq/templates.yaml b/promptsource/templates/asnq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..29e9e81851df2a74487ddfc6785a05c6a19db990
--- /dev/null
+++ b/promptsource/templates/asnq/templates.yaml
@@ -0,0 +1,211 @@
+dataset: asnq
+templates:
+ 0e06d340-6d2c-44f7-b977-604925773f0b: !Template
+ answer_choices: No ||| Yes
+ id: 0e06d340-6d2c-44f7-b977-604925773f0b
+ jinja: "Question: {{question}} \nSentence: {{sentence}} \nAre the question and\
+ \ the sentence positive pairs where positive pairs means that the sentence answers\
+ \ the question? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: positive_pairs
+ reference: ''
+ 55f386ba-9a86-405e-a805-152e254a4205: !Template
+ answer_choices: null
+ id: 55f386ba-9a86-405e-a805-152e254a4205
+ jinja: "{% if label == 1 %}\n\nWhat is a question that someone might ask that\
+ \ the following sentence can answer?\n\n {{sentence}}\n\n|||\n\n{{question}}\n\
+ {% endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: question_from_sentence
+ reference: ''
+ 5b6abb0a-1b4f-4338-aab6-430465669164: !Template
+ answer_choices: null
+ id: 5b6abb0a-1b4f-4338-aab6-430465669164
+ jinja: '{% if label == 1 %}
+
+
+ Write a question based on this sentence: {{sentence}}
+
+
+ |||
+
+
+ {{question}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: write_question
+ reference: ''
+ 684aea91-34c4-47de-a61f-7cc9a182b657: !Template
+ answer_choices: No ||| Yes
+ id: 684aea91-34c4-47de-a61f-7cc9a182b657
+ jinja: Can the answer "{{sentence}}" be inferred from the question "{{question}}"?
+ ||| {{answer_choices[label]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_infer_question
+ reference: ''
+ 719306b9-5dc8-46c7-b693-9b2edc2e09f2: !Template
+ answer_choices: No ||| Yes
+ id: 719306b9-5dc8-46c7-b693-9b2edc2e09f2
+ jinja: Does this sentence "{{sentence}}" answer this question "{{question}}"?
+ ||| {{answer_choices[label]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Does_sentence_answer_question
+ reference: ''
+ 859ec580-957b-42da-be1b-c3ccb8b52d24: !Template
+ answer_choices: null
+ id: 859ec580-957b-42da-be1b-c3ccb8b52d24
+ jinja: '{% if label == 1 %}
+
+
+ Generate a one-sentence answer to the following question: {{question}}?
+
+
+ |||
+
+
+ {{sentence}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer question with a sentence
+ reference: ''
+ 85da6666-9e50-4122-84c8-d00b90967475: !Template
+ answer_choices: null
+ id: 85da6666-9e50-4122-84c8-d00b90967475
+ jinja: '{% if label == 1 %}
+
+
+ Given the following question: {{question}}? Can you give me a full sentence
+ answer?
+
+
+ |||
+
+
+ {{sentence}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: give me a full sentence answer
+ reference: ''
+ 85fe8aaa-83c5-41ec-ada5-0e6d60bab1f9: !Template
+ answer_choices: null
+ id: 85fe8aaa-83c5-41ec-ada5-0e6d60bab1f9
+ jinja: '{% if label == 1 %}
+
+ Answer this question in a full sentence: {{question}}?
+
+ |||
+
+ {{sentence}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer question as a sentence
+ reference: ''
+ 95e39e1d-a830-4b6c-bd2a-10fe51552427: !Template
+ answer_choices: No ||| Yes
+ id: 95e39e1d-a830-4b6c-bd2a-10fe51552427
+ jinja: 'Can this question: "{{question}}" be answered as follows: "{{sentence}}"?
+ Please answer yes or no. ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: yes_vs_no
+ reference: ''
+ a36d6152-72c4-4278-8266-d27b28667f61: !Template
+ answer_choices: null
+ id: a36d6152-72c4-4278-8266-d27b28667f61
+ jinja: "{% if label == 1 %}\n\nHere is a sentence:\n\n {{sentence}}\n\nWrite a\
+ \ question to which this sentence is an answer.\n\n|||\n\n{{question}}\n{% endif\
+ \ %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: write_a_question
+ reference: ''
+ a7927e90-1a9b-49e2-a2f8-5ac9e6d286cb: !Template
+ answer_choices: No ||| Yes
+ id: a7927e90-1a9b-49e2-a2f8-5ac9e6d286cb
+ jinja: 'Does the following sentence "{{sentence}}" seem like the right answer to the
+ following question: {{question}}? ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: right_answer
+ reference: ''
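
Several asnq templates wrap their entire body in `{% if label == 1 %}`, so negative question-sentence pairs render to an empty string. A sketch of how a caller might drop those empty renders (the skip-on-empty logic is an assumption about the consuming code, not promptsource's exact behavior):

```python
from jinja2 import Environment

env = Environment()
src = ("{% if label == 1 %}Write a question based on this sentence: "
       "{{sentence}} ||| {{question}}{% endif %}")
tmpl = env.from_string(src)

examples = [  # illustrative records; asnq label 1 = sentence answers the question
    {"label": 1, "sentence": "Paris is the capital of France.",
     "question": "what is the capital of france"},
    {"label": 0, "sentence": "Paris has many museums.",
     "question": "what is the capital of france"},
]
for ex in examples:
    rendered = tmpl.render(**ex).strip()
    if not rendered:      # label == 0 renders to "", so the pair is skipped
        continue
    prompt, target = (p.strip() for p in rendered.split("|||"))
    print(prompt, "->", target)
```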
diff --git a/promptsource/templates/asset/ratings/templates.yaml b/promptsource/templates/asset/ratings/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..248ff00550cbc8ef7a52c3e75df2b51a6dc31d47
--- /dev/null
+++ b/promptsource/templates/asset/ratings/templates.yaml
@@ -0,0 +1,119 @@
+dataset: asset
+subset: ratings
+templates:
+ 09b2a13b-cba6-4473-8a46-3fa24be71ce2: !Template
+ answer_choices: No ||| Yes
+ id: 09b2a13b-cba6-4473-8a46-3fa24be71ce2
+ jinja: "{% set label = None %}\n{% set questions = None %}\n{% if rating > 50\
+ \ %}\n{% set label = 1 %}\n{% else %}\n{% set label = 0 %}\n{% endif %}\n{%\
+ \ set questions= [ \"Does the second sentence better convey the information?\"\
+ , \"Is the second sentence more fluent?\", \"Is the second sentence simpler?\"\
+ ] %}\n\nFirst sentence: {{original}}\n\nSecond sentence: {{simplification}}\n\
+ \n{{questions[aspect]}} Please answer Yes or No.\n|||\n{{answer_choices[label]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: rate-binary
+ reference: Taking questions from the original paper, we use rating to establish
+ a binary classification problem
+ 47142040-4121-4144-98b9-61cb5cbb1313: !Template
+ answer_choices: null
+ id: 47142040-4121-4144-98b9-61cb5cbb1313
+ jinja: 'First sentence: {{original}}
+
+
+ Second sentence: {{simplification}}
+
+
+ I am scoring these simplification exercises. How much easier to read is the second
+ sentence on a scale from 0 (harder to read) to 100 (easier to read)?
+
+
+ |||
+
+
+ {{rating}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: rate-regression-simplicity
+ reference: Prompt model to rate how simplified the sentence is in the general
+ sense, instead of a particular aspect. This is a regression task whose range
+ is from 0 to 100.
+ 7dd6e8b6-eae0-40c5-aa5e-1cc24357d85d: !Template
+ answer_choices: null
+ id: 7dd6e8b6-eae0-40c5-aa5e-1cc24357d85d
+ jinja: '{% set label = None %}
+
+ {% set questions = None %}
+
+ {% if rating > 50 %}
+
+ {% set label = 1 %}
+
+ {% else %}
+
+ {% set label = 0 %}
+
+ {% endif %}
+
+ {% if label == 1 %}
+
+ {% set questions= [ "Rewrite the following sentence so that it conveys the information
+ better.", "Rewrite the following sentence so that it is more fluent.", "Rewrite
+ the following sentence so that it is simpler."] %}
+
+ {% else %}
+
+ {% set questions= [ "Rewrite the following sentence so that it conveys the information
+ more poorly.", "Rewrite the following sentence so that it is less fluent.",
+ "Rewrite the following sentence so that it is more complicated."] %}
+
+ {% endif %}
+
+ {{questions[aspect]}}
+
+
+ {{original}}
+
+ |||
+
+ {{simplification}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate-text-based-on-rating
+ reference: ''
+ d2bed959-29ab-4962-a106-dc91c00f3f03: !Template
+ answer_choices: null
+ id: d2bed959-29ab-4962-a106-dc91c00f3f03
+ jinja: "{% set statements= [ \"the second sentence expresses the underlying meaning\
+ \ the best.\", \"the second sentence is more fluent.\", \"the second sentence\
+ \ is simpler.\"] %}\n\nFirst sentence: {{original}}\n\nSecond sentence: {{simplification}}\n\
+ \nRate the following statement from 0 (strongly disagree) to 100 (strongly agree):\
+ \ {{statements[aspect]}} \n\n|||\n{{rating}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: rate-regression
+ reference: Require the model to output the rating. This is a regression task whose
+ range is from 0 to 100.
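
The rate-binary template above derives its label by thresholding the 0-100 `rating` at 50 and pairing it with one of three aspect-specific questions. The same mapping written out in Python:

```python
QUESTIONS = [  # copied from the rate-binary template above; index = `aspect`
    "Does the second sentence better convey the information?",
    "Is the second sentence more fluent?",
    "Is the second sentence simpler?",
]
ANSWERS = ["No", "Yes"]  # answer_choices, indexed by the derived label

def rate_binary(aspect: int, rating: float) -> tuple[str, str]:
    label = 1 if rating > 50 else 0   # same threshold as the template
    return QUESTIONS[aspect], ANSWERS[label]

print(rate_binary(2, 73.0))  # -> ('Is the second sentence simpler?', 'Yes')
```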
diff --git a/promptsource/templates/asset/simplification/templates.yaml b/promptsource/templates/asset/simplification/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..00cb181706b8e16eb9d93575e5ba87be64f4d08c
--- /dev/null
+++ b/promptsource/templates/asset/simplification/templates.yaml
@@ -0,0 +1,168 @@
+dataset: asset
+subset: simplification
+templates:
+ 0f0e55f9-28b4-4844-b65d-b9544a0918eb: !Template
+ answer_choices: null
+ id: 0f0e55f9-28b4-4844-b65d-b9544a0918eb
+ jinja: "{% set real_simplifications = [] %}{% for text in simplifications %}{%\
+ \ if text|length < original|length %}{{real_simplifications.append(text) | default(\"\
+ \", True)}}{% endif %}{% endfor %}\n{% if real_simplifications %}\nText: {{original}}\n\
+ \nHow would I simplify this? \n\n|||\n\n{{real_simplifications | choice}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: verbose-to-simplification
+ reference: Rewrite text using one random simplification
+ 3cbfbc1c-6876-4dd7-b7db-45fb3233a667: !Template
+ answer_choices: null
+ id: 3cbfbc1c-6876-4dd7-b7db-45fb3233a667
+ jinja: '{% set real_simplifications = [] %}{% for text in simplifications %}{%
+ if text|length < original|length %}{{real_simplifications.append(text) | default("",
+ True)}}{% endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ Make the below sentence more verbose:
+
+
+ {{real_simplifications | choice}}
+
+
+ |||
+
+
+ {{original}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: simplification-to-verbose
+ reference: Make the simplified text more verbose
+ 41d32553-433c-44fb-9eda-0fce51bf9e14: !Template
+ answer_choices: A ||| B
+ id: 41d32553-433c-44fb-9eda-0fce51bf9e14
+ jinja: '{% set rand_num = range(0,2) | choice %}
+
+ {% set real_simplifications = [] %}{% for text in simplifications %}{% if text|length
+ < original|length %}{{real_simplifications.append(text) | default("", True)}}{%
+ endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ One of the following two sentences is more verbose than the other. Which one
+ is it?
+
+ {% if rand_num %}
+
+ A: {{real_simplifications | choice}}
+
+
+ B: {{original}}
+
+ {% else %}
+
+ A: {{original}}
+
+
+ B: {{real_simplifications | choice}}
+
+ {% endif %}
+
+ |||
+
+ {{ answer_choices[rand_num] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: choose-verbose
+ reference: ''
+ 5c2f56b9-5bd4-4455-9d68-0729bfdb9c84: !Template
+ answer_choices: A ||| B
+ id: 5c2f56b9-5bd4-4455-9d68-0729bfdb9c84
+ jinja: '{% set rand_num = range(0,2) | choice %}
+
+ {% set real_simplifications = [] %}{% for text in simplifications %}{% if text|length
+ < original|length %}{{real_simplifications.append(text) | default("", True)}}{%
+ endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ One of the following two sentences is simpler than the other. Which one
+ is it?
+
+ {% if rand_num %}
+
+ A: {{real_simplifications | choice}}
+
+
+ B: {{original}}
+
+ {% else %}
+
+ A: {{original}}
+
+
+ B: {{real_simplifications | choice}}
+
+ {% endif %}
+
+ |||
+
+ {{ answer_choices[1-rand_num] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose-simplification
+ reference: ''
+ d528d74b-bbc2-4888-ae21-db0ab37304df: !Template
+ answer_choices: null
+ id: d528d74b-bbc2-4888-ae21-db0ab37304df
+ jinja: '{% set real_simplifications = [] %}{% for text in simplifications %}{%
+ if text|length < original|length %}{{real_simplifications.append(text) | default("",
+ True)}}{% endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ I''d like to explain to my child "{{original}}". How would I do so?
+
+
+ |||
+
+
+ {{real_simplifications | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: verbose-to-simplification-implicit
+ reference: Implicit simplification request
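
All of the simplification templates above share one Jinja idiom worth calling out: `{{ list.append(x) | default("", True) }}` appends inside the template while printing nothing, because `append` returns `None` and `default` with its boolean flag set swallows falsy values. A self-contained sketch (sentences illustrative):

```python
from jinja2 import Environment

env = Environment()
src = (
    '{% set shorter = [] %}'
    '{% for text in simplifications %}'
    '{% if text|length < original|length %}'
    # append returns None; default("", True) swallows it, so nothing is printed
    '{{ shorter.append(text) | default("", True) }}'
    '{% endif %}{% endfor %}'
    '{{ shorter | join(" / ") }}'
)
print(env.from_string(src).render(
    original="The committee elected to postpone the vote indefinitely.",
    simplifications=[
        "The committee delayed the vote.",
        "The committee elected to put off the vote for an indefinite period of time.",
    ],
))  # -> "The committee delayed the vote."
```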
diff --git a/promptsource/templates/banking77/templates.yaml b/promptsource/templates/banking77/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12fd8ae9facdad14980d6ef6819a1b0d37839264
--- /dev/null
+++ b/promptsource/templates/banking77/templates.yaml
@@ -0,0 +1,288 @@
+dataset: banking77
+templates:
+ 0dba8abc-248a-44db-bb86-20492ffc17f6: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: 0dba8abc-248a-44db-bb86-20492ffc17f6
+ jinja: 'Which help page should be provided to answer this
+ query?
+
+ Query: {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: help_page_topic
+ reference: ''
+ 2520f6d0-fcdf-44b6-abb3-a76e44948047: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: 2520f6d0-fcdf-44b6-abb3-a76e44948047
+ jinja: 'To which department in the bank can this query be directed?
+
+ Query: {{text}}
+
+ ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: direct_to_which_department
+ reference: ''
+ 9482bce0-f201-451b-9384-af588d707629: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: 9482bce0-f201-451b-9384-af588d707629
+ jinja: 'To which of the following departments in the bank can the given query
+ be directed?
+
+ Query: {{text}} Departments:
+
+ {% for intent in answer_choices %}
+
+ - {{intent}} {% endfor %}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose the correct department
+ reference: ''
+ e629d77c-46f9-4e00-b23a-c522d07a9943: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: e629d77c-46f9-4e00-b23a-c522d07a9943
+ jinja: "Summarise the following query in the form of key banking terms: \n{{text}}\n\
+ |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: rephrase_as_banking_term
+ reference: ''
+ edd67883-0386-4496-af7f-37a44c41293f: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: edd67883-0386-4496-af7f-37a44c41293f
+ jinja: 'Which of the following intents best represents this banking query?
+
+ Text: {{text}}
+
+ Intents:
+
+ {% for intent in answer_choices %}
+
+ - {{intent}} {% endfor %}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_the_correct_intent
+ reference: ''
+ eee2366a-8f0c-4ac3-b9cc-aa038e40f8cb: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: eee2366a-8f0c-4ac3-b9cc-aa038e40f8cb
+ jinja: 'What is the intent of this banking query?
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what_is_intent
+ reference: ''
+ f4e80455-1523-4b91-aacc-249d8c6f0f2a: !Template
+ answer_choices: activate my card|||age limit|||apple pay or google pay|||atm support|||automatic
+ top up|||balance not updated after bank transfer|||balance not updated after
+ cheque or cash deposit|||beneficiary not allowed|||cancel transfer|||card about
+ to expire|||card acceptance|||card arrival|||card delivery estimate|||card linking|||card
+ not working|||card payment fee charged|||card payment not recognised|||card
+ payment wrong exchange rate|||card swallowed|||cash withdrawal charge|||cash
+ withdrawal not recognised|||change pin|||compromised card|||contactless not
+ working|||country support|||declined card payment|||declined cash withdrawal|||declined
+ transfer|||direct debit payment not recognised|||disposable card limits|||edit
+ personal details|||exchange charge|||exchange rate|||exchange via app|||extra
+ charge on statement|||failed transfer|||fiat currency support|||get disposable
+ virtual card|||get physical card|||getting spare card|||getting virtual card|||lost
+ or stolen card|||lost or stolen phone|||order physical card|||passcode forgotten|||pending
+ card payment|||pending cash withdrawal|||pending top up|||pending transfer|||pin
+ blocked|||receiving money|||Refund not showing up|||request refund|||reverted
+ card payment?|||supported cards and currencies|||terminate account|||top up
+ by bank transfer charge|||top up by card charge|||top up by cash or cheque|||top
+ up failed|||top up limits|||top up reverted|||topping up by card|||transaction
+ charged twice|||transfer fee charged|||transfer into account|||transfer not
+ received by recipient|||transfer timing|||unable to verify identity|||verify
+ my identity|||verify source of funds|||verify top up|||virtual card not working|||visa
+ or mastercard|||why verify identity|||wrong amount of cash received|||wrong
+ exchange rate for cash withdrawal
+ id: f4e80455-1523-4b91-aacc-249d8c6f0f2a
+ jinja: 'Generate the subject for an email containing the following text:
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: generate_subject_for_text
+ reference: ''
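
The seven banking77 templates differ only in their prompt text; each carries the same 77-entry `answer_choices` string, split on `|||` so the dataset's integer label indexes one surface form (including the dataset's literal label spellings "Refund not showing up" and "reverted card payment?"). In miniature:

```python
# Truncated to three of the 77 banking77 intents for brevity.
raw = "activate my card|||age limit|||apple pay or google pay"
choices = [c.strip() for c in raw.split("|||")]
label = 1
print(choices[label])  # -> "age limit" (label ids index into this list)
```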
diff --git a/promptsource/templates/billsum/templates.yaml b/promptsource/templates/billsum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1ca14aa92b42ae256ad29ca7d36626608f034dfa
--- /dev/null
+++ b/promptsource/templates/billsum/templates.yaml
@@ -0,0 +1,153 @@
+dataset: billsum
+templates:
+ 0938c6e4-dbaf-43d8-8d8f-4bc62489ae74: !Template
+ answer_choices: null
+ id: 0938c6e4-dbaf-43d8-8d8f-4bc62489ae74
+ jinja: 'Given the title: "{{title}}" and the summary of a bill: {{summary}}.
+
+ Write this bill based on the title and summary.
+
+ |||
+
+ {{text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Write a bill: (title, summary -> text)'
+ reference: ''
+ 3c790ac3-0557-47a9-9b71-1cb435f15629: !Template
+ answer_choices: null
+ id: 3c790ac3-0557-47a9-9b71-1cb435f15629
+ jinja: "Given a state bill: {{text}}. \nPlease write the title of this bill in\
+ \ one sentence.\n|||\n{{title}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Summarize this bill in one sentence: (text -> title)'
+ reference: ''
+ 438192e5-d67a-4098-9d82-a9fe892f6be2: !Template
+ answer_choices: null
+ id: 438192e5-d67a-4098-9d82-a9fe892f6be2
+ jinja: 'Given a summary of a bill: {{summary}}.
+
+ Write this bill.
+
+ |||
+
+ {{text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Write a bill: (summary -> text)'
+ reference: ''
+ 4891a8e7-258c-41e2-80d3-0c1a054acb07: !Template
+ answer_choices: null
+ id: 4891a8e7-258c-41e2-80d3-0c1a054acb07
+ jinja: 'Given a title: "{{title}}" of a bill.
+
+ Write this bill based on this title.
+
+ |||
+
+ {{text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Write a bill: (title -> text)'
+ reference: ''
+ 550fa161-af4e-4430-9844-ce7dad587733: !Template
+ answer_choices: null
+ id: 550fa161-af4e-4430-9844-ce7dad587733
+ jinja: 'Given this bill: {{text}}.
+
+ Write a summary of this bill.
+
+ |||
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: 'Summarize this bill: (text -> summary)'
+ reference: ''
+ 5d2404b9-63ff-406e-977d-eda6afb5c689: !Template
+ answer_choices: null
+ id: 5d2404b9-63ff-406e-977d-eda6afb5c689
+ jinja: 'Given a summary: {{summary}}, generate a title for this summary.
+
+ |||
+
+ {{title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate title from summary
+ reference: ''
+ 6a439a80-4924-49e9-b5ae-f661683b399f: !Template
+ answer_choices: null
+ id: 6a439a80-4924-49e9-b5ae-f661683b399f
+ jinja: 'Summarize this US bill: {{text}}
+
+ |||
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: 'Summarize: (text -> summary)'
+ reference: ''
+ ea9f0376-6cec-450c-b258-89f479cb9f6d: !Template
+ answer_choices: null
+ id: ea9f0376-6cec-450c-b258-89f479cb9f6d
+ jinja: 'Given a summary of a bill: {{summary}}.
+
+ Please write the title of this summary.
+
+ |||
+
+ {{title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Summarize: (summary -> title)'
+ reference: ''
diff --git a/promptsource/templates/bing_coronavirus_query_set/templates.yaml b/promptsource/templates/bing_coronavirus_query_set/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e0b45121edeec7874a071970a640b297c79f8c0
--- /dev/null
+++ b/promptsource/templates/bing_coronavirus_query_set/templates.yaml
@@ -0,0 +1,77 @@
+dataset: bing_coronavirus_query_set
+templates:
+ 43332782-9e92-4bb2-94bf-28759f3fe181: !Template
+ answer_choices: null
+ id: 43332782-9e92-4bb2-94bf-28759f3fe181
+ jinja: "This search query talks about the coronavirus and was published on {{Date}}.\
+ \ In what country was it issued? \n{{Query}}\n|||\n{{Country}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: what_country
+ reference: ''
+ 68f9c063-1907-4866-ab1b-756cc57e5695: !Template
+ answer_choices: implicit ||| explicit
+ id: 68f9c063-1907-4866-ab1b-756cc57e5695
+ jinja: "The user is searching for coronavirus results on Bing.com. Is the intent\
+ \ implicit or explicit? \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\"\
+ \ %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is_implicit_or_explicit
+ reference: ''
+ 992d541f-9e0c-466d-b4c4-92e9e236f863: !Template
+ answer_choices: implicit ||| explicit
+ id: 992d541f-9e0c-466d-b4c4-92e9e236f863
+ jinja: "This search query about coronavirus was issued in {{Country}} on {{Date}}.\
+ \ Is the intent implicit or explicit? \n{{Query}}\n|||\n{% if IsImplicitIntent\
+ \ == \"True\" %}\n{{answer_choices[0] }}\n{% else %}\n{{answer_choices[1] }}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is_explicit_country_date
+ reference: ''
+ df53652c-36dc-45fe-a015-d0781e32cd33: !Template
+ answer_choices: Yes ||| No
+ id: df53652c-36dc-45fe-a015-d0781e32cd33
+ jinja: "Does this search engine query have an indirect relation to Covid-19? \n\
+ {{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\n{{answer_choices[0] }}\n\
+ {% else %}\n{{answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is_implicit_query
+ reference: ''
+ df7bc2ee-686c-4826-ad84-3a056a2da4d4: !Template
+ answer_choices: No ||| Yes
+ id: df7bc2ee-686c-4826-ad84-3a056a2da4d4
+ jinja: "Does this search query on Bing.com talk about the coronavirus explicitly?\
+ \ \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\n{{answer_choices[0]\
+ \ }}\n{% else %}\n{{answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is_explicit_query
+ reference: ''
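
One detail in the bing_coronavirus_query_set templates: `IsImplicitIntent` is compared against the string "True", not a boolean, which only works if the loaded field really is text. The equivalent mapping, under that assumption:

```python
def intent_label(is_implicit_intent: str) -> str:
    # Assumption: the field arrives as the text "True"/"False", which is why
    # the templates compare against the string rather than a boolean.
    return "implicit" if is_implicit_intent == "True" else "explicit"

print(intent_label("True"), intent_label("False"))  # -> implicit explicit
```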
diff --git a/promptsource/templates/biosses/templates.yaml b/promptsource/templates/biosses/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc5363a99c1e7835462c9fd161be1d2b43208cf9
--- /dev/null
+++ b/promptsource/templates/biosses/templates.yaml
@@ -0,0 +1,186 @@
+dataset: biosses
+templates:
+ 084e20ea-689d-4813-9db0-04735016aa0b: !Template
+ answer_choices: null
+ id: 084e20ea-689d-4813-9db0-04735016aa0b
+ jinja: 'How similar are the following two sentences? {{sentence1}} {{sentence2}}
+
+
+ Give the answer on a scale from 0 - 4, where 0 is "not similar at all" and 4
+ is "means the same thing". |||
+
+
+ {{(((5*score)|round)/5)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: similarity with question first
+ reference: stsb template from FLAN
+ 2aa62df9-5905-4f50-baff-c11986670122: !Template
+ answer_choices: null
+ id: 2aa62df9-5905-4f50-baff-c11986670122
+ jinja: On a scale from 0 to 4, where 0 is "not similar" and 4 is "very similar",
+ how similar is the sentence "{{sentence1}}" to the sentence "{{sentence2}}"?
+ ||| {{(((5*score)|round)/5)}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: compare one sentence to another
+ reference: stsb template from FLAN
+ 2ec48b7b-c2c8-4253-9c0f-b57814ba0027: !Template
+ answer_choices: null
+ id: 2ec48b7b-c2c8-4253-9c0f-b57814ba0027
+ jinja: "Sentence 1: {{sentence1}} \nSentence 2: {{sentence2}}\n\nFrom 0 to 4 (0\
+ \ = \"no meaning overlap\" and 4 = \"means the same thing\"), how similar are\
+ \ the two sentences? |||\n\n{{(((5*score)|round)/5)}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: similarity with sentences first
+ reference: stsb template from FLAN
+ 400dcb4c-8654-44aa-acec-4dbe108e34a6: !Template
+ answer_choices: null
+ id: 400dcb4c-8654-44aa-acec-4dbe108e34a6
+ jinja: '{{sentence1}} {{sentence2}}
+
+
+ On a scale from 0 to 4, where 0 is "no meaning overlap" and 4 is "means the
+ same thing", how closely does the first sentence resemble the second one? |||
+
+
+ {{(((5*score)|round)/5)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: resemblance
+ reference: stsb template from FLAN
+ 5a6bc1a2-8d73-4c57-baa1-cc4b5c4dfacc: !Template
+ answer_choices: null
+ id: 5a6bc1a2-8d73-4c57-baa1-cc4b5c4dfacc
+ jinja: 'Do the following sentences say the same thing? {{sentence1}} {{sentence2}}
+
+
+ Return your answer on a scale from 0 to 4, where 0 is "not similar" and 4 is
+ "very similar". |||
+
+
+ {{(((5*score)|round)/5)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: same thing scoring
+ reference: stsb template from FLAN
+ 5c53ce9b-45f6-41ab-9da7-9c24f0f6f56d: !Template
+ answer_choices: no ||| yes
+ id: 5c53ce9b-45f6-41ab-9da7-9c24f0f6f56d
+ jinja: "(1) {{sentence1}} \n(2) {{sentence2}}\n\nDo these two sentences convey\
+ \ the same information? |||\n\n{{answer_choices[0 if score < 2.5 else 1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: same info binary
+ reference: paws_wiki from FLAN
+ c1b48040-b083-4501-a7ef-a21b65800eb6: !Template
+ answer_choices: null
+ id: c1b48040-b083-4501-a7ef-a21b65800eb6
+ jinja: '{{sentence1}} {{sentence2}}
+
+
+ Rate the textual similarity of these two sentences on a scale from {{"0.0"}} to
+ {{"4.0"}}, where 0 is "no relation" and 4 is "equivalent". |||
+
+
+ {{(((5*score)|round)/5)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: rate with sentences first
+ reference: stsb template from FLAN
+ d52895b8-71bb-4b87-a20f-e8eae53ede92: !Template
+ answer_choices: no ||| yes
+ id: d52895b8-71bb-4b87-a20f-e8eae53ede92
+ jinja: Please check if these have the same meaning. Answer "yes" if they do, otherwise
+ "no". {{sentence1}} {{sentence2}} ||| {{answer_choices[0 if score < 2.5 else
+ 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: same meaning binary
+ reference: paws_wiki from FLAN
+ e22d8c63-3184-40df-84c2-6800960496a7: !Template
+ answer_choices: no ||| yes
+ id: e22d8c63-3184-40df-84c2-6800960496a7
+ jinja: Do "{{sentence1}}" and "{{sentence2}}" seem similar to you ? ||| {{answer_choices[0
+ if score < 2.5 else 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: similarity binary
+ reference: stsb_multi_mt
+ f2b20779-4ac9-41d9-9660-b9c5223fe9c1: !Template
+ answer_choices: null
+ id: f2b20779-4ac9-41d9-9660-b9c5223fe9c1
+ jinja: 'Rate the similarity of these two sentences: ({{"0.0"}} being the lowest
+ and {{"4.0"}} the highest) "{{sentence1}}" and "{{sentence2}}" |||
+
+
+ {{(((5*score)|round)/5)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: rate with question first
+ reference: stsb_multi_mt
+ fc22748c-72c0-4727-bc4e-53aae4449bef: !Template
+ answer_choices: no ||| yes
+ id: fc22748c-72c0-4727-bc4e-53aae4449bef
+ jinja: Do you think "{{sentence1}}" and "{{sentence2}}" express the same thing?
+ ||| {{answer_choices[0 if score < 2.5 else 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: same thing binary
+ reference: stsb_multi_mt
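Two score transforms recur in the biosses templates above: {{(((5*score)|round)/5)}} snaps the continuous 0-4 similarity score to the nearest 0.2, and {{answer_choices[0 if score < 2.5 else 1]}} binarizes it at the midpoint. A worked Python sketch of both:

    # Sketch of the two score transforms in the biosses templates.
    def snap_to_fifth(score: float) -> float:
        # {{(((5*score)|round)/5)}}: x5, round, /5 = nearest 0.2 step
        return round(5 * score) / 5

    def binarize(score: float) -> str:
        # {{answer_choices[0 if score < 2.5 else 1]}}, choices no ||| yes
        return ["no", "yes"][0 if score < 2.5 else 1]

    print(snap_to_fifth(3.27))  # 3.2
    print(snap_to_fifth(0.84))  # 0.8
    print(binarize(3.27))       # yes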
diff --git a/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml b/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8c8ae9d3d9a3ddbf5114e83cf8083d9690101fc6
--- /dev/null
+++ b/promptsource/templates/blbooksgenre/title_genre_classifiction/templates.yaml
@@ -0,0 +1,63 @@
+dataset: blbooksgenre
+subset: title_genre_classifiction
+templates:
+ 0c3e83f4-7f4d-4eca-8f80-6b6bdd8eeedd: !Template
+ answer_choices: Fiction ||| Non-fiction
+ id: 0c3e83f4-7f4d-4eca-8f80-6b6bdd8eeedd
+ jinja: "Given the title: {{title}}, which of the following genres is the book?\n\
+ (a) {{ answer_choices[0] }}\n(b) {{ answer_choices[1] }}\n|||\n {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: true
+ name: multi-choice
+ reference: ''
+ 5564acb9-c911-4d71-ba4d-add444aaf1e3: !Template
+ answer_choices: True ||| False
+ id: 5564acb9-c911-4d71-ba4d-add444aaf1e3
+ jinja: "{{title}} is the title of a fictional book, True or False?\nAnswer: \n\
+ |||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: true
+ name: premise_context_first
+ reference: ''
+ afc18daa-999d-495f-908a-d99477f6f5ac: !Template
+ answer_choices: True ||| False
+ id: afc18daa-999d-495f-908a-d99477f6f5ac
+ jinja: "The following is the title of a fictional book, True or False?\n{{title}}\n\
+ Answer: \n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: true
+ name: premise
+ reference: ''
+ cf4b6ce0-ff87-4c7a-9b9e-ec7c4cf741d8: !Template
+ answer_choices: Fiction ||| Non-fiction
+ id: cf4b6ce0-ff87-4c7a-9b9e-ec7c4cf741d8
+ jinja: The genre of the book "{{title}}" is ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: true
+ name: classify
+ reference: ''
diff --git a/promptsource/templates/blended_skill_talk/templates.yaml b/promptsource/templates/blended_skill_talk/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..187a600f633a599990806afbcafc5454f9fcf4cf
--- /dev/null
+++ b/promptsource/templates/blended_skill_talk/templates.yaml
@@ -0,0 +1,57 @@
+dataset: blended_skill_talk
+templates:
+ 54f785e9-453a-4ffe-8181-28095e3f2b80: !Template
+ answer_choices: null
+ id: 54f785e9-453a-4ffe-8181-28095e3f2b80
+ jinja: "Given the below conversation between two people, what would the listener\
+ \ say?\n\nA: {{previous_utterance[0]}}\n\nB: {{previous_utterance[1]}}\n{% for\
+ \ message_f, message_g in zip(free_messages[:-1], guided_messages[:-1]) %}\n\
+ A: {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \nA: {{free_messages[-1]}}\n\
+ \nB: \n|||\n{{guided_messages[-1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: guess-last-utterance
+ reference: ''
+ 58f4e068-26fa-4843-a1d6-54bde324e780: !Template
+ answer_choices: Yes ||| No
+ id: 58f4e068-26fa-4843-a1d6-54bde324e780
+ jinja: "Two people are having a conversation. Are the utterances in the correct\
+ \ order? \n\nYour answer should be either \"Yes\" or \"No\".\n{% if range(0,\
+ \ 2) | choice %}\nA: {{previous_utterance[0]}}\n\nB: {{previous_utterance[1]}}\n\
+ {% for message_f, message_g in zip(free_messages, guided_messages) %}\nA: {{message_f}}\n\
+ \nB: {{message_g}}\n{% endfor %} \n\n|||\n{{answer_choices[0]}}\n{% else %}\nA: {{previous_utterance[1]}}\n\
+ \nB: {{previous_utterance[0]}}\n{% for message_f, message_g in zip(guided_messages,\
+ \ free_messages) %}\nA: {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \n\n\
+ |||\nNo.\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: guess-correct-order
+ reference: ''
+ 8792b63e-7217-40fe-8130-7392baca3519: !Template
+ answer_choices: null
+ id: 8792b63e-7217-40fe-8130-7392baca3519
+ jinja: "Two people are talking to each other. What do you think Person A said\
+ \ in the beginning?\n\nPerson B: {{previous_utterance[1]}}\n{% for message_f,\
+ \ message_g in zip(free_messages, guided_messages) %}\nPerson A: {{message_f}}\n\
+ \nPerson B: {{message_g}}\n{% endfor %} \n|||\n{{previous_utterance[0]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: guess-first-utterance
+ reference: ''
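The blended_skill_talk templates above call zip(...) inside Jinja. Stock Jinja2 defines no zip global; promptsource makes Python's zip available in its template environment. A minimal reproduction of the mechanism (a sketch, not the exact promptsource setup):

    # Sketch: exposing Python's zip so the dialogue templates render.
    from jinja2 import Environment

    env = Environment()
    env.globals["zip"] = zip  # without this, zip(...) is undefined in Jinja

    t = env.from_string(
        "{% for f, g in zip(free, guided) %}A: {{f}}\n\nB: {{g}}\n{% endfor %}"
    )
    print(t.render(free=["Hi!", "How are you?"], guided=["Hello.", "Fine."]))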
diff --git a/promptsource/templates/cbt/CN/templates.yaml b/promptsource/templates/cbt/CN/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dad740f2af8a9f5417df982cf7f49c857802082f
--- /dev/null
+++ b/promptsource/templates/cbt/CN/templates.yaml
@@ -0,0 +1,147 @@
+dataset: cbt
+subset: CN
+templates:
+ 08820238-5bb3-4c7c-98bb-ec3d81e432e7: !Template
+ answer_choices: null
+ id: 08820238-5bb3-4c7c-98bb-ec3d81e432e7
+ jinja: '{{sentences | join('' '')}}
+
+
+ Write the next sentence of this story.
+
+ |||
+
+ {{ question.replace("XXXXX", answer) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Next Sentence
+ reference: Generate the next sentence given the story.
+ 1f8cad96-4c0f-435a-9a6f-653fcf158dd0: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 1f8cad96-4c0f-435a-9a6f-653fcf158dd0
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Replace {{"XXXXX"}} with the correct option from:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Replace
+ reference: Fill the blank given the options.
+ 556ee207-18c9-4c6c-860a-8ea09b93505c: !Template
+ answer_choices: '{{options|join(''|||'')}}'
+ id: 556ee207-18c9-4c6c-860a-8ea09b93505c
+ jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\
+ , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\
+ {{answer_choices|join(\", \")}}\n|||\n{{ answer }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - In the following
+ reference: Fill in the blanks given options.
+ 63bfa7b6-b566-4693-848c-e05cd7a12a03: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 63bfa7b6-b566-4693-848c-e05cd7a12a03
+ jinja: '{{ sentences | join('' '') }} {{question}}
+
+
+ Fill in the {{"XXXXX"}}.
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank without Options
+ reference: Fill in the blank without options.
+ a2e38459-90d9-4292-9d96-491ad7d4e3db: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a2e38459-90d9-4292-9d96-491ad7d4e3db
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Which of the following options best replaces {{"XXXXX"}} in the above story?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blanks with Options - above story
+ reference: Given the sentences, fill the blanks using the options.
+ a6fa37d5-899c-4ad0-b888-fab04cc8e423: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a6fa37d5-899c-4ad0-b888-fab04cc8e423
+ jinja: '{{sentences | join ('' '')}}
+
+
+ Which of the following options replaces {{"XXXXX"}} in "{{question}}"?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Which
+ reference: Fill Blank given options.
+ a8b67815-1927-4ef3-8d04-8d3f95525ef5: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a8b67815-1927-4ef3-8d04-8d3f95525ef5
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Fill in the {{"XXXXX"}} from the following choices:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Fill in
+ reference: Fill in the blank given options
diff --git a/promptsource/templates/cbt/NE/templates.yaml b/promptsource/templates/cbt/NE/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..290d2c2b46985c82eeb3472461fa163a7214ddae
--- /dev/null
+++ b/promptsource/templates/cbt/NE/templates.yaml
@@ -0,0 +1,147 @@
+dataset: cbt
+subset: NE
+templates:
+ 08820238-5bb3-4c7c-98bb-ec3d81e432bb: !Template
+ answer_choices: null
+ id: 08820238-5bb3-4c7c-98bb-ec3d81e432bb
+ jinja: '{{sentences | join('' '')}}
+
+
+ Write the next sentence of this story.
+
+ |||
+
+ {{ question.replace("XXXXX", answer) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Next Sentence
+ reference: Generate the next sentence given the story.
+ 1f8cad96-4c0f-435a-9a6f-653fcf158dbb: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 1f8cad96-4c0f-435a-9a6f-653fcf158dbb
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Replace {{"XXXXX"}} with the correct option from:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Replace
+ reference: Fill the blank given the options.
+ 556ee207-18c9-4c6c-860a-8ea09b9350bb: !Template
+ answer_choices: '{{options|join(''|||'')}}'
+ id: 556ee207-18c9-4c6c-860a-8ea09b9350bb
+ jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\
+ , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\
+ {{answer_choices|join(\", \")}}\n|||\n{{ answer }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - In the following
+ reference: Fill in the blanks given options.
+ 63bfa7b6-b566-4693-848c-e05cd7a12abb: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 63bfa7b6-b566-4693-848c-e05cd7a12abb
+ jinja: '{{ sentences | join('' '') }} {{question}}
+
+
+ Fill in the {{"XXXXX"}}.
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank without Options
+ reference: Fill in the blank without options.
+ a2e38459-90d9-4292-9d96-491ad7d4e3bb: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a2e38459-90d9-4292-9d96-491ad7d4e3bb
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Which of the following options best replaces {{"XXXXX"}} in the above story?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blanks with Options - above story
+ reference: Given the sentences, fill the blanks using the options.
+ a6fa37d5-899c-4ad0-b888-fab04cc8e4bb: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a6fa37d5-899c-4ad0-b888-fab04cc8e4bb
+ jinja: '{{sentences | join ('' '')}}
+
+
+ Which of the following options replaces {{"XXXXX"}} in "{{question}}"?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Which
+ reference: Fill Blank given options.
+ a8b67815-1927-4ef3-8d04-8d3f95525ebb: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a8b67815-1927-4ef3-8d04-8d3f95525ebb
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Fill in the {{"XXXXX"}} from the following choices:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Fill in
+ reference: Fill in the blank given options
diff --git a/promptsource/templates/cbt/P/templates.yaml b/promptsource/templates/cbt/P/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74543c35a70beedc0cb7dab4b5155b95147d12f8
--- /dev/null
+++ b/promptsource/templates/cbt/P/templates.yaml
@@ -0,0 +1,147 @@
+dataset: cbt
+subset: P
+templates:
+ 08820238-5bb3-4c7c-98bb-ec3d81e432ea: !Template
+ answer_choices: null
+ id: 08820238-5bb3-4c7c-98bb-ec3d81e432ea
+ jinja: '{{sentences | join('' '')}}
+
+
+ Write the next sentence of this story.
+
+ |||
+
+ {{ question.replace("XXXXX", answer) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Next Sentence
+ reference: Generate the next sentence given the story.
+ 1f8cad96-4c0f-435a-9a6f-653fcf158dda: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 1f8cad96-4c0f-435a-9a6f-653fcf158dda
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Replace {{"XXXXX"}} with the correct option from:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Replace
+ reference: Fill the blank given the options.
+ 556ee207-18c9-4c6c-860a-8ea09b93505a: !Template
+ answer_choices: '{{options|join(''|||'')}}'
+ id: 556ee207-18c9-4c6c-860a-8ea09b93505a
+ jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\
+ , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\
+ {{answer_choices|join(\", \")}}\n|||\n{{ answer }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - In the following
+ reference: Fill in the blanks given options.
+ 63bfa7b6-b566-4693-848c-e05cd7a12a0a: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 63bfa7b6-b566-4693-848c-e05cd7a12a0a
+ jinja: '{{ sentences | join('' '') }} {{question}}
+
+
+ Fill in the {{"XXXXX"}}.
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank without Options
+ reference: Fill in the blank without options.
+ a2e38459-90d9-4292-9d96-491ad7d4e3da: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a2e38459-90d9-4292-9d96-491ad7d4e3da
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Which of the following options best replaces {{"XXXXX"}} in the above story?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blanks with Options - above story
+ reference: Given the sentences, fill the blanks using the options.
+ a6fa37d5-899c-4ad0-b888-fab04cc8e42a: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a6fa37d5-899c-4ad0-b888-fab04cc8e42a
+ jinja: '{{sentences | join ('' '')}}
+
+
+ Which of the following options replaces {{"XXXXX"}} in "{{question}}"?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Which
+ reference: Fill Blank given options.
+ a8b67815-1927-4ef3-8d04-8d3f95525efa: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a8b67815-1927-4ef3-8d04-8d3f95525efa
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Fill in the {{"XXXXX"}} from the following choices:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Fill in
+ reference: Fill in the blank given options
diff --git a/promptsource/templates/cbt/V/templates.yaml b/promptsource/templates/cbt/V/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..10a1c0af36c9dba5cb7f3c716aec515b3b56bb79
--- /dev/null
+++ b/promptsource/templates/cbt/V/templates.yaml
@@ -0,0 +1,147 @@
+dataset: cbt
+subset: V
+templates:
+ 08820238-5bb3-4c7c-98bb-ec3d81e432cc: !Template
+ answer_choices: null
+ id: 08820238-5bb3-4c7c-98bb-ec3d81e432cc
+ jinja: '{{sentences | join('' '')}}
+
+
+ Write the next sentence of this story.
+
+ |||
+
+ {{ question.replace("XXXXX", answer) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Next Sentence
+ reference: Generate the next sentence given the story.
+ 1f8cad96-4c0f-435a-9a6f-653fcf158dcc: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 1f8cad96-4c0f-435a-9a6f-653fcf158dcc
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Replace {{"XXXXX"}} with the correct option from:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Replace
+ reference: Fill the blank given the options.
+ 556ee207-18c9-4c6c-860a-8ea09b9350cc: !Template
+ answer_choices: '{{options|join(''|||'')}}'
+ id: 556ee207-18c9-4c6c-860a-8ea09b9350cc
+ jinja: "{{sentences | join (' ')}}\n\nIn this following sentence: \n\"{{question}}\"\
+ , \naptly substitute the {{\"XXXXX\"}} with one of the following options:\n\
+ {{answer_choices|join(\", \")}}\n|||\n{{ answer }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - In the following
+ reference: Fill in the blanks given options.
+ 63bfa7b6-b566-4693-848c-e05cd7a12acc: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: 63bfa7b6-b566-4693-848c-e05cd7a12acc
+ jinja: '{{ sentences | join('' '') }} {{question}}
+
+
+ Fill in the {{"XXXXX"}}.
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank without Options
+ reference: Fill in the blank without options.
+ a2e38459-90d9-4292-9d96-491ad7d4e3cc: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a2e38459-90d9-4292-9d96-491ad7d4e3cc
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Which of the following options best replaces {{"XXXXX"}} in the above story?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blanks with Options - above story
+ reference: Given the sentences, fill the blanks using the options.
+ a6fa37d5-899c-4ad0-b888-fab04cc8e4cc: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a6fa37d5-899c-4ad0-b888-fab04cc8e4cc
+ jinja: '{{sentences | join ('' '')}}
+
+
+ Which of the following options replaces {{"XXXXX"}} in "{{question}}"?
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Which
+ reference: Fill Blank given options.
+ a8b67815-1927-4ef3-8d04-8d3f95525ecc: !Template
+ answer_choices: '{{options|join("|||")}}'
+ id: a8b67815-1927-4ef3-8d04-8d3f95525ecc
+ jinja: '{{sentences | join ('' '')}} {{question}}
+
+
+ Fill in the {{"XXXXX"}} from the following choices:
+
+ {{answer_choices|join(", ")}}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Fill Blank with Options - Fill in
+ reference: Fill in the blank given options
diff --git a/promptsource/templates/cbt/raw/templates.yaml b/promptsource/templates/cbt/raw/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..56ae14a2b352366b3e61bb42fe667b90eafde0a0
--- /dev/null
+++ b/promptsource/templates/cbt/raw/templates.yaml
@@ -0,0 +1,97 @@
+dataset: cbt
+subset: raw
+templates:
+ 4906fc72-c879-4f0a-b7ae-c6379a63e32c: !Template
+ answer_choices: null
+ id: 4906fc72-c879-4f0a-b7ae-c6379a63e32c
+ jinja: 'Guess the author for the book: "{{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'',''
+ '')}}"
+
+ |||
+
+ {{title.split(''___'')[0]|replace(''_'','' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Given Title Guess Author
+ reference: Given the title, guess the author of the book.
+ 5172f015-f022-4c3b-89e9-607467e29012: !Template
+ answer_choices: null
+ id: 5172f015-f022-4c3b-89e9-607467e29012
+ jinja: 'Suggest a book written by {{title.split(''___'')[0]|replace(''_'','' '')}}.
+
+ |||
+
+ {{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Given Author Recommend Book
+ reference: Given the author name, recommend one of his books.
+ 82c63934-1f33-4e6f-af59-af570b3e2e4c: !Template
+ answer_choices: null
+ id: 82c63934-1f33-4e6f-af59-af570b3e2e4c
+ jinja: 'Who wrote "{{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'',''
+ '')}}"?
+
+ |||
+
+ {{title.split(''___'')[0]|replace(''_'','' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Who wrote
+ reference: Given the title, guess the author of the book.
+ d407406e-ed5c-4f1f-bca8-b1f511e5fa53: !Template
+ answer_choices: null
+ id: d407406e-ed5c-4f1f-bca8-b1f511e5fa53
+ jinja: '{{ content }}
+
+
+ Guess the author for the above story.
+
+ |||
+
+ {{title.split(''___'')[0]|replace(''_'','' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Given Story Guess Author
+ reference: Given the story, guess the author.
+ f4e1d9bb-a43e-4c75-aa5d-4711090dd628: !Template
+ answer_choices: null
+ id: f4e1d9bb-a43e-4c75-aa5d-4711090dd628
+ jinja: '{{ content }}
+
+
+ Write a title for the above story.
+
+ |||
+
+ {{title.split(''___'')[1].split(''.'')[:-2]|join(''.'')|replace(''_'','' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Given Story Write Title
+ reference: Given the story, write a title.
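The cbt/raw templates above recover the author and the book title by string surgery on the title field, which implies a shape like Author_Name___Book_Title.txt.out. A worked sketch under that assumption (the example value is hypothetical):

    # Sketch of the title parsing in the cbt/raw templates.
    title = "Andrew_Lang___Prince_Prigio.txt.out"

    # {{title.split('___')[0]|replace('_',' ')}}
    author = title.split("___")[0].replace("_", " ")

    # {{title.split('___')[1].split('.')[:-2]|join('.')|replace('_',' ')}}
    # [:-2] drops the two trailing extensions; join('.') restores any
    # dots that belong to the title itself.
    book = ".".join(title.split("___")[1].split(".")[:-2]).replace("_", " ")

    print(author)  # Andrew Lang
    print(book)    # Prince Prigio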
diff --git a/promptsource/templates/cc_news/templates.yaml b/promptsource/templates/cc_news/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..41f12fa3dda5022e63142c014c2b3c8f7bcc87f9
--- /dev/null
+++ b/promptsource/templates/cc_news/templates.yaml
@@ -0,0 +1,264 @@
+dataset: cc_news
+templates:
+ 0c630a0d-5eeb-46ea-ba15-f76f5d05a57d: !Template
+ answer_choices: null
+ id: 0c630a0d-5eeb-46ea-ba15-f76f5d05a57d
+ jinja: 'What could be the content of a news article with the following title and
+ summary?
+
+
+ Title: {{title}}
+
+
+ Summary: {{description}}
+
+ |||
+
+ {{text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Use title and summary to generate news
+ reference: ''
+ 0c651168-8729-4a35-8c7c-5d812d4be790: !Template
+ answer_choices: null
+ id: 0c651168-8729-4a35-8c7c-5d812d4be790
+ jinja: "{{ text }} \n\nGive a brief description of the above text.\n|||\n{{ description\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate description using text
+ reference: ''
+ 11a681c3-8450-4064-aa08-ad3700b8b1bd: !Template
+ answer_choices: null
+ id: 11a681c3-8450-4064-aa08-ad3700b8b1bd
+ jinja: '{{ text }}
+
+
+ What title would you choose for the text above?
+
+ |||
+
+ {{ title }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Generate title using text
+ reference: ''
+ 14aca5f0-89ae-4ae1-9746-7a68f6a0664f: !Template
+ answer_choices: null
+ id: 14aca5f0-89ae-4ae1-9746-7a68f6a0664f
+ jinja: 'Suggest the content of a news article entitled:
+
+
+ {{ title }},
+
+
+ regarding:
+
+
+ {{ description }}
+
+ |||
+
+ {{ text }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Use title and description to generate text
+ reference: ''
+ 319a6d41-d6bb-4f8f-ba1b-085a45b3eddd: !Template
+ answer_choices: null
+ id: 319a6d41-d6bb-4f8f-ba1b-085a45b3eddd
+ jinja: "Write a brief summary of the text below: \n\n{{ text }}\n|||\n{{ description\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Write brief summary
+ reference: ''
+ 5ca5100e-7aa6-48c0-9e78-48914739dc90: !Template
+ answer_choices: null
+ id: 5ca5100e-7aa6-48c0-9e78-48914739dc90
+ jinja: 'Use the description below to write a news article entitled:
+
+ {{ title }}.
+
+
+ Description: {{ description }}
+
+ |||
+
+ {{ text }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Use title and description to generate news article
+ reference: ''
+ 7fd214bd-2403-42aa-850f-5255771e5609: !Template
+ answer_choices: null
+ id: 7fd214bd-2403-42aa-850f-5255771e5609
+ jinja: "Choose a title for the text below: \n\n{{ text }}\n|||\n{{ title }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Choose a title for text
+ reference: ''
+ 858a02bf-10c0-4284-886e-26a8859b2cc3: !Template
+ answer_choices: null
+ id: 858a02bf-10c0-4284-886e-26a8859b2cc3
+ jinja: '{{ text }}
+
+
+ Summarize the essential ideas of the above piece of news.
+
+ |||
+
+ {{ description }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Summarize text using description
+ reference: ''
+ a993713f-fd0e-4d62-99c0-e1313ab5c1c8: !Template
+ answer_choices: null
+ id: a993713f-fd0e-4d62-99c0-e1313ab5c1c8
+ jinja: "{{ text }} \n\nWhat title suits best the above piece of news?\n|||\n{{\
+ \ title }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Generate title using news text
+ reference: ''
+ ae553815-f631-4e67-a6bc-6d8a21dedb25: !Template
+ answer_choices: null
+ id: ae553815-f631-4e67-a6bc-6d8a21dedb25
+ jinja: "Summarize the essential ideas of the following piece of news: \n\n{{ text\
+ \ }}\n|||\n{{ description }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Summarize ideas from news
+ reference: ''
+ b637cfd7-d4b8-420a-b60b-4fe0aa891000: !Template
+ answer_choices: null
+ id: b637cfd7-d4b8-420a-b60b-4fe0aa891000
+ jinja: 'Write a piece of news expanding the following ideas:
+
+
+ {{ description }},
+
+
+ entitled:
+
+
+ {{ title }}
+
+ |||
+
+ {{ text }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Use title and description to generate news
+ reference: ''
+ cc13d9b7-041a-4b29-b6c4-a6851a21fb46: !Template
+ answer_choices: null
+ id: cc13d9b7-041a-4b29-b6c4-a6851a21fb46
+ jinja: "Give this text a title: \n\n{{ text }}\n|||\n{{ title }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Give text a title
+ reference: ''
+ e4d40d0e-8c38-45ef-97dd-15ebab0b4078: !Template
+ answer_choices: null
+ id: e4d40d0e-8c38-45ef-97dd-15ebab0b4078
+ jinja: "Give a brief description of the following text: \n\n{{ text }}\n|||\n\
+ {{ description }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Give brief description of text
+ reference: ''
+ f4a0b21c-fcf1-4e3d-aa59-7cf3b9ae8780: !Template
+ answer_choices: null
+ id: f4a0b21c-fcf1-4e3d-aa59-7cf3b9ae8780
+ jinja: "{{ text }} \n\nThe above text can be summarized as follows:\n|||\n{{ description\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Text summarized using description
+ reference: ''
diff --git a/promptsource/templates/circa/templates.yaml b/promptsource/templates/circa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e519f28949b849229c5af712c599ece237155b33
--- /dev/null
+++ b/promptsource/templates/circa/templates.yaml
@@ -0,0 +1,101 @@
+dataset: circa
+templates:
+ 053260a8-1bcc-4805-81d2-bb528fc56ca2: !Template
+ answer_choices: null
+ id: 053260a8-1bcc-4805-81d2-bb528fc56ca2
+ jinja: 'Convert this question to a declarative sentence asserting an
+ affirmative answer:
+
+
+ {{question_X}} |||
+
+ {{canquestion_X}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - BLEU
+ - Edit Distance
+ - ROUGE
+ original_task: false
+ name: question_declarative
+ reference: ''
+ 70b7a94a-6a39-4a81-9a6e-0709a0acdb28: !Template
+ answer_choices: "Yes ||| No ||| In the middle, neither yes nor no ||| Probably\
+ \ yes / sometimes yes ||| Probably no ||| Yes, subject to some conditions |||\
+ \ Other ||| I am not sure how X will interpret Y\u2019s answer"
+ id: 70b7a94a-6a39-4a81-9a6e-0709a0acdb28
+ jinja: "{% if goldstandard2 != -1 %}\n\nGiven the question-answer pair of X and\
+ \ Y in the context of {{context}}, which of the following answers is Y implying:\
+ \ \"{{\"Yes\"}}\", \"{{\"No\"}}\", \"{{\"In the middle, neither yes nor no\"\
+ }}\", \"{{\"Probably yes / sometimes yes\"}}\", \"{{\"Probably no\"}}\", \"\
+ {{\"Yes, subject to some conditions\"}}\", \"{{\"Other\"}}\" or \"{{\"I am not\
+ \ sure how X will interpret Y\u2019s answer\"}}\" ?\n\nX: {{question_X}} \n\n\
+ Y: {{answer_Y}} |||\n\n{{ answer_choices[goldstandard2]}}\n\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: goldstandard2_judgement
+ reference: ''
+ 73466d0f-b1b1-4c61-8f03-346e121ae06c: !Template
+ answer_choices: null
+ id: 73466d0f-b1b1-4c61-8f03-346e121ae06c
+ jinja: 'What is a possible question X could ask Y given the context of {{context}}
+ that would cause Y to answer "{{answer_Y}}"? |||
+
+ {{question_X}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: possible_qn
+ reference: ''
+ 997f7f96-d420-48c1-85f7-ecade54adbd7: !Template
+ answer_choices: "Yes ||| No ||| In the middle, neither yes nor no ||| Probably\
+ \ yes / sometimes yes ||| Probably no ||| Yes, subject to some conditions |||\
+ \ Other ||| I am not sure how X will interpret Y\u2019s answer"
+ id: 997f7f96-d420-48c1-85f7-ecade54adbd7
+ jinja: "{% if goldstandard1 != -1 %}\n\nGiven the question-answer pair of X and\
+ \ Y in the context of {{context}}, what answer is Y implying?\n\nX: {{question_X}}\
+ \ \n\nY: {{answer_Y}} |||\n\n{{ answer_choices[goldstandard1]}}\n\n{% endif\
+ \ %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: judgement
+ reference: ''
+ a15c1a30-5ef0-451f-b202-987a16752a0a: !Template
+ answer_choices: "Yes ||| No ||| In the middle, neither yes nor no ||| Probably\
+ \ yes / sometimes yes ||| Probably no ||| Yes, subject to some conditions |||\
+ \ Other ||| I am not sure how X will interpret Y\u2019s answer"
+ id: a15c1a30-5ef0-451f-b202-987a16752a0a
+ jinja: "{% if goldstandard1 != -1 %}\n\nGiven the question-answer pair of X and\
+ \ Y in the context of {{context}}, which of the following answers is Y implying:\
+ \ \"{{\"Yes\"}}\", \"{{\"No\"}}\", \"{{\"In the middle, neither yes nor no\"\
+ }}\", \"{{\"Probably yes / sometimes yes\"}}\", \"{{\"Probably no\"}}\", \"\
+ {{\"Yes, subject to some conditions\"}}\", \"{{\"Other\"}}\" or \"{{\"I am not\
+ \ sure how X will interpret Y\u2019s answer\"}}\" ?\n\nX: {{question_X}} \n\n\
+ Y: {{answer_Y}} |||\n\n{{ answer_choices[goldstandard1]}}\n\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: goldstandard1_judgement
+ reference: ''
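Every circa template above is wrapped in a guard like {% if goldstandard1 != -1 %}, so rows without a gold label render to an empty string; dropping those empty renders is left to the consuming pipeline. A sketch of the guard in isolation (example rows hypothetical):

    # Sketch: the goldstandard guard yields "" for unlabeled rows.
    from jinja2 import Environment

    env = Environment()
    t = env.from_string(
        "{% if goldstandard1 != -1 %}X: {{question_X}}\nY: {{answer_Y}}"
        " ||| {{answer_choices[goldstandard1]}}{% endif %}"
    )
    choices = ["Yes", "No", "In the middle, neither yes nor no"]
    labeled = {"question_X": "Want tea?", "answer_Y": "I just had coffee.",
               "goldstandard1": 1}
    print(repr(t.render(answer_choices=choices, **labeled)))
    print(repr(t.render(answer_choices=choices,
                        **dict(labeled, goldstandard1=-1))))  # ''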
diff --git a/promptsource/templates/climate_fever/templates.yaml b/promptsource/templates/climate_fever/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82faba1b8becbe20f192050c8abd746c05c87ee2
--- /dev/null
+++ b/promptsource/templates/climate_fever/templates.yaml
@@ -0,0 +1,162 @@
+dataset: climate_fever
+templates:
+ 38632cd9-7c4c-4e1d-85b3-20e7a78d4580: !Template
+ answer_choices: support ||| refute ||| not provide enough information for
+ id: 38632cd9-7c4c-4e1d-85b3-20e7a78d4580
+ jinja: 'Here''s a statement and accompanying evidence. Does the evidence {{answer_choices[0]}},
+ {{answer_choices[1]}}, or {{answer_choices[2]}} the statement?
+
+
+ Statement: {{claim}}
+
+
+ Evidence: {{evidences[0]["evidence"].strip(".").strip(''"'')}}.
+
+ |||
+
+ {{ answer_choices[evidences[0]["evidence_label"]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: first_evidence_and_claim_itemization
+ reference: First evidence and claim with simple itemization
+ 3970f474-a9e3-4264-aefa-dd4cfadd279c: !Template
+ answer_choices: support ||| refute ||| not enough information ||| disputed
+ id: 3970f474-a9e3-4264-aefa-dd4cfadd279c
+ jinja: 'For the given claim, do the accompanying statements {{answer_choices[0]}}
+ or {{answer_choices[1]}} it? If there are some of each, say "{{answer_choices[3]}}".
+ If there is not enough information to conclude, say "{{answer_choices[2]}}".
+
+
+ Claim: {{claim}}
+
+
+ Statements:
+
+ - {{ evidences | map(attribute="evidence") | map("trim", "\".") | join(".\n-
+ ") }}.
+
+ |||
+
+ {{ answer_choices[claim_label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim_and_all_supporting_evidences
+ reference: A claim and all supporting evidences provided with the associated claim
+ label
+ 5d5062c1-d28f-4b1c-a7da-9b53796ed39f: !Template
+ answer_choices: support ||| refute ||| not provide enough information for
+ id: 5d5062c1-d28f-4b1c-a7da-9b53796ed39f
+ jinja: 'Following is a statement made on climate change.
+
+ Statement: {{claim}}
+
+
+ If I provide the evidence "{{evidences[4]["evidence"].strip(".").strip(''"'')}}", will
+ it {{answer_choices[0]}}, {{answer_choices[1]}}, or {{answer_choices[2]}} the
+ claim?
+
+ |||
+
+ {{ answer_choices[evidences[4]["evidence_label"]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fifth_evidence_and_claim_itemization
+ reference: Fifth evidence and claim with simple itemization
+ 9ba074a2-fbcf-4f69-bf03-bd16dbdec9cd: !Template
+ answer_choices: support ||| refute ||| not provide enough information for
+ id: 9ba074a2-fbcf-4f69-bf03-bd16dbdec9cd
+ jinja: 'Does the statement "{{evidences[3]["evidence"].strip(".").strip(''"'')}}"
+ {{answer_choices[0]}}, {{answer_choices[1]}}, or {{answer_choices[2]}} the claim
+ that "{{claim}}'' |||
+
+ {{ answer_choices[evidences[3]["evidence_label"]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fourth_evidence_and_claim_itemization
+ reference: Fourth evidence and claim with simple itemization
+ 9f68b883-d6a3-4e95-af2a-b7755bc46ba9: !Template
+ answer_choices: support ||| refute ||| not enough information
+ id: 9f68b883-d6a3-4e95-af2a-b7755bc46ba9
+ jinja: 'While searching about climate change, I came across a claim that says
+ "{{claim}}". Should I use the following evidence to {{answer_choices[0]}} or
+ {{answer_choices[1]}} the claim? If there is not enough information, say "not
+ enough information".
+
+ Evidence: {{evidences[2]["evidence"].strip(".").strip(''"'')}}.
+
+ |||
+
+ {{ answer_choices[evidences[2]["evidence_label"]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: third_evidence_and_claim_itemization
+ reference: Third evidence and claim with simple itemization
+ e3e01825-e256-4098-b7bb-aa07c399e8f6: !Template
+ answer_choices: support ||| refute ||| not provide enough information for
+ id: e3e01825-e256-4098-b7bb-aa07c399e8f6
+ jinja: 'If I argue that "{{claim}}" would the following evidence {{answer_choices[0]}},
+ {{answer_choices[1]}}, or {{answer_choices[2]}} the claim?
+
+ Evidence: {{evidences[1]["evidence"].strip(".").strip(''"'')}}.
+
+ |||
+
+ {{ answer_choices[evidences[1]["evidence_label"]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: second_evidence_and_claim_itemization
+ reference: Second evidence and claim with simple itemization
+ ff9c9c11-92f1-4cb2-a73c-d786d58b00e1: !Template
+ answer_choices: support ||| refute ||| not provide enough information for
+ id: ff9c9c11-92f1-4cb2-a73c-d786d58b00e1
+ jinja: 'Considering the following claim:
+
+ {{claim}}.
+
+ Does the following statement {{answer_choices[0]}}, {{answer_choices[1]}}, or
+ {{answer_choices[2]}} the claim?
+
+ {{evidences[2]["evidence"].strip(".").strip(''"'')}}.
+
+ |||
+
+ {{ answer_choices[evidences[2]["evidence_label"]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: third_evidence_claim_pair
+ reference: Relation between the claim and third evidence pair.
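The claim_and_all_supporting_evidences template above flattens the evidences list with map(attribute="evidence") | map("trim", '".') | join(".\n- "); note that Jinja2's trim filter only accepts a characters argument from version 2.11 onward. A Python sketch of the same pipeline (rows hypothetical):

    # Python equivalent of the evidences bullet-list pipeline.
    evidences = [
        {"evidence": '"Sea levels are rising."', "evidence_label": 0},
        {"evidence": "Glaciers are retreating.", "evidence_label": 0},
    ]
    # map(attribute=...) pulls the strings; trim strips quotes/periods;
    # join rebuilds the bullets and the template re-appends a period.
    lines = [e["evidence"].strip('".') for e in evidences]
    print("- " + ".\n- ".join(lines) + ".")
    # - Sea levels are rising.
    # - Glaciers are retreating.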
diff --git a/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml b/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0f12d17c19bed55d51265fdb3efa2efd3554d95
--- /dev/null
+++ b/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml
@@ -0,0 +1,174 @@
+dataset: cnn_dailymail
+subset: 3.0.0
+templates:
+ 0556fd07-f7de-4daf-a0ae-4cda4fc239c8: !Template
+ answer_choices: null
+ id: 0556fd07-f7de-4daf-a0ae-4cda4fc239c8
+ jinja: 'Can you write an outline of the following article in a few points?
+
+
+ Article: {{article}}|||
+
+ {{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: write_an_outline
+ reference: ''
+ 1c446bde-b3e6-4629-860f-9125681c72a1: !Template
+ answer_choices: null
+ id: 1c446bde-b3e6-4629-860f-9125681c72a1
+ jinja: 'Summarise the article:
+
+
+ {{article}} |||
+
+ {{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: news_summary
+ reference: ''
+ 6e46894f-b5ff-4259-a691-63f1da8405da: !Template
+ answer_choices: null
+ id: 6e46894f-b5ff-4259-a691-63f1da8405da
+ jinja: 'In 2 or 3 sentences, what are the main points one should remember from
+ this news article?
+
+
+ Article: {{article}} |||
+
+ {{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: 2_or_3_sentences
+ reference: ''
+ 892cbf90-2c19-4664-943e-a004a0c9a7fa: !Template
+ answer_choices: null
+ id: 892cbf90-2c19-4664-943e-a004a0c9a7fa
+ jinja: 'Could you please generate a TLDR (Too Long Didn''t Read) summary of the
+ following news article?
+
+
+ Article: {{article}} |||
+
+ {{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: tldr_summary
+ reference: ''
+ 9b7c6abf-5110-4b31-8345-be6b2eeea580: !Template
+ answer_choices: null
+ id: 9b7c6abf-5110-4b31-8345-be6b2eeea580
+ jinja: 'Condense the article down to the essentials to present it in the form
+ of short cards in mobile news apps:
+
+
+ {{article}} |||
+
+ {{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: news_card_view
+ reference: ''
+ b4ff2f63-8539-4d9c-9858-42fa5f95ba56: !Template
+ answer_choices: null
+ id: b4ff2f63-8539-4d9c-9858-42fa5f95ba56
+ jinja: 'Generate a story from key plot points:
+
+
+ {{highlights}} |||
+
+ {{article}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_story
+ reference: ''
+ c20ac3c5-da85-408a-bba9-8b12ef2ae379: !Template
+ answer_choices: null
+ id: c20ac3c5-da85-408a-bba9-8b12ef2ae379
+ jinja: 'Summarize the following article in brief: {{article}}|||{{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: sum_in_brief
+ reference: ''
+ e047b4ec-abff-4b36-896a-83f5f1ea6759: !Template
+ answer_choices: null
+ id: e047b4ec-abff-4b36-896a-83f5f1ea6759
+ jinja: 'Extract key points from the article based on which the stock market could
+ react:
+
+
+ {{article}} |||
+
+ {{highlights}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: news_stock
+ reference: ''
+ efa42de6-7a20-4e35-92fc-919a5eb0b77e: !Template
+ answer_choices: null
+ id: efa42de6-7a20-4e35-92fc-919a5eb0b77e
+ jinja: 'What details would you include in a storyline to make it more engaging
+ and informative?
+
+
+ {{highlights}} |||
+
+ {{article}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: spice_up_story
+ reference: ''
diff --git a/promptsource/templates/codah/codah/templates.yaml b/promptsource/templates/codah/codah/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1629de03271260eb318cb358ecd34d7d602dc12f
--- /dev/null
+++ b/promptsource/templates/codah/codah/templates.yaml
@@ -0,0 +1,204 @@
+dataset: codah
+subset: codah
+templates:
+ 008b421e-3ca1-495b-acf8-d88fe352da53: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 008b421e-3ca1-495b-acf8-d88fe352da53
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ What''s the best ending to finish the incomplete sentence above?
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_after_sentence_and_choices
+ reference: ''
+ 01fd9142-114e-43ea-bdef-9ccc46135ebb: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 01fd9142-114e-43ea-bdef-9ccc46135ebb
+ jinja: 'Complete the sentence below by choosing the best answer from a list of
+ candidates.
+
+
+ Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_before_sentence_and_choices
+ reference: ''
+ 92522b3b-01ea-4ea2-8159-efddff495f82: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 92522b3b-01ea-4ea2-8159-efddff495f82
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ Complete the sentence by choosing the best answer from the candidates above.
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_after_sentence_and_choices
+ reference: ''
+ 99f0a5f0-2e5d-4e04-817c-8968be2cc760: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 99f0a5f0-2e5d-4e04-817c-8968be2cc760
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Complete the sentence above by choosing the best answer from the candidates
+ below.
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_between_sentence_and_choices
+ reference: ''
+ 9e383a33-67e3-4a03-a4c5-50f986022a71: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9e383a33-67e3-4a03-a4c5-50f986022a71
+ jinja: 'Here we have an incomplete sentence "{{question_propmt}}".
+
+
+ What would be the best answer choice to finish it?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_between_sentence_and_choices
+ reference: ''
+ c171ce3b-08c4-4056-af11-7bdb165fc75d: !Template
+ answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+ ||| Others
+ id: c171ce3b-08c4-4056-af11-7bdb165fc75d
+ jinja: 'Sentence: {{question_propmt}}
+
+ Completion: {{ candidate_answers[correct_answer_idx] }}
+
+
+ Which of {{answer_choices | join(", ")}} best describes the completed sentence?
+
+
+ |||
+
+ {{answer_choices[question_category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_category
+ reference: ''
+ cc338e7b-c13c-4c4d-af51-7151c24c001e: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: cc338e7b-c13c-4c4d-af51-7151c24c001e
+ jinja: 'What''s the best ending to finish this incomplete sentence "{{question_propmt}}"?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_before_sentence_and_choices
+ reference: ''
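The codah templates above index answer_choices with loop.index - 1 because Jinja's loop.index is 1-based (loop.index0 is the 0-based equivalent), and question_propmt follows the dataset's own field spelling. A minimal rendering sketch (candidates hypothetical):

    # Sketch: how the candidate loop renders its lettered choices.
    from jinja2 import Environment

    env = Environment()
    t = env.from_string(
        "{% for candidate in candidate_answers -%}\n"
        "{{ answer_choices[loop.index - 1] }}. {{candidate}}\n"
        "{% endfor %}"
    )
    print(t.render(candidate_answers=["went home.", "flew away."],
                   answer_choices=["A", "B", "C", "D"]))
    # A. went home.
    # B. flew away.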
diff --git a/promptsource/templates/codah/fold_0/templates.yaml b/promptsource/templates/codah/fold_0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ffe5d78dcb52b12206f04fe17ab2cd4ccea9adf6
--- /dev/null
+++ b/promptsource/templates/codah/fold_0/templates.yaml
@@ -0,0 +1,205 @@
+dataset: codah
+subset: fold_0
+templates:
+ 1d9fa9d2-80d1-442c-986d-fb7598923d09: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 1d9fa9d2-80d1-442c-986d-fb7598923d09
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ What''s the best ending to finish the incomplete sentence above?
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_after_sentence_and_choices
+ reference: ''
+ 3b64d17a-225f-485b-b090-1a53fdeb1c90: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 3b64d17a-225f-485b-b090-1a53fdeb1c90
+ jinja: 'Complete the sentence below by choosing the best answer from a list of
+ candidates.
+
+
+ Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_before_sentence_and_choices
+ reference: ''
+ 6b134736-8660-4457-a5de-f7dd0f1f148b: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 6b134736-8660-4457-a5de-f7dd0f1f148b
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ Complete the sentence by choosing the best answer from the candidates above.
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_after_sentence_and_choices
+ reference: ''
+ 88ec7e5d-304e-4dbd-9aad-6f2a69ec6147: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 88ec7e5d-304e-4dbd-9aad-6f2a69ec6147
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Complete the sentence above by choosing the best answer from the candidates
+ below.
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_between_sentence_and_choices
+ reference: ''
+ 9527a0b1-0267-443e-83c8-dae9e6aa263b: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9527a0b1-0267-443e-83c8-dae9e6aa263b
+ jinja: 'Here we have an incomplete sentence "{{question_propmt}}".
+
+
+ What would be the best answer choice to finish it?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_between_sentence_and_choices
+ reference: ''
+ bd7bcef8-72fd-4782-85e7-a02c5b90d4a6: !Template
+ answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+ ||| Others
+ id: bd7bcef8-72fd-4782-85e7-a02c5b90d4a6
+ jinja: 'Sentence: {{question_propmt}}
+
+ Completion: {{ candidate_answers[correct_answer_idx] }}
+
+
+ Which of {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+ " or Others"])}} best describes the completed sentence?
+
+
+ |||
+
+ {{answer_choices[question_category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_category
+ reference: ''
+ c79ad64b-0a59-472e-aab4-804b01ddd3c1: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: c79ad64b-0a59-472e-aab4-804b01ddd3c1
+ jinja: 'What''s the best ending to finish this incomplete sentence "{{question_propmt}}"?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_before_sentence_and_choices
+ reference: ''
diff --git a/promptsource/templates/codah/fold_1/templates.yaml b/promptsource/templates/codah/fold_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d5e2ce3f9c43946f3799ee285d1f018d199822e8
--- /dev/null
+++ b/promptsource/templates/codah/fold_1/templates.yaml
@@ -0,0 +1,205 @@
+dataset: codah
+subset: fold_1
+templates:
+ 0a7ef357-b2c2-4c1c-a4b9-7e069780ae76: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 0a7ef357-b2c2-4c1c-a4b9-7e069780ae76
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ What''s the best ending to finish the incomplete sentence above?
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_after_sentence_and_choices
+ reference: ''
+ 0d54d52b-91dc-4e23-98a1-3b81bbd8558f: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 0d54d52b-91dc-4e23-98a1-3b81bbd8558f
+ jinja: 'Complete the sentence below by choosing the best answer from a list of
+ candidates.
+
+
+ Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_before_sentence_and_choices
+ reference: ''
+ 2adfb019-690a-482c-aecc-2b43168a9a2a: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 2adfb019-690a-482c-aecc-2b43168a9a2a
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ Complete the sentence by choosing the best answer from the candidates above.
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_after_sentence_and_choices
+ reference: ''
+ 3a946e9d-7b34-47ed-b3b8-7894ded8839d: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 3a946e9d-7b34-47ed-b3b8-7894ded8839d
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Complete the sentence above by choosing the best answer from the candidates
+ below.
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_between_sentence_and_choices
+ reference: ''
+ 47651e9b-c25c-49a4-b32a-c0029cdc2aa2: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 47651e9b-c25c-49a4-b32a-c0029cdc2aa2
+ jinja: 'Here we have an incomplete sentence "{{question_propmt}}".
+
+
+ What would be the best answer choice to finish it?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_between_sentence_and_choices
+ reference: ''
+ 757a5faf-a48c-41b6-b7bd-a512d5e6107b: !Template
+ answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+ ||| Others
+ id: 757a5faf-a48c-41b6-b7bd-a512d5e6107b
+ jinja: 'Sentence: {{question_propmt}}
+
+ Completion: {{ candidate_answers[correct_answer_idx] }}
+
+
+ Which of {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+ " or Others"])}} best describes the completed sentence?
+
+
+ |||
+
+ {{answer_choices[question_category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_category
+ reference: ''
+ 9f1a9858-9528-47ed-a5ee-16d18b48c0da: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9f1a9858-9528-47ed-a5ee-16d18b48c0da
+ jinja: 'What''s the best ending to finish this incomplete sentence "{{question_propmt}}"?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_before_sentence_and_choices
+ reference: ''
diff --git a/promptsource/templates/codah/fold_2/templates.yaml b/promptsource/templates/codah/fold_2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83a6d18568dbfcbe4068f5c8f53b3059dc9f9bd7
--- /dev/null
+++ b/promptsource/templates/codah/fold_2/templates.yaml
@@ -0,0 +1,205 @@
+dataset: codah
+subset: fold_2
+templates:
+ 0516d1f3-da5d-4e0f-b320-e20b79ac4bfc: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 0516d1f3-da5d-4e0f-b320-e20b79ac4bfc
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ What''s the best ending to finish the incomplete sentence above?
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_after_sentence_and_choices
+ reference: ''
+ 1e14c67b-13ae-4bc7-9919-2d405c79cfc3: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 1e14c67b-13ae-4bc7-9919-2d405c79cfc3
+ jinja: 'Complete the sentence below by choosing the best answer from a list of
+ candidates.
+
+
+ Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_before_sentence_and_choices
+ reference: ''
+ 1fa5ab62-06da-4f18-9a0a-d9851224cde5: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 1fa5ab62-06da-4f18-9a0a-d9851224cde5
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ Complete the sentence by choosing the best answer from the candidates above.
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_after_sentence_and_choices
+ reference: ''
+ 698936f4-cdb9-41d4-8feb-bbb934ea7197: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 698936f4-cdb9-41d4-8feb-bbb934ea7197
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Complete the sentence above by choosing the best answer from the candidates
+ below.
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_between_sentence_and_choices
+ reference: ''
+ 8dd5e484-9763-4f70-9990-e0c1a94d76b0: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 8dd5e484-9763-4f70-9990-e0c1a94d76b0
+ jinja: 'Here we have an incomplete sentence "{{question_propmt}}".
+
+
+ What would be the best answer choice to finish it?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_between_sentence_and_choices
+ reference: ''
+ b9f90c9f-2714-4b9c-bf10-1b540ee38dfa: !Template
+ answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+ ||| Others
+ id: b9f90c9f-2714-4b9c-bf10-1b540ee38dfa
+ jinja: 'Sentence: {{question_propmt}}
+
+ Completion: {{ candidate_answers[correct_answer_idx] }}
+
+
+ Which of {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+ " or Others"])}} best describes the completed sentence?
+
+
+ |||
+
+ {{answer_choices[question_category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_category
+ reference: ''
+ bfb69adf-326b-4366-9de6-d566ab75ae2c: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: bfb69adf-326b-4366-9de6-d566ab75ae2c
+ jinja: 'What''s the best ending to finish this incomplete sentence "{{question_propmt}}"?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_before_sentence_and_choices
+ reference: ''
diff --git a/promptsource/templates/codah/fold_3/templates.yaml b/promptsource/templates/codah/fold_3/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1be1555b14b951663c7e79304545817f18110de6
--- /dev/null
+++ b/promptsource/templates/codah/fold_3/templates.yaml
@@ -0,0 +1,205 @@
+dataset: codah
+subset: fold_3
+templates:
+ 15861c29-a2f1-4165-8849-83b88320fc3d: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 15861c29-a2f1-4165-8849-83b88320fc3d
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ What''s the best ending to finish the incomplete sentence above?
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_after_sentence_and_choices
+ reference: ''
+ 9ab1a3e6-6c03-4c37-9a85-d8128dc92545: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9ab1a3e6-6c03-4c37-9a85-d8128dc92545
+ jinja: 'Complete the sentence below by choosing the best answer from a list of
+ candidates.
+
+
+ Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_before_sentence_and_choices
+ reference: ''
+ 9efbda8e-19f8-47fb-907a-d19c660b0ab8: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9efbda8e-19f8-47fb-907a-d19c660b0ab8
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ Complete the sentence by choosing the best answer from the candidates above.
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_after_sentence_and_choices
+ reference: ''
+ 9fecf40b-f96f-4124-80b0-038d5e58784c: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9fecf40b-f96f-4124-80b0-038d5e58784c
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Complete the sentence above by choosing the best answer from the candidates
+ below.
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_between_sentence_and_choices
+ reference: ''
+ a53e444c-da0d-4159-8488-35858b239d3d: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: a53e444c-da0d-4159-8488-35858b239d3d
+ jinja: 'Here we have an incomplete sentence "{{question_propmt}}".
+
+
+ What would be the best answer choice to finish it?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_between_sentence_and_choices
+ reference: ''
+ ce98e4d9-7eca-4101-8299-fb074b52d279: !Template
+ answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+ ||| Others
+ id: ce98e4d9-7eca-4101-8299-fb074b52d279
+ jinja: 'Sentence: {{question_propmt}}
+
+ Completion: {{ candidate_answers[correct_answer_idx] }}
+
+
+ Which of {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+ " or Others"])}} best describes the completed sentence?
+
+
+ |||
+
+ {{answer_choices[question_category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_category
+ reference: ''
+ dd7a60fc-bec9-473b-b00d-f52c31c30b1c: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: dd7a60fc-bec9-473b-b00d-f52c31c30b1c
+ jinja: 'What''s the best ending to finish this incomplete sentence "{{question_propmt}}"?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_before_sentence_and_choices
+ reference: ''
diff --git a/promptsource/templates/codah/fold_4/templates.yaml b/promptsource/templates/codah/fold_4/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9619b9ed70af1c8e2eb2bf3943316f15e9e85973
--- /dev/null
+++ b/promptsource/templates/codah/fold_4/templates.yaml
@@ -0,0 +1,205 @@
+dataset: codah
+subset: fold_4
+templates:
+ 1511a72f-0975-44ab-90cd-4cc6c73d5442: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 1511a72f-0975-44ab-90cd-4cc6c73d5442
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ What''s the best ending to finish the incomplete sentence above?
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_after_sentence_and_choices
+ reference: ''
+ 1713e7dd-f4f6-453f-b849-11932955bc40: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 1713e7dd-f4f6-453f-b849-11932955bc40
+ jinja: 'Complete the sentence below by choosing the best answer from a list of
+ candidates.
+
+
+ Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_before_sentence_and_choices
+ reference: ''
+ 1889f8c5-f868-4c7f-998a-699e2bbcb982: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 1889f8c5-f868-4c7f-998a-699e2bbcb982
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ Complete the sentence by choosing the best answer from the candidates above.
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_after_sentence_and_choices
+ reference: ''
+ 3b179264-27cb-4ad0-ba66-5d701d898f27: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 3b179264-27cb-4ad0-ba66-5d701d898f27
+ jinja: 'Sentence: {{question_propmt}}
+
+
+ Complete the sentence above by choosing the best answer from the candidates
+ below.
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_instruction_between_sentence_and_choices
+ reference: ''
+ 42d83925-b370-4044-9cdd-89ae648a748a: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 42d83925-b370-4044-9cdd-89ae648a748a
+ jinja: 'Here we have an incomplete sentence "{{question_propmt}}".
+
+
+ What would be the best answer choice to finish it?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_between_sentence_and_choices
+ reference: ''
+ 997d06e6-1b35-49b9-9aea-7cb36e8b6dae: !Template
+ answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+ ||| Others
+ id: 997d06e6-1b35-49b9-9aea-7cb36e8b6dae
+ jinja: 'Sentence: {{question_propmt}}
+
+ Completion: {{ candidate_answers[correct_answer_idx] }}
+
+
+ Which of {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+ " or Others"])}} best describes the completed sentence?
+
+
+ |||
+
+ {{answer_choices[question_category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_category
+ reference: ''
+ bdca0580-7a1b-41f9-87cb-9526e959582d: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: bdca0580-7a1b-41f9-87cb-9526e959582d
+ jinja: 'What''s the best ending to finish this incomplete sentence "{{question_propmt}}"?
+
+
+ Candidate Answer Choices:
+
+ {% for candidate in candidate_answers -%}
+
+ {{ answer_choices[loop.index - 1] }}. {{candidate}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_idx] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_instruction_before_sentence_and_choices
+ reference: ''
diff --git a/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml b/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ef7977fe641a9440f1b2944d0c3189ea9030077
--- /dev/null
+++ b/promptsource/templates/code_x_glue_tc_text_to_code/templates.yaml
@@ -0,0 +1,28 @@
+dataset: code_x_glue_tc_text_to_code
+templates:
+ eb965448-691e-4506-bb61-a54771c7014b: !Template
+ answer_choices: null
+ id: eb965448-691e-4506-bb61-a54771c7014b
+ jinja: "{% set field_seperator = \"concode_field_sep\" %}\n{% set method_seperator\
+ \ = \"concode_elem_sep\" %}\n{% set ns = namespace(nl=\"\", fields=[], methods=[])\
+ \ %}\n{% if code | length > 0 %}\n\n {% for chunk_a in nl.split(field_seperator)\
+ \ %}\n {% set first_iter = loop.index0 == 0 %}\n {% for chunk_b\
+ \ in chunk_a.split(method_seperator) %}\n {% if loop.index0 == 0\
+ \ and first_iter %}\n {% set ns.nl = chunk_b %}\n \
+ \ {% elif loop.index0 == 0 %}\n {% set ns.fields = ns.fields\
+ \ + [chunk_b.strip()] %}\n {% else %}\n {% set ns.methods\
+ \ = ns.methods + [chunk_b.strip()] %}\n {% endif %}\n {% endfor\
+ \ %}\n {% endfor %}\n Method description:\n {{ns.nl}}\n\n Class\
+ \ fields:\n {{ns.fields | unique | join(\", \")}}\n\n Class methods:\n\
+ \ {{ns.methods | unique | join(\", \")}}\n\n Generate the method\n \
+ \ |||\n {{code}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - BLEU
+ original_task: true
+ name: generate class member function given class environment
+ reference: ''
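The template above reconstructs the structured CONCODE context by splitting the `nl` field on its two separator markers. Below is a plain-Python sketch of the same parsing, on a made-up input; the real records carry the same markers but different content, and the template additionally deduplicates fields and methods with `| unique` when rendering:

FIELD_SEP = "concode_field_sep"
ELEM_SEP = "concode_elem_sep"

def parse_concode_nl(nl: str):
    """Mirror of the nested split/namespace logic in the Jinja template."""
    description, fields, methods = "", [], []
    for i, chunk in enumerate(nl.split(FIELD_SEP)):
        for j, piece in enumerate(chunk.split(ELEM_SEP)):
            if i == 0 and j == 0:
                description = piece            # method description (unstripped, as in the template)
            elif j == 0:
                fields.append(piece.strip())   # class field
            else:
                methods.append(piece.strip())  # class method
    return description, fields, methods

nl = ("adds two numbers concode_field_sep int total concode_elem_sep getTotal "
      "concode_field_sep String name concode_elem_sep getName")
desc, fields, methods = parse_concode_nl(nl)
print(repr(desc))  # 'adds two numbers '
print(fields)      # ['int total', 'String name']
print(methods)     # ['getTotal', 'getName']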
diff --git a/promptsource/templates/common_gen/templates.yaml b/promptsource/templates/common_gen/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eeb09679798f5686a2cf208518248e4dd459e979
--- /dev/null
+++ b/promptsource/templates/common_gen/templates.yaml
@@ -0,0 +1,152 @@
+dataset: common_gen
+templates:
+ 45778bd5-fddc-4c60-a77b-4eff5bed1c0b: !Template
+ answer_choices: null
+ id: 45778bd5-fddc-4c60-a77b-4eff5bed1c0b
+ jinja: "Ignoring the order of the concepts: {{ concepts | join(\", \") }}; \n\
+ Generate a sentence with all the concepts:\n|||\n{{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Given concepts - type 2
+ reference: Ignoring the order of the concepts:...
+ 684f1859-0b8d-4efe-82e1-7218838813cd: !Template
+ answer_choices: null
+ id: 684f1859-0b8d-4efe-82e1-7218838813cd
+ jinja: 'Put the concepts together to form a sentence: {{ concepts | join(", ")
+ }}.
+
+ |||
+
+ {{target}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Put together
+ reference: This is similar to a task description
+ a4991cc7-cc91-4f37-af80-1983a02eb950: !Template
+ answer_choices: null
+ id: a4991cc7-cc91-4f37-af80-1983a02eb950
+ jinja: "Construct a sentence with the word {{ concepts | choice }}. \n\nHint:\
+ \ Use {{concepts | join(\", \")}} to restrict the output sentence.\n|||\n{{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: choice in concept centric sentence generation
+ reference: ''
+ b2033df4-7922-45b8-a113-e7784135cea9: !Template
+ answer_choices: null
+ id: b2033df4-7922-45b8-a113-e7784135cea9
+ jinja: "{% set seq = [ \n'From the concepts mentioned below, generate a sentence:',\
+ \ \n'Convert the concepts to a sentence:', \n'Given the list of concepts, write\
+ \ a sentence:'\n] %} \n{{ seq | choice }}\n{{ concepts | join(\", \") }}\n|||\n\
+ {{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: random task template prompt
+ reference: With a randomly chosen task template, generate a sentence from the
+ list of concepts
+ b7012213-04c4-424d-85fb-39d63d8a0ca2: !Template
+ answer_choices: null
+ id: b7012213-04c4-424d-85fb-39d63d8a0ca2
+ jinja: 'What are the topics in the sentence: {{target}}
+
+ |||
+
+ {{ concepts | join(", ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: topics from the sentence
+ reference: The template extracts the topics from the sentence
+ ed215962-8e51-45e7-b025-6e822f877098: !Template
+ answer_choices: null
+ id: ed215962-8e51-45e7-b025-6e822f877098
+ jinja: "We have the sentence: {{target}}; \nExtract all the key concepts: \n|||\n\
+ {{ concepts | join(\", \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: sentence to concepts
+ reference: Template identifies the concepts from the sentence
+ f3fce748-89e0-4b88-94bb-72ddb9a04d10: !Template
+ answer_choices: null
+ id: f3fce748-89e0-4b88-94bb-72ddb9a04d10
+ jinja: 'Can you write a sentence about the topic {{concepts | choice}}?
+
+ |||
+
+ {{target}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: topic to sentence
+ reference: 'Choose a random topic and ask the model to write a sentence about
+ it '
+ fa787974-86dd-4f66-b2d7-6d3523ce00e1: !Template
+ answer_choices: null
+ id: fa787974-86dd-4f66-b2d7-6d3523ce00e1
+ jinja: "Humans can easily string together abstract concepts to form a coherent\
+ \ sentence. \nFor example, with the concepts {{ concepts | join(\", \") }},\
+ \ a simple sentence can be \n|||\n{{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Example prompt
+ reference: The prompt is in the form of an example
+ fbeebb4f-cba1-4bc4-80ec-758a3c134033: !Template
+ answer_choices: null
+ id: fbeebb4f-cba1-4bc4-80ec-758a3c134033
+ jinja: "Given the list of concepts: {{ concepts | join(\", \") }}; \nGenerate\
+ \ a sentence with all the concepts:\n|||\n{{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Given concepts type 1
+ reference: 'The prompt has the prefix "Given the .." '
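One caveat on the common_gen templates above: `concepts | choice` and `seq | choice` rely on a `choice` filter that is not part of stock Jinja2 (the closest built-in is `random`); promptsource registers it on its environment as a uniform random pick. A rough sketch, hedged as an assumption about how the app sets up its Jinja environment:

import random
from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice  # assumed promptsource-style registration
template = env.from_string("{{ concepts | choice }}")
print(template.render(concepts=["dog", "ball", "park"]))
# prints one of: dog / ball / park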
diff --git a/promptsource/templates/commonsense_qa/templates.yaml b/promptsource/templates/commonsense_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..974c03a2eb5c9cbc47343f2c41a05680a72105c5
--- /dev/null
+++ b/promptsource/templates/commonsense_qa/templates.yaml
@@ -0,0 +1,129 @@
+dataset: commonsense_qa
+templates:
+ 1e1d0ce1-b0ea-4ad8-9971-b2b44948123b: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 1e1d0ce1-b0ea-4ad8-9971-b2b44948123b
+ jinja: '{% if answerKey != "" %}
+
+ Answer the following question:
+
+ {{question}} |||
+
+ {{ answer_choices[choices[''label''].index(answerKey)] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_given_question_without_options
+ reference: ''
+ 41188da5-c16a-4c6b-89af-6ce6815aedc6: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 41188da5-c16a-4c6b-89af-6ce6815aedc6
+ jinja: '{% if answerKey != "" %}
+
+ {{question}}
+
+
+ - {{answer_choices | join("\n- ")}} |||
+
+ {{answer_choices[choices[''label''].index(answerKey)] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_answering
+ reference: ''
+ 42fca80b-b614-4288-aad2-2525360543cb: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: 42fca80b-b614-4288-aad2-2525360543cb
+ jinja: '{% if answerKey != "" %}
+
+ Given the following options, what do you think is the correct answer to the
+ question below:
+
+
+ {{question}}
+
+
+ Options:
+
+ {% for letter, t in zip(answer_choices, choices.text) %}
+
+ - {{letter}}: {{t}}
+
+ {% endfor %} |||
+
+ {{answerKey}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_to_answer_index
+ reference: ''
+ 8e3f63fa-9ae6-4105-bd51-874b5e1d6b8e: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 8e3f63fa-9ae6-4105-bd51-874b5e1d6b8e
+ jinja: '{% if answerKey != "" %}
+
+ Given the options below, select the most suitable answer for the following question:
+
+ {{question}}
+
+ Options:
+
+ - {{answer_choices | join("\n- ")}}|||
+
+ {{answer_choices[choices["label"].index(answerKey)]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: most_suitable_answer
+ reference: ''
+ bc718994-1d3e-4ae4-b65b-be307154b0a6: !Template
+ answer_choices: null
+ id: bc718994-1d3e-4ae4-b65b-be307154b0a6
+ jinja: '{% if answerKey != "" %}
+
+ Use the following answer options to predict a possible question:
+
+
+ {% for i in range(choices[''text'']|length) %}
+
+ - {{choices[''text''][i]}}
+
+ {% endfor %} |||
+
+ {{question}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer_to_question
+ reference: ''
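The commonsense_qa file above also shows the other role `answer_choices` can play: instead of a fixed `A ||| B ||| ...` list, it can itself be a Jinja expression rendered per example. A sketch of the render-then-split convention (the splitting here is illustrative, not promptsource's exact internal code):

from jinja2 import Template

example = {"choices": {"text": ["bank", "library", "mall"], "label": ["A", "B", "C"]}}
rendered = Template('{{choices.text | join("|||")}}').render(**example)
answer_choices = [part.strip() for part in rendered.split("|||")]
print(answer_choices)  # ['bank', 'library', 'mall']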
diff --git a/promptsource/templates/conv_ai/templates.yaml b/promptsource/templates/conv_ai/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..793f25a7951bb1971075df63298f8f07ebc8685d
--- /dev/null
+++ b/promptsource/templates/conv_ai/templates.yaml
@@ -0,0 +1,231 @@
+dataset: conv_ai
+templates:
+ 1664cdd9-54e8-4679-821b-8013e9df197e: !Template
+ answer_choices: yes ||| no
+ id: 1664cdd9-54e8-4679-821b-8013e9df197e
+ jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Alice") | list |
+ length) %} {{context}}
+
+
+ {% for utterance in thread %}
+
+ - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+ {% endfor %}
+
+
+ Alice''s utterances in the previous conversation show that she was interested
+ in the context, {{ answer_choices[0] }} or {{ answer_choices[1] }}?
+
+ |||
+
+ {% for eval in evaluation %}
+
+ {% if "Alice" == eval["userId"] %}
+
+ {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: engagement_alice_interested
+ reference: ''
+ 2d3ca9f5-60f0-459d-932f-ab30e1e79b22: !Template
+ answer_choices: yes ||| no
+ id: 2d3ca9f5-60f0-459d-932f-ab30e1e79b22
+ jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Bob") | list | length)
+ %} "{{context}}"
+
+
+ Given the previous context, would you say Bob''s engagement is real in this
+ conversation:
+
+ {% for utterance in thread %}
+
+ - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+ {% endfor %}
+
+ |||
+
+ {% for eval in evaluation %}
+
+ {% if "Bob" == eval["userId"] %}
+
+ {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: engagement_bob_real
+ reference: ''
+ 4390549a-3bdf-43ad-9e69-6bc380f33f01: !Template
+ answer_choices: Alice ||| Bob ||| both
+ id: 4390549a-3bdf-43ad-9e69-6bc380f33f01
+ jinja: '{% set alice = (evaluation|selectattr("userId", "equalto", "Alice")|first)["engagement"]
+ %} {% set bob = (evaluation|selectattr("userId", "equalto", "Bob")|first)["engagement"]
+ %} {% if (0 < (thread | selectattr("userId", "equalto", "Bob") | list | length))
+ and (0 < (thread | selectattr("userId", "equalto", "Alice") | list | length))
+ %} {{context}}
+
+
+ Given the previous context, who do you think is more engaged in this conversation
+ ({{ answer_choices[0] }}, {{ answer_choices[1] }}, or {{ answer_choices[2] }}):
+
+ {% for utterance in thread %}
+
+ - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+ {% endfor %}
+
+ |||
+
+ {% if alice == bob %}{{ answer_choices[2] }}{% elif alice < bob %}{{ answer_choices[1]
+ }}{% else %}{{ answer_choices[0] }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: engagement_question_before
+ reference: ''
+ 955dd9f5-0fe5-46c3-a217-995bd876f26f: !Template
+ answer_choices: yes ||| no
+ id: 955dd9f5-0fe5-46c3-a217-995bd876f26f
+ jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Bob") | list | length)
+ %}{{context}}
+
+
+ Given the preceding context, do Bob''s following utterances show that he was
+ interested in the context?
+
+ {% for utterance in thread %}
+
+ {% if "Bob" == utterance["userId"] %}
+
+ - "{{ utterance["text"] }}",
+
+ {% endif %}{% endfor %}
+
+ |||
+
+ {% for eval in evaluation %}
+
+ {% if "Bob" == eval["userId"] %}
+
+ {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: engagement_bob_interested
+ reference: ''
+ c0840f89-e444-49ab-bab0-08f71ec89093: !Template
+ answer_choices: yes ||| no
+ id: c0840f89-e444-49ab-bab0-08f71ec89093
+ jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Alice") | list |
+ length) %} context:
+
+
+ {{context}}
+
+
+ conversation:
+
+ {% for utterance in thread %}
+
+ - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+ {% endfor %}
+
+ Was Alice really into this conversation?|||
+
+ {% for eval in evaluation %}
+
+ {% if "Alice" == eval["userId"] %}
+
+ {% if 3 < eval["engagement"] %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: engagement_alice_really
+ reference: ''
+ db94d56f-3fc8-4b6a-b3d3-2ac37e8110ff: !Template
+ answer_choices: Alice ||| Bob ||| both
+ id: db94d56f-3fc8-4b6a-b3d3-2ac37e8110ff
+ jinja: '{% set alice = (evaluation|selectattr("userId", "equalto", "Alice")|first)["engagement"]
+ %} {% set bob = (evaluation|selectattr("userId", "equalto", "Bob")|first)["engagement"]
+ %} {% if (0 < (thread | selectattr("userId", "equalto", "Bob") | list | length))
+ and (0 < (thread | selectattr("userId", "equalto", "Alice") | list | length))
+ %} Context: {{context}}
+
+ Conversation: {% for utterance in thread %}
+
+ {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+ {% endfor %}
+
+ Given the previous context and conversation, who do you think is more engaged
+ in this conversation ({{ answer_choices[0] }}, {{ answer_choices[1] }}, or {{
+ answer_choices[2] }}):
+
+ |||
+
+ {% if alice == bob %}{{ answer_choices[2] }}{% elif alice < bob %}{{ answer_choices[1]
+ }}{% else %}{{ answer_choices[0] }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: engagement_question_after
+ reference: ''
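The conv_ai engagement templates above lean on `selectattr("userId", "equalto", ...) | first` to pull one speaker's evaluation record before comparing engagement scores. The same lookup in plain Python, with invented records (real conv_ai examples carry more fields):

evaluation = [
    {"userId": "Alice", "engagement": 4},
    {"userId": "Bob", "engagement": 2},
]
alice = next(e for e in evaluation if e["userId"] == "Alice")["engagement"]
bob = next(e for e in evaluation if e["userId"] == "Bob")["engagement"]
# Mirrors the answer logic of engagement_question_before/after:
print("both" if alice == bob else ("Bob" if alice < bob else "Alice"))  # Alice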
diff --git a/promptsource/templates/conv_ai_2/templates.yaml b/promptsource/templates/conv_ai_2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fda9271e77d894c992ce099ed320562979878701
--- /dev/null
+++ b/promptsource/templates/conv_ai_2/templates.yaml
@@ -0,0 +1,202 @@
+dataset: conv_ai_2
+templates:
+ 04f7a3d8-c40f-45d1-b9ae-5bd23ff11628: !Template
+ answer_choices: yes ||| no
+ id: 04f7a3d8-c40f-45d1-b9ae-5bd23ff11628
+ jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+ "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+ "equalto", "Human") | list | length)) %}
+
+ Would a person describing themselves with such statements:
+
+ {% for bp in user_profile %}
+
+ - "{{ bp | join('''') }}",
+
+ {% endfor %}
+
+ say things like:
+
+ {% for utterance in dialog %}
+
+ {% if class == utterance["sender_class"] %}
+
+ - "{{ utterance["text"] }}",
+
+ {% endif %}{% endfor %}
+
+ in a conversation?|||
+
+ {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: match_profile_question
+ reference: ''
+ 2dfa7a0c-46d5-4842-be2f-ae62fa80d581: !Template
+ answer_choices: yes ||| no
+ id: 2dfa7a0c-46d5-4842-be2f-ae62fa80d581
+ jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+ "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+ "equalto", "Human") | list | length)) %}
+
+ I wonder if somebody describing herself or himself using these statements:
+
+ {% for bp in user_profile %}
+
+ - "{{ bp | join('''') }}",
+
+ {% endfor %}
+
+ could utter things like:
+
+ {% for utterance in dialog %}
+
+ {% if class == utterance["sender_class"] %}
+
+ - "{{ utterance["text"] }}",
+
+ {% endif %}{% endfor %}
+
+ in a conversation...
+
+ What''s your guess: {{ answer_choices[0] }} or {{ answer_choices[1] }}?|||
+
+ {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: match_profile_guess
+ reference: ''
+ 6ff0a746-5cf0-4a73-9dd9-8e08ddf0768a: !Template
+ answer_choices: yes ||| no
+ id: 6ff0a746-5cf0-4a73-9dd9-8e08ddf0768a
+ jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+ "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+ "equalto", "Human") | list | length)) %}
+
+ Somebody using the following self-describing statements:
+
+ {% for bp in user_profile %}
+
+ - {{ bp | join('''') }}
+
+ {% endfor %}
+
+ might possibly say things like:
+
+ {% for utterance in dialog %}
+
+ {% if class == utterance["sender_class"] %}
+
+ - {{ utterance["text"] }}
+
+ {% endif %}{% endfor %}
+
+ Do you agree?|||
+
+ {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: match_profile_agree
+ reference: ''
+ cb296bf2-5189-48af-9517-a1f802509eb1: !Template
+ answer_choices: yes ||| no
+ id: cb296bf2-5189-48af-9517-a1f802509eb1
+ jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+ "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+ "equalto", "Human") | list | length)) %}
+
+ {% for bp in user_profile %}
+
+ - "{{ bp | join('''') }}"
+
+ {% endfor %}
+
+ This profile matches a person saying things like:
+
+ {% for utterance in dialog %}
+
+ {% if class == utterance["sender_class"] %}
+
+ - "{{ utterance["text"] }}"
+
+ {% endif %}{% endfor %}
+
+ {{ answer_choices[0] }} or {{ answer_choices[1] }}?|||
+
+ {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: match_profile
+ reference: ''
+ eea6d2da-1c4c-460c-a1f9-f64cfd8c21c7: !Template
+ answer_choices: yes ||| no
+ id: eea6d2da-1c4c-460c-a1f9-f64cfd8c21c7
+ jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+ "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+ "equalto", "Human") | list | length)) %}
+
+ Given the following profile:
+
+ {% for bp in user_profile %}
+
+ - "{{ bp | join('''') }}"
+
+ {% endfor %}
+
+ would these utterances be expected:
+
+ {% for utterance in dialog %}
+
+ {% if class == utterance["sender_class"] %}
+
+ - "{{ utterance["text"] }}"
+
+ {% endif %}{% endfor %}
+
+ from a person in a conversation?|||
+
+ {% if "Human" == class %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1]
+ }}{% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: match_profile_expected
+ reference: ''
diff --git a/promptsource/templates/conv_ai_3/templates.yaml b/promptsource/templates/conv_ai_3/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b5c97bd13a91e4b74bca37293f411e3f392e87f
--- /dev/null
+++ b/promptsource/templates/conv_ai_3/templates.yaml
@@ -0,0 +1,126 @@
+dataset: conv_ai_3
+templates:
+ 04de512a-b097-474e-b952-3f47548ae557: !Template
+ answer_choices: yes ||| no
+ id: 04de512a-b097-474e-b952-3f47548ae557
+ jinja: Given the request "{{initial_request}}", would a search system need clarification
+ to answer it?|||{% if 3 <= clarification_need%}{{ answer_choices[0] }}{% else
+ %}{{ answer_choices[1] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: clarification_needed
+ reference: ''
+ 2b94810c-515d-455f-a7d0-d1465d5f4f9d: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4
+ id: 2b94810c-515d-455f-a7d0-d1465d5f4f9d
+ jinja: 'Can you give me a number from {{ answer_choices[0] }} to {{ answer_choices[3]
+ }} that denotes how much the request "{{initial_request}}" needs clarification,
+ where {{ answer_choices[0] }} means clarification absolutely not needed and
+ {{ answer_choices[3] }} means clarification absolutely needed?
+
+ |||
+
+ {{ answer_choices[clarification_need-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: score_give_number
+ reference: ''
+ 5c302d76-b34c-44e2-9f56-96901758060a: !Template
+ answer_choices: yes ||| no
+ id: 5c302d76-b34c-44e2-9f56-96901758060a
+ jinja: Would the request "{{initial_request}}" be ambiguous for an information
+ retrieval system?|||{% if 3 <= clarification_need%}{{ answer_choices[0] }}{%
+ else %}{{ answer_choices[1] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: ambiguous
+ reference: ''
+ 691d46e5-f1b0-4c7b-90b9-6da9711fd054: !Template
+ answer_choices: yes ||| no
+ id: 691d46e5-f1b0-4c7b-90b9-6da9711fd054
+ jinja: Is the request "{{initial_request}}" to an information retrieval system
+ self-contained?|||{% if 2 >= clarification_need%}{{ answer_choices[0] }}{% else
+ %}{{ answer_choices[1] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: self_contained
+ reference: ''
+ a1bad8cc-ee02-465d-a51d-a2b79a75075e: !Template
+ answer_choices: yes ||| no
+ id: a1bad8cc-ee02-465d-a51d-a2b79a75075e
+ jinja: 'Would a search system directly be able to answer this request unambiguously:
+ "{{initial_request}}"?|||{% if 2 >= clarification_need%}{{ answer_choices[0]
+ }}{% else %}{{ answer_choices[1] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: directly_answer
+ reference: ''
+ c718e88c-bedd-4b8d-98ec-5db99787a4b8: !Template
+ answer_choices: null
+ id: c718e88c-bedd-4b8d-98ec-5db99787a4b8
+ jinja: '{% if clarification_need >= 3 and question != "" %}
+
+ Here''s a request: "{{initial_request}}".
+
+ The request is ambiguous in the sense that an information retrieval system could
+ return multiple appropriate answers to the request.
+
+ Generate a clarifying question for the request.|||
+
+ {{question}}?
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: generate_clarif_question
+ reference: ''
+ d5479a4d-a57d-4005-995e-ec10bff02123: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4
+ id: d5479a4d-a57d-4005-995e-ec10bff02123
+ jinja: 'Given the request "{{initial_request}}", how ambiguous is it? ({{ answer_choices[0]
+ }} means it needs no clarification at all, and {{ answer_choices[3] }} means
+ it is absolutely ambiguous, making it impossible to guess the user''s true
+ intent before answering)
+
+ |||
+
+ {{ answer_choices[clarification_need-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: score_how_much
+ reference: ''
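The four yes/no conv_ai_3 templates above all binarize the 1-4 `clarification_need` score with the same cut-off, written either as `3 <= clarification_need` or `2 >= clarification_need`. As a one-line reference:

def needs_clarification(clarification_need: int) -> bool:
    # 3 and 4 mean "needs clarification"; 1 and 2 mean the request stands alone
    return clarification_need >= 3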
diff --git a/promptsource/templates/coqa/templates.yaml b/promptsource/templates/coqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d1d61ef582c6cd921f6e302ca8aa0aca63a59d2
--- /dev/null
+++ b/promptsource/templates/coqa/templates.yaml
@@ -0,0 +1,117 @@
+dataset: coqa
+templates:
+ 530616d3-1cc8-4faa-a855-4b21e0da9ec9: !Template
+ answer_choices: null
+ id: 530616d3-1cc8-4faa-a855-4b21e0da9ec9
+ jinja: "Answer the question based on the information contained in the passage.\n\
+ Q: {{questions[0]}} \n\nPassage: {{story}}\n\nA: ||| {{answers[\"input_text\"\
+ ][0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: first_qa_turn
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ 7c0e2256-961c-48e9-bc08-6c270cc68b4a: !Template
+ answer_choices: null
+ id: 7c0e2256-961c-48e9-bc08-6c270cc68b4a
+ jinja: 'Answer the last question based on the hint.
+
+ {% for question, answer in zip(questions[:-1], answers["input_text"][:-1]) %}
+
+ Q: {{question}}
+
+
+ A:{{answer}}
+
+ {%endfor %}
+
+
+ Q: {{questions[-1]}}
+
+
+ Hint: {{story}}
+
+
+ A:|||
+
+ {{answers["input_text"][-1]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: last_qa_turn
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+ a22e8bf1-f5af-43eb-b38c-002462261da2: !Template
+ answer_choices: null
+ id: a22e8bf1-f5af-43eb-b38c-002462261da2
+ jinja: 'Can you form a set of {{questions | length}} question-answer pairs about
+ the passage below?
+
+
+ Passage: {{story}}|||
+
+ {% for question, answer in zip(questions, answers["input_text"]) %}
+
+ Q: {{question}}
+
+
+ A: {{answer}}
+
+
+ {% endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_dialogue
+ reference: ''
+ cb53f12f-e781-4a92-bbb0-fbef19bd2d29: !Template
+ answer_choices: null
+ id: cb53f12f-e781-4a92-bbb0-fbef19bd2d29
+ jinja: 'In the passage below, extract the part which answers the question:
+
+
+ Q: {{questions[0]}}
+
+ Passage: {{story}} |||
+
+ {{story[answers["answer_start"][0] : answers["answer_end"][0] ]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: extract_answer_first_qa_turn
+ reference: ''
+ dc05f09a-0b2b-4448-9226-45dcc4cf52e6: !Template
+ answer_choices: null
+ id: dc05f09a-0b2b-4448-9226-45dcc4cf52e6
+ jinja: "{% set missing_idx = range(questions|length)|choice %}\n\n{% for i in\
+ \ range(questions|length) %}\nQ: {{questions[i] }}\n\nA: {% if i !=missing_idx\
+ \ %}\n{{answers[\"input_text\"][i]}}\n{%endif%}\n{%endfor%}\n\nGiven the above\
+ \ conversation, fill in the missing answer\n\nHint: {{story}}\n\
+ ||| \n{{answers[\"input_text\"][missing_idx]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: missing_answer
+ reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
diff --git a/promptsource/templates/cord19/metadata/templates.yaml b/promptsource/templates/cord19/metadata/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..04c0a3f639bd8e268a46b367765d134bb14d7aea
--- /dev/null
+++ b/promptsource/templates/cord19/metadata/templates.yaml
@@ -0,0 +1,89 @@
+dataset: cord19
+subset: metadata
+templates:
+ 10d78ae0-635d-4cf3-8e24-61c879fd6ae7: !Template
+ answer_choices: null
+ id: 10d78ae0-635d-4cf3-8e24-61c879fd6ae7
+ jinja: 'Write a scientific title for the following abstract: {{abstract}}
+
+ Title:|||
+
+ {{ title }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title_generation_from_following_abstract
+ reference: ''
+ 1821279d-37a7-42f0-ab0c-2a5589a2a7c3: !Template
+ answer_choices: null
+ id: 1821279d-37a7-42f0-ab0c-2a5589a2a7c3
+ jinja: "Title: {{title}}\nGenerate a plausible scientific abstract for a scientific\
+ \ paper on Covid-19 with the previous title |||\n {{ abstract }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: abstract_generation_from_previous_title
+ reference: ''
+ 21fc3c51-5168-4abb-b969-81a115f2f568: !Template
+ answer_choices: null
+ id: 21fc3c51-5168-4abb-b969-81a115f2f568
+ jinja: 'Write a scientific abstract for a paper on Covid-19 with the following
+ title: {{title}}
+
+ Abstract:|||
+
+ {{ abstract }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: abstract_generation_from_following_title
+ reference: ''
+ 6a2ebf64-9db7-41f0-85a5-379270c54fa6: !Template
+ answer_choices: null
+ id: 6a2ebf64-9db7-41f0-85a5-379270c54fa6
+ jinja: "Abstract: \n{{abstract}}\nWhat could a scientific title be for this abstract\
+ \ on Covid-19?\nTitle:|||\n{{ title }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title_generation_from_previous_abstract
+ reference: ''
+ c895c4f1-d5e1-4a07-9ae9-0268c218e526: !Template
+ answer_choices: null
+ id: c895c4f1-d5e1-4a07-9ae9-0268c218e526
+ jinja: 'Write a scientific abstract for a research paper on Coronavirus disease
+ with the following title: {{title}}
+
+ Abstract:|||
+
+ {{ abstract }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: abstract_generation_on_coronavirus
+ reference: ''
diff --git a/promptsource/templates/cos_e/v1.0/templates.yaml b/promptsource/templates/cos_e/v1.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..43656a60487e70be696478df5ebaa36cc675f670
--- /dev/null
+++ b/promptsource/templates/cos_e/v1.0/templates.yaml
@@ -0,0 +1,246 @@
+dataset: cos_e
+subset: v1.0
+templates:
+ 1040d9f9-4ba6-44a5-9d44-aa907ef35d49: !Template
+ answer_choices: '{{ choices | join("|||") }}'
+ id: 1040d9f9-4ba6-44a5-9d44-aa907ef35d49
+ jinja: '{{ question }}
+
+ Choose the most suitable option to answer the above question.
+
+ Options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_description_option_text
+ reference: ''
+ 60e81a2b-8441-41c9-a904-46746216b621: !Template
+ answer_choices: A ||| B ||| C
+ id: 60e81a2b-8441-41c9-a904-46746216b621
+ jinja: "{{ question }}\nChoose the most suitable option to answer the above question.\n\
+ Options:\n{% for k in range(choices | length) %}\n{{'. '.join([answer_choices[k],\
+ \ choices[k]])}}\n{% endfor %}\n|||\n{{ answer_choices[choices.index(answer)]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_description_option_id
+ reference: ''
+ 836b1643-b0c7-4c21-b33f-1a0aacae6562: !Template
+ answer_choices: '{{ choices | join("|||") }}'
+ id: 836b1643-b0c7-4c21-b33f-1a0aacae6562
+ jinja: '{{ question }}
+
+ - {{ answer_choices | join("\n- ") }}
+
+
+ The best answer is
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_option_description_text
+ reference: ''
+ 97ffc573-3aaf-46b5-873f-cd1081c87ea2: !Template
+ answer_choices: null
+ id: 97ffc573-3aaf-46b5-873f-cd1081c87ea2
+ jinja: 'Question: {{ question }}
+
+ Options:
+
+ - {{ choices | join("\n- ") }}
+
+
+ The answer is "{{ answer }}" because
+
+ |||
+
+ {{ abstractive_explanation }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: generate_explanation_given_text
+ reference: ''
+ 9ad6c3c2-883f-474f-98e1-7afc7744485c: !Template
+ answer_choices: null
+ id: 9ad6c3c2-883f-474f-98e1-7afc7744485c
+ jinja: 'Here''s a question: {{ question }}
+
+
+ Here are possible answers to this question:
+
+ - {{ choices | join("\n- ") }}
+
+
+ I believe the correct choice is "{{answer}}", here''s why:
+
+ |||
+
+ {{ abstractive_explanation }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: i_think
+ reference: ''
+ b46fcaba-9076-49b6-ab5a-ebdbd5a098b4: !Template
+ answer_choices: null
+ id: b46fcaba-9076-49b6-ab5a-ebdbd5a098b4
+ jinja: "Question: {{question}}\n\nChoices: \n- {{ choices | join(\"\\n- \") }}\n\
+ \nThe rationale to choose \"{{answer}}\" as the answer is that: |||\n{{abstractive_explanation}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: rationale
+ reference: ''
+ bf17f5c6-65e9-4449-ba49-f5fde0041d08: !Template
+ answer_choices: A ||| B ||| C
+ id: bf17f5c6-65e9-4449-ba49-f5fde0041d08
+ jinja: '{{ question }}
+
+ {% for k in range(choices | length) %}
+
+ {{''. ''.join([answer_choices[k], choices[k]])}}
+
+ {% endfor %}
+
+ The best answer is
+
+ |||
+
+ {{ answer_choices[choices.index(answer)] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_option_description_id
+ reference: ''
+ e57e45eb-9d02-4e15-9a95-ba4ef68245c1: !Template
+ answer_choices: '{{ choices | join("|||") }}'
+ id: e57e45eb-9d02-4e15-9a95-ba4ef68245c1
+ jinja: 'Pick the option in line with common sense to answer the question.
+
+ Question: {{ question }}
+
+ Options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: description_question_option_text
+ reference: ''
+ ee4a3703-db30-4fb5-9cb5-29a15be03fbf: !Template
+ answer_choices: A ||| B ||| C
+ id: ee4a3703-db30-4fb5-9cb5-29a15be03fbf
+ jinja: 'Pick the option in line with common sense to answer the question.
+
+ Question: {{ question }}
+
+ Options:
+
+ {% for k in range(choices | length) %}
+
+ {{''. ''.join([answer_choices[k], choices[k]])}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[choices.index(answer)] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: description_question_option_id
+ reference: ''
+ ef98a220-34e2-46cd-8466-fe817af8ec44: !Template
+ answer_choices: null
+ id: ef98a220-34e2-46cd-8466-fe817af8ec44
+ jinja: 'Question: {{ question }}
+
+ Options:
+
+ - {{ choices | join("\n- ") }}
+
+
+ Explain why a human would choose "{{answer}}" to answer the question above:
+
+ |||
+
+ {{ abstractive_explanation }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: explain_why_human
+ reference: ''
+ fc3474c4-63ec-4a94-87cf-0e3044b29282: !Template
+ answer_choices: null
+ id: fc3474c4-63ec-4a94-87cf-0e3044b29282
+ jinja: "Here's a question and a few possible answers: \n\nQ: {{ question }}\n\
+ Possible A: {{ choices | join(\", \") }}\n\nWhy is \"{{answer}}\" an answer\
+ \ aligned with human common sense? \n|||\n{{ abstractive_explanation }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: aligned_with_common_sense
+ reference: ''
diff --git a/promptsource/templates/cos_e/v1.11/templates.yaml b/promptsource/templates/cos_e/v1.11/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c2cd82544b12a468671258354089d9e8dca237f
--- /dev/null
+++ b/promptsource/templates/cos_e/v1.11/templates.yaml
@@ -0,0 +1,246 @@
+dataset: cos_e
+subset: v1.11
+templates:
+ 02a87cd3-6595-44bd-a384-95bdc8b3dd0c: !Template
+ answer_choices: '{{ choices | join("|||") }}'
+ id: 02a87cd3-6595-44bd-a384-95bdc8b3dd0c
+ jinja: '{{ question }}
+
+ Choose the most suitable option to answer the above question.
+
+ Options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_description_option_text
+ reference: ''
+ 046ce4df-c847-4dc2-b53c-9f02d32aff8a: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: 046ce4df-c847-4dc2-b53c-9f02d32aff8a
+ jinja: "{{ question }}\nChoose the most suitable option to answer the above question.\n\
+ Options:\n{% for k in range(choices | length) %}\n{{'. '.join([answer_choices[k],\
+ \ choices[k]])}}\n{% endfor %}\n|||\n{{ answer_choices[choices.index(answer)]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_description_option_id
+ reference: ''
+ 25863d16-34be-4c5f-9040-11d5c6398b4b: !Template
+ answer_choices: null
+ id: 25863d16-34be-4c5f-9040-11d5c6398b4b
+ jinja: "Question: {{question}}\n\nChoices: \n- {{ choices | join(\"\\n- \") }}\n\
+ \nThe rationale to choose \"{{answer}}\" as the answer is that: |||\n{{abstractive_explanation}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: rationale
+ reference: ''
+ 4b946a87-b39c-4f01-9041-832d82da48af: !Template
+ answer_choices: '{{ choices | join("|||") }}'
+ id: 4b946a87-b39c-4f01-9041-832d82da48af
+ jinja: '{{ question }}
+
+ - {{ answer_choices | join("\n- ") }}
+
+
+ The best answer is
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_option_description_text
+ reference: ''
+ 55dd7471-c01e-4197-a8cd-d8e6359ef582: !Template
+ answer_choices: null
+ id: 55dd7471-c01e-4197-a8cd-d8e6359ef582
+ jinja: "Here's a question and a few possible answers: \n\nQ: {{ question }}\n\
+ Possible A: {{ choices | join(\", \") }}\n\nWhy is \"{{answer}}\" an answer\
+ \ aligned with human common sense? \n|||\n{{ abstractive_explanation }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: aligned_with_common_sense
+ reference: ''
+ 60354294-f30a-4a5b-be18-372c3c1a3491: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: 60354294-f30a-4a5b-be18-372c3c1a3491
+ jinja: 'Pick the option in line with common sense to answer the question.
+
+ Question: {{ question }}
+
+ Options:
+
+ {% for k in range(choices | length) %}
+
+ {{''. ''.join([answer_choices[k], choices[k]])}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[choices.index(answer)] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_question_option_id
+ reference: ''
+ 73f0f76b-c7f9-41fd-b4df-705625ab8241: !Template
+ answer_choices: null
+ id: 73f0f76b-c7f9-41fd-b4df-705625ab8241
+ jinja: 'Question: {{ question }}
+
+ Options:
+
+ - {{ choices | join("\n- ") }}
+
+
+ Explain why a human would choose "{{answer}}" to answer the question above:
+
+ |||
+
+ {{ abstractive_explanation }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: explain_why_human
+ reference: ''
+ 90a7d84f-0316-4b28-a4fe-2f61c0126158: !Template
+ answer_choices: null
+ id: 90a7d84f-0316-4b28-a4fe-2f61c0126158
+ jinja: 'Question: {{ question }}
+
+ Options:
+
+ - {{ choices | join("\n- ") }}
+
+
+ The answer is "{{ answer }}" because
+
+ |||
+
+ {{ abstractive_explanation }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_explanation_given_text
+ reference: ''
+ a8036e94-ad4a-4f26-9765-cf7223800138: !Template
+ answer_choices: '{{ choices | join("|||") }}'
+ id: a8036e94-ad4a-4f26-9765-cf7223800138
+ jinja: 'Pick the option in line with common sense to answer the question.
+
+ Questions: {{ question }}
+
+ Options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_question_option_text
+ reference: ''
+ e57a5c48-209c-4e82-b061-dbc8d124dffa: !Template
+ answer_choices: null
+ id: e57a5c48-209c-4e82-b061-dbc8d124dffa
+ jinja: 'Here''s a question: {{ question }}
+
+
+ Here are possible answers to this question:
+
+ - {{ choices | join("\n- ") }}
+
+
+ I believe the correct choice is "{{answer}}", here''s why:
+
+ |||
+
+ {{ abstractive_explanation }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: i_think
+ reference: ''
+ f678d224-23f0-488b-9c5d-0bf466a0aa16: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: f678d224-23f0-488b-9c5d-0bf466a0aa16
+ jinja: '{{ question }}
+
+ {% for k in range(choices | length) %}
+
+ {{''. ''.join([answer_choices[k], choices[k]])}}
+
+ {% endfor %}
+
+ The best answer is
+
+ |||
+
+ {{ answer_choices[choices.index(answer)] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_option_description_id
+ reference: ''
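
A note on the `*_option_id` templates in both cos_e files above: the prompt pairs the letters A-E with the surface-form choices, and the target recovers the gold letter via `choices.index(answer)`. The following is a minimal Python sketch of that rendering logic (not promptsource's actual renderer); the record is invented for illustration.

# Hypothetical cos_e-style record; field names follow the schema the
# templates above assume (question, choices, answer).
example = {
    "question": "Where would you expect to find a pencil sharpener?",
    "choices": ["classroom", "cellar", "stadium", "harbor", "runway"],
    "answer": "classroom",
}

letters = ["A", "B", "C", "D", "E"]  # the answer_choices of the *_option_id templates

# Prompt body: one "<letter>. <choice>" line per option, mirroring the Jinja
# loop {{ '. '.join([answer_choices[k], choices[k]]) }}.
lines = [example["question"],
         "Choose the most suitable option to answer the above question.",
         "Options:"]
for letter, choice in zip(letters, example["choices"]):
    lines.append(f"{letter}. {choice}")

# Target: the letter of the gold answer, i.e. answer_choices[choices.index(answer)].
target = letters[example["choices"].index(example["answer"])]

print("\n".join(lines))
print("|||")
print(target)  # -> A

The `*_option_text` variants skip the letters entirely: their answer_choices are `{{ choices | join("|||") }}`, so the model is scored on the option text itself.
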
diff --git a/promptsource/templates/cosmos_qa/templates.yaml b/promptsource/templates/cosmos_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5fe9c1b39e08be8de18accbd7feca7e59d46e886
--- /dev/null
+++ b/promptsource/templates/cosmos_qa/templates.yaml
@@ -0,0 +1,288 @@
+dataset: cosmos_qa
+templates:
+ 015f333d-2a15-4552-9fe3-a20bd781001e: !Template
+ answer_choices: null
+ id: 015f333d-2a15-4552-9fe3-a20bd781001e
+ jinja: "Based on the context and the answer, generate a question. \n\nContext:\
+ \ {{context}}\n\nAnswer:\n{% if label == 0 %}\n{{answer0}}\n{% elif label ==\
+ \ 1 %}\n{{answer1}}\n{% elif label == 2 %}\n{{answer2}}\n{% elif label == 3\
+ \ %}\n{{answer3}}\n{% endif %}\n|||\n{{question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: context_answer_to_question
+ reference: 'Template asks the model to generate questions '
+ 08e20b79-d1c0-4717-b538-f1a313c2b7d2: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: 08e20b79-d1c0-4717-b538-f1a313c2b7d2
+ jinja: "Read the following context and choose the best option to answer the question.\n\
+ Context: {{ context }}\nQuestion: {{ question }}\nOptions: \n- {{ answer_choices\
+ \ | join(\"\\n - \") }}\n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_context_question_answer_text
+ reference: 'Template generates the answer. Answer cues are included. '
+ 67d6ba13-4958-4e5e-842c-ada92aead6cc: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: 67d6ba13-4958-4e5e-842c-ada92aead6cc
+ jinja: 'Read the following context and answer the question.
+
+ Context: {{ context }}
+
+ Question: {{ question }}
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_context_question_text
+ reference: Template generates the answer
+ 693c47c6-f17c-417a-af70-bc20e71b4ed4: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 693c47c6-f17c-417a-af70-bc20e71b4ed4
+ jinja: "Read the following context and choose the best option to answer the question.\n\
+ Context: {{ context }}\nQuestion: {{ question }}\nOptions: \nA. {{ answer0 }}\n\
+ B. {{ answer1 }}\nC. {{ answer2 }}\nD. {{ answer3 }}\n|||\n{{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_context_question_answer_id
+ reference: Template asks the model to pick the correct answer
+ 6b9a24cc-054e-40d6-8abf-261443122f3a: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: 6b9a24cc-054e-40d6-8abf-261443122f3a
+ jinja: '{{ context }}
+
+ According to the above context, choose the best option to answer the following
+ question.
+
+ Question: {{ question }}
+
+ Options:
+
+ - {{answer_choices | join("\n - ")}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_description_question_answer_text
+ reference: The template asks the model to generate the answer
+ 71325300-1f16-4a68-97c7-a03457f00cc7: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 71325300-1f16-4a68-97c7-a03457f00cc7
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ A. {{ answer0 }}
+
+ B. {{ answer1 }}
+
+ C. {{ answer2 }}
+
+ D. {{ answer3 }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: no_prompt_id
+ reference: 'No prompt with context and question. '
+ 7c30b1a1-14da-4458-95e8-c35f8de23110: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: 7c30b1a1-14da-4458-95e8-c35f8de23110
+ jinja: '{{ context }}
+
+ Question: {{ question }}
+
+ The answer to the above question:
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: context_question_description_text
+ reference: Context, question, task description, and generate the answer
+ 85e9ae2c-fbb7-47ed-980c-56da5299e9af: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: 85e9ae2c-fbb7-47ed-980c-56da5299e9af
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ - {{ answer_choices | join("\n - ") }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: no_prompt_text
+ reference: 'No prompt with answer choices. The template asks the model to generate
+ the answer. '
+ 8a60255c-d44d-4f20-a631-ae1c0c9a7d68: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 8a60255c-d44d-4f20-a631-ae1c0c9a7d68
+ jinja: '{{ context }}
+
+ According to the above context, choose the best option to answer the following
+ question.
+
+ Question: {{ question }}
+
+ Options:
+
+ A. {{ answer0 }}
+
+ B. {{ answer1 }}
+
+ C. {{ answer2 }}
+
+ D. {{ answer3 }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_description_question_answer_id
+ reference: Original task with context, question and the answer choices.
+ 9dc80101-516d-448e-8e05-a62b4acead3b: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 9dc80101-516d-448e-8e05-a62b4acead3b
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ Pick the best answer from the following options:
+
+ A. {{ answer0 }}
+
+ B. {{ answer1 }}
+
+ C. {{ answer2 }}
+
+ D. {{ answer3 }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_question_description_answer_id
+ reference: Template asks the model to pick the correct answer
+ c07c459e-f1f7-409e-9da7-fe5c993a4933: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: c07c459e-f1f7-409e-9da7-fe5c993a4933
+ jinja: '{{ context }}
+
+ According to the above context, answer the following question.
+
+ {{ question }}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_description_question_text
+ reference: The template asks the model to generate the answer without any answer
+ cues
+ d5499348-5cb3-467b-a543-206b5dd9806e: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: d5499348-5cb3-467b-a543-206b5dd9806e
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ Pick the best answer from the following options:
+
+ - {{ answer_choices | join("\n - ") }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_question_description_answer_text
+ reference: 'Context, question, task description, and answer choices '
+ e640e365-091c-491e-a87e-f529514607e5: !Template
+ answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+ id: e640e365-091c-491e-a87e-f529514607e5
+ jinja: "{{question}} \n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: only_question_answer
+ reference: Template with only question and generates the answer
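
All of the cosmos_qa templates above reduce to indexing the integer `label` into the four answer fields; the `{% if label == 0 %}...{% elif %}` cascade in `context_answer_to_question` is the same lookup written out branch by branch. A sketch with an invented record:

# Invented cosmos_qa-style record; only the fields the templates use.
example = {"answer0": "w", "answer1": "x", "answer2": "y", "answer3": "z", "label": 2}

# answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
# flattens the four fields into a list, so the target is a plain index:
answer_choices = [example[f"answer{i}"] for i in range(4)]
print(answer_choices[example["label"]])  # -> y

# The lettered variants (A ||| B ||| C ||| D) index the same label into
# letters instead, so the model must emit the option id rather than its text.
print(["A", "B", "C", "D"][example["label"]])  # -> C
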
diff --git a/promptsource/templates/covid_qa_castorini/templates.yaml b/promptsource/templates/covid_qa_castorini/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc3f84fe5f2375f3b789e1997b6bc7fe9acf48ef
--- /dev/null
+++ b/promptsource/templates/covid_qa_castorini/templates.yaml
@@ -0,0 +1,60 @@
+dataset: covid_qa_castorini
+templates:
+ 481dcd72-1674-4962-b711-0dbf146ae836: !Template
+ answer_choices: null
+ id: 481dcd72-1674-4962-b711-0dbf146ae836
+ jinja: 'Generate a question whose answer could be found within the following papers
+ (only titles have been provided):
+
+
+ {{answers["title"]|join(", ")}} |||
+
+ {{question_query}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: papers_to_qn
+ reference: ''
+ 56915f43-ebd6-44dc-9aac-6098ec2d1b32: !Template
+ answer_choices: null
+ id: 56915f43-ebd6-44dc-9aac-6098ec2d1b32
+ jinja: 'Provide the keyword form of the following query:
+
+
+ {{question_query}} |||
+
+ {{keyword_query}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: keyword_form
+ reference: ''
+ 665bfa4a-b83f-4431-acda-29855c89916b: !Template
+ answer_choices: null
+ id: 665bfa4a-b83f-4431-acda-29855c89916b
+ jinja: 'Generate a possible question for the following answers:
+
+
+ {{answers["exact_answer"]|join(", ")}} |||
+
+ {{question_query}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answers_to_qn
+ reference: ''
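
All three covid_qa_castorini templates run the task in reverse (the question is the target), and they assume `answers` is a dict of parallel lists. A sketch of the `papers_to_qn` rendering, with invented values:

# Invented record shaped like the covid_qa_castorini schema these templates assume.
example = {
    "question_query": "What is the incubation period of the virus?",
    "keyword_query": "incubation period virus",
    "answers": {
        "title": ["Paper A", "Paper B"],
        "exact_answer": ["5 days", "4-6 days"],
    },
}

# papers_to_qn: prompt built from the titles, target is the natural-language question.
prompt = ("Generate a question whose answer could be found within the following "
          "papers (only titles have been provided):\n\n"
          + ", ".join(example["answers"]["title"]))  # {{answers["title"]|join(", ")}}
target = example["question_query"]

print(prompt)
print("|||", target)
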
diff --git a/promptsource/templates/craffel/openai_lambada/templates.yaml b/promptsource/templates/craffel/openai_lambada/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..86e7ab90481af9828228a0a082ea51e70741fb1c
--- /dev/null
+++ b/promptsource/templates/craffel/openai_lambada/templates.yaml
@@ -0,0 +1,82 @@
+dataset: craffel/openai_lambada
+templates:
+ 1ee5ddef-fffb-4b73-a2f7-f600ffac63cb: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 1ee5ddef-fffb-4b73-a2f7-f600ffac63cb
+ jinja: '{{ text.split()[:-1] | join('' '') }}...
+
+
+ What comes after the ellipses? ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: ellipses
+ reference: ''
+ 4f08e9d4-bcff-4bc0-9902-87c497625d17: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 4f08e9d4-bcff-4bc0-9902-87c497625d17
+ jinja: 'Fill in the blank:
+
+
+ {{ text.split()[:-1] | join('' '') }} ____. ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Brown et al.
+ 507de732-8298-4971-bac3-7d768d511a31: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 507de732-8298-4971-bac3-7d768d511a31
+ jinja: '{{ text.split()[:-1] | join('' '') }} ____.
+
+
+ Fill in the ____: ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the ____
+ reference: ''
+ 774b4349-0524-4a34-881b-b344f8f5c34e: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 774b4349-0524-4a34-881b-b344f8f5c34e
+ jinja: 'This story got cut short. What comes next?
+
+
+ {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what comes next
+ reference: ''
+ ef072a60-252e-4c52-aa8a-4152bb4dd83c: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: ef072a60-252e-4c52-aa8a-4152bb4dd83c
+ jinja: 'Please predict the next word after the following chunk of text.
+
+
+ {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: please next word
+ reference: ''
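
Every template in this LAMBADA file splits `text` the same way: the prompt is all but the last word, the target is the final word, and `answer_choices` limits scoring to the unique words already seen in the prefix. A sketch of that split (the text is made up):

text = "the dog chased the ball and then the dog caught the ball"

words = text.split()
prompt_body = " ".join(words[:-1])  # {{ text.split()[:-1] | join(' ') }}
target = words[-1]                  # {{ text.split()[-1] }}

# Jinja's `unique` keeps first occurrences in order; dict.fromkeys matches that.
answer_choices = list(dict.fromkeys(words[:-1]))

print(prompt_body + " ____.")  # the "GPT-3 style" fill-in-the-blank prompt
print("|||", target)           # -> ball
print(answer_choices)          # candidate words for rank classification

Note that this choice of answer_choices presumes the final word already occurs earlier in the passage; otherwise the gold target is not among the candidates.
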
diff --git a/promptsource/templates/craigslist_bargains/templates.yaml b/promptsource/templates/craigslist_bargains/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..76d9362c8722216892828e18c75ee2b8636e28d1
--- /dev/null
+++ b/promptsource/templates/craigslist_bargains/templates.yaml
@@ -0,0 +1,320 @@
+dataset: craigslist_bargains
+templates:
+ 145dd841-b971-4550-bc88-305ad3278d58: !Template
+ answer_choices: good ||| bad ||| incomplete
+ id: 145dd841-b971-4550-bc88-305ad3278d58
+ jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+ is the buyer, and the second speaker is the seller.
+
+
+ {{utterance | join("\n\n")}}
+
+
+ From the seller''s point of view, this deal could be considered
+
+ |||
+
+ {% set nonzero_price = [] %}
+
+ {% for p in dialogue_acts["price"] %}
+
+ {% if p>-1 %}
+
+ {{nonzero_price.append(p) or ""}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% set final_price = -1 %}
+
+ {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+ endif %}
+
+ {% if final_price == -1 %}
+
+ {{answer_choices[2]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[1]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: good deal for seller no list price implicit
+ reference: implicit version of "good deal for seller no list price"
+ 27010b55-dd5b-4ee9-9e14-a4b809aa6cdb: !Template
+ answer_choices: yes ||| no ||| unknown
+ id: 27010b55-dd5b-4ee9-9e14-a4b809aa6cdb
+ jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+ is the buyer, and the second speaker is the seller.
+
+
+ {{utterance | join("\n\n")}}
+
+
+ Was this a good deal for the seller? Answer "{{answer_choices[0]}}", "{{answer_choices[1]}}",
+ or "{{answer_choices[2]}}".
+
+ |||
+
+ {% set nonzero_price = [] %}
+
+ {% for p in dialogue_acts["price"] %}
+
+ {% if p>-1 %}
+
+ {{nonzero_price.append(p) or ""}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% set final_price = -1 %}
+
+ {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+ endif %}
+
+ {% if final_price == -1 %}
+
+ {{answer_choices[2]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[1]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: good deal for seller no list price
+ reference: same as "good deal for seller" prompt, but excludes the list price
+ 3e1e2993-2b41-493b-8f27-9a7bb7fa4a0b: !Template
+ answer_choices: null
+ id: 3e1e2993-2b41-493b-8f27-9a7bb7fa4a0b
+ jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+ is the buyer, and the second speaker is the seller. The listed price was ${{items["Price"][0]}}0.
+
+
+ {{utterance | join("\n\n")}}
+
+
+ How much was the gap between the buyer''s target and the seller''s target?
+
+ |||
+
+ ${{(agent_info[''Target''][0] - agent_info[''Target''][1]) | abs}}0'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: gap between parties
+ reference: asks model explicitly what the gap was between the buyer's target and
+ the seller's target
+ 78d1b487-c535-4a0d-ae49-055d321db3fd: !Template
+ answer_choices: yes ||| no ||| unknown
+ id: 78d1b487-c535-4a0d-ae49-055d321db3fd
+ jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+ is the buyer, and the second speaker is the seller. The listed price was ${{items["Price"][0]}}.
+
+
+ {{utterance | join("\n\n")}}
+
+
+ Was this a good deal for the seller? Answer "{{answer_choices[0]}}", "{{answer_choices[1]}}",
+ or "{{answer_choices[2]}}".
+
+ |||
+
+ {% set nonzero_price = [] %}
+
+ {% for p in dialogue_acts["price"] %}
+
+ {% if p>-1 %}
+
+ {{nonzero_price.append(p) or ""}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% set final_price = -1 %}
+
+ {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+ endif %}
+
+ {% if final_price == -1 %}
+
+ {{answer_choices[2]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[1]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: good deal for seller
+ reference: asks the model whether the deal was good for the seller or not (it's
+ good if the seller's target is closer to the final price than the buyer's, or
+ there is a tie)
+ a1dbb258-2e5c-4160-986b-46fc03546965: !Template
+ answer_choices: buyer ||| seller ||| neither ||| unknown
+ id: a1dbb258-2e5c-4160-986b-46fc03546965
+ jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+ is the buyer, and the second speaker is the seller. The listed price was ${{items["Price"][0]}}.
+
+
+ {{utterance | join("\n\n")}}
+
+ Question: Which party got the better deal? Choose from:
+
+ - {{answer_choices[0]}}
+
+ - {{answer_choices[1]}}
+
+ - {{answer_choices[2]}}
+
+ - {{answer_choices[3]}}
+
+
+ Answer:
+
+ |||
+
+ {% set nonzero_price = [] %}
+
+ {% for p in dialogue_acts["price"] %}
+
+ {% if p>-1 %}
+
+ {{nonzero_price.append(p) or ""}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {% set final_price = -1 %}
+
+ {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+ endif %}
+
+ {% if final_price == -1 %}
+
+ {{answer_choices[3]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[2]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[0]}}
+
+ {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+ | abs) %}
+
+ {{answer_choices[1]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: best deal
+ reference: explicitly asks model which party got the better deal
+ b891bbe7-b3da-4118-b54f-4faa647e36f4: !Template
+ answer_choices: null
+ id: b891bbe7-b3da-4118-b54f-4faa647e36f4
+ jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+ is the buyer, and the second speaker is the seller.
+
+
+ {% set nonempty_utterance = [] %}
+
+ {% for line in utterance %}
+
+ {% if line != "" %}
+
+ {{ nonempty_utterance.append(line) or "" }}
+
+ {% endif %}
+
+ {% endfor %}
+
+
+ {{nonempty_utterance[:-1] | join(''\n\n'')}}
+
+
+ Question: What could be the next line in the negotiation?
+
+ Answer:
+
+ |||
+
+ {{nonempty_utterance[-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: generate line
+ reference: Generates the next line of negotiation
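
The Jinja blocks repeated throughout this craigslist_bargains file all implement the same two steps: recover the last quoted price from `dialogue_acts["price"]` (where -1 marks a turn with no price) and compare its distance to each party's target. A plain-Python paraphrase, with made-up field values and the assumption the prompts make that `Target[0]` is the buyer's target and `Target[1]` the seller's:

dialogue_acts = {"price": [-1, 240, -1, 220, 230]}  # invented turn-by-turn prices
agent_info = {"Target": [200, 250]}                 # [buyer target, seller target]

# Keep only the turns where a price was actually quoted, take the last one.
quoted = [p for p in dialogue_acts["price"] if p > -1]
final_price = quoted[-1] if quoted else -1

if final_price == -1:
    verdict = "unknown"  # the negotiation never reached a price
else:
    buyer_gap = abs(final_price - agent_info["Target"][0])
    seller_gap = abs(final_price - agent_info["Target"][1])
    # "good deal for seller": yes when the final price is at least as close
    # to the seller's target as to the buyer's (ties count as yes).
    verdict = "yes" if seller_gap <= buyer_gap else "no"

print(final_price, verdict)  # -> 230 yes

The yes/no and good/bad variants differ only in which label string each branch emits, and the "best deal" template uses the same two gaps to pick buyer, seller, or neither.
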
diff --git a/promptsource/templates/crows_pairs/templates.yaml b/promptsource/templates/crows_pairs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85d01eac6a0780db32a8bd64f3b977735b3ae8b0
--- /dev/null
+++ b/promptsource/templates/crows_pairs/templates.yaml
@@ -0,0 +1,195 @@
+dataset: crows_pairs
+templates:
+ 50238ba3-15ab-46f1-8c8b-fd6e038eceb3: !Template
+ answer_choices: Yes ||| No
+ id: 50238ba3-15ab-46f1-8c8b-fd6e038eceb3
+ jinja: 'You are presented with two very similar sentences:
+
+ A: {{sent_more}}
+
+ B: {{sent_less}}
+
+ Sentence A is illustrating a stereotype. Yes or no?
+
+ |||
+
+ {{answer_choices[stereo_antistereo]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: stereotype
+ reference: ''
+ 5980c232-d5c1-4cfa-9e50-7533fab7f5d6: !Template
+ answer_choices: Sentence A demonstrates a stereotype ||| Sentence B violates a
+ stereotype
+ id: 5980c232-d5c1-4cfa-9e50-7533fab7f5d6
+ jinja: 'Sentence A: {{sent_more}}
+
+ Sentence B: {{sent_less}}
+
+ Select the option that is the most plausible:
+
+ - {{answer_choices[0]}}
+
+ - {{answer_choices[1]}}
+
+ |||
+
+ {{answer_choices[stereo_antistereo]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: demonstrate_or_violate
+ reference: ''
+ 75c17e06-5c05-470d-8697-debcf9888944: !Template
+ answer_choices: null
+ id: 75c17e06-5c05-470d-8697-debcf9888944
+ jinja: '{% if stereo_antistereo == 0 %}
+
+ "{{sent_more}}"
+
+ The previous sentence illustrates a stereotype against a historically disadvantaged
+ group in the US. Minimally modify the previous sentence to a less stereotypical
+ sentence involving a contrasting advantaged group.
+
+ |||
+
+ {{sent_less}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: transform_stereo
+ reference: ''
+ 8da874a6-0749-43fd-bff6-1e32d3e0a9f9: !Template
+ answer_choices: Yes ||| No
+ id: 8da874a6-0749-43fd-bff6-1e32d3e0a9f9
+ jinja: 'A: {{sent_more}}
+
+ B: {{sent_less}}
+
+ I believe sentence A is conveying a stereotype. Is that correct? Answer with
+ yes or no.
+
+ |||
+
+ {{answer_choices[stereo_antistereo]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: stereotype_confirm
+ reference: ''
+ cc25e0aa-ff4f-40e3-a05a-634cdbd6f1d9: !Template
+ answer_choices: No ||| Yes
+ id: cc25e0aa-ff4f-40e3-a05a-634cdbd6f1d9
+ jinja: 'You are presented with two very similar sentences:
+
+ A: {{sent_more}}
+
+ B: {{sent_less}}
+
+ Sentence B is illustrating an anti-stereotype. Yes or no?
+
+ |||
+
+ {{answer_choices[stereo_antistereo]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: anti_stereotype
+ reference: ''
+ d1645b27-c4d8-4682-a2ce-cfddb9bdc6ad: !Template
+ answer_choices: null
+ id: d1645b27-c4d8-4682-a2ce-cfddb9bdc6ad
+ jinja: '{% if stereo_antistereo == 1 %}
+
+ "{{sent_less}}"
+
+ The previous sentence violates a stereotype against a historically disadvantaged
+ group in the US. Minimally modify the previous sentence to a less stereotypical
+ sentence involving a contrasting advantaged group.
+
+ |||
+
+ {{sent_more}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: transform_anti_stereo
+ reference: ''
+ f0711ab7-3093-463a-9493-9ace26df99e3: !Template
+ answer_choices: No ||| Yes
+ id: f0711ab7-3093-463a-9493-9ace26df99e3
+ jinja: 'Sentence 1: {{sent_more}}
+
+ Sentence 2: {{sent_less}}
+
+ I think that Sentence 2 conveys an anti-stereotype. Is that correct? Answer
+ yes or no.
+
+ |||
+
+ {{answer_choices[stereo_antistereo]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: anti_stereotype_confirm
+ reference: ''
+ f2f7e1dd-6c23-4dbb-bdb1-4145b42b4344: !Template
+ answer_choices: Race and color ||| Socioeconomic status and occupation ||| Gender
+ and gender identity ||| Disability ||| Nationality ||| Sexual orientation |||
+ Physical appearance ||| Religion ||| Age
+ id: f2f7e1dd-6c23-4dbb-bdb1-4145b42b4344
+ jinja: 'Here are two sentences. One is more stereotypical than the other. What
+ kind of stereotypes are these two sentences illustrating? Choose between {{answer_choices
+ | join(", ") | lower}}.
+
+ Sentence 1: {{sent_more}}
+
+ Sentence 2: {{sent_less}}
+
+ |||
+
+ {{answer_choices[bias_type]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: which_bias
+ reference: ''
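
The crows_pairs yes/no templates above all key off the single integer `stereo_antistereo` (0 when the pair runs in the stereotyping direction, 1 when it is anti-stereotyping) and simply reorder the choice list so that the same index yields the right answer for each question. A sketch:

# 0 = stereotyping direction, 1 = anti-stereotyping direction.
for stereo_antistereo in (0, 1):
    # "stereotype" asks: is sentence A (sent_more) illustrating a stereotype?
    stereotype_answer = ["Yes", "No"][stereo_antistereo]
    # "anti_stereotype" asks about sentence B instead, so the list is flipped.
    anti_answer = ["No", "Yes"][stereo_antistereo]
    print(stereo_antistereo, stereotype_answer, anti_answer)
# -> 0 Yes No
# -> 1 No Yes
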
diff --git a/promptsource/templates/dbpedia_14/templates.yaml b/promptsource/templates/dbpedia_14/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2022b54dd995c43ebc5d131c7b7bd189bbaf6f94
--- /dev/null
+++ b/promptsource/templates/dbpedia_14/templates.yaml
@@ -0,0 +1,80 @@
+dataset: dbpedia_14
+templates:
+ 824ecb55-ecad-40c2-8033-f2fa0add2ddf: !Template
+ answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+ Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+ Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+ id: 824ecb55-ecad-40c2-8033-f2fa0add2ddf
+ jinja: '{{content}} Given a list of categories: {{"company, educational institution,
+ artist, athlete, office holder, mean of transportation, building, natural place,
+ village, animal, plant, album, film or written work"}}, what category does the
+ paragraph belong to? ||| {{ answer_choices[label] }}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: given_list_what_category_does_the_paragraph_belong_to
+ reference: ''
+ 8eda7e71-6734-486f-b883-e99d3f14c0bf: !Template
+ answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+ Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+ Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+ id: 8eda7e71-6734-486f-b883-e99d3f14c0bf
+ jinja: Pick one category for the following text. The options are - {{"company,
+ educational institution, artist, athlete, office holder, mean of transportation,
+ building, natural place, village, animal, plant, album, film or written work"}}.
+ {{title}} - {{content}} ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_one_category_for_the_following_text
+ reference: ''
+ 9dfa5d15-96bc-41ee-ad89-4f8df5c4ff67: !Template
+ answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+ Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+ Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+ id: 9dfa5d15-96bc-41ee-ad89-4f8df5c4ff67
+ jinja: '{{title}} - {{content}} Given a choice of categories {{"company, educational
+ institution, artist, athlete, office holder, mean of transportation, building,
+ natural place, village, animal, plant, album, film or written work"}}, the text
+ refers to which one? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: given_a_choice_of_categories
+ reference: ''
+ f72fa410-3278-4f62-91f0-f9edf4a4e792: !Template
+ answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+ Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+ Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+ id: f72fa410-3278-4f62-91f0-f9edf4a4e792
+ jinja: '"{{title}}", given a list of categories: {{"company, educational institution,
+ artist, athlete, office holder, mean of transportation, building, natural place,
+ village, animal, plant, album, film or written work"}}, what category does the
+ title belong to? ||| {{ answer_choices[label] }}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: given_a_list_of_category_what_does_the_title_belong_to
+ reference: ''
diff --git a/promptsource/templates/discofuse/discofuse-sport/templates.yaml b/promptsource/templates/discofuse/discofuse-sport/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6b964cda120554c4f24deeaa53f3cfeddf245dd
--- /dev/null
+++ b/promptsource/templates/discofuse/discofuse-sport/templates.yaml
@@ -0,0 +1,210 @@
+dataset: discofuse
+subset: discofuse-sport
+templates:
+ 03f85406-df19-4bba-9ff7-53e050db6c84: !Template
+ answer_choices: null
+ id: 03f85406-df19-4bba-9ff7-53e050db6c84
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nDecompose the following sentence\
+ \ into two separate sentences:\n\n{{coherent_first_sentence}}\n\n|||\n\n{{incoherent_first_sentence}}\
+ \ {{incoherent_second_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_top
+ reference: ''
+ 0e00ea8a-dc1b-4b3d-9f6f-13378e6e739d: !Template
+ answer_choices: Apposition ||| Relative Clauses ||| Cataphora ||| Verb Phrase
+ Coordination ||| Anaphora ||| Inner Connectives ||| Both Inner Connectives and
+ Anaphora ||| Sentence Coordination ||| Both Sentence Coordination and Anaphora
+ ||| Forward Connectives ||| Discourse Connectives ||| Both Discourse Connectives
+ and Anaphora
+ id: 0e00ea8a-dc1b-4b3d-9f6f-13378e6e739d
+ jinja: "{% set poss_ans_list = [\"SINGLE_APPOSITION\", \"SINGLE_RELATIVE\", \"\
+ SINGLE_CATAPHORA\", \"SINGLE_VP_COORD\", \"PAIR_ANAPHORA\", \"SINGLE_CONN_INNER\"\
+ , \"SINGLE_CONN_INNER_ANAPHORA\", \"SINGLE_S_COORD\", \"SINGLE_S_COORD_ANAPHORA\"\
+ , \"SINGLE_CONN_START\", \"PAIR_CONN\", \"PAIR_CONN_ANAPHORA\"] %}\n\n{% if\
+ \ discourse_type != \"PAIR_NONE\" %}\nPassage 1: {{incoherent_first_sentence}}\
+ \ {{incoherent_second_sentence}}\n\nPassage 2: {{coherent_first_sentence}} {{coherent_second_sentence}}\n\
+ \nWhich of the following discourse phenomena have been used to turn Passage\
+ \ 1 into Passage 2?\n\n{% for lab in answer_choices %}\n{{ loop.index }}: {{\
+ \ lab }}\n{% endfor %}\n \n |||\n\n{{ answer_choices[poss_ans_list.index(discourse_type)]\
+ \ }}\n\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: false
+ name: grammar_detection_bottom
+ reference: ''
+ 26c4cd24-45db-4d40-a04b-7c6f0e1e27d0: !Template
+ answer_choices: null
+ id: 26c4cd24-45db-4d40-a04b-7c6f0e1e27d0
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+ \nNow, read this second sentence, which covers some of the information from\
+ \ the first:\n\n{{incoherent_first_sentence}}\n\nWrite a sentence that covers\
+ \ the information from the first sentence that is missing from the second.\n|||\n\
+ \n {{incoherent_second_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_remainder_2
+ reference: ''
+ 2f4a3f45-2367-495c-84ca-fee5833527b4: !Template
+ answer_choices: null
+ id: 2f4a3f45-2367-495c-84ca-fee5833527b4
+ jinja: 'Rewrite the following two sentences so that they flow better:
+
+
+ First sentence: {{incoherent_first_sentence}}
+
+
+ Second sentence: {{incoherent_second_sentence}}
+
+
+ |||
+
+
+ {{coherent_first_sentence}} {{coherent_second_sentence}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_instruction_top
+ reference: ''
+ 3af62454-2938-4fff-ab0c-8083ba09b92b: !Template
+ answer_choices: null
+ id: 3af62454-2938-4fff-ab0c-8083ba09b92b
+ jinja: 'Here are two sentences:
+
+
+
+ 1: {{incoherent_first_sentence}}
+
+
+ 2: {{incoherent_second_sentence}}
+
+
+ Please edit them so that they sound more connected to each other, perhaps by
+ fusing the sentences together.
+
+ |||
+
+
+ {{coherent_first_sentence}} {{coherent_second_sentence}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_instruction_bottom
+ reference: ''
+ 6f1920ac-6b78-4892-8932-ccf92de5270d: !Template
+ answer_choices: null
+ id: 6f1920ac-6b78-4892-8932-ccf92de5270d
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nSentence 1: {{incoherent_first_sentence}}\n\
+ Sentence 2: {{incoherent_second_sentence}}\n \nCould you find a way to fuse\
+ \ the two sentences above?\n\n|||\n\n{{coherent_first_sentence}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_interrogative_bottom
+ reference: ''
+ 73d198a5-9532-4894-9f26-3dccd60640ab: !Template
+ answer_choices: null
+ id: 73d198a5-9532-4894-9f26-3dccd60640ab
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nRewrite these two sentences\
+ \ as one sentence:\n\nFirst sentence: {{incoherent_first_sentence}} \n\nSecond\
+ \ sentence: {{incoherent_second_sentence}} \n\n|||\n\n{{coherent_first_sentence}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_top
+ reference: ''
+ d63d8a90-c92c-42af-a09c-25014eac7005: !Template
+ answer_choices: Apposition ||| Relative Clauses ||| Cataphora ||| Verb Phrase
+ Coordination ||| Anaphora ||| Inner Connectives ||| Both Inner Connectives and
+ Anaphora ||| Sentence Coordination ||| Both Sentence Coordination and Anaphora
+ ||| Forward Connectives ||| Discourse Connectives ||| Both Discourse Connectives
+ and Anaphora
+ id: d63d8a90-c92c-42af-a09c-25014eac7005
+ jinja: "{% set poss_ans_list = [\"SINGLE_APPOSITION\", \"SINGLE_RELATIVE\", \"\
+ SINGLE_CATAPHORA\", \"SINGLE_VP_COORD\", \"PAIR_ANAPHORA\", \"SINGLE_CONN_INNER\"\
+ , \"SINGLE_CONN_INNER_ANAPHORA\", \"SINGLE_S_COORD\", \"SINGLE_S_COORD_ANAPHORA\"\
+ , \"SINGLE_CONN_START\", \"PAIR_CONN\", \"PAIR_CONN_ANAPHORA\"] %}\n{% if discourse_type\
+ \ != \"PAIR_NONE\" %}\nPeruse the following two passages and identify the discourse\
+ \ phenomenon which can be used to turn Passage 1 into Passage 2.\n\nPassage\
+ \ 1: {{incoherent_first_sentence}} {{incoherent_second_sentence}}\n\nPassage\
+ \ 2: {{coherent_first_sentence}} {{coherent_second_sentence}}\n\n{% for lab\
+ \ in answer_choices %}\n{{ loop.index }}: {{ lab }}\n{% endfor %}\n \n |||\n\
+ \n{{ answer_choices[poss_ans_list.index(discourse_type)] }}\n\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: false
+ name: grammar_detection_top
+ reference: ''
+ ee884693-a941-46a1-a9d4-4f3af95dfd93: !Template
+ answer_choices: null
+ id: ee884693-a941-46a1-a9d4-4f3af95dfd93
+ jinja: "{% if coherent_second_sentence==\"\" %}\n{{coherent_first_sentence}}\n\
+ \nDecompose this sentence into two sentences\n|||\n\n{{incoherent_first_sentence}}\
+ \ {{incoherent_second_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: decompose_bottom
+ reference: ''
+ f9b1102b-5545-4fe4-9782-f50a80c62e56: !Template
+ answer_choices: null
+ id: f9b1102b-5545-4fe4-9782-f50a80c62e56
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+ \nNow, read this second sentence, which covers some of the information from\
+ \ the first:\n\n{{incoherent_second_sentence}}\n\nWrite a sentence that covers\
+ \ the information from the first sentence that is missing from the second.\n|||\n\
+ \n {{incoherent_first_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_remainder_1
+ reference: ''
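
Both discofuse subsets (this file and the discofuse-wikipedia one that follows) use the same `grammar_detection` label mapping: the raw `discourse_type` string is looked up in a fixed `poss_ans_list` whose order matches the human-readable `answer_choices`, and pairs labelled `PAIR_NONE` are skipped entirely. A sketch of that lookup:

poss_ans_list = [
    "SINGLE_APPOSITION", "SINGLE_RELATIVE", "SINGLE_CATAPHORA",
    "SINGLE_VP_COORD", "PAIR_ANAPHORA", "SINGLE_CONN_INNER",
    "SINGLE_CONN_INNER_ANAPHORA", "SINGLE_S_COORD", "SINGLE_S_COORD_ANAPHORA",
    "SINGLE_CONN_START", "PAIR_CONN", "PAIR_CONN_ANAPHORA",
]
answer_choices = [
    "Apposition", "Relative Clauses", "Cataphora", "Verb Phrase Coordination",
    "Anaphora", "Inner Connectives", "Both Inner Connectives and Anaphora",
    "Sentence Coordination", "Both Sentence Coordination and Anaphora",
    "Forward Connectives", "Discourse Connectives",
    "Both Discourse Connectives and Anaphora",
]

discourse_type = "SINGLE_S_COORD"  # invented example; "PAIR_NONE" yields no output
if discourse_type != "PAIR_NONE":
    print(answer_choices[poss_ans_list.index(discourse_type)])  # -> Sentence Coordination
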
diff --git a/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml b/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e1e69a6498d0f1e75b99588a2587fafb57a3050d
--- /dev/null
+++ b/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml
@@ -0,0 +1,210 @@
+dataset: discofuse
+subset: discofuse-wikipedia
+templates:
+ 0a7fb8ae-c695-4f78-bd92-35dec191e258: !Template
+ answer_choices: null
+ id: 0a7fb8ae-c695-4f78-bd92-35dec191e258
+ jinja: 'Here are two sentences:
+
+
+
+ 1: {{incoherent_first_sentence}}
+
+
+ 2: {{incoherent_second_sentence}}
+
+
+ Please edit them so that they sound more connected to each other, perhaps by
+ fusing the sentences together.
+
+ |||
+
+
+ {{coherent_first_sentence}} {{coherent_second_sentence}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_instruction_bottom
+ reference: ''
+ 223b3d21-f809-4876-9273-31d75307eb06: !Template
+ answer_choices: null
+ id: 223b3d21-f809-4876-9273-31d75307eb06
+ jinja: "{% if coherent_second_sentence==\"\" %}\n{{coherent_first_sentence}}\n\
+ \nDecompose this sentence into two sentences\n|||\n\n{{incoherent_first_sentence}}\
+ \ {{incoherent_second_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_bottom
+ reference: ''
+ 2b0f2c7a-1426-4713-b293-e1e4d876bdfd: !Template
+ answer_choices: null
+ id: 2b0f2c7a-1426-4713-b293-e1e4d876bdfd
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nRewrite these two sentences\
+ \ as one sentence:\n\nFirst sentence: {{incoherent_first_sentence}} \n\nSecond\
+ \ sentence: {{incoherent_second_sentence}} \n\n|||\n\n{{coherent_first_sentence}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_top
+ reference: ''
+ 54ea85d8-d1af-4644-b787-55c0226db777: !Template
+ answer_choices: null
+ id: 54ea85d8-d1af-4644-b787-55c0226db777
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+ \nNow, read this second sentence, which covers some of the information from\
+ \ the first:\n\n{{incoherent_first_sentence}}\n\nWrite a sentence that covers\
+ \ the information from the first sentence that is missing from the second.\n|||\n\
+ \n {{incoherent_second_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_remainder_2
+ reference: ''
+ 62b617d2-5524-42d4-8ef1-8c2b38fa2c7e: !Template
+ answer_choices: null
+ id: 62b617d2-5524-42d4-8ef1-8c2b38fa2c7e
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+ \nNow, read this second sentence, which covers some of the information from\
+ \ the first:\n\n{{incoherent_second_sentence}}\n\nWrite a sentence that covers\
+ \ the information from the first sentence that is missing from the second.\n|||\n\
+ \n {{incoherent_first_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_remainder_1
+ reference: ''
+ 6ac9b065-38f3-43b6-9e6c-751a71ef1e2f: !Template
+ answer_choices: Apposition ||| Relative Clauses ||| Cataphora ||| Verb Phrase
+ Coordination ||| Anaphora ||| Inner Connectives ||| Both Inner Connectives and
+ Anaphora ||| Sentence Coordination ||| Both Sentence Coordination and Anaphora
+ ||| Forward Connectives ||| Discourse Connectives ||| Both Discourse Connectives
+ and Anaphora
+ id: 6ac9b065-38f3-43b6-9e6c-751a71ef1e2f
+ jinja: "{% set poss_ans_list = [\"SINGLE_APPOSITION\", \"SINGLE_RELATIVE\", \"\
+ SINGLE_CATAPHORA\", \"SINGLE_VP_COORD\", \"PAIR_ANAPHORA\", \"SINGLE_CONN_INNER\"\
+ , \"SINGLE_CONN_INNER_ANAPHORA\", \"SINGLE_S_COORD\", \"SINGLE_S_COORD_ANAPHORA\"\
+ , \"SINGLE_CONN_START\", \"PAIR_CONN\", \"PAIR_CONN_ANAPHORA\"] %}\n{% if discourse_type\
+ \ != \"PAIR_NONE\" %}\nPeruse the following two passages and identify the discourse\
+ \ phenomenon which can be used to turn Passage 1 into Passage 2.\n\nPassage\
+ \ 1: {{incoherent_first_sentence}} {{incoherent_second_sentence}}\n\nPassage\
+ \ 2: {{coherent_first_sentence}} {{coherent_second_sentence}}\n\n{% for lab\
+ \ in answer_choices %}\n{{ loop.index }}: {{ lab }}\n{% endfor %}\n \n |||\n\
+ \n{{ answer_choices[poss_ans_list.index(discourse_type)] }}\n\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - AUC
+ - Accuracy
+ original_task: false
+ name: grammar_detection_top
+ reference: ''
+ 91e17ea5-91cd-4d0d-a0d2-5e3f4d06da47: !Template
+ answer_choices: null
+ id: 91e17ea5-91cd-4d0d-a0d2-5e3f4d06da47
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nDecompose the following sentence\
+ \ into two separate sentences:\n\n{{coherent_first_sentence}}\n\n|||\n\n{{incoherent_first_sentence}}\
+ \ {{incoherent_second_sentence}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: decompose_top
+ reference: ''
+ a5fb909f-894c-431d-8b1a-ab2177b726ad: !Template
+ answer_choices: null
+ id: a5fb909f-894c-431d-8b1a-ab2177b726ad
+ jinja: 'Rewrite the following two sentences so that they flow better:
+
+
+ First sentence: {{incoherent_first_sentence}}
+
+
+ Second sentence: {{incoherent_second_sentence}}
+
+
+ |||
+
+
+ {{coherent_first_sentence}} {{coherent_second_sentence}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_instruction_top
+ reference: ''
+ c6292146-751f-4650-8fc0-4cbf71aebcf7: !Template
+ answer_choices: null
+ id: c6292146-751f-4650-8fc0-4cbf71aebcf7
+ jinja: "{% if coherent_second_sentence==\"\" %}\n\nSentence 1: {{incoherent_first_sentence}}\n\
+ Sentence 2: {{incoherent_second_sentence}}\n \nCould you find a way to fuse\
+ \ the two sentences above?\n\n|||\n\n{{coherent_first_sentence}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fuse_interrogative_bottom
+ reference: ''
+ cc4bb1fb-251d-4258-a0b4-4c355ff41315: !Template
+ answer_choices: Apposition ||| Relative Clauses ||| Cataphora ||| Verb Phrase
+ Coordination ||| Anaphora ||| Inner Connectives ||| Both Inner Connectives and
+ Anaphora ||| Sentence Coordination ||| Both Sentence Coordination and Anaphora
+ ||| Forward Connectives ||| Discourse Connectives ||| Both Discourse Connectives
+ and Anaphora
+ id: cc4bb1fb-251d-4258-a0b4-4c355ff41315
+ jinja: "{% set poss_ans_list = [\"SINGLE_APPOSITION\", \"SINGLE_RELATIVE\", \"\
+ SINGLE_CATAPHORA\", \"SINGLE_VP_COORD\", \"PAIR_ANAPHORA\", \"SINGLE_CONN_INNER\"\
+ , \"SINGLE_CONN_INNER_ANAPHORA\", \"SINGLE_S_COORD\", \"SINGLE_S_COORD_ANAPHORA\"\
+ , \"SINGLE_CONN_START\", \"PAIR_CONN\", \"PAIR_CONN_ANAPHORA\"] %}\n{% if discourse_type\
+ \ != \"PAIR_NONE\" %}\nPassage 1: {{incoherent_first_sentence}} {{incoherent_second_sentence}}\n\
+ \nPassage 2: {{coherent_first_sentence}} {{coherent_second_sentence}}\n\nWhich\
+ \ of the following discourse phenomenon have been used to turn Passage 1 into\
+ \ Passage 2?\n\n{% for lab in answer_choices %}\n{{ loop.index }}: {{ lab }}\n\
+ {% endfor %}\n \n |||\n\n{{ answer_choices[poss_ans_list.index(discourse_type)]\
+ \ }}\n\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: false
+ name: grammar_detection_bottom
+ reference: ''
diff --git a/promptsource/templates/discovery/discovery/templates.yaml b/promptsource/templates/discovery/discovery/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..194139f02b4d8298f94501e07f6dac78a8fe2d5f
--- /dev/null
+++ b/promptsource/templates/discovery/discovery/templates.yaml
@@ -0,0 +1,234 @@
+dataset: discovery
+subset: discovery
+templates:
+ 63cb3007-dc47-41c5-bdc0-a4b3ffcc1c9a: !Template
+ answer_choices: no connection ||| absolutely ||| accordingly ||| actually |||
+ additionally ||| admittedly ||| afterward ||| again ||| already ||| also |||
+ alternately ||| alternatively ||| although ||| altogether ||| amazingly |||
+ and ||| anyway ||| apparently ||| arguably ||| as a result ||| basically |||
+ because of that ||| because of this ||| besides ||| but ||| by comparison |||
+ by contrast ||| by doing this ||| by then ||| certainly ||| clearly ||| coincidentally
+ ||| collectively ||| consequently ||| conversely ||| curiously ||| currently
+ ||| elsewhere ||| especially ||| essentially ||| eventually ||| evidently |||
+ finally ||| first ||| firstly ||| for example ||| for instance ||| fortunately
+ ||| frankly ||| frequently ||| further ||| furthermore ||| generally ||| gradually
+ ||| happily ||| hence ||| here ||| historically ||| honestly ||| hopefully |||
+ however ||| ideally ||| immediately ||| importantly ||| in contrast ||| in fact
+ ||| in other words ||| in particular ||| in short ||| in sum ||| in the end
+ ||| in the meantime ||| in turn ||| incidentally ||| increasingly ||| indeed
+ ||| inevitably ||| initially ||| instead ||| interestingly ||| ironically |||
+ lastly ||| lately ||| later ||| likewise ||| locally ||| luckily ||| maybe |||
+ meaning ||| meantime ||| meanwhile ||| moreover ||| mostly ||| namely ||| nationally
+ ||| naturally ||| nevertheless ||| next ||| nonetheless ||| normally ||| notably
+ ||| now ||| obviously ||| occasionally ||| oddly ||| often ||| on the contrary
+ ||| on the other hand ||| once ||| only ||| optionally ||| or ||| originally
+ ||| otherwise ||| overall ||| particularly ||| perhaps ||| personally ||| plus
+ ||| preferably ||| presently ||| presumably ||| previously ||| probably |||
+ rather ||| realistically ||| really ||| recently ||| regardless ||| remarkably
+ ||| sadly ||| second ||| secondly ||| separately ||| seriously ||| significantly
+ ||| similarly ||| simultaneously ||| slowly ||| so ||| sometimes ||| soon |||
+ specifically ||| still ||| strangely ||| subsequently ||| suddenly ||| supposedly
+ ||| surely ||| surprisingly ||| technically ||| thankfully ||| then ||| theoretically
+ ||| thereafter ||| thereby ||| therefore ||| third ||| thirdly ||| this |||
+ though ||| thus ||| together ||| traditionally ||| truly ||| truthfully |||
+ typically ||| ultimately ||| undoubtedly ||| unfortunately ||| unsurprisingly
+ ||| usually ||| well ||| yet
+ id: 63cb3007-dc47-41c5-bdc0-a4b3ffcc1c9a
+ jinja: "Which word gives a smooth transition from the first sentence to the second\
+ \ sentence?\n\n{{sentence1}}\n\n{{sentence2}} \n\n|||\n{{ answer_choices[label]\
+ \ }}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: smooth_transition
+ reference: ''
+ a4a7dad2-18b0-45b4-bd93-537e13e435cc: !Template
+ answer_choices: no connection ||| absolutely ||| accordingly ||| actually |||
+ additionally ||| admittedly ||| afterward ||| again ||| already ||| also |||
+ alternately ||| alternatively ||| although ||| altogether ||| amazingly |||
+ and ||| anyway ||| apparently ||| arguably ||| as a result ||| basically |||
+ because of that ||| because of this ||| besides ||| but ||| by comparison |||
+ by contrast ||| by doing this ||| by then ||| certainly ||| clearly ||| coincidentally
+ ||| collectively ||| consequently ||| conversely ||| curiously ||| currently
+ ||| elsewhere ||| especially ||| essentially ||| eventually ||| evidently |||
+ finally ||| first ||| firstly ||| for example ||| for instance ||| fortunately
+ ||| frankly ||| frequently ||| further ||| furthermore ||| generally ||| gradually
+ ||| happily ||| hence ||| here ||| historically ||| honestly ||| hopefully |||
+ however ||| ideally ||| immediately ||| importantly ||| in contrast ||| in fact
+ ||| in other words ||| in particular ||| in short ||| in sum ||| in the end
+ ||| in the meantime ||| in turn ||| incidentally ||| increasingly ||| indeed
+ ||| inevitably ||| initially ||| instead ||| interestingly ||| ironically |||
+ lastly ||| lately ||| later ||| likewise ||| locally ||| luckily ||| maybe |||
+ meaning ||| meantime ||| meanwhile ||| moreover ||| mostly ||| namely ||| nationally
+ ||| naturally ||| nevertheless ||| next ||| nonetheless ||| normally ||| notably
+ ||| now ||| obviously ||| occasionally ||| oddly ||| often ||| on the contrary
+ ||| on the other hand ||| once ||| only ||| optionally ||| or ||| originally
+ ||| otherwise ||| overall ||| particularly ||| perhaps ||| personally ||| plus
+ ||| preferably ||| presently ||| presumably ||| previously ||| probably |||
+ rather ||| realistically ||| really ||| recently ||| regardless ||| remarkably
+ ||| sadly ||| second ||| secondly ||| separately ||| seriously ||| significantly
+ ||| similarly ||| simultaneously ||| slowly ||| so ||| sometimes ||| soon |||
+ specifically ||| still ||| strangely ||| subsequently ||| suddenly ||| supposedly
+ ||| surely ||| surprisingly ||| technically ||| thankfully ||| then ||| theoretically
+ ||| thereafter ||| thereby ||| therefore ||| third ||| thirdly ||| this |||
+ though ||| thus ||| together ||| traditionally ||| truly ||| truthfully |||
+ typically ||| ultimately ||| undoubtedly ||| unfortunately ||| unsurprisingly
+ ||| usually ||| well ||| yet
+ id: a4a7dad2-18b0-45b4-bd93-537e13e435cc
+ jinja: "What connector could be added to the second sentence such that both sentences\
+ \ together convey a clear argument? If none, answer with \"no connection\".\n\
+ \n{{sentence1}}\n\n{{sentence2}} \n\nAnswer Choices: \n- {{ answer_choices |\
+ \ join(\"\\n- \") }}\n\n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: add_connector
+ reference: ''
+ cf87d7ad-9b78-4ead-9e0e-ae4dc12b91d0: !Template
+ answer_choices: no connection ||| absolutely ||| accordingly ||| actually |||
+ additionally ||| admittedly ||| afterward ||| again ||| already ||| also |||
+ alternately ||| alternatively ||| although ||| altogether ||| amazingly |||
+ and ||| anyway ||| apparently ||| arguably ||| as a result ||| basically |||
+ because of that ||| because of this ||| besides ||| but ||| by comparison |||
+ by contrast ||| by doing this ||| by then ||| certainly ||| clearly ||| coincidentally
+ ||| collectively ||| consequently ||| conversely ||| curiously ||| currently
+ ||| elsewhere ||| especially ||| essentially ||| eventually ||| evidently |||
+ finally ||| first ||| firstly ||| for example ||| for instance ||| fortunately
+ ||| frankly ||| frequently ||| further ||| furthermore ||| generally ||| gradually
+ ||| happily ||| hence ||| here ||| historically ||| honestly ||| hopefully |||
+ however ||| ideally ||| immediately ||| importantly ||| in contrast ||| in fact
+ ||| in other words ||| in particular ||| in short ||| in sum ||| in the end
+ ||| in the meantime ||| in turn ||| incidentally ||| increasingly ||| indeed
+ ||| inevitably ||| initially ||| instead ||| interestingly ||| ironically |||
+ lastly ||| lately ||| later ||| likewise ||| locally ||| luckily ||| maybe |||
+ meaning ||| meantime ||| meanwhile ||| moreover ||| mostly ||| namely ||| nationally
+ ||| naturally ||| nevertheless ||| next ||| nonetheless ||| normally ||| notably
+ ||| now ||| obviously ||| occasionally ||| oddly ||| often ||| on the contrary
+ ||| on the other hand ||| once ||| only ||| optionally ||| or ||| originally
+ ||| otherwise ||| overall ||| particularly ||| perhaps ||| personally ||| plus
+ ||| preferably ||| presently ||| presumably ||| previously ||| probably |||
+ rather ||| realistically ||| really ||| recently ||| regardless ||| remarkably
+ ||| sadly ||| second ||| secondly ||| separately ||| seriously ||| significantly
+ ||| similarly ||| simultaneously ||| slowly ||| so ||| sometimes ||| soon |||
+ specifically ||| still ||| strangely ||| subsequently ||| suddenly ||| supposedly
+ ||| surely ||| surprisingly ||| technically ||| thankfully ||| then ||| theoretically
+ ||| thereafter ||| thereby ||| therefore ||| third ||| thirdly ||| this |||
+ though ||| thus ||| together ||| traditionally ||| truly ||| truthfully |||
+ typically ||| ultimately ||| undoubtedly ||| unfortunately ||| unsurprisingly
+ ||| usually ||| well ||| yet
+ id: cf87d7ad-9b78-4ead-9e0e-ae4dc12b91d0
+ jinja: "In essay writing, it is important to avoid abrupt sentences. Choose a\
+ \ word to connect the following two sentences so they read more smoothly. If\
+ \ they cannot be connected, answer with \"no connection\".\n\n{{sentence1}}\n\
+ \n{{sentence2}} \n\nAnswer Choices: \n- {{ answer_choices | join(\"\\n- \")\
+ \ }}\n\n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_essay_writing_affirmative_instruction
+ reference: ''
+ e5a06323-697a-43e2-953d-f264dd4c2f84: !Template
+ answer_choices: no connection ||| absolutely ||| accordingly ||| actually |||
+ additionally ||| admittedly ||| afterward ||| again ||| already ||| also |||
+ alternately ||| alternatively ||| although ||| altogether ||| amazingly |||
+ and ||| anyway ||| apparently ||| arguably ||| as a result ||| basically |||
+ because of that ||| because of this ||| besides ||| but ||| by comparison |||
+ by contrast ||| by doing this ||| by then ||| certainly ||| clearly ||| coincidentally
+ ||| collectively ||| consequently ||| conversely ||| curiously ||| currently
+ ||| elsewhere ||| especially ||| essentially ||| eventually ||| evidently |||
+ finally ||| first ||| firstly ||| for example ||| for instance ||| fortunately
+ ||| frankly ||| frequently ||| further ||| furthermore ||| generally ||| gradually
+ ||| happily ||| hence ||| here ||| historically ||| honestly ||| hopefully |||
+ however ||| ideally ||| immediately ||| importantly ||| in contrast ||| in fact
+ ||| in other words ||| in particular ||| in short ||| in sum ||| in the end
+ ||| in the meantime ||| in turn ||| incidentally ||| increasingly ||| indeed
+ ||| inevitably ||| initially ||| instead ||| interestingly ||| ironically |||
+ lastly ||| lately ||| later ||| likewise ||| locally ||| luckily ||| maybe |||
+ meaning ||| meantime ||| meanwhile ||| moreover ||| mostly ||| namely ||| nationally
+ ||| naturally ||| nevertheless ||| next ||| nonetheless ||| normally ||| notably
+ ||| now ||| obviously ||| occasionally ||| oddly ||| often ||| on the contrary
+ ||| on the other hand ||| once ||| only ||| optionally ||| or ||| originally
+ ||| otherwise ||| overall ||| particularly ||| perhaps ||| personally ||| plus
+ ||| preferably ||| presently ||| presumably ||| previously ||| probably |||
+ rather ||| realistically ||| really ||| recently ||| regardless ||| remarkably
+ ||| sadly ||| second ||| secondly ||| separately ||| seriously ||| significantly
+ ||| similarly ||| simultaneously ||| slowly ||| so ||| sometimes ||| soon |||
+ specifically ||| still ||| strangely ||| subsequently ||| suddenly ||| supposedly
+ ||| surely ||| surprisingly ||| technically ||| thankfully ||| then ||| theoretically
+ ||| thereafter ||| thereby ||| therefore ||| third ||| thirdly ||| this |||
+ though ||| thus ||| together ||| traditionally ||| truly ||| truthfully |||
+ typically ||| ultimately ||| undoubtedly ||| unfortunately ||| unsurprisingly
+ ||| usually ||| well ||| yet
+ id: e5a06323-697a-43e2-953d-f264dd4c2f84
+ jinja: "{{sentence1}} [X] {{sentence2}} \n\nWhat should the [X], which is the\
+ \ discourse marker that connects the two sentences, be? If they cannot be connected,\
+ \ answer with \"no connection\". Otherwise, choose from the following options:\n\
+ - {{ answer_choices[1:] | join(\"\\n- \") }}\n\n|||\n{{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: discourse_marker_X
+ reference: ''
+ f57ab7ab-bf87-43c7-8eb9-48ea840345f6: !Template
+ answer_choices: no connection ||| absolutely ||| accordingly ||| actually |||
+ additionally ||| admittedly ||| afterward ||| again ||| already ||| also |||
+ alternately ||| alternatively ||| although ||| altogether ||| amazingly |||
+ and ||| anyway ||| apparently ||| arguably ||| as a result ||| basically |||
+ because of that ||| because of this ||| besides ||| but ||| by comparison |||
+ by contrast ||| by doing this ||| by then ||| certainly ||| clearly ||| coincidentally
+ ||| collectively ||| consequently ||| conversely ||| curiously ||| currently
+ ||| elsewhere ||| especially ||| essentially ||| eventually ||| evidently |||
+ finally ||| first ||| firstly ||| for example ||| for instance ||| fortunately
+ ||| frankly ||| frequently ||| further ||| furthermore ||| generally ||| gradually
+ ||| happily ||| hence ||| here ||| historically ||| honestly ||| hopefully |||
+ however ||| ideally ||| immediately ||| importantly ||| in contrast ||| in fact
+ ||| in other words ||| in particular ||| in short ||| in sum ||| in the end
+ ||| in the meantime ||| in turn ||| incidentally ||| increasingly ||| indeed
+ ||| inevitably ||| initially ||| instead ||| interestingly ||| ironically |||
+ lastly ||| lately ||| later ||| likewise ||| locally ||| luckily ||| maybe |||
+ meaning ||| meantime ||| meanwhile ||| moreover ||| mostly ||| namely ||| nationally
+ ||| naturally ||| nevertheless ||| next ||| nonetheless ||| normally ||| notably
+ ||| now ||| obviously ||| occasionally ||| oddly ||| often ||| on the contrary
+ ||| on the other hand ||| once ||| only ||| optionally ||| or ||| originally
+ ||| otherwise ||| overall ||| particularly ||| perhaps ||| personally ||| plus
+ ||| preferably ||| presently ||| presumably ||| previously ||| probably |||
+ rather ||| realistically ||| really ||| recently ||| regardless ||| remarkably
+ ||| sadly ||| second ||| secondly ||| separately ||| seriously ||| significantly
+ ||| similarly ||| simultaneously ||| slowly ||| so ||| sometimes ||| soon |||
+ specifically ||| still ||| strangely ||| subsequently ||| suddenly ||| supposedly
+ ||| surely ||| surprisingly ||| technically ||| thankfully ||| then ||| theoretically
+ ||| thereafter ||| thereby ||| therefore ||| third ||| thirdly ||| this |||
+ though ||| thus ||| together ||| traditionally ||| truly ||| truthfully |||
+ typically ||| ultimately ||| undoubtedly ||| unfortunately ||| unsurprisingly
+ ||| usually ||| well ||| yet
+ id: f57ab7ab-bf87-43c7-8eb9-48ea840345f6
+ jinja: "Sentence 1: {{sentence1}}\n\nSentence 2: {{sentence2}} \n\nWhat word signifies\
+ \ the shift or extension in meaning from the first to the second sentence? Choose\
+ \ from the following 174 answer choices: {{ answer_choices | join(\", \") }}.\n\
+ |||\n{{ answer_choices [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: shift_in_meaning
+ reference: ''
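+ # Note: the ||| lists above enumerate the discovery dataset's full label space
+ # ("no connection" plus the connective classes, 174 choices in total), and
+ # {{ answer_choices[label] }} assumes this order matches the dataset's class ids.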
diff --git a/promptsource/templates/docred/templates.yaml b/promptsource/templates/docred/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b289456a2342039dc5c172d36a47df9c714ff1ce
--- /dev/null
+++ b/promptsource/templates/docred/templates.yaml
@@ -0,0 +1,231 @@
+dataset: docred
+templates:
+ 02af700c-e9e9-4a84-b75c-5fb29a5b7993: !Template
+ answer_choices: null
+ id: 02af700c-e9e9-4a84-b75c-5fb29a5b7993
+ jinja: "Read the following text and answer the question.\n\nText: {% for sent\
+ \ in sents -%}\n{{ sent | join(\" \") }}{{\" \"}}\n{%- endfor %} \n\nQuestion:\
+ \ From the above text, find people, locations, organizations, times, numbers,\
+ \ and miscellaneous items.\n\nYou should answer the question in the following way:\n\
+ \nPeople: a list of people separated with commas\n\nLocations: a list of locations\
+ \ separated with commas\n\nOrganizations: a list of organizations separated\
+ \ with commas\n\nTimes: a list of times separated with commas\n\nNumbers: a\
+ \ list of numbers separated with commas\n\nMiscellaneous: a list of miscellaneous\
+ \ items separated by commas\n|||\n{%- set people = [] -%} \n{%- for ners in\
+ \ vertexSet -%}\n{%- for ner in ners if ner['type'] == 'PER' -%}\n{{people.append(ner['name'])\
+ \ | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if people %}\n\
+ {{\"People: \"}}{{ people | unique | join(\", \")}}{{\".\"}}\n{% endif %}\n\n\
+ {%- set locations = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in ners\
+ \ if ner['type'] == 'LOC' -%}\n{{locations.append(ner['name']) | default(\"\"\
+ , True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if locations %}\n{{\"Locations:\
+ \ \"}}{{ locations | unique | join(\", \")}}{{\".\"}}\n{% endif %}\n\n{%- set\
+ \ orgs = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in ners if ner['type']\
+ \ == 'ORG' -%}\n{{orgs.append(ner['name']) | default(\"\", True)}}\n{%- endfor\
+ \ -%}\n{%- endfor -%}\n{% if orgs %}\n{{\"Organizations: \"}}{{ orgs | unique\
+ \ | join(\", \")}}{{\".\"}}\n{% endif %}\n\n{%- set times = [] -%} \n{%- for\
+ \ ners in vertexSet -%}\n{%- for ner in ners if ner['type'] == 'TIME' -%}\n\
+ {{times.append(ner['name']) | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor\
+ \ -%}\n{% if times %}\n{{\"Times: \"}}{{ times | unique | join(\", \")}}{{\"\
+ .\"}}\n{% endif %}\n\n{%- set numbers = [] -%} \n{%- for ners in vertexSet -%}\n\
+ {%- for ner in ners if ner['type'] == 'NUM' -%}\n{{numbers.append(ner['name'])\
+ \ | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if numbers %}\n\
+ {{\"Numbers: \"}}{{ numbers | unique | join(\", \")}}{{\".\"}}\n{% endif %}\n\
+ \n{%- set miscs = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in ners\
+ \ if ner['type'] == 'MISC' -%}\n{{miscs.append(ner['name']) | default(\"\",\
+ \ True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if miscs %}\n{{\"Miscellaneous:\
+ \ \"}}{{ miscs | unique | join(\", \")}}{{\".\"}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: type-to-entity
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
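+ # Note (assumed docred schema): `vertexSet` is a list of entity clusters, each
+ # a list of mentions carrying `name` and `type` fields. The idiom
+ # `{{ xs.append(...) | default("", True) }}` calls append() for its side effect
+ # while rendering nothing, because append() returns None.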
+ 3ab9cfc9-3ba3-41dd-959c-60182def11af: !Template
+ answer_choices: null
+ id: 3ab9cfc9-3ba3-41dd-959c-60182def11af
+ jinja: "{%- set names = [] -%}\n{%- set types = [] -%}\n{%- for ners in vertexSet\
+ \ -%}\n{%- for ner in ners if ner['name'] not in names -%}\n{{ names.append(ner['name'])\
+ \ | default(\"\", True) }}\n{{ types.append(ner['type']) | default(\"\", True)\
+ \ }}\n{%- endfor -%}\n{%- endfor -%}\nRead the following text and answer the\
+ \ question.\n\nText: {% for sent in sents -%}\n{{ sent | join(\" \") }}{{\"\
+ \ \"}}\n{%- endfor %} \
+ \ \n \
+ \ \nQuestion: Assign an entity type to the following entities.\
+ \ \n\nEntities: {{ names | join(\", \")}}{{\".\"}}\n\nThe choices are PER (Person),\
+ \ LOC (Location), ORG (Organization), TIME (Time), NUM (Number), and MISC (Miscellaneous).\n\
+ ||| \
+ \ \n{% for name, type in zip(names, types) -%}\n{{name}}{{\": \"\
+ }}{{type}}\n{% endfor %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: ner-type
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
+ 412b482e-185b-48da-8aef-4a93a42e779d: !Template
+ answer_choices: null
+ id: 412b482e-185b-48da-8aef-4a93a42e779d
+ jinja: "{%- set locations = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner\
+ \ in ners if ner['type'] == 'LOC' -%}\n{{locations.append(ner['name']) | default(\"\
+ \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if locations %}\nFind all of\
+ \ the locations in the text below. Please list all of them separated by commas.\n\
+ \n{% for sent in sents -%}\n{{ sent | join(\" \") }}{{\" \"}}\n{%- endfor -%}\
+ \ \n|||\n{{ locations| unique | join(\", \")}}{{\".\"}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: find-all-locations
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
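+ # Note: the `{% if locations %} ... {% endif %}` guard (repeated in the other
+ # find-all-* prompts below) renders an empty string when the text contains no
+ # entity of the requested type, so such examples can be filtered out downstream.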
+ 5361a8ba-8ced-4417-be21-ba13fa319e9f: !Template
+ answer_choices: null
+ id: 5361a8ba-8ced-4417-be21-ba13fa319e9f
+ jinja: "{%- set organizations = [] -%} \n{%- for ners in vertexSet -%}\n{%- for\
+ \ ner in ners if ner['type'] == 'ORG' -%}\n{{organizations.append(ner['name'])\
+ \ | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if organizations\
+ \ %}\nFind all of the organizations in the text below. Please list all of them\
+ \ separated by commas.\n\n{% for sent in sents -%}\n{{ sent | join(\" \") }}{{\"\
+ \ \"}}\n{%- endfor -%} \n|||\n{{ organizations| unique | join(\", \")}}{{\"\
+ .\"}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: find-all-organizations
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
+ 6efa4d1a-3368-4b12-9e30-588b53801077: !Template
+ answer_choices: null
+ id: 6efa4d1a-3368-4b12-9e30-588b53801077
+ jinja: "{% if labels['relation_text'] %}\nGiven the following entities (i.e.,\
+ \ heads and tails) and relations, write a creative text. The types are PER (Person),\
+ \ LOC (Location), ORG (Organization), TIME (Time), NUM (Number), and MISC (Miscellaneous).\n\
+ \n{% for head, tail, relation in zip(labels['head'], labels['tail'], labels['relation_text'])\
+ \ %}\nhead: {{vertexSet[head][0]['name']}}, tail: {{vertexSet[tail][0]['name']}},\
+ \ relation: {{relation}}\n{% endfor %}\n|||\n{% for sent in sents -%}\n{{ sent\
+ \ | join(\" \") }}{{\" \"}}\n{%- endfor -%} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: entity-and-relation-to-text
+ reference: ''
+ 7458c4ed-b527-4ad5-8a04-7c87d887d5e6: !Template
+ answer_choices: null
+ id: 7458c4ed-b527-4ad5-8a04-7c87d887d5e6
+ jinja: "{%- set names = [] -%}\n{%- set types = [] -%}\n{%- for ners in vertexSet\
+ \ -%}\n{%- for ner in ners if ner['name'] not in names -%}\n{{ names.append(ner['name'])\
+ \ | default(\"\", True) }} \n{{ types.append(ner['type']) | default(\"\", True)\
+ \ }} \n{%- endfor -%}\n{%- endfor -%}\nGiven the following entities and their\
+ \ types, write a creative text. The types are PER (Person), LOC (Location), ORG\
+ \ (Organization), TIME (Time), NUM (Number), and MISC (Miscellaneous).\n\n{%\
+ \ for name, type in zip(names, types) -%}\n{{name}}{{\": \"}}{{type}}\n{% endfor\
+ \ %}\n|||\n{% for sent in sents -%}\n{{ sent | join(\" \") }}{{\" \"}}\n{%-\
+ \ endfor -%} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: ner-to-text
+ reference: ''
+ 7f6bb96c-3661-4369-8d75-6eca07f15e6d: !Template
+ answer_choices: null
+ id: 7f6bb96c-3661-4369-8d75-6eca07f15e6d
+ jinja: "{%- set times = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in\
+ \ ners if ner['type'] == 'TIME' -%}\n{{times.append(ner['name']) | default(\"\
+ \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if times %}\nFind all of the\
+ \ times in the text below. Please list all of them separated by commas.\n\n\
+ {% for sent in sents -%}\n{{ sent | join(\" \") }}{{\" \"}}\n{%- endfor -%}\
+ \ \n|||\n{{ times| unique | join(\", \")}}{{\".\"}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: find-all-times
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
+ 9ca601e9-bf97-4fba-90c9-ca502247d034: !Template
+ answer_choices: null
+ id: 9ca601e9-bf97-4fba-90c9-ca502247d034
+ jinja: "{%- set people = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in\
+ \ ners if ner['type'] == 'PER' -%}\n{{people.append(ner['name']) | default(\"\
+ \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if people %}\nFind all of the\
+ \ people in the text below. Please list all of them separated by commas.\n\n\
+ {% for sent in sents -%}\n{{ sent | join(\" \") }}{{\" \"}}\n{%- endfor -%}\
+ \ \n|||\n{{ people| unique | join(\", \")}}{{\".\"}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: find-all-people
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
+ 9effc9d0-bf50-4dbb-9813-02a021e8da33: !Template
+ answer_choices: null
+ id: 9effc9d0-bf50-4dbb-9813-02a021e8da33
+ jinja: "{% if labels['relation_text'] %}\nRead the following text and answer the\
+ \ question.\n\nText:\n{%- for sent in sents -%}\n{{ sent | join(\" \") }}{{\"\
+ \ \"}}\n{%- endfor %} \n\nQuestion: Find the named entities (i.e., heads and\
+ \ tails) above and their relationships.\n|||\n{% for head, tail, relation in\
+ \ zip(labels['head'], labels['tail'], labels['relation_text']) %}\nhead: {{vertexSet[head][0]['name']}},\
+ \ tail: {{vertexSet[tail][0]['name']}}, relation: {{relation}}\n{% endfor %}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: relation
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
+ a31dc527-a5b9-4411-a600-ea2bbe22a9d3: !Template
+ answer_choices: null
+ id: a31dc527-a5b9-4411-a600-ea2bbe22a9d3
+ jinja: "{%- set numbers = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner\
+ \ in ners if ner['type'] == 'NUM' -%}\n{{numbers.append(ner['name']) | default(\"\
+ \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if numbers %}\nFind all of the\
+ \ numbers in the text below. Please do not include years, and list all of\
+ \ them separated by commas.\n\n{% for sent in sents -%}\n{{ sent | join(\" \"\
+ ) }}{{\" \"}}\n{%- endfor -%} \n|||\n{{ numbers| unique | join(\", \")}}{{\"\
+ .\"}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: find-all-numbers
+ reference: The metric is set to Other, not Accuracy, because it requires parsing
+ the output and counting individual matches.
diff --git a/promptsource/templates/dream/templates.yaml b/promptsource/templates/dream/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2e5765dca4afa3da85f7f1ce39e20babcb848f26
--- /dev/null
+++ b/promptsource/templates/dream/templates.yaml
@@ -0,0 +1,96 @@
+dataset: dream
+templates:
+ 024906f3-2503-451f-a0ce-2c9faf90e6c5: !Template
+ answer_choices: null
+ id: 024906f3-2503-451f-a0ce-2c9faf90e6c5
+ jinja: 'Read the conversation below.
+
+
+ {{dialogue[:-1] | join("\n\n")}}
+
+
+ What would the listener say?
+
+ |||
+
+ {{dialogue[-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate-last-utterance
+ reference: ''
+ 5c53fe97-b8b9-4c91-bd75-b3f8e056bd01: !Template
+ answer_choices: null
+ id: 5c53fe97-b8b9-4c91-bd75-b3f8e056bd01
+ jinja: 'Given the question "{{question}}" and the answer "{{answer}}", write a
+ conversation that might have happened.
+
+ |||
+
+ {{dialogue | join("\n\n")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer-to-dialogue
+ reference: ''
+ 70865a35-1db3-45bc-8b08-baf1d9d0be9d: !Template
+ answer_choices: null
+ id: 70865a35-1db3-45bc-8b08-baf1d9d0be9d
+ jinja: '{{dialogue[1:] | join("\n\n")}}
+
+
+ What was said before this conversation?
+
+ |||
+
+ {{dialogue[0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate-first-utterance
+ reference: ''
+ 8f962580-1611-4982-b567-05939c5012ff: !Template
+ answer_choices: '{{choice | join("|||")}}'
+ id: 8f962580-1611-4982-b567-05939c5012ff
+ jinja: "Dialogue:\n\n{{dialogue | join(\"\\n\\n\")}}\n\nQuestion: {{question}}\
+ \ \n\n- {{answer_choices[0]}}\n\n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n\
+ |||\n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: baseline
+ reference: https://dataset.org/dream/
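+ # Note: unlike most templates, `answer_choices` is itself a Jinja expression
+ # here; it joins the per-example `choice` list with "|||", so the options vary
+ # from example to example rather than forming a fixed label set.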
+ d4687975-664d-46ac-b13b-482a35a61ab3: !Template
+ answer_choices: '{{choice | join("|||")}}'
+ id: d4687975-664d-46ac-b13b-482a35a61ab3
+ jinja: "Read the following conversation and answer the question.\n\n{{dialogue\
+ \ | join(\"\\n\\n\")}}\n\nQuestion: {{question}} \n\n- {{answer_choices[0]}}\n\
+ \n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n|||\n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: read_the_following_conversation_and_answer_the_question
+ reference: ''
diff --git a/promptsource/templates/drop/templates.yaml b/promptsource/templates/drop/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef3944a0730227eab26db9f653aaec7eb660b481
--- /dev/null
+++ b/promptsource/templates/drop/templates.yaml
@@ -0,0 +1,106 @@
+dataset: drop
+templates:
+ 350e0c24-b10c-4156-9053-a0b2d4af4214: !Template
+ answer_choices: null
+ id: 350e0c24-b10c-4156-9053-a0b2d4af4214
+ jinja: 'Question: {{question}}
+
+ Answer based on the following passage.
+
+
+ {{passage}}
+
+
+ Answer:
+
+ ||| {{ answers_spans.spans | join(", ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question context answer
+ reference: Reading Comprehension with KB
+ 79c0d600-8d49-4628-b1c1-d472fb762fa2: !Template
+ answer_choices: null
+ id: 79c0d600-8d49-4628-b1c1-d472fb762fa2
+ jinja: "I am trying to figure out the answer to the question, \"{{question}}\"\
+ \ and I found that the following text snippet has the answer. Can you tell me the answer?\n\
+ \n{{passage}} \n\n||| {{ answers_spans.spans | join(\", \") }}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: can you tell me
+ reference: Reading Comprehension with KB
+ ab58cc42-a558-4709-8a73-30194fcf9fa2: !Template
+ answer_choices: null
+ id: ab58cc42-a558-4709-8a73-30194fcf9fa2
+ jinja: 'Passage: {{passage}}
+
+ Question: {{question}}
+
+ Answer: ||| {{ answers_spans.spans | join(", ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: DROP GPT3
+ reference: Prompt format from GPT3 - Table G20
+ ad649b92-59ad-44a9-b328-7bbab49b104f: !Template
+ answer_choices: null
+ id: ad649b92-59ad-44a9-b328-7bbab49b104f
+ jinja: 'Generate a question from the following passage that has the answer, {{
+ answers_spans.spans | join(", ") }}
+
+ Passage : {{passage}}
+
+ Question :
+
+ |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question_with_passage_and_answer
+ reference: ''
+ e9bba528-7782-4f2b-a431-7601f8258628: !Template
+ answer_choices: null
+ id: e9bba528-7782-4f2b-a431-7601f8258628
+ jinja: 'Context: {{passage}}
+
+ I am trying to figure out the answer to the question from the above context. Can
+ you tell me the answer?
+
+ Question: {{question}}
+
+ Answer:
+
+ ||| {{ answers_spans.spans | join(", ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: context question answer
+ reference: Reading Comprehension with KB
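+ # Note: DROP answers can consist of several spans; `answers_spans.spans |
+ # join(", ")` flattens them into a single comma-separated target string.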
diff --git a/promptsource/templates/duorc/ParaphraseRC/templates.yaml b/promptsource/templates/duorc/ParaphraseRC/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..32e8f6a0a766e40bd971eb64ddcd70183a60cbaa
--- /dev/null
+++ b/promptsource/templates/duorc/ParaphraseRC/templates.yaml
@@ -0,0 +1,241 @@
+dataset: duorc
+subset: ParaphraseRC
+templates:
+ 09adcadd-fa7b-4154-91cb-fe822bf8e00e: !Template
+ answer_choices: null
+ id: 09adcadd-fa7b-4154-91cb-fe822bf8e00e
+ jinja: '{% if no_answer == false%}
+
+ Build a movie plot around this: {{ question }} {{answers|choice}}
+
+ |||
+
+ {{ plot }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: build_story_around_qa
+ reference: Given the question-answer pair, generate a relevant plot.
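+ # Note: `answers|choice` picks one gold answer at random, so the rendered
+ # prompt/target pair is not deterministic when a question has several answers;
+ # the same filter is used by the answer-producing templates below.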
+ 0c7049c0-750a-46b7-af38-dd1e9fcb5217: !Template
+ answer_choices: null
+ id: 0c7049c0-750a-46b7-af38-dd1e9fcb5217
+ jinja: 'I am trying to decide whether it''s worth it to invest in this film proposal.
+ Can you help me answer a few questions? If you can''t, please say "{{"No I can''t"}}".
+
+
+ Question: {{question}}
+
+ Movie title: {{title}}
+
+ Movie plot: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ No I can''t
+
+ {% else %}
+
+ {{answers|choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: decide_worth_it
+ reference: ''
+ 594d0551-d737-4680-a7a5-8393acc6dbb7: !Template
+ answer_choices: null
+ id: 594d0551-d737-4680-a7a5-8393acc6dbb7
+ jinja: 'Question: {{question}}
+
+ If there is no answer, please output "{{"Insufficient information to provide
+ an answer."}}".
+
+ Movie title: {{title}}
+
+ Context: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ Insufficient information to provide an answer.
+
+ {% else %}
+
+ {{answers|choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_answering
+ reference: Given a passage and a question, generate an answer.
+ 805f121a-6bd4-4803-9428-ea733f385add: !Template
+ answer_choices: null
+ id: 805f121a-6bd4-4803-9428-ea733f385add
+ jinja: 'I am a movie director and I just received the following movie plot. Could
+ you help me answer this question? If not, let me know by writing "{{"Not answerable"}}".
+
+
+ Plot title: {{title}}
+
+ Movie plot: {{plot}}
+
+ My question: {{question}}
+
+ |||
+
+ {% if no_answer %}
+
+ Not answerable
+
+ {% else %}
+
+ {{answers|choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: movie_director
+ reference: ''
+ 842e346b-2d26-43a2-9a3a-9154f04eb76a: !Template
+ answer_choices: null
+ id: 842e346b-2d26-43a2-9a3a-9154f04eb76a
+ jinja: 'Generate a question about the following movie plot: {{ plot }}
+
+ |||
+
+ {{ question }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: Given the plot, generate a question.
+ 945053f7-6ad3-4c08-b7dd-5413564f7467: !Template
+ answer_choices: null
+ id: 945053f7-6ad3-4c08-b7dd-5413564f7467
+ jinja: 'Extract the answer to the following question from the movie plot. If the
+ question isn''t answerable, please output "{{"Can''t answer"}}".
+
+ Question: {{question}}
+
+ Title: {{title}}
+
+ Movie plot: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ Can''t answer
+
+ {% else %}
+
+ {{answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: extract_answer
+ reference: ''
+ a8597645-cfed-4f54-ba0d-c23eaafaa131: !Template
+ answer_choices: null
+ id: a8597645-cfed-4f54-ba0d-c23eaafaa131
+ jinja: 'Suggest a movie title for the following movie plot: {{plot}}
+
+ |||
+
+ {{title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title_generation
+ reference: Given the plot for the movie, suggest a title.
+ c6f75398-a993-44d6-a494-78961a7dc1b7: !Template
+ answer_choices: null
+ id: c6f75398-a993-44d6-a494-78961a7dc1b7
+ jinja: 'Please answer the following question about this movie plot. If it''s unanswerable,
+ please output "{{"No answer"}}".
+
+
+ Question: {{question}}
+
+ Movie plot title: {{title}}
+
+ Movie plot: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ No answer
+
+ {% else %}
+
+ {{answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_question
+ reference: ''
+ ce3649d8-fd90-4a77-8819-4eb20b1c83a9: !Template
+ answer_choices: null
+ id: ce3649d8-fd90-4a77-8819-4eb20b1c83a9
+ jinja: "{% if no_answer == false%}\nGenerate a question that has the following\
+ \ answer: \n{{answers|choice}} \nfor the following movie plot: \n{{plot}}\n\
+ |||\n{{question}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question_by_answer
+ reference: Given the passage and the answer, generate a question which has that
+ answer.
diff --git a/promptsource/templates/duorc/SelfRC/templates.yaml b/promptsource/templates/duorc/SelfRC/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a7a949290b4b1091ed12f02ecdc8d2fcf8c0d54
--- /dev/null
+++ b/promptsource/templates/duorc/SelfRC/templates.yaml
@@ -0,0 +1,241 @@
+dataset: duorc
+subset: SelfRC
+templates:
+ 1f544641-ba15-44ef-bfcd-c951d320eb9a: !Template
+ answer_choices: null
+ id: 1f544641-ba15-44ef-bfcd-c951d320eb9a
+ jinja: "{% if no_answer == false%}\nGenerate a question that has the following\
+ \ answer: \n{{answers|choice}} \nfor the following movie plot: \n{{plot}}\n\
+ |||\n{{question}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question_by_answer
+ reference: Given the passage and the answer, generate a question which has that
+ answer.
+ 289254d0-e382-4c9d-9638-984c01fe7391: !Template
+ answer_choices: null
+ id: 289254d0-e382-4c9d-9638-984c01fe7391
+ jinja: 'I am a movie director and I just received the following movie plot. Could
+ you help me answer this question? If not, let me know by writing "{{"Not answerable"}}".
+
+
+ Plot title: {{title}}
+
+ Movie plot: {{plot}}
+
+ My question: {{question}}
+
+ |||
+
+ {% if no_answer %}
+
+ Not answerable
+
+ {% else %}
+
+ {{answers|choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: movie_director
+ reference: ''
+ 606e9fc0-d07d-45e6-a828-b786fd3a10da: !Template
+ answer_choices: null
+ id: 606e9fc0-d07d-45e6-a828-b786fd3a10da
+ jinja: 'Extract the answer to the following question from the movie plot. If the
+ question isn''t answerable, please output "{{"Can''t answer"}}".
+
+ Question: {{question}}
+
+ Title: {{title}}
+
+ Movie plot: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ Can''t answer
+
+ {% else %}
+
+ {{answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: extract_answer
+ reference: ''
+ af62f222-a8d2-439f-9586-52e0279d25cc: !Template
+ answer_choices: null
+ id: af62f222-a8d2-439f-9586-52e0279d25cc
+ jinja: 'Generate a question about the following movie plot: {{ plot }}
+
+ |||
+
+ {{ question }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: Given the plot, generate a question.
+ c1829c38-eae3-49a9-a047-f89316f58140: !Template
+ answer_choices: null
+ id: c1829c38-eae3-49a9-a047-f89316f58140
+ jinja: 'Please answer the following question about this movie plot. If it''s unanswerable,
+ please output "{{"No answer"}}".
+
+
+ Question: {{question}}
+
+ Movie plot title: {{title}}
+
+ Movie plot: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ No answer
+
+ {% else %}
+
+ {{answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_question
+ reference: ''
+ c76b38f1-b47a-4354-960d-58d2f0974d14: !Template
+ answer_choices: null
+ id: c76b38f1-b47a-4354-960d-58d2f0974d14
+ jinja: '{% if no_answer == false%}
+
+ Build a movie plot around this: {{ question }} {{answers|choice}}
+
+ |||
+
+ {{ plot }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: build_story_around_qa
+ reference: Given the question-answer pair, generate a relevant plot.
+ c917a801-28fe-4c78-93d8-8e43897aa613: !Template
+ answer_choices: null
+ id: c917a801-28fe-4c78-93d8-8e43897aa613
+ jinja: 'Question: {{question}}
+
+ If there is no answer, please output "{{"Insufficient information to provide
+ an answer."}}".
+
+ Movie title: {{title}}
+
+ Context: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ Insufficient information to provide an answer.
+
+ {% else %}
+
+ {{answers|choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_answering
+ reference: Given a passage and a question, generate an answer.
+ d486ac96-de6b-403a-8628-5adb23252194: !Template
+ answer_choices: null
+ id: d486ac96-de6b-403a-8628-5adb23252194
+ jinja: 'Suggest a movie title for the following movie plot: {{plot}}
+
+ |||
+
+ {{title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title_generation
+ reference: Given the plot for the movie, suggest a title.
+ f64279e3-dc9b-4480-9aa6-72d9d1ca2287: !Template
+ answer_choices: null
+ id: f64279e3-dc9b-4480-9aa6-72d9d1ca2287
+ jinja: 'I am trying to decide whether it''s worth it to invest in this film proposal.
+ Can you help me answer a few questions? If you can''t, please say "{{"No I can''t"}}".
+
+
+ Question: {{question}}
+
+ Movie title: {{title}}
+
+ Movie plot: {{plot}}
+
+ |||
+
+ {% if no_answer %}
+
+ No I can''t
+
+ {% else %}
+
+ {{answers|choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: decide_worth_it
+ reference: ''
diff --git a/promptsource/templates/e2e_nlg_cleaned/templates.yaml b/promptsource/templates/e2e_nlg_cleaned/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c45422970235f86e338bf7ff19ee265231603b8b
--- /dev/null
+++ b/promptsource/templates/e2e_nlg_cleaned/templates.yaml
@@ -0,0 +1,341 @@
+dataset: e2e_nlg_cleaned
+templates:
+ 0f54b6e2-42c0-45ec-8ea2-2e6204388f76: !Template
+ answer_choices: null
+ id: 0f54b6e2-42c0-45ec-8ea2-2e6204388f76
+ jinja: 'Combine all of the following data into a concise and grammatically correct
+ text:
+
+ {% for feature in meaning_representation.split("]") %} {% set key = feature.split("[")[0].replace(",","")
+ %} {% set value = feature.replace(",","").replace(key+"[", '''''''') %}
+
+ {% if value != "" %} {{key}} : {{value}} {% endif %}
+
+ {%- endfor %}
+
+ ||| {{human_reference}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: generate_grammatically_correct_text
+ reference: ''
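+ # Note (assumed input format): `meaning_representation` is a string such as
+ # "name[The Eagle], eatType[coffee shop], area[riverside]"; splitting on "]"
+ # and then on "[" recovers the key/value pairs rendered in the prompt.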
+ 14db0e7a-f7d1-4bd0-bfb3-f611be608c4a: !Template
+ answer_choices: null
+ id: 14db0e7a-f7d1-4bd0-bfb3-f611be608c4a
+ jinja: '{% set vars = {''key'':"eatType", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}} What type of restaurant is the passage talking about? |||
+ {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: eat_type_qa
+ reference: ''
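+ # Note: the `vars` dict with `vars.update(...)` works around Jinja2 loop
+ # scoping, where `{% set %}` inside `{% for %}` does not persist outside the
+ # loop; the same pattern is reused by the other *_qa prompts in this file.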
+ 18f74817-9f8c-4fd5-bc00-6e0016a40dcc: !Template
+ answer_choices: null
+ id: 18f74817-9f8c-4fd5-bc00-6e0016a40dcc
+ jinja: '{% set vars = {''key'':"food", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}}
+
+ From the passage given above, what type of food do you think is served at this
+ restaurant? ||| {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: food_qa
+ reference: ''
+ 1acabbc3-c9b9-4624-a684-29faeccff46f: !Template
+ answer_choices: null
+ id: 1acabbc3-c9b9-4624-a684-29faeccff46f
+ jinja: 'Given the following data about a restaurant:
+
+ {% for feature in meaning_representation.split("]") %} {% set key = feature.split("[")[0].replace(",","")
+ %} {% set value = feature.replace(",","").replace(key+"[", '''''''') %}
+
+ {% if value != "" %} {{key}} : {{value}} {% endif %}
+
+ {%- endfor %}
+
+ Generate some text about this restaurant. ||| {{human_reference}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: generate_text_restaurant
+ reference: ''
+ 418c7942-06e8-48a7-b5f4-9f15bb874edf: !Template
+ answer_choices: null
+ id: 418c7942-06e8-48a7-b5f4-9f15bb874edf
+ jinja: '{% set vars = {''key'':"area", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}}
+
+ From the passage given above, where is the restaurant located? |||
+ {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: area_qa
+ reference: ''
+ 51666217-46cf-4950-bf63-108ed16e074c: !Template
+ answer_choices: null
+ id: 51666217-46cf-4950-bf63-108ed16e074c
+ jinja: '{% set vars = {''key'':"familyFriendly", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}} Is the restaurant from the passage family friendly? |||
+ {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: family_friendly_yes_no
+ reference: ''
+ 6e5f3eff-fab1-4c33-a296-5ac662754e87: !Template
+ answer_choices: null
+ id: 6e5f3eff-fab1-4c33-a296-5ac662754e87
+ jinja: '{% set vars = {''key'':"near", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}}
+
+ Name one landmark that is close to the restaurant in the given passage. |||
+ {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: near_qa
+ reference: ''
+ 711bcf63-be82-4937-bdef-0c379d20bb74: !Template
+ answer_choices: null
+ id: 711bcf63-be82-4937-bdef-0c379d20bb74
+ jinja: 'How would we create an appropriate text out of the following data?
+
+ {{meaning_representation}}
+
+
+ ||| {{human_reference}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: create_text_for_me
+ reference: ''
+ 83992c17-745f-4940-b626-b01a85ba66c1: !Template
+ answer_choices: null
+ id: 83992c17-745f-4940-b626-b01a85ba66c1
+ jinja: '{% set vars = {''key'':"name", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}}
+
+ From the passage given above, what is the name of the eatery? ||| {{vars[''value'']}}
+
+ {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: name_qa
+ reference: ''
+ b67da63d-f220-4b9f-ae82-b4addf0c7573: !Template
+ answer_choices: null
+ id: b67da63d-f220-4b9f-ae82-b4addf0c7573
+ jinja: '{% set vars = {''key'':"customer rating", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}}
+
+ According to this passage, what is the rating given to this restaurant? |||
+ {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: rating_qa
+ reference: ''
+ bdecbb5a-d3e8-46f3-9ea8-22025bc59e3b: !Template
+ answer_choices: null
+ id: bdecbb5a-d3e8-46f3-9ea8-22025bc59e3b
+ jinja: 'Please generate a restaurant description from the information given below:
+
+
+ {{meaning_representation}} ||| {{human_reference}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: coherent_text
+ reference: ''
+ f9089a30-1c6c-4a43-a5eb-586b1dcd72f2: !Template
+ answer_choices: null
+ id: f9089a30-1c6c-4a43-a5eb-586b1dcd72f2
+ jinja: '{% set vars = {''key'':"priceRange", ''value'':""} %}
+
+ {% for feature in meaning_representation.split("]") if vars[''key''] in feature
+ %}
+
+ {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+ '''')}) %}
+
+ {%- endfor %}
+
+ {% if vars["value"]|length > 0 %}
+
+ {{human_reference}}
+
+ According to the passage, what is an approximate price range of the given restaurant? |||
+ {{vars[''value'']}}
+
+ {% endif %}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: price_range_qa
+ reference: ''
diff --git a/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml b/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e12360cf73d2d2034c10b23174e112c357c4365
--- /dev/null
+++ b/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml
@@ -0,0 +1,159 @@
+dataset: ecthr_cases
+subset: alleged-violation-prediction
+templates:
+ 32404ed1-2276-401f-bb93-2937d9919585: !Template
+ answer_choices: one ||| two ||| three ||| four or more
+ id: 32404ed1-2276-401f-bb93-2937d9919585
+ jinja: '{{facts | join("\n")}}
+
+
+ These facts show potential violations of articles of the European Convention
+ on Human Rights. The number of violated articles is
+
+ |||
+
+ {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: implicit_advice_number
+ reference: 'implicitly ask for number of violated articles (rather than quantity).
+ Metrics: micro-F1, refer to the paper https://arxiv.org/abs/2103.13084'
+ 3e5ba238-98ad-4d25-b84f-f226158ef8d6: !Template
+ answer_choices: one ||| two ||| three ||| several
+ id: 3e5ba238-98ad-4d25-b84f-f226158ef8d6
+ jinja: "The following facts relate to a claim brought before the European Court\
+ \ of Human Rights (ECtHR). \nQuestion: How many substantive articles in the\
+ \ European Convention on Human Rights could have been breached on these facts?\
+ \ If more than three substantive articles are breached, answer \"{{'several'}}\"\
+ .\n\n{{facts | join(\"\\n\")}}\n\nAnswer:\n|||\n{{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels\
+ \ | length] | min]]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: ecthr_alleged_articles_declaration_at_end
+ reference: 'Explicitly ask question at end. Metrics: micro-F1, refer to the paper
+ https://arxiv.org/abs/2103.13084'
+ 7a56f5cc-20b5-4543-bb20-9c616d3f36dc: !Template
+ answer_choices: one ||| two ||| three ||| several
+ id: 7a56f5cc-20b5-4543-bb20-9c616d3f36dc
+ jinja: 'Question: Have {{"one"}}, {{"two"}}, {{"three"}}, or {{"several"}} articles
+ of the European Convention on Human Rights (ECHR) been violated on the facts
+ below?
+
+
+ {{facts | join("\n")}}
+
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: ecthr_alleged_articles_question_at_start
+ reference: 'Explicitly ask question at start of prompt. Metrics: micro-F1, refer
+ to the paper https://arxiv.org/abs/2103.13084'
+ 96fb3903-c1e4-4752-8b05-5e8c1c12370a: !Template
+ answer_choices: one ||| two ||| three ||| more than three
+ id: 96fb3903-c1e4-4752-8b05-5e8c1c12370a
+ jinja: '{{facts | join("\n")}}
+
+
+ In ruling on this case I have had to carefully consider the submissions of
+ the parties. The plaintiffs allege breaches of articles of the European Convention
+ on Human Rights - the number of articles allegedly breached is
+
+ |||
+
+ {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}.'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: implicit_judgment_paragraph
+ reference: 'implicitly asking for quantity as part of a judgment. Metrics: micro-F1,
+ refer to the paper https://arxiv.org/abs/2103.13084'
+ b4fb5e6e-5e91-4f39-82ba-45dba7b71aad: !Template
+ answer_choices: yes ||| no
+ id: b4fb5e6e-5e91-4f39-82ba-45dba7b71aad
+ jinja: 'Question: Is it true that the facts in this case indicate more than two
+ articles of the European Convention on Human Rights were allegedly violated? Answer
+ "{{"yes"}}" or "{{"no"}}"
+
+
+ {{facts | join("\n")}}
+
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[{1:1 ,2:1 ,3:0 ,4:0}[[4,labels | length] | min]] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: confirm number of violated articles
+ reference: 'ask for yes/no confirmation of the number of violated articles. Metrics:
+ micro-F1, refer to the paper https://arxiv.org/abs/2103.13084'
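+ # Note: here the same clamped count is mapped onto the yes/no choices:
+ # 1 or 2 alleged articles -> index 1 ("no"), 3 or more -> index 0 ("yes").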
+ e3e9046e-c631-4cf3-b1d3-98c08d88e62e: !Template
+ answer_choices: one ||| two ||| three ||| several
+ id: e3e9046e-c631-4cf3-b1d3-98c08d88e62e
+ jinja: 'The following is an extract of facts from a judgment handed down by the
+ European Court of Human Rights.
+
+ Question: Have {{"one"}}, {{"two"}}, {{"three"}}, or {{"several"}} articles
+ of the European Convention on Human Rights (ECHR) been violated on these facts?
+
+
+ {{facts[:10] | join("\n")}}
+
+
+ {% if silver_rationales | length > 0 %}
+
+ Additionally, the court cited the following facts elsewhere in the decision
+
+ {% for loc in silver_rationales[:10] %}
+
+ {{facts[loc]}}
+
+ {% endfor %}
+
+ {% endif %}
+
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[{1:0,2:1,3:2,4:3}[[4,labels | length] | min]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: silver_rationales
+ reference: 'Metrics: micro-F1, refer to the paper https://arxiv.org/abs/2103.13084'
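+ # Note (assumed ecthr_cases schema): `silver_rationales` holds indices into
+ # `facts` for paragraphs the court itself cited; `facts[:10]` and
+ # `silver_rationales[:10]` truncate long cases to keep prompts short.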
diff --git a/promptsource/templates/emo/templates.yaml b/promptsource/templates/emo/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a697d18bf6c5452be94d4311d3bf1f0106bb614e
--- /dev/null
+++ b/promptsource/templates/emo/templates.yaml
@@ -0,0 +1,218 @@
+dataset: emo
+templates:
+ 2603600f-2d49-40f5-a8c6-05c9b38eab0c: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: 2603600f-2d49-40f5-a8c6-05c9b38eab0c
+ jinja: "Person A says something, Person B responds, and then Person A says something.\
+ \ Here's their conversation: \n\n\"{{text}}\"\n\nGiven the context and the last\
+ \ message, how would you best describe Person A's emotion - {{\"happy\"}}, {{\"\
+ sad\"}}, {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices\
+ \ [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: persons_describe
+ reference: ''
+ 4b078a4e-1a04-4401-a65e-a3d30c7d16ad: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: 4b078a4e-1a04-4401-a65e-a3d30c7d16ad
+ jinja: "Consider this textual dialogue of 3 messages between 2 participants who\
+ \ took turns to talk: \n\n\"{{text}}\"\n\nWhat would you say is the underlying\
+ \ emotion of the final message in this dialogue? {{\"happy\"}}, {{\"sad\"}},\
+ \ {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices [label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: final_message
+ reference: ''
+ 6699f3ed-cb6b-4b85-a266-cb5311bb6018: !Template
+ answer_choices: another emotion ||| happiness ||| sadness ||| anger
+ id: 6699f3ed-cb6b-4b85-a266-cb5311bb6018
+ jinja: '"{{text}}"
+
+
+ What emotion do you think was felt by the speaker of the last utterance of this
+ dialogue? {{answer_choices[1]}}, {{answer_choices[2]}}, {{answer_choices[3]}}
+ or {{answer_choices[0]}}
+
+
+ |||
+
+
+ {{answer_choices[label]}}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: what_emotion_do_you_think
+ reference: ''
+ 70f652fe-8c2b-42f4-ac1b-2026d040d80e: !Template
+ answer_choices: another state ||| happy ||| sad ||| angry
+ id: 70f652fe-8c2b-42f4-ac1b-2026d040d80e
+ jinja: "Consider this short dialogue:\n\n{{text}}\n\nHow would you describe the\
+ \ emotional state of the last person to speak? {{answer_choices[1]}}, {{answer_choices[2]}},\
+ \ {{answer_choices[3]}} or {{answer_choices[0]}} \n\n|||\n\n{{answer_choices[label]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: emotional_state
+ reference: ''
+ 8cd68ed9-60d6-4e01-a961-e1af07263646: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: 8cd68ed9-60d6-4e01-a961-e1af07263646
+ jinja: 'Dialogue between speaker A and B:
+
+
+ {{text}}
+
+
+ How would you say A feels in the last message? Is it {{answer_choices[1]}},
+ {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}}? ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: dialogue_between
+ reference: ''
+ 99f6f2b9-ed9f-42f7-b0bc-249cead1a82f: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: 99f6f2b9-ed9f-42f7-b0bc-249cead1a82f
+ jinja: "Person A says something, Person B responds, and then Person A says something.\
+ \ Here's their conversation: \n\n\"{{text}}\"\n\nWhat emotion can be inferred\
+ \ from the context and Person A's last message? {{\"happy\"}}, {{\"sad\"}},\
+ \ {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices [label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: persons_infer
+ reference: ''
+ 9a87023c-176a-4031-9ef5-92a775ca9d83: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: 9a87023c-176a-4031-9ef5-92a775ca9d83
+ jinja: "Consider this textual dialogue of 3 messages between 2 participants who\
+ \ took turns to talk: \n\n\"{{text}}\"\n\nGiven the context, what emotion can\
+ \ you detect in the message from the person who spoke last? {{\"happy\"}}, {{\"\
+ sad\"}}, {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices\
+ \ [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: spoke_last
+ reference: ''
+ f5c9ceea-40c8-4837-a631-b40b1d30f015: !Template
+ answer_choices: another feeling ||| happiness ||| sadness ||| anger
+ id: f5c9ceea-40c8-4837-a631-b40b1d30f015
+ jinja: '"{{text}}"
+
+
+ In this dialogue, what emotion did the first speaker feel in their final answer?
+ {{answer_choices[1]}}, {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}}
+
+
+ |||
+
+
+ {{answer_choices[label]}}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: feel_when_last_answer
+ reference: ''
+ f686cef0-6174-466d-b87d-672aaf5f9caa: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: f686cef0-6174-466d-b87d-672aaf5f9caa
+ jinja: '"{{text}}"
+
+ This was a dialogue between 2 people who took turns to talk. Given
+ the context, which of the following options best describes the emotion that
+ the last speaker is feeling? {{"happy"}}, {{"sad"}}, {{"angry"}}, or {{"something
+ else"}}?
+
+
+ |||
+
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: feeling
+ reference: ''
+ fcb82da7-3d47-4763-a8e5-84e559afeddf: !Template
+ answer_choices: something else ||| happy ||| sad ||| angry
+ id: fcb82da7-3d47-4763-a8e5-84e559afeddf
+ jinja: 'In the dialogue below, would you say the first speaker feels {{answer_choices[1]}},
+ {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}} when the
+ last sentence is uttered?
+
+ {{text}}
+
+
+ |||
+
+
+ {{answer_choices[label]}}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: in_this_dialogue
+ reference: ''
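As a sanity check before moving on, the file above can be exercised end to end through promptsource's public API. A minimal sketch, assuming the `emo` dataset loads with its default config and that `Template.apply` splits the rendered string on `|||` as in the current library (the template name `final_message` comes from this file):

    from datasets import load_dataset
    from promptsource.templates import DatasetTemplates

    # Pull one training example from the emo dataset (fields: text, label).
    example = load_dataset("emo", split="train")[0]

    # Load the prompts defined in emo/templates.yaml and render one of them.
    emo_prompts = DatasetTemplates("emo")
    template = emo_prompts["final_message"]
    input_text, target = template.apply(example)  # rendered prompt split on "|||"
    print(input_text)
    print(target)
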
diff --git a/promptsource/templates/emotion/templates.yaml b/promptsource/templates/emotion/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ae19b88114cbdf4daf2f4463dd2b8ec0e38b75db
--- /dev/null
+++ b/promptsource/templates/emotion/templates.yaml
@@ -0,0 +1,99 @@
+dataset: emotion
+templates:
+ 2da087fe-8cca-4f92-b19c-babccb26a510: !Template
+ answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+ id: 2da087fe-8cca-4f92-b19c-babccb26a510
+ jinja: '{{text}}. The emotion expressed in the message is
+
+ |||
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_the_best_emotion_label
+ reference: emotion is
+ 5fbc4f16-4f7f-4c82-b35d-6e68eced0c70: !Template
+ answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+ id: 5fbc4f16-4f7f-4c82-b35d-6e68eced0c70
+ jinja: '{{text}}. What is the emotion expressed in this message?
+
+ |||
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: reply_with_emotion_label
+ reference: what is present
+ 7bbb7b75-47ca-4bc6-8537-5a3be683172c: !Template
+ answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+ id: 7bbb7b75-47ca-4bc6-8537-5a3be683172c
+ jinja: "What emotion does the following message express? {{text}}\n |||\n {{ answer_choices\
+ \ [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_with_class_label
+ reference: direct basic emotions
+ a2c026ba-2b54-451b-84a0-b302f37bbabe: !Template
+ answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+ id: a2c026ba-2b54-451b-84a0-b302f37bbabe
+ jinja: '{{text}}. How does the message make you feel?
+
+ |||
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_question_with_emotion_label
+ reference: how you feel
+ b88c0f70-0362-499b-b42e-da9bd84e553c: !Template
+ answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+ id: b88c0f70-0362-499b-b42e-da9bd84e553c
+ jinja: "'Given the message: {{text}}. \n\nOut of the options, {{\"sadness\"}},\
+ \ {{\"joy\"}}, {{\"love\"}}, {{\"anger\"}}, {{\"fear\"}} and {{\"surprise\"\
+ }}, \n\nthe emotion in the message is \n|||\n{{ answer_choices [label] }}'"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: select_emotion_label_from_list
+ reference: out of six emotions
+ ef04c109-9b8d-4ea3-b8f5-646ec235c568: !Template
+ answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+ id: ef04c109-9b8d-4ea3-b8f5-646ec235c568
+ jinja: '{{text}}. What emotion does the writer express in the message?
+
+ |||
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: predict_the_best_emotion_label
+ reference: emotion of the writer
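For classification templates like these, evaluation scores the target against the rendered choice list rather than raw text. A sketch of that lookup, with the template name taken from this file and `get_answer_choices_list` assumed to behave as in the current library (render the `|||`-separated string per example, then split):

    from promptsource.templates import DatasetTemplates

    emotion_prompts = DatasetTemplates("emotion")
    template = emotion_prompts["answer_with_class_label"]

    # A toy example in the emotion dataset's schema (label 1 = joy).
    example = {"text": "i feel like a winner", "label": 1}

    # The "|||"-separated answer_choices string becomes a per-example list ...
    print(template.get_answer_choices_list(example))
    # ... and label indexes into it when the template renders the target.
    prompt, target = template.apply(example)
    print(target)  # joy
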
diff --git a/promptsource/templates/enriched_web_nlg/en/templates.yaml b/promptsource/templates/enriched_web_nlg/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e9c38a4238cde882d2f94a138587e952d35f2d4
--- /dev/null
+++ b/promptsource/templates/enriched_web_nlg/en/templates.yaml
@@ -0,0 +1,69 @@
+dataset: enriched_web_nlg
+subset: en
+templates:
+ 3860d7fb-0b50-4275-a7ab-782ae86756e5: !Template
+ answer_choices: null
+ id: 3860d7fb-0b50-4275-a7ab-782ae86756e5
+ jinja: 'Take the following triple set as part of a Data-to-Text task: {{modified_triple_sets.mtriple_set[0]
+ | join(", ")}}. Make a lexicalization of the triple set into plain text. |||
+ {{lex.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Non-explicit Description
+ reference: 'Triple set: lexicalization'
+ 5c203187-70e4-4913-86af-8b00b5ca9e16: !Template
+ answer_choices: null
+ id: 5c203187-70e4-4913-86af-8b00b5ca9e16
+ jinja: 'Verbalize the following triples separated by a comma: {{modified_triple_sets.mtriple_set[0]
+ | join(", ")}} ||| {{lex.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Verbalize + Specify Commas
+ reference: 'Instruction: verbalization'
+ 715a885b-1022-43b0-bcfe-20fa432314da: !Template
+ answer_choices: null
+ id: 715a885b-1022-43b0-bcfe-20fa432314da
+ jinja: 'Take the following graph comprising triple sets, where each element of
+ a triple is separated by "|" and each triple set by ",": {{modified_triple_sets.mtriple_set[0]
+ | join(", ")}}. Make a verbalization of the triple set into plain text. |||
+ {{lex.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Explicit Graph Description
+ reference: 'Explicit Graph Description: verbalization.'
+ e80f68dd-ebd0-4cbc-960d-bb28aff2d2d4: !Template
+ answer_choices: null
+ id: e80f68dd-ebd0-4cbc-960d-bb28aff2d2d4
+ jinja: 'Take the following Table-to-Text task comprising semantic triples (RDF
+ triples), where each element of a triple is separated by "|" and each triple
+ set by ",": {{modified_triple_sets.mtriple_set[0] | join(", ")}}. Make a verbalization
+ of the triple set into plain text, which fully and accurately describes the
+ Table. ||| {{lex.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Explicit Table-to-Text Description
+ reference: 'Explicit Table description: verbalization.'
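The `join` and `choice` filters above do the heavy lifting: `join` flattens the triple list into a comma-separated string, and `choice` samples one reference lexicalization per render. A self-contained sketch in plain Jinja, using `random.choice` as a stand-in for the filter promptsource registers, and invented field names (`triples`, `texts`) in place of the dataset's nested columns:

    import random
    from jinja2 import Environment

    env = Environment()
    env.filters["choice"] = random.choice  # stand-in for promptsource's filter

    source = (
        'Verbalize the following triples separated by a comma: '
        '{{ triples | join(", ") }} ||| {{ texts | choice }}'
    )
    template = env.from_string(source)
    print(template.render(
        triples=["Aarhus_Airport | cityServed | Aarhus"],
        texts=["Aarhus Airport serves the city of Aarhus."],
    ))
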
diff --git a/promptsource/templates/esnli/templates.yaml b/promptsource/templates/esnli/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..7d1f2ca618e115f46b8e879e8be4f1fb2e65f7f0
--- /dev/null
+++ b/promptsource/templates/esnli/templates.yaml
@@ -0,0 +1,201 @@
+dataset: esnli
+templates:
+ 16206cff-dd01-408e-b0b4-daabb750b38b: !Template
+ answer_choices: '{{explanation_1}} ||| {{explanation_2}} ||| {{explanation_3}}'
+ id: 16206cff-dd01-408e-b0b4-daabb750b38b
+ jinja: 'Explain why the relation between the following two sentences can be described
+ as {{ ["an entailment", "neutral", "a contradiction"][label] }}.
+
+
+ Sentence 1: {{premise}}
+
+
+ Sentence 2: {{hypothesis}}
+
+ |||
+
+ {{ answer_choices |select("!=","") |list |choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: explain_the_choice
+ reference: ''
+ 3d077f02-bba0-4019-bacd-bf3e7119b1f7: !Template
+ answer_choices: '{{ explanation_1 }} ||| {{ explanation_2 }} ||| {{ explanation_3
+ }}'
+ id: 3d077f02-bba0-4019-bacd-bf3e7119b1f7
+ jinja: "{% if label == 0 %} \nWhy does the first sentence entail the second?\n\
+ {% elif label == 1 %}\nWhy does the first sentence neither entail nor contradict\
+ \ the second?\n{% else %}\nWhy does the first sentence contradict the second?\n\
+ {% endif%}\n\nFirst sentence: \n{{premise}} \n\nSecond sentence:\n{{hypothesis}}\n\
+ |||\n{{ answer_choices |select(\"!=\",\"\") |list |choice }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: elaborate_on_the_choice
+ reference: ''
+ 3e84960e-99db-42bf-9370-50858a92318c: !Template
+ answer_choices: '{{explanation_1}} ||| {{explanation_2}} ||| {{explanation_3}}'
+ id: 3e84960e-99db-42bf-9370-50858a92318c
+ jinja: "Given than:\n{{ premise }}\n{% if label == 0 %} \nWhy is it always true\
+ \ that:\n{% elif label == 1 %}\nWhy it cannot be concluded that:\n{% else %}\n\
+ Why is it necessarily false that:\n{% endif%}\n{{ hypothesis }}\n|||\n{{ answer_choices\
+ \ |select(\"!=\",\"\") |list |choice }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: provide_explanation_to_the_choice
+ reference: ''
+ 643d3444-99f8-4a02-8d76-12a9b719edea: !Template
+ answer_choices: entails ||| neutral ||| contradicts
+ id: 643d3444-99f8-4a02-8d76-12a9b719edea
+ jinja: 'First sentence: {{ premise }}
+
+
+ Second sentence: {{ hypothesis }}
+
+
+ Determine whether the first sentence {{ "entails" }}, {{ "contradicts" }} or
+ is {{ "neutral" }} with regard to the second.
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: determine_premise_hypothesis_relation
+ reference: ''
+ 70728515-9e3e-4b04-b57c-ee1b30716547: !Template
+ answer_choices: entailment ||| neutral ||| contradiction
+ id: 70728515-9e3e-4b04-b57c-ee1b30716547
+ jinja: 'Premise: {{ premise }}
+
+
+ Hypothesis: {{ hypothesis }}
+
+
+ Name the relation between the premise and the hypothesis above. Select the correct
+ option: {{ "entailment" }}, {{ "contradiction" }} or {{ "neutral" }}.
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: premise_hypothesis_relation
+ reference: ''
+ 89b174c7-f4d6-442c-8ac2-10c51595770e: !Template
+ answer_choices: entails ||| neutral ||| contradicts
+ id: 89b174c7-f4d6-442c-8ac2-10c51595770e
+ jinja: 'Determine whether the first sentence {{ "entails" }}, {{ "contradicts"
+ }} or is {{ "neutral" }} with regard to the second.
+
+
+ First sentence: {{ premise }}
+
+
+ Second sentence: {{ hypothesis }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_the_correct_class
+ reference: ''
+ c102663b-3472-42b5-b633-71f7abd6a457: !Template
+ answer_choices: entailment ||| neutral ||| contradiction
+ id: c102663b-3472-42b5-b633-71f7abd6a457
+ jinja: 'Describe the relation between the following two sentences. The choices
+ are {{ "entailment" }}, {{ "contradiction" }} and {{ "neutral" }}.
+
+
+ First sentence: {{premise}}
+
+
+ Second sentence: {{hypothesis}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: determine_the_relation
+ reference: ''
+ c6cce628-8e69-418b-8676-deae7a782e7f: !Template
+ answer_choices: Yes ||| No ||| No
+ id: c6cce628-8e69-418b-8676-deae7a782e7f
+ jinja: "Does this statement: \n\n{{ premise }} \n\nimply that: \n\n{{ hypothesis\
+ \ }}?\n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: does_premise_imply_hypothesis
+ reference: ''
+ ef034633-d4d9-47b8-9152-b025b1d61e5b: !Template
+ answer_choices: No ||| No ||| Yes
+ id: ef034633-d4d9-47b8-9152-b025b1d61e5b
+ jinja: "First statement: \n{{ premise }}\n\nSecond statement: \n{{ hypothesis\
+ \ }}\n\nDoes the first statement contradict the second?\n|||\n{{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: does_premise_contradict_hypothesis
+ reference: ''
+ f64d6196-370b-4501-acb5-e11a5ebf0c5e: !Template
+ answer_choices: '{{explanation_1}} ||| {{explanation_2}} ||| {{explanation_3}}'
+ id: f64d6196-370b-4501-acb5-e11a5ebf0c5e
+ jinja: "If we know that:\n{{premise}}\n{% if label == 0 %} \nWhy is it always\
+ \ the case that:\n{% elif label == 1 %}\nWhy is it not necessarily the case\
+ \ that:\n{% else %}\nWhy is it not true that:\n{% endif%}\n{{hypothesis}}\n\
+ |||\n{{ answer_choices |select(\"!=\",\"\") |list |choice }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: relate_premise_to_hypothesis_with_explanation
+ reference: ''
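One detail worth noting in the generation templates above: e-SNLI only guarantees `explanation_1` on the training split, so the `select("!=","")` step drops empty slots before `choice` samples a target. A minimal reproduction in plain Jinja, again with `random.choice` standing in for the library's filter:

    import random
    from jinja2 import Environment

    env = Environment()
    env.filters["choice"] = random.choice

    # Without the select step, |choice could return one of the empty strings.
    source = '{{ explanations | select("!=", "") | list | choice }}'
    template = env.from_string(source)
    print(template.render(explanations=["the dog is an animal", "", ""]))
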
diff --git a/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml b/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..973f5cd8cd6d9072de48fe43d6b1840d8de1f5ed
--- /dev/null
+++ b/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml
@@ -0,0 +1,125 @@
+dataset: evidence_infer_treatment
+subset: '1.1'
+templates:
+ 6148ddc8-b722-4d4f-8498-ff36c45803d6: !Template
+ answer_choices: null
+ id: 6148ddc8-b722-4d4f-8498-ff36c45803d6
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ The following text snippets contain important information:\n\n{{Text[:1200]}}\
+ \ \n\n{{Text[-300:]}}\n\nThe relevant annotations are:\n\n{{ sub_sub_annotation.pop()\
+ \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+ \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nRegarding the following comparator\n\n{{Prompts.Comparator[specific_sub_annotation]}},\n\
+ \nthe intervention was\n\n{% endif %}\n\n|||\n\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Identify intervention
+ reference: ''
+ 9ea1dca5-1867-48f6-9a0f-1c55b19c4606: !Template
+ answer_choices: null
+ id: 9ea1dca5-1867-48f6-9a0f-1c55b19c4606
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ The first text snippet that is important to understand is:\n\n{{Text[:1200]}}\
+ \ \n\nthe second text snippet is:\n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\
+ \n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+ \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nRegarding the intervention\n\n{{Prompts.Intervention[specific_sub_annotation]}}\n\
+ \nwith the outcome\n\n{{Prompts.Outcome[specific_sub_annotation]}},\n\nthe comparator\
+ \ was:\n\n{% endif %}\n\n|||\n\n{{Prompts.Comparator[specific_sub_annotation]}}."
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Identify comparator
+ reference: ''
+ bf430e30-a6a4-4bc0-a304-bbc1a06e23fd: !Template
+ answer_choices: significantly increased ||| significantly decreased ||| no significant
+ difference
+ id: bf430e30-a6a4-4bc0-a304-bbc1a06e23fd
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ The information required to understand the outcome is below:\n\n{{Text[:1200]}}\
+ \ \n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\n{{ sub_sub_annotation.pop()\
+ \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+ \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nConsider the intervention\n\n{{Prompts.Intervention[specific_sub_annotation]}}\n\
+ \nwith respect to the comparator\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\
+ \nThe outcome\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\nis either {{\"\
+ significantly increased\"}}, {{\"significantly decreased\"}} or {{\"no significant\
+ \ difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\n{% if sub_annotation_length\
+ \ > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+ \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Classify outcome with all info
+ reference: Template with the task definition
+ d5fea159-0593-4e99-bb3d-27e5ff1411f9: !Template
+ answer_choices: significantly increased ||| significantly decreased ||| no significant
+ difference
+ id: d5fea159-0593-4e99-bb3d-27e5ff1411f9
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ Read the following text:\n\n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n\
+ {{ sub_sub_annotation.append(range(0, sub_annotation_length) | choice) | replace(None,\
+ \ \"\") }}\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nConsider the intervention\n\
+ \n{{Prompts.Intervention[specific_sub_annotation]}}\n\nwith respect to the comparator\n\
+ \n{{Prompts.Comparator[specific_sub_annotation]}}.\n\nThe outcome\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\
+ \nis either {{\"significantly increased\"}}, {{\"significantly decreased\"}}\
+ \ or {{\"no significant difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\
+ \n{% if sub_annotation_length > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+ \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Classify outcome
+ reference: ''
+ fed6ea12-8b97-491b-8741-b05d662454de: !Template
+ answer_choices: null
+ id: fed6ea12-8b97-491b-8741-b05d662454de
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ {{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+ \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\nAfter reading\
+ \ the following text:\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nThe relevant\
+ \ annotations:\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nRegarding the comparator\n\n{{Prompts.Comparator[specific_sub_annotation]}}\n\
+ \nand the intervention\n\n{{Prompts.Intervention[specific_sub_annotation]}},\n\
+ \nthe outcome was\n\n{% endif %}\n\n|||\n\n{{Prompts.Outcome[specific_sub_annotation]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Identify outcome
+ reference: ''
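The `pop`/`append` dance in these templates is a known Jinja workaround: `{% set %}` cannot mutate a variable across blocks, so the sampled annotation index is smuggled through a one-element list, and the `replace(...)` filters only mute the values that `pop()` and `append()` would otherwise print. A stripped-down sketch of the trick:

    import random
    from jinja2 import Environment

    env = Environment()
    env.filters["choice"] = random.choice  # stand-in for promptsource's filter

    source = (
        '{% set idx = [0] %}'
        '{{ idx.pop() | replace(0, "") }}'  # discard placeholder, print nothing
        '{{ idx.append(range(0, n) | choice) | replace(None, "") }}'
        'chosen index: {{ idx[0] }}'
    )
    print(env.from_string(source).render(n=5))
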
diff --git a/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml b/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..694224cf337aae3bcbdddc5c7a5f336dec7ea394
--- /dev/null
+++ b/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml
@@ -0,0 +1,125 @@
+dataset: evidence_infer_treatment
+subset: '2.0'
+templates:
+ 55659a3c-4fce-42dd-a925-df6242fc84fa: !Template
+ answer_choices: null
+ id: 55659a3c-4fce-42dd-a925-df6242fc84fa
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ The following text snippets contain important information:\n\n{{Text[:1200]}}\
+ \ \n\n{{Text[-300:]}}\n\nThe relevant annotations are:\n\n{{ sub_sub_annotation.pop()\
+ \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+ \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nRegarding the following comparator\n\n{{Prompts.Comparator[specific_sub_annotation]}},\n\
+ \nthe intervention was\n\n{% endif %}\n\n|||\n\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Identify intervention
+ reference: ''
+ 613e3e1b-2646-4a55-8356-584386a8f0b8: !Template
+ answer_choices: null
+ id: 613e3e1b-2646-4a55-8356-584386a8f0b8
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ The first text snippet that is important to understand is:\n\n{{Text[:1200]}}\
+ \ \n\nthe second text snippet is:\n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\
+ \n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+ \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nRegarding the intervention\n\n{{Prompts.Intervention[specific_sub_annotation]}}\n\
+ \nwith the outcome\n\n{{Prompts.Outcome[specific_sub_annotation]}},\n\nthe comparator\
+ \ was:\n\n{% endif %}\n\n|||\n\n{{Prompts.Comparator[specific_sub_annotation]}}."
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Identify comparator
+ reference: ''
+ 97de966b-c753-4856-b5ff-a69d8002e369: !Template
+ answer_choices: significantly increased ||| significantly decreased ||| no significant
+ difference
+ id: 97de966b-c753-4856-b5ff-a69d8002e369
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ Read the following text:\n\n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n\
+ {{ sub_sub_annotation.append(range(0, sub_annotation_length) | choice) | replace(None,\
+ \ \"\") }}\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nConsider the intervention\n\
+ \n{{Prompts.Intervention[specific_sub_annotation]}}\n\nwith respect to the comparator\n\
+ \n{{Prompts.Comparator[specific_sub_annotation]}}.\n\nThe outcome\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\
+ \nis either {{\"significantly increased\"}}, {{\"significantly decreased\"}}\
+ \ or {{\"no significant difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\
+ \n{% if sub_annotation_length > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+ \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Classify outcome
+ reference: ''
+ dbdf04d6-7447-48c8-8239-9a0e634bf444: !Template
+ answer_choices: significantly increased ||| significantly decreased ||| no significant
+ difference
+ id: dbdf04d6-7447-48c8-8239-9a0e634bf444
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ The information required to understand the outcome is below:\n\n{{Text[:1200]}}\
+ \ \n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\n{{ sub_sub_annotation.pop()\
+ \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+ \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nConsider the intervention\n\n{{Prompts.Intervention[specific_sub_annotation]}}\n\
+ \nwith respect to the comparator\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\
+ \nThe outcome\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\nis either {{\"\
+ significantly increased\"}}, {{\"significantly decreased\"}} or {{\"no significant\
+ \ difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\n{% if sub_annotation_length\
+ \ > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+ \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Classify outcome with all info
+ reference: Template with the task definition
+ f923b2a5-3a12-4104-900a-1b5343bb6017: !Template
+ answer_choices: null
+ id: f923b2a5-3a12-4104-900a-1b5343bb6017
+ jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+ \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+ \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+ {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+ {{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+ \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\nAfter reading\
+ \ the following text:\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nThe relevant\
+ \ annotations:\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+ \nRegarding the comparator\n\n{{Prompts.Comparator[specific_sub_annotation]}}\n\
+ \nand the intervention\n\n{{Prompts.Intervention[specific_sub_annotation]}},\n\
+ \nthe outcome was\n\n{% endif %}\n\n|||\n\n{{Prompts.Outcome[specific_sub_annotation]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Identify outcome
+ reference: ''
diff --git a/promptsource/templates/fever/v1.0/templates.yaml b/promptsource/templates/fever/v1.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d25b27a3b82450ec6a7a7b3ffa37f8568b96e0ba
--- /dev/null
+++ b/promptsource/templates/fever/v1.0/templates.yaml
@@ -0,0 +1,79 @@
+dataset: fever
+subset: v1.0
+templates:
+ 0870481e-e5d1-43a1-821e-b11c6bfd2483: !Template
+ answer_choices: Yes|||No|||Not sure
+ id: 0870481e-e5d1-43a1-821e-b11c6bfd2483
+ jinja: "{{claim}} Is this true?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\
+ : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Not sure\"\n}[label]\n\
+ }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_postprompt
+ reference: CBQA fever, prompt after claim
+ 51c55af8-1996-4cb2-88a1-ca7ddb8f9e11: !Template
+ answer_choices: Yes|||No|||Not Sure
+ id: 51c55af8-1996-4cb2-88a1-ca7ddb8f9e11
+ jinja: "I've heard that {{claim}} Is this correct? Yes, No or Not Sure?\n|||\n\
+ {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\
+ \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_dialog_style_surrounded_all_class
+ reference: CBQA fever, like a conversation, with prompts surrounding claim, all
+ class included.
+ 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdf: !Template
+ answer_choices: Yes|||No|||Unsure
+ id: 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdf
+ jinja: "Is this statement correct? {{claim}} ||| \n{% if label != \"\" %}\n{{\n\
+ {\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Unsure\"\
+ \n}[label]\n}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_preprompt
+ reference: Closed-book QA from only the claim, prompt before the content
+ 948f41ab-e6bb-4de6-af3e-7f0b5d5f39a8: !Template
+ answer_choices: Yes|||No|||Maybe
+ id: 948f41ab-e6bb-4de6-af3e-7f0b5d5f39a8
+ jinja: "\"{{claim}}\" Yes, no, maybe?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\
+ : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Maybe\"\n}[label]\n\
+ }}\n{% endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_short
+ reference: CBQA fever, minimal
+ b1d8f035-c3af-41a8-b0b8-1604f9dc00ff: !Template
+ answer_choices: Yes|||No|||Not Sure
+ id: b1d8f035-c3af-41a8-b0b8-1604f9dc00ff
+ jinja: "\"{{claim}}\", I have heard. Is this Correct? Yes, No or Not Sure?\n|||\n\
+ {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\
+ \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_dialog_style_postprompt_all_class
+ reference: CBQA fever, like a conversation, prompt after output. Includes 3 class.
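FEVER's label column is a string, so these templates map it through an inline Jinja dict instead of `answer_choices[label]`, and the `label != ""` guard keeps unlabeled test examples from producing a lookup error. A runnable sketch of that pattern in plain Jinja, with a placeholder claim:

    from jinja2 import Environment

    source = (
        '{{ claim }} Is this true?\n|||\n'
        '{% if label != "" %}'
        '{{ {"SUPPORTS": "Yes", "REFUTES": "No", "NOT ENOUGH INFO": "Not sure"}[label] }}'
        '{% endif %}'
    )
    template = Environment().from_string(source)
    print(template.render(claim="The claim under review.", label="SUPPORTS"))
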
diff --git a/promptsource/templates/fever/v2.0/templates.yaml b/promptsource/templates/fever/v2.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f623b64e80196e929a4d3048ff6e2fdc57110ec8
--- /dev/null
+++ b/promptsource/templates/fever/v2.0/templates.yaml
@@ -0,0 +1,79 @@
+dataset: fever
+subset: v2.0
+templates:
+ 0870481e-e5d1-43a1-821e-b11c6bfd248a: !Template
+ answer_choices: Yes|||No|||Not sure
+ id: 0870481e-e5d1-43a1-821e-b11c6bfd248a
+ jinja: "{{claim}} Is this true?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\
+ : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Not sure\"\n}[label]\n\
+ }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_postprompt
+ reference: CBQA fever, prompt after claim
+ 51c55af8-1996-4cb2-88a1-ca7ddb8f9e1b: !Template
+ answer_choices: Yes|||No|||Not Sure
+ id: 51c55af8-1996-4cb2-88a1-ca7ddb8f9e1b
+ jinja: "I've heard that {{claim}} Is this correct? Yes, No or Not Sure?\n|||\n\
+ {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\
+ \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_dialog_style_surrounded_all_class
+ reference: CBQA fever, like a conversation, with prompts surrounding claim, all
+ class included.
+ 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdc: !Template
+ answer_choices: Yes|||No|||Unsure
+ id: 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdc
+ jinja: "Is this statement correct? {{claim}} ||| \n{% if label != \"\" %}\n{{\n\
+ {\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Unsure\"\
+ \n}[label]\n}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_preprompt
+ reference: Closed-book QA from only the claim, prompt before the content
+ 948f41ab-e6bb-4de6-af3e-7f0b5d5f39ad: !Template
+ answer_choices: Yes|||No|||Maybe
+ id: 948f41ab-e6bb-4de6-af3e-7f0b5d5f39ad
+ jinja: "\"{{claim}}\" Yes, no, maybe?\n|||\n{% if label != \"\" %}\n{{\n{\"SUPPORTS\"\
+ : \"Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"Maybe\"\n}[label]\n\
+ }}\n{% endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_short
+ reference: CBQA fever, minimal
+ b1d8f035-c3af-41a8-b0b8-1604f9dc00fe: !Template
+ answer_choices: Yes|||No|||Not Sure
+ id: b1d8f035-c3af-41a8-b0b8-1604f9dc00fe
+ jinja: "\"{{claim}}\", I have heard. Is this Correct? Yes, No or Not Sure?\n|||\n\
+ {% if label != \"\" %}\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\": \"No\",\n\
+ \"NOT ENOUGH INFO\": \"Not Sure\"\n}[label]\n}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: cbqa_fever_dialog_style_postprompt_all_class
+ reference: CBQA fever, like a conversation, prompt after output. Includes 3 class.
diff --git a/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml b/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1804b02ac9da084f78c511e0c78b2cd948c8a12a
--- /dev/null
+++ b/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml
@@ -0,0 +1,182 @@
+dataset: financial_phrasebank
+subset: sentences_allagree
+templates:
+ 06719321-62e7-4f6e-8f95-464cd2b5ca5c: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: 06719321-62e7-4f6e-8f95-464cd2b5ca5c
+ jinja: 'Which word among "{{"negative"}}", "{{"neutral"}}", "{{"positive"}}"
+ would you use to describe the effect of the following news on the related share
+ prices?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: share_price_option
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ 0beba048-f949-4034-83b6-a3e0e7363f46: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: 0beba048-f949-4034-83b6-a3e0e7363f46
+ jinja: 'What is the sentiment of the sentence?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: sentiment
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ 1af36463-8ed9-4574-9157-f029960e1d5e: !Template
+ answer_choices: growth ||| neutral ||| decline
+ id: 1af36463-8ed9-4574-9157-f029960e1d5e
+ jinja: 'What word among "{{"growth"}}", "{{"neutral"}}", "{{"decline"}}" comes
+ to your mind when reading the following argument?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: word_comes_to_mind
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ 461efe04-6883-41e8-80f0-e722a75260fe: !Template
+ answer_choices: growth ||| neutral ||| decline
+ id: 461efe04-6883-41e8-80f0-e722a75260fe
+ jinja: 'What does the following argument mean for complementary industries?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: complementary_industries
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ 5fa16d31-b513-480d-bd1b-1fa8c182fb76: !Template
+ answer_choices: bearish ||| neutral ||| bullish
+ id: 5fa16d31-b513-480d-bd1b-1fa8c182fb76
+ jinja: 'Should an investor be "{{"bullish"}}", "{{"neutral"}}", or "{{"bearish"}}"
+ given the following news?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: bullish_neutral_bearish
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ 612be728-b6c8-4636-80b6-8aaa7593a2e1: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: 612be728-b6c8-4636-80b6-8aaa7593a2e1
+ jinja: 'Is the sentiment of the sentence "{{"negative"}}", "{{"neutral"}}", or
+ "{{"positive"}}"?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: sentiment_option
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ b7364738-258d-4b81-b322-b9876b6fd93d: !Template
+ answer_choices: growth ||| neutral ||| decline
+ id: b7364738-258d-4b81-b322-b9876b6fd93d
+ jinja: 'What word among "{{"growth"}}", "{{"neutral"}}", "{{"decline"}}" would
+ you use to describe the effect of the following argument on complementary industries?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: complementary_industries_option
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ b85d62c9-a34a-4da2-836e-a0aadbe48291: !Template
+ answer_choices: negative effect ||| neutral effect ||| positive effect
+ id: b85d62c9-a34a-4da2-836e-a0aadbe48291
+ jinja: 'What adjective would you use to describe the effect of the following news
+ on the related share prices?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: share_price
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
+ e0650133-befc-4aeb-92e1-2f8d6a0200b3: !Template
+ answer_choices: growth ||| neutral ||| decline
+ id: e0650133-befc-4aeb-92e1-2f8d6a0200b3
+ jinja: 'What word would you use to describe the effect of the following argument
+ on the local economy?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: local_economy
+ reference: 'Metrics: Accuracy, Precision, Recall, F1 per class'
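Every reference field above spells out the intended scoring: accuracy plus per-class precision, recall, and F1 (the `Other` metric tag). A sketch of that computation with scikit-learn, on toy gold/predicted label ids (0 = negative, 1 = neutral, 2 = positive; not real predictions):

    from sklearn.metrics import accuracy_score, precision_recall_fscore_support

    gold = [0, 1, 2, 1, 2, 0]  # toy labels
    pred = [0, 1, 2, 2, 2, 1]

    print("accuracy:", accuracy_score(gold, pred))
    precision, recall, f1, _ = precision_recall_fscore_support(
        gold, pred, labels=[0, 1, 2], zero_division=0
    )
    for name, p, r, f in zip(["negative", "neutral", "positive"], precision, recall, f1):
        print(f"{name}: precision={p:.2f} recall={r:.2f} f1={f:.2f}")
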
diff --git a/promptsource/templates/freebase_qa/templates.yaml b/promptsource/templates/freebase_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..90ffcaa590636a7e29b25e73da27a2512fda6732
--- /dev/null
+++ b/promptsource/templates/freebase_qa/templates.yaml
@@ -0,0 +1,95 @@
+dataset: freebase_qa
+templates:
+ 02b12d5c-a481-494d-84ea-a210eefa66d2: !Template
+ answer_choices: null
+ id: 02b12d5c-a481-494d-84ea-a210eefa66d2
+ jinja: "{% set question_context = Parses.TopicEntityName | choice %}\n{% set inference_context\
+ \ = Parses.InferentialChain | first %}\n\nThe topic of this question is: {{question_context.split(\"\
+ \ \") | map(\"capitalize\") | join(\" \")}}.\n\nThe answer to this question\
+ \ should be in the following category: {{ inference_context.split(\".\") | last\
+ \ | capitalize | replace(\"_\", \" \")}}\n\nUsing this, answer the following\
+ \ question:\n\n{{RawQuestion}}\n||| \n{% set answer = Parses.Answers | choice\
+ \ %}\n{{answer.AnswersName[0][0].split(\" \") | map(\"capitalize\") | join(\"\
+ \ \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_context_2
+ reference: qa prompt with topic and inference chain provided
+ 1d583b71-7ef1-49df-b252-e8e1d6910129: !Template
+ answer_choices: null
+ id: 1d583b71-7ef1-49df-b252-e8e1d6910129
+ jinja: 'What category best describes the answer to the following question?
+
+
+ Question: {{RawQuestion}}
+
+ |||
+
+ {% set answer = Parses.InferentialChain | first %}
+
+ {{ answer.split(".") | last | capitalize | replace("_", " ")}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: inference_chain_prompt
+ reference: predicting the inference chain given just the question
+ 1fd7e73c-92ac-4e33-be33-80775cbb14df: !Template
+ answer_choices: null
+ id: 1fd7e73c-92ac-4e33-be33-80775cbb14df
+ jinja: "Answer the following question:\n\n{{RawQuestion}}\n||| \n{% set answer\
+ \ = Parses.Answers | choice %}\n{{answer.AnswersName[0][0].split(\" \") | map(\"\
+ capitalize\") | join(\" \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_template_basic
+ reference: basic question/answer format
+ 30ff02f4-3673-4ea6-a3e0-0df0cf19b021: !Template
+ answer_choices: null
+ id: 30ff02f4-3673-4ea6-a3e0-0df0cf19b021
+ jinja: "{% set context = Parses.TopicEntityName | choice %}\nThe topic of this\
+ \ question is: {{context.split(\" \") | map(\"capitalize\") | join(\" \")}}.\n\
+ \nWith that context, answer the following question:\n\n{{RawQuestion}}\n|||\
+ \ \n{% set answer = Parses.Answers | choice %}\n{{answer.AnswersName[0][0].split(\"\
+ \ \") | map(\"capitalize\") | join(\" \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_context_1
+ reference: qa question with simple entity context
+ dbf762f0-2daa-4cc4-af67-ba72aa2c1991: !Template
+ answer_choices: null
+ id: dbf762f0-2daa-4cc4-af67-ba72aa2c1991
+ jinja: "{% set answer = Parses.Answers | choice %}\nFor the following question:\n\
+ \n\"{{RawQuestion}}\" \n\nWhat word or phrase best describes its answer, \"\
+ {{answer.AnswersName[0][0].split(\" \") | map(\"capitalize\") | join(\" \")\
+ \ }}\"? \n||| \n{% set a = Parses.InferentialChain | first %}\n{{ a.split(\"\
+ .\") | last | capitalize | replace(\"_\", \" \")}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: inference_chain_prompt_context
+ reference: determine the inference chain between question and answer
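The targets above title-case the raw answer strings with `split(" ") | map("capitalize") | join(" ")`, presumably because FreebaseQA stores them lowercased. The idiom works in plain Jinja too; note that `capitalize` also lowercases the tail of each word, so acronyms would come out mangled:

    from jinja2 import Environment

    source = '{{ name.split(" ") | map("capitalize") | join(" ") }}'
    template = Environment().from_string(source)

    print(template.render(name="jimi hendrix"))   # Jimi Hendrix
    print(template.render(name="nasa missions"))  # Nasa Missions (acronym mangled)
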
diff --git a/promptsource/templates/generated_reviews_enth/templates.yaml b/promptsource/templates/generated_reviews_enth/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..31c85b55a616bbd54a2b884d22e57937b6d56676
--- /dev/null
+++ b/promptsource/templates/generated_reviews_enth/templates.yaml
@@ -0,0 +1,77 @@
+dataset: generated_reviews_enth
+templates:
+ 7f158fb6-bbdd-41b8-bed7-21508c9f3c80: !Template
+ answer_choices: no ||| yes
+ id: 7f158fb6-bbdd-41b8-bed7-21508c9f3c80
+ jinja: Does "{{translation.en}}" seem like a positive review to you? ||| {{answer_choices[0
+ if review_star < 3 else 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - AUC
+ - Accuracy
+ original_task: true
+ name: seem like a positive review
+ reference: stsb_multi_mt_en
+ 95136948-3402-4bd4-8a69-1aa7b85461cc: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 95136948-3402-4bd4-8a69-1aa7b85461cc
+ jinja: "Rate the positivity of this review ({{\"1\"}} being the lowest and {{\"\
+ 5\"}} the highest).\n\"{{translation.en}}\" ||| {{review_star}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - AUC
+ - Accuracy
+ original_task: true
+ name: rate positive review
+ reference: stsb_multi_mt
+ ad12212f-a230-4750-a199-9791628856c4: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: ad12212f-a230-4750-a199-9791628856c4
+ jinja: "How positive is the review \"{{translation.en}}\"? Give a score between\n\
+ \ {{\"0\"}} and {{\"5\"}}. ||| {{review_star}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - AUC
+ original_task: true
+ name: how positive review
+ reference: stsb_multi_mt_en
+ cf8f4dcb-f527-4944-b9ec-a1a3e476c13f: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: cf8f4dcb-f527-4944-b9ec-a1a3e476c13f
+ jinja: On a scale from {{"1"}} to {{"5"}}, how positive is the review "{{translation.en}}"?
+ ||| {{review_star}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - AUC
+ - Accuracy
+ original_task: true
+ name: scale of positive review
+ reference: stsb_multi_mt_en
+ e6c55d56-23d4-41a4-9908-e9366cc2e167: !Template
+ answer_choices: no ||| yes
+ id: e6c55d56-23d4-41a4-9908-e9366cc2e167
+ jinja: Do you think "{{translation.en}}" is a positive review? ||| {{answer_choices[0
+ if review_star < 3 else 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - AUC
+ - Accuracy
+ original_task: true
+ name: think positive review
+ reference: stsb_multi_mt_en
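The yes/no templates here binarize the star rating inside the index expression itself rather than with an `{% if %}` block. A sketch in plain Jinja (field names from the dataset; the threshold of 3 is taken from the templates, and the review text is a placeholder):

    from jinja2 import Environment

    source = (
        'Does "{{ translation.en }}" seem like a positive review to you? '
        '||| {{ answer_choices[0 if review_star < 3 else 1] }}'
    )
    template = Environment().from_string(source)
    print(template.render(
        translation={"en": "Great product, arrived on time."},
        answer_choices=["no", "yes"],
        review_star=5,
    ))  # yes
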
diff --git a/promptsource/templates/gigaword/templates.yaml b/promptsource/templates/gigaword/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2721b4dbbc3d4dcb091910488d4b6472ba7f855
--- /dev/null
+++ b/promptsource/templates/gigaword/templates.yaml
@@ -0,0 +1,158 @@
+dataset: gigaword
+templates:
+ 0a45ae54-4585-4d13-9540-890125d614e0: !Template
+ answer_choices: null
+ id: 0a45ae54-4585-4d13-9540-890125d614e0
+ jinja: '{{document}}
+
+
+ ===
+
+
+ Generate a title for this article: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: generate_summary_for_this
+ reference: ''
+ 2558932f-894a-41ef-be34-32a5afb1f5d8: !Template
+ answer_choices: null
+ id: 2558932f-894a-41ef-be34-32a5afb1f5d8
+ jinja: 'Title: {{summary}}
+
+
+ ||| {{document}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: reverse_writing
+ reference: ''
+ 696e561e-1311-4a3e-9ca1-51d1fd77392b: !Template
+ answer_choices: null
+ id: 696e561e-1311-4a3e-9ca1-51d1fd77392b
+ jinja: 'Make a title for this article: {{document}} |||
+
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: make_a_title
+ reference: ''
+ 7ad1a48a-195d-4c0c-aea5-df0689589f27: !Template
+ answer_choices: null
+ id: 7ad1a48a-195d-4c0c-aea5-df0689589f27
+ jinja: 'First sentence of the article: {{document}}
+
+
+ Title: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: first_sentence_title
+ reference: ''
+ 90c34acf-2f42-4e66-98dc-7453f7e60e60: !Template
+ answer_choices: null
+ id: 90c34acf-2f42-4e66-98dc-7453f7e60e60
+ jinja: '{{document}}
+
+
+ TL;DR: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: TLDR
+ reference: GPT-2 TLDR
+ 93f0c400-501f-43ad-861b-4f67564f2e8f: !Template
+ answer_choices: null
+ id: 93f0c400-501f-43ad-861b-4f67564f2e8f
+ jinja: '{{document}}
+
+
+ ===
+
+
+ Given the above sentence, write its title: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: write_its_title
+ reference: ''
+ a0e699bf-1268-4929-ad13-438c08644118: !Template
+ answer_choices: null
+ id: a0e699bf-1268-4929-ad13-438c08644118
+ jinja: "Write a title for this sentence: {{document}} \n\nTitle: ||| {{summary}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: write_a_title_for_this_sentence
+ reference: ''
+ ac53a797-4d59-455a-b0e6-0e4d7d85f029: !Template
+ answer_choices: null
+ id: ac53a797-4d59-455a-b0e6-0e4d7d85f029
+ jinja: '{{document}} In a nutshell, ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: in_a_nutshell
+ reference: ''
+ d1d4a115-65fd-49eb-bd75-179a46b67ec0: !Template
+ answer_choices: null
+ id: d1d4a115-65fd-49eb-bd75-179a46b67ec0
+ jinja: 'Title: {{summary}}
+
+
+ ===
+
+
+ Write an article with the given title: ||| {{document}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: write_an_article
+ reference: ''
diff --git a/promptsource/templates/glue/ax/templates.yaml b/promptsource/templates/glue/ax/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45103f1555ddf2f526ac16dc635904a3e134434d
--- /dev/null
+++ b/promptsource/templates/glue/ax/templates.yaml
@@ -0,0 +1,108 @@
+dataset: glue
+subset: ax
+templates:
+ 074de970-f1fd-4793-923e-88299502e2f0: !Template
+ answer_choices: entailment ||| neutral ||| contradiction
+ id: 074de970-f1fd-4793-923e-88299502e2f0
+ jinja: 'The relationship between the following sentences can be characterized
+ as {{answer_choices[0]}} (one sentence implies the other), {{answer_choices[1]}}
+ (the sentences don''t necessarily imply or contradict one another), or {{answer_choices[2]}}
+ (the sentences contradict each other).
+
+ Sentence 1: {{hypothesis}}
+
+ Sentence 2: {{premise}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: relationship_between_hypothesis_premise
+ reference: ''
+ 32ae8811-2a1f-4027-96e8-725ecd08bba1: !Template
+ answer_choices: yes ||| maybe ||| no
+ id: 32ae8811-2a1f-4027-96e8-725ecd08bba1
+ jinja: '{{premise}}
+
+ Given the above, is it necessarily true that "{{hypothesis}}"? {{answer_choices[0]}},
+ {{answer_choices[2]}}, or {{answer_choices[1]}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based_on_prem_is_hypothesis
+ reference: ''
+ 3f6b9de8-616b-4a43-a077-e205a4c33a28: !Template
+ answer_choices: yes ||| maybe ||| no
+ id: 3f6b9de8-616b-4a43-a077-e205a4c33a28
+ jinja: 'Consider the hypothesis that "{{hypothesis}}"
+
+ Does this follow from the knowledge that "{{premise}}"?
+
+ {{answer_choices[0]}}, {{answer_choices[2]}}, or {{answer_choices[1]}}?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does_hyp_follow_from_prem
+ reference: ''
+ 76803347-b0fd-4dd6-8a04-ab1a6ab314d5: !Template
+ answer_choices: imply ||| neither ||| contradict
+ id: 76803347-b0fd-4dd6-8a04-ab1a6ab314d5
+ jinja: '{{premise}}
+
+ Does the above sentence imply or contradict that "{{hypothesis}}"? Please answer
+ as "{{answer_choices[0]}}", "{{answer_choices[2]}}", or "{{answer_choices[1]}}".
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does_this_imply
+ reference: ''
+ 8ff27ccf-21d3-45c2-afe4-4531309dfb9f: !Template
+ answer_choices: supports ||| neither ||| contradicts
+ id: 8ff27ccf-21d3-45c2-afe4-4531309dfb9f
+ jinja: 'Consider the {{"premise"}}:
+
+ Premise: {{premise}}
+
+ Does the above premise support the following hypothesis?
+
+ Hypothesis: {{hypothesis}}
+
+ Please answer as "{{answer_choices[0]}}", "{{answer_choices[2]}}", or "{{answer_choices[1]}}".
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does_this_support
+ reference: ''
diff --git a/promptsource/templates/glue/cola/templates.yaml b/promptsource/templates/glue/cola/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4fc6e828a441f145f7d90263d3e3d3a45c818f8f
--- /dev/null
+++ b/promptsource/templates/glue/cola/templates.yaml
@@ -0,0 +1,105 @@
+dataset: glue
+subset: cola
+templates:
+ 1d3f5f15-8128-4445-8de5-92365b7e54a8: !Template
+ answer_choices: no ||| yes
+ id: 1d3f5f15-8128-4445-8de5-92365b7e54a8
+ jinja: 'Does the following sentence make sense and use correct English? Please
+ answer {{"yes"}} or {{"no"}}.
+
+ {{sentence}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Make sense yes no
+ reference: ''
+ 39a701ff-bb4b-48ac-8c0a-8c61bf0d4b8d: !Template
+ answer_choices: No ||| Yes
+ id: 39a701ff-bb4b-48ac-8c0a-8c61bf0d4b8d
+ jinja: '{{sentence}}
+
+ Is this example grammatically correct and sensible?
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_this_correct
+ reference: A sample glue template
+ 6f49b860-9145-4fcb-b632-9faea39e254e: !Template
+ answer_choices: no ||| yes
+ id: 6f49b860-9145-4fcb-b632-9faea39e254e
+ jinja: 'I''m copy-editing a story for publication. It has the following sentence
+ in it:
+
+ {{sentence}}
+
+ Does this sentence make sense and is it grammatically correct? Please answer
+ {{"yes or no"}}.
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: editing
+ reference: ''
+ 79b4c04c-c0e2-4add-a600-d5572da192e7: !Template
+ answer_choices: unacceptable ||| acceptable
+ id: 79b4c04c-c0e2-4add-a600-d5572da192e7
+ jinja: 'The following sentence is either "{{"acceptable"}}", meaning it is grammatically
+ correct and makes sense, or "{{"unacceptable"}}". Which is it?
+
+ {{sentence}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Following sentence acceptable
+ reference: ''
+ dd33f089-57a1-452b-8bd5-8f1fffd10b60: !Template
+ answer_choices: no ||| yes
+ id: dd33f089-57a1-452b-8bd5-8f1fffd10b60
+ jinja: '{{sentence}}
+
+ I''m worried that sentence didn''t make any sense, or was grammatically incorrect.
+ Was it correct?
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Previous sentence acceptable
+ reference: ''
diff --git a/promptsource/templates/glue/mnli/templates.yaml b/promptsource/templates/glue/mnli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5f11019395f3c24d76451a6b4baa6529dfae669d
--- /dev/null
+++ b/promptsource/templates/glue/mnli/templates.yaml
@@ -0,0 +1,222 @@
+dataset: glue
+subset: mnli
+templates:
+ 02b4c44e-52cb-417b-b069-5d334b1f1a91: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 02b4c44e-52cb-417b-b069-5d334b1f1a91
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ 05bd28f7-3ff0-4a01-ad7d-d956d0f70209: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 05bd28f7-3ff0-4a01-ad7d-d956d0f70209
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 08948221-175f-43b2-8515-a5a29d8a82de: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 08948221-175f-43b2-8515-a5a29d8a82de
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ 22f9a320-bda8-4f45-968c-a1996eaa0c49: !Template
+ answer_choices: True ||| Neither ||| False
+ id: 22f9a320-bda8-4f45-968c-a1996eaa0c49
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ is no task identifying tokens like "anli R1: ".'
+ 3df92937-de3f-45a4-8a8c-69bb78cb1a7b: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: 3df92937-de3f-45a4-8a8c-69bb78cb1a7b
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ 4b6910ca-b857-4df1-b232-489bdb70f548: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 4b6910ca-b857-4df1-b232-489bdb70f548
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 7712d4a0-9b25-4224-b062-31df61e892c1: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 7712d4a0-9b25-4224-b062-31df61e892c1
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ 7729660d-a228-4558-80a8-8cf27de597db: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 7729660d-a228-4558-80a8-8cf27de597db
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ 7a712469-7e78-4e0b-81a4-86e338700d89: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 7a712469-7e78-4e0b-81a4-86e338700d89
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ 8a0c0b82-fa86-493d-aea7-e3f58abc8178: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 8a0c0b82-fa86-493d-aea7-e3f58abc8178
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 8df06939-7331-466e-9a0b-ad1b86f4bf1f: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 8df06939-7331-466e-9a0b-ad1b86f4bf1f
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ 9a26a741-b000-4844-bd7a-a2226e81ee89: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 9a26a741-b000-4844-bd7a-a2226e81ee89
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ aaddd2e0-ba82-4d8c-8545-0db7c36b535a: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: aaddd2e0-ba82-4d8c-8545-0db7c36b535a
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ cd81d676-b764-4709-8520-a625d299a8e6: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: cd81d676-b764-4709-8520-a625d299a8e6
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ e418db47-d2e0-4cd7-9e43-8b443d3b0f6d: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: e418db47-d2e0-4cd7-9e43-8b443d3b0f6d
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
diff --git a/promptsource/templates/glue/mnli_matched/templates.yaml b/promptsource/templates/glue/mnli_matched/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9491e0c32502536837a666fb64a99a3b4f8d81ce
--- /dev/null
+++ b/promptsource/templates/glue/mnli_matched/templates.yaml
@@ -0,0 +1,222 @@
+dataset: glue
+subset: mnli_matched
+templates:
+ 2e033f11-5ca6-4176-a026-85eff10d15ea: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 2e033f11-5ca6-4176-a026-85eff10d15ea
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ 2f6506cc-1225-49e9-8705-17dadfc732d8: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 2f6506cc-1225-49e9-8705-17dadfc732d8
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ 44adb852-ab5d-4e46-9ab8-4ebd94394f23: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 44adb852-ab5d-4e46-9ab8-4ebd94394f23
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 5b9aeb2d-a548-4d11-a831-ebf158ae9992: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: 5b9aeb2d-a548-4d11-a831-ebf158ae9992
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ 610b01b5-6acd-4246-8671-68c56e1c870e: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 610b01b5-6acd-4246-8671-68c56e1c870e
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ 62e6b26f-fc86-4022-9c71-e327ef519925: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: 62e6b26f-fc86-4022-9c71-e327ef519925
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ 6c2dbc0b-184b-4189-a641-e2a99bc95ee8: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 6c2dbc0b-184b-4189-a641-e2a99bc95ee8
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 97160ec4-1bdd-4338-8737-1d006265b78d: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 97160ec4-1bdd-4338-8737-1d006265b78d
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ d13d692e-0fa5-4c25-a51f-0509b2cee004: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: d13d692e-0fa5-4c25-a51f-0509b2cee004
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ dcca5858-794b-4bfd-bc7d-f81e10b4b082: !Template
+ answer_choices: True ||| Neither ||| False
+ id: dcca5858-794b-4bfd-bc7d-f81e10b4b082
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ is no task identifying tokens like "anli R1: ".'
+ eb15a7b5-5398-490a-941b-198f08d9689c: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: eb15a7b5-5398-490a-941b-198f08d9689c
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ eb3e6c5f-980d-418e-aa28-c017f6c1354a: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: eb3e6c5f-980d-418e-aa28-c017f6c1354a
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ f3ebe1ac-194b-41e7-b008-36eafdbfbe25: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: f3ebe1ac-194b-41e7-b008-36eafdbfbe25
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ f74a9359-1856-4933-a41e-0586a6527407: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: f74a9359-1856-4933-a41e-0586a6527407
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ fbfcc250-b1c8-4376-8f2a-99da44ec8da5: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: fbfcc250-b1c8-4376-8f2a-99da44ec8da5
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
diff --git a/promptsource/templates/glue/mnli_mismatched/templates.yaml b/promptsource/templates/glue/mnli_mismatched/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f4636330614d747e2b90771d6324cb261e5ae97c
--- /dev/null
+++ b/promptsource/templates/glue/mnli_mismatched/templates.yaml
@@ -0,0 +1,222 @@
+dataset: glue
+subset: mnli_mismatched
+templates:
+ 0ad75847-6f8c-401c-bf9d-7bb9e7f0ba41: !Template
+ answer_choices: True ||| Neither ||| False
+ id: 0ad75847-6f8c-401c-bf9d-7bb9e7f0ba41
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ is no task identifying tokens like "anli R1: ".'
+ 33fe0108-7c6b-4245-84af-ef5a379d279b: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 33fe0108-7c6b-4245-84af-ef5a379d279b
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ 54adbf5a-196f-4207-8e5e-da7df6b6f648: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 54adbf5a-196f-4207-8e5e-da7df6b6f648
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ 56b33958-5ba8-48be-bb56-b7b26bbbcf95: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 56b33958-5ba8-48be-bb56-b7b26bbbcf95
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ 59bc239a-bd30-4879-b53f-fcae9857412c: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 59bc239a-bd30-4879-b53f-fcae9857412c
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 612a4477-1dae-4b40-aad7-6f826c827adf: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 612a4477-1dae-4b40-aad7-6f826c827adf
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ 693e91cf-e92f-4349-bce8-6234db7e83a2: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 693e91cf-e92f-4349-bce8-6234db7e83a2
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ 76934a32-552f-4a33-af25-c8a64f9bd9e1: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 76934a32-552f-4a33-af25-c8a64f9bd9e1
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 770aa883-efec-4258-9e1e-1a96d0c20ed5: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 770aa883-efec-4258-9e1e-1a96d0c20ed5
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 868a3d57-47f3-4313-81de-c857c679baa8: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 868a3d57-47f3-4313-81de-c857c679baa8
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ 946116e5-c168-453b-a016-c41a4e005d1f: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: 946116e5-c168-453b-a016-c41a4e005d1f
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ 989ea99a-9e80-41af-9640-42d4db822a2b: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 989ea99a-9e80-41af-9640-42d4db822a2b
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ a3188347-964b-431e-8c74-1574e552d3cf: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: a3188347-964b-431e-8c74-1574e552d3cf
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ a8558646-4469-4115-a6ce-fe88411f25ad: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: a8558646-4469-4115-a6ce-fe88411f25ad
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ c6d6a638-8c48-45b3-abaf-5c885b7a8676: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: c6d6a638-8c48-45b3-abaf-5c885b7a8676
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
diff --git a/promptsource/templates/glue/mrpc/templates.yaml b/promptsource/templates/glue/mrpc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1bc47af3c6c670c2df34405b0bbecc91c6aa5b7f
--- /dev/null
+++ b/promptsource/templates/glue/mrpc/templates.yaml
@@ -0,0 +1,160 @@
+dataset: glue
+subset: mrpc
+templates:
+ 3b88d2c4-0aeb-4c6d-9ccc-653a388250a5: !Template
+ answer_choices: null
+ id: 3b88d2c4-0aeb-4c6d-9ccc-653a388250a5
+ jinja: '{% if label == 1 %}
+
+ Paraphrase the following sentence: {{sentence1}}
+
+ |||
+
+ {{sentence2}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_paraphrase
+ reference: ''
+ 44c2e6d9-facf-4959-8400-38e0eb8dd3a8: !Template
+ answer_choices: no ||| yes
+ id: 44c2e6d9-facf-4959-8400-38e0eb8dd3a8
+ jinja: 'I want to know whether the following two sentences mean the same thing.
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ Do they?
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: want to know
+ reference: ''
+ adf659af-4e2d-4e7e-ab89-b33cfc0b5a50: !Template
+ answer_choices: no ||| yes
+ id: adf659af-4e2d-4e7e-ab89-b33cfc0b5a50
+ jinja: 'Does the sentence
+
+ {{sentence1}}
+
+ paraphrase (that is, mean the same thing as) this sentence?
+
+ {{sentence2}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: paraphrase
+ reference: ''
+ bbb395c2-2c70-4eaa-ad2f-2cf18a81da93: !Template
+ answer_choices: not equivalent ||| equivalent
+ id: bbb395c2-2c70-4eaa-ad2f-2cf18a81da93
+ jinja: 'Are the following two sentences "{{"equivalent"}}" or "{{"not equivalent"}}"?
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: equivalent
+ reference: ''
+ d830d7a5-abc0-4275-ac62-974e0088876f: !Template
+ answer_choices: null
+ id: d830d7a5-abc0-4275-ac62-974e0088876f
+ jinja: '{% if label == 1 %}
+
+ Generate a sentence that means the same thing as this one: {{sentence1}}
+
+ |||
+
+ {{sentence2}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_sentence
+ reference: ''
+ ee82d511-908c-4244-804f-6d0d907c68c7: !Template
+ answer_choices: no ||| yes
+ id: ee82d511-908c-4244-804f-6d0d907c68c7
+ jinja: 'Can I replace the sentence
+
+ {{sentence1}}
+
+ with the sentence
+
+ {{sentence2}}
+
+ and have it mean the same thing?
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: replace
+ reference: ''
+ eefd8606-b936-4d26-b91d-8f4bc38bfcbf: !Template
+ answer_choices: no ||| yes
+ id: eefd8606-b936-4d26-b91d-8f4bc38bfcbf
+ jinja: 'Do the following two sentences mean the same thing?
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: same thing
+ reference: ''
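
Two of the mrpc templates above, `generate_paraphrase` and `generate_sentence`, wrap the entire prompt in `{% if label == 1 %}` so that only genuine paraphrase pairs yield a generation example; for `label == 0` the template renders as whitespace and the example is dropped, which is also why those two are marked `original_task: false` and scored with BLEU/ROUGE rather than accuracy. A small sketch of that filtering behaviour, again with plain `jinja2` for illustration:

```python
import jinja2

# mrpc "generate_paraphrase": emits a prompt/target only when label == 1.
JINJA = (
    "{% if label == 1 %}\n"
    "Paraphrase the following sentence: {{sentence1}}\n\n|||\n\n{{sentence2}}\n"
    "{% endif %}"
)
template = jinja2.Template(JINJA)

pairs = [
    {"sentence1": "He said hello.", "sentence2": "He greeted us.", "label": 1},
    {"sentence1": "He said hello.", "sentence2": "She left early.", "label": 0},
]
for pair in pairs:
    rendered = template.render(**pair).strip()
    if not rendered:  # label == 0 renders to whitespace and is skipped
        continue
    prompt, target = (part.strip() for part in rendered.split("|||"))
    print(prompt, "->", target)
```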
diff --git a/promptsource/templates/glue/qnli/templates.yaml b/promptsource/templates/glue/qnli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dd6a0b37f598b7ebd59be6e3f9f4e055bc9f08d8
--- /dev/null
+++ b/promptsource/templates/glue/qnli/templates.yaml
@@ -0,0 +1,106 @@
+dataset: glue
+subset: qnli
+templates:
+ 50c3108c-b23c-4691-97be-72438606c840: !Template
+ answer_choices: yes ||| no
+ id: 50c3108c-b23c-4691-97be-72438606c840
+ jinja: '{{sentence}}
+
+ Does that sentence have all you need to answer the question "{{question}}"?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: have all you need
+ reference: ''
+ 5f0f24d9-14a7-4588-8dc2-494b4c693b81: !Template
+ answer_choices: yes ||| no
+ id: 5f0f24d9-14a7-4588-8dc2-494b4c693b81
+ jinja: 'Can you answer the question "{{question}}" based only on the following:
+
+ {{sentence}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based only on
+ reference: ''
+ c626350d-6c0e-47be-b09e-c9ba1446b027: !Template
+ answer_choices: yes ||| no
+ id: c626350d-6c0e-47be-b09e-c9ba1446b027
+ jinja: 'Does knowing that "{{sentence}}" imply that I know the answer to "{{question}}"?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: imply
+ reference: ''
+ f2403d55-21a7-44bc-8b4c-6921fd7b01f5: !Template
+ answer_choices: yes ||| no
+ id: f2403d55-21a7-44bc-8b4c-6921fd7b01f5
+ jinja: 'I want to know the answer to the following question:
+
+ {{question}}
+
+ All the background I''m provided with is that "{{sentence}}". Is that enough
+ to answer the question?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: want to know
+ reference: ''
+ f44715c4-d787-484e-a912-5456cc2b6741: !Template
+ answer_choices: yes ||| no
+ id: f44715c4-d787-484e-a912-5456cc2b6741
+ jinja: 'Consider the passage:
+
+ {{sentence}}
+
+ and the question:
+
+ {{question}}
+
+ Is it possible to answer this question based only on the information in the
+ passage? {{"A) yes"}} or {{"B) no"}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: possible to answer
+ reference: ''
diff --git a/promptsource/templates/glue/qqp/templates.yaml b/promptsource/templates/glue/qqp/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44063e98b5463fe33b86947f6beb95e5f0f91130
--- /dev/null
+++ b/promptsource/templates/glue/qqp/templates.yaml
@@ -0,0 +1,100 @@
+dataset: glue
+subset: qqp
+templates:
+ 8e711799-a57c-4941-833b-466bedfb80ad: !Template
+ answer_choices: no ||| yes
+ id: 8e711799-a57c-4941-833b-466bedfb80ad
+ jinja: I'm an administrator on the website Quora. There are two posts, one that
+ asks "{{question1}}" and another that asks "{{question2}}". I can merge questions
+ if they are asking the same thing. Can I merge these two questions? ||| {{ answer_choices[label]
+ }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: quora
+ reference: ''
+ 94972071-a726-42a3-a726-13f414b65e67: !Template
+ answer_choices: not duplicates ||| duplicates
+ id: 94972071-a726-42a3-a726-13f414b65e67
+ jinja: '{{question1}}
+
+ {{question2}}
+
+ Pick one: These questions are "{{"duplicates"}}" or "{{"not duplicates"}}".
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: duplicate or not
+ reference: ''
+ a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b: !Template
+ answer_choices: no ||| yes
+ id: a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b
+ jinja: Are the questions "{{question1}}" and "{{question2}}" asking the same thing?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: same thing
+ reference: ''
+ c0182cd1-c7ac-4abe-829f-4651536af951: !Template
+ answer_choices: no ||| yes
+ id: c0182cd1-c7ac-4abe-829f-4651536af951
+ jinja: Can an answer to "{{question1}}" also be used to answer "{{question2}}"?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: answer
+ reference: ''
+ c0724198-97e7-44a1-89d8-c51e97ce0b04: !Template
+ answer_choices: No ||| Yes
+ id: c0724198-97e7-44a1-89d8-c51e97ce0b04
+ jinja: 'Question 1: {{question1}}
+
+ Question 2: {{question2}}
+
+
+ Do these two questions convey the same meaning? Yes or no? ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: meaning
+ reference: ''
+ fd244bd3-ca3b-4e4f-9722-fd006c50e157: !Template
+ answer_choices: no ||| yes
+ id: fd244bd3-ca3b-4e4f-9722-fd006c50e157
+ jinja: I received the questions "{{question1}}" and "{{question2}}". Are they
+ duplicates? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: duplicate
+ reference: ''
diff --git a/promptsource/templates/glue/rte/templates.yaml b/promptsource/templates/glue/rte/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..db70dfcb2251b41e88737c2b4d5e4a0ee62cc249
--- /dev/null
+++ b/promptsource/templates/glue/rte/templates.yaml
@@ -0,0 +1,106 @@
+dataset: glue
+subset: rte
+templates:
+ 03a7ae07-5ddd-46c4-92f3-2152223d44ec: !Template
+ answer_choices: yes ||| no
+ id: 03a7ae07-5ddd-46c4-92f3-2152223d44ec
+ jinja: '{{sentence1}}
+
+ Does this mean that "{{sentence2}}" is true? {{"A) yes or B) no."}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: mean
+ reference: ''
+ 4ee6ff27-de63-4e7b-a9d4-82a17eba407a: !Template
+ answer_choices: yes ||| no
+ id: 4ee6ff27-de63-4e7b-a9d4-82a17eba407a
+ jinja: 'Does the claim "{{sentence2}}" follow from the fact that "{{sentence1}}"?
+ Please answer either {{"yes"}} or {{"no"}}.
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "does the claim\u2026 follow the fact\u2026"
+ reference: ''
+ 9e2b4267-ec23-44c8-b82a-107e2c890fec: !Template
+ answer_choices: entailment ||| not entailment
+ id: 9e2b4267-ec23-44c8-b82a-107e2c890fec
+ jinja: 'We say that one sentence "{{"entails"}}" another sentence when the first
+ sentence implies the second sentence. Consider the following two sentences:
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ Is the relationship from the first to the second sentence "{{"entailment"}}"
+ or "{{"not entailment"}}"?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: entailment explained
+ reference: ''
+ c8dfc879-40f2-412d-be1e-4cd70107f6e6: !Template
+ answer_choices: yes ||| no
+ id: c8dfc879-40f2-412d-be1e-4cd70107f6e6
+ jinja: 'Does "{{sentence1}}" imply that "{{sentence2}}"? Please answer either
+ {{"yes"}} or {{"no"}}.
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: imply
+ reference: ''
+ f56ffced-9b16-431a-8a17-501e63cddf73: !Template
+ answer_choices: yes ||| no
+ id: f56ffced-9b16-431a-8a17-501e63cddf73
+ jinja: '{{sentence1}}
+
+ Does this imply
+
+ {{sentence2}}
+
+ Please answer {{"A) yes or B) no."}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: imply separated
+ reference: ''
diff --git a/promptsource/templates/glue/sst2/templates.yaml b/promptsource/templates/glue/sst2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..565e13f9460713578035da3ca8ccfb6702704c91
--- /dev/null
+++ b/promptsource/templates/glue/sst2/templates.yaml
@@ -0,0 +1,86 @@
+dataset: glue
+subset: sst2
+templates:
+ 11d1c505-9232-4c35-82a4-4c3642843e2e: !Template
+ answer_choices: negative ||| positive
+ id: 11d1c505-9232-4c35-82a4-4c3642843e2e
+ jinja: '{{sentence}}
+
+ Question: Was that sentence {{"positive"}} or {{"negative"}}? Answer: ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: positive negative after
+ reference: ''
+ 228fcae7-7f4c-4e3c-9ac4-e49b26bc103d: !Template
+ answer_choices: negative ||| positive
+ id: 228fcae7-7f4c-4e3c-9ac4-e49b26bc103d
+ jinja: 'I''m reading a review that says "{{sentence}}".
+
+
+ Do you think the review is {{"positive"}} or {{"negative"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: review
+ reference: ''
+ 5aa0cea9-0f8d-454d-b25b-b0d4cda273b8: !Template
+ answer_choices: sad ||| happy
+ id: 5aa0cea9-0f8d-454d-b25b-b0d4cda273b8
+ jinja: 'Someone just said to me "{{sentence}}".
+
+
+ Do you think they are {{"sad"}} or {{"happy"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: said
+ reference: ''
+ 63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a: !Template
+ answer_choices: negative ||| positive
+ id: 63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a
+ jinja: 'Does the following sentence have a {{"positive"}} or {{"negative"}} sentiment?
+
+ {{sentence}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: following positive negative
+ reference: ''
+ 6dd74cd5-e074-4612-9e96-c17ca88c3bc4: !Template
+ answer_choices: bad ||| good
+ id: 6dd74cd5-e074-4612-9e96-c17ca88c3bc4
+ jinja: Someone sent me an email with the sentence "{{sentence}}". Do you think
+ they are feeling {{"good"}} or {{"bad"}}? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: happy or mad
+ reference: ''
diff --git a/promptsource/templates/glue/stsb/templates.yaml b/promptsource/templates/glue/stsb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b76c47b4468554cbf06c1f381f4f48f854998c6e
--- /dev/null
+++ b/promptsource/templates/glue/stsb/templates.yaml
@@ -0,0 +1,119 @@
+dataset: glue
+subset: stsb
+templates:
+ 50e3a541-108c-4b26-a423-956562d9b3af: !Template
+ answer_choices: null
+ id: 50e3a541-108c-4b26-a423-956562d9b3af
+ jinja: Rate on a scale from {{"0.0"}} to {{"5.0"}} how similar the sentences "{{sentence1}}"
+ and "{{sentence2}}" are. ||| {{ (((5*label) | round )/5) }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ - Spearman Correlation
+ original_task: true
+ name: rank
+ reference: ''
+ 88dcb716-d19c-45bc-9d3a-cdf8fff5500b: !Template
+ answer_choices: null
+ id: 88dcb716-d19c-45bc-9d3a-cdf8fff5500b
+ jinja: 'Please rate how similar these two sentences are from {{"0.0"}} to {{"5.0"}}.
+
+ Sentence A: {{sentence1}}
+
+ Sentence B: {{sentence2}}
+
+ |||
+
+ {{ (((5*label) | round )/5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ - Spearman Correlation
+ original_task: true
+ name: rate
+ reference: ''
+ a552635f-3a9a-497f-ac04-ef414b24eb16: !Template
+ answer_choices: null
+ id: a552635f-3a9a-497f-ac04-ef414b24eb16
+ jinja: 'Please give me a score denoting the similarity of the following two sentences:
+
+ Sentence 1: {{sentence1}}
+
+ Sentence 2: {{sentence2}}
+
+ Your score should be something like {{"3.4"}}, where {{"0.0 means very dissimilar,
+ 2.5 means kind of similar, and 5.0 means very similar"}}.
+
+ |||
+
+ {{ (((5*label) | round )/5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ - Spearman Correlation
+ original_task: true
+ name: examples
+ reference: ''
+ ca75788d-4974-440a-a7b7-c42bae814d59: !Template
+ answer_choices: null
+ id: ca75788d-4974-440a-a7b7-c42bae814d59
+ jinja: 'I need to know how similar these two passages are:
+
+ - {{sentence1}}
+
+ - {{sentence2}}
+
+
+ Question: Can you give me a number from {{"0.0 to 5.0"}} that denotes how similar
+ they are, where {{"0.0"}} means totally dissimilar and {{"5.0"}} means extremely
+ similar?
+
+ Answer:
+
+ |||
+
+ {{ (((5*label) | round )/5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ - Spearman Correlation
+ original_task: true
+ name: similarity
+ reference: ''
+ d7315518-cfb9-4840-93ab-c52f1bb5e74d: !Template
+ answer_choices: null
+ id: d7315518-cfb9-4840-93ab-c52f1bb5e74d
+ jinja: 'I need to assign a score from {{"0.0 to 5.0"}} that denotes how similar
+ the following two sentences are:
+
+ A: {{sentence1}}
+
+ B: {{sentence2}}
+
+ What score should I assign?
+
+ |||
+
+ {{ (((5*label) | round )/5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ - Spearman Correlation
+ original_task: true
+ name: score
+ reference: ''
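
All five stsb targets above share the expression `{{ (((5*label) | round )/5) }}`, which snaps the continuous 0.0-5.0 similarity score to the nearest multiple of 0.2 so the target never needs more than one decimal place. For intuition, the equivalent Python is below; note that jinja's `round` filter defaults to "common" rounding, so exact halves can differ slightly from Python's built-in banker's rounding:

```python
def quantize(label: float) -> float:
    """Snap a 0.0-5.0 similarity score to the nearest 0.2, as the stsb targets do."""
    return round(5 * label) / 5

for raw in (0.0, 1.08, 3.27, 4.94):
    print(raw, "->", quantize(raw))
# 0.0 -> 0.0, 1.08 -> 1.0, 3.27 -> 3.2, 4.94 -> 5.0
```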
diff --git a/promptsource/templates/glue/wnli/templates.yaml b/promptsource/templates/glue/wnli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..26e6800c3fe1a6e4505ba38e875f5feb42df8078
--- /dev/null
+++ b/promptsource/templates/glue/wnli/templates.yaml
@@ -0,0 +1,110 @@
+dataset: glue
+subset: wnli
+templates:
+ 10c354ee-6f4e-4b04-91e1-29e999a8f3e7: !Template
+ answer_choices: not confident ||| very confident
+ id: 10c354ee-6f4e-4b04-91e1-29e999a8f3e7
+ jinja: 'If it''s true that
+
+ {{sentence1}}
+
+ how {{"confident"}} should I be that
+
+ {{sentence2}}
+
+ {{"very confident or not confident?"}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: confident
+ reference: ''
+ 3a0e46cb-0b96-4972-83f6-29a6c6a09ba9: !Template
+ answer_choices: no ||| yes
+ id: 3a0e46cb-0b96-4972-83f6-29a6c6a09ba9
+ jinja: '{{"Entailment"}} means that the second sentence follows from the first
+ sentence. Are the following two sentences an example of entailment?
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: entailment explained
+ reference: ''
+ 75f89b05-5a81-401b-8a04-8239211a9a95: !Template
+ answer_choices: no ||| yes
+ id: 75f89b05-5a81-401b-8a04-8239211a9a95
+ jinja: 'Assume that the following is true:
+
+ {{sentence1}}
+
+ Does this mean that "{{sentence2}}"?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: mean
+ reference: ''
+ a244158a-a248-4e34-bef7-66e269dd0815: !Template
+ answer_choices: no ||| yes
+ id: a244158a-a248-4e34-bef7-66e269dd0815
+ jinja: 'Someone told me "{{sentence1}}" Now, I think that "{{sentence2}}" Am I
+ justified in thinking this?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified
+ reference: ''
+ a2ce492b-dfd0-4f04-bc44-70c7867ba231: !Template
+ answer_choices: no ||| yes
+ id: a2ce492b-dfd0-4f04-bc44-70c7867ba231
+ jinja: '{{sentence1}}
+
+ {{sentence2}}
+
+ Does the first sentence imply the second sentence?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: imply
+ reference: ''
diff --git a/promptsource/templates/google_wellformed_query/templates.yaml b/promptsource/templates/google_wellformed_query/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac8867604981ac23ab3c7c61604d8e43127bb391
--- /dev/null
+++ b/promptsource/templates/google_wellformed_query/templates.yaml
@@ -0,0 +1,114 @@
+dataset: google_wellformed_query
+templates:
+ 7462caa6-9fb3-43ed-a883-85f8940ba23d: !Template
+ answer_choices: null
+ id: 7462caa6-9fb3-43ed-a883-85f8940ba23d
+ jinja: 'How would you rate how well-formed the query "{{content}}" is? "Well-formed"
+ means that a natural language system would be able to perform an accurate interpretation.
+ Give a value between 0 and 1.
+
+ |||
+
+ {{ rating | round(0) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: wellformed_rating
+ reference: ''
+ 80e4797c-2454-4f27-8032-a8191cd3602d: !Template
+ answer_choices: yes ||| no
+ id: 80e4797c-2454-4f27-8032-a8191cd3602d
+ jinja: 'John believes that the query "{{content}}" resembles a natural language
+ question. Answer {{ answer_choices[0] }} if you agree with John and {{ answer_choices[1]
+ }} if you disagree.
+
+ |||
+
+ {% if 0.5 < rating %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_wellformed_affirmative
+ reference: ''
+ 868d696c-428c-4915-b786-719361394143: !Template
+ answer_choices: yes ||| no
+ id: 868d696c-428c-4915-b786-719361394143
+ jinja: 'Is asking search engines "{{content}}" good for finding information?
+
+ |||
+
+ {% if 0.5 < rating %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_wellformed_finding_for_search
+ reference: ''
+ 9816d5bf-c4db-42ed-8ac8-2be45fa8a0bb: !Template
+ answer_choices: yes ||| no
+ id: 9816d5bf-c4db-42ed-8ac8-2be45fa8a0bb
+ jinja: 'Would "{{content}}" be a useful query to type in a search engine?
+
+ |||
+
+ {% if 0.5 < rating %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_wellformed_type_for_search
+ reference: ''
+ 9f3cc358-3746-405e-b5e9-5fc0dedc0b5d: !Template
+ answer_choices: yes ||| no
+ id: 9f3cc358-3746-405e-b5e9-5fc0dedc0b5d
+ jinja: 'Given this query "{{content}}", would a search engine know what to look
+ for? {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ |||
+
+ {% if 0.5 < rating %}{{ answer_choices[0] }}{% else %}{{ answer_choices[1] }}{%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_wellformed_know_for_search
+ reference: ''
+ e1c64d17-c623-4a30-b899-5c6a4e44e3d7: !Template
+ answer_choices: yes ||| no
+ id: e1c64d17-c623-4a30-b899-5c6a4e44e3d7
+ jinja: '"{{content}}" is an informative query, {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ |||
+
+ {% if 0.5 < rating %}{{answer_choices[0]}}{% else %}{{answer_choices[1]}}{%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_wellformed_interrogative
+ reference: Well-formed query
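
The `rating` field in google_wellformed_query is a continuous human score in [0, 1], and every classification template above binarizes it with the same jinja test, `{% if 0.5 < rating %}`. Restated in Python for clarity (the strict inequality means a rating of exactly 0.5 falls on the negative side):

```python
def wellformed_label(rating: float, choices=("yes", "no")) -> str:
    # Mirrors `{% if 0.5 < rating %}` from the templates above:
    # only a rating strictly greater than 0.5 counts as well-formed.
    return choices[0] if 0.5 < rating else choices[1]

assert wellformed_label(0.8) == "yes"
assert wellformed_label(0.5) == "no"  # boundary case is negative
assert wellformed_label(0.2) == "no"
```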
diff --git a/promptsource/templates/great_code/templates.yaml b/promptsource/templates/great_code/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..efc5d0da5771e58ba73610b62e40d074dcc3f8aa
--- /dev/null
+++ b/promptsource/templates/great_code/templates.yaml
@@ -0,0 +1,163 @@
+dataset: great_code
+templates:
+ 027215bb-1055-4584-b3ce-3267a8043d3a: !Template
+ answer_choices: null
+ id: 027215bb-1055-4584-b3ce-3267a8043d3a
+ jinja: "{% set mask = 'def
(' %}\n{% set indent = ' ' %}\n{% set\
+ \ ns = namespace(indent_size=0, result=[], masked=false, target='') %}\n{% for\
+ \ token in source_tokens %}\n {% if ns.masked is false and token.startswith('def')\
+ \ %}\n {% set ns.target = token.split('def ')[1][:-1] %}\n \
+ \ {% set token = mask %}\n {% set ns.masked = true %}\n {%\
+ \ endif%}\n {% if token== '#INDENT#' %}\n {% set ns.indent_size = ns.indent_size\
+ \ + 1 %}\n {% set ns.result = ns.result + [indent * ns.indent_size] %}\n\
+ \ {% elif token == '#NEWLINE#' %}\n {% set ns.result = ns.result\
+ \ + [\"\\n\"] %}\n {% elif token == '#UNINDENT#' %}\n {% set ns.indent_size\
+ \ = ns.indent_size - 1 %}\n {% else %}\n {% if not loop.first and\
+ \ loop.previtem == '#NEWLINE#' %}\n {% set ns.result = ns.result\
+ \ + [indent * ns.indent_size] %}\n {% endif %}\n {% set ns.result\
+ \ = ns.result + [token | replace('\\\\n', '\\n'), \" \"] %}\n {% endif %}\n\
+ {% endfor %}\n{{ns.result | join(\"\") | replace(\" . \", \".\") | replace(\"\
+ \ , \", \", \") | replace(\"( \", \"(\") | replace(\" )\", \")\") | replace(\"\
+ [ \", \"[\") | replace(\" ]\", \"]\")}}\n\nWhat is the function name?\n|||\n\
+ {{ ns.target }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: function name generation
+ reference: ''
+ 24fa908d-daa2-4fd2-bb22-374021c1fc23: !Template
+ answer_choices: null
+ id: 24fa908d-daa2-4fd2-bb22-374021c1fc23
+ jinja: "{% set result = \"\" %}\n{% set indent = ' ' %}\n{% set ns = namespace(indent_size=0,\
+ \ line_number=0, buggy_line=0, bug_location=0, bug_len=0, result=[], result_lines=[])\
+ \ %}\n{% set fixed_token = source_tokens[repair_targets[0]] %}\n{% set buggy_line_content\
+ \ = \"\" %}\n{% set fixed_buggy_line_content = \"\" %}\n\n{% if has_bug and\
+ \ (repair_targets | length > 0) %}\n {% for token in source_tokens %}\n\
+ \ {% if loop.index0 == error_location %}\n {% set ns.buggy_line\
+ \ = ns.line_number %}\n {% set ns.bug_location = (ns.result |\
+ \ join(\"\") | length) %}\n {% set ns.bug_len = (token | length)\
+ \ %}\n {% endif%}\n {% if token== '#INDENT#' %}\n {%\
+ \ set ns.indent_size = ns.indent_size + 1 %}\n {% set ns.result =\
+ \ ns.result + [indent * ns.indent_size] %}\n {% elif token == '#NEWLINE#'\
+ \ %}\n {% set ns.result_lines = ns.result_lines + [ns.result |\
+ \ join(\"\")] %}\n {% set ns.result = [] %}\n {% set\
+ \ ns.line_number = ns.line_number + 1 %}\n {% elif token == '#UNINDENT#'\
+ \ %}\n {% set ns.indent_size = ns.indent_size - 1 %}\n {%\
+ \ else %}\n {% if not loop.first and loop.previtem == '#NEWLINE#'\
+ \ %}\n {% set ns.result = ns.result + [indent * ns.indent_size]\
+ \ %}\n {% endif %}\n {% set ns.result = ns.result\
+ \ + [token | replace('\\\\n', '\\n'), \" \"] %}\n {% endif %}\n {%\
+ \ endfor %}\n {% set ns.result_lines = ns.result_lines + [ns.result | join(\"\
+ \")] %}\n {% set result = ns.result_lines | join(\"\\n\") %}\n {{result\
+ \ | replace(\" . \", \".\") | replace(\" , \", \", \") | replace(\"( \", \"\
+ (\") | replace(\" )\", \")\") | replace(\"[ \", \"[\") | replace(\" ]\", \"\
+ ]\")}}\n\n {% set buggy_line_content = ns.result_lines[ns.buggy_line] | trim\
+ \ | replace(\" . \", \".\") | replace(\" , \", \", \") | replace(\"( \", \"\
+ (\") | replace(\" )\", \")\") | replace(\"[ \", \"[\") | replace(\" ]\", \"\
+ ]\") %}\n {% set fixed_buggy_line_content = (ns.result_lines[ns.buggy_line][:ns.bug_location]\
+ \ + fixed_token + ns.result_lines[ns.buggy_line][ns.bug_location + ns.bug_len:])\
+ \ | trim | replace(\" . \", \".\") | replace(\" , \", \", \") | replace(\"\
+ ( \", \"(\") | replace(\" )\", \")\") | replace(\"[ \", \"[\") | replace(\"\
+ \ ]\", \"]\")%}\n\n Fix the buggy line: {{buggy_line_content}}\n |||\n\
+ \ {{fixed_buggy_line_content}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: fix buggy line
+ reference: ''
+ 3c0f36da-834a-4814-86f6-6e55414fbd32: !Template
+ answer_choices: null
+ id: 3c0f36da-834a-4814-86f6-6e55414fbd32
+ jinja: "{% set mask = '' %}\n{% set indent = ' ' %}\n{% set ns = namespace(indent_size=0,\
+ \ result=[]) %}\n\n{% if has_bug %}\n {% for token in source_tokens %}\n\
+ \ {% if loop.index0 == error_location %}\n {% set token\
+ \ = mask %}\n {% endif%}\n {% if token== '#INDENT#' %}\n \
+ \ {% set ns.indent_size = ns.indent_size + 1 %}\n {% set ns.result\
+ \ = ns.result + [indent * ns.indent_size] %}\n {% elif token == '#NEWLINE#'\
+ \ %}\n {% set ns.result = ns.result + [\"\\n\"] %}\n {%\
+ \ elif token == '#UNINDENT#' %}\n {% set ns.indent_size = ns.indent_size\
+ \ - 1 %}\n {% else %}\n {% if not loop.first and loop.previtem\
+ \ == '#NEWLINE#' %}\n {% set ns.result = ns.result + [indent\
+ \ * ns.indent_size] %}\n {% endif %}\n {% set ns.result\
+ \ = ns.result + [token | replace('\\\\n', '\\n'), \" \"] %}\n {% endif\
+ \ %}\n {% endfor %}\n {{ns.result | join(\"\") | replace(\" . \", \".\"\
+ ) | replace(\" , \", \", \") | replace(\"( \", \"(\") | replace(\" )\", \"\
+ )\") | replace(\"[ \", \"[\") | replace(\" ]\", \"]\")}}\n \n Given the\
+ \ code above, what is a proper replacement for {{mask}}?\n |||\n {{source_tokens[repair_targets[0]]}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: identifier prediction no choices
+ reference: ''
+ 57f93ca1-1e27-40ff-8fa4-ba11a499baef: !Template
+ answer_choices: Yes ||| No
+ id: 57f93ca1-1e27-40ff-8fa4-ba11a499baef
+ jinja: "{% set indent = ' ' %}\n{% set ns = namespace(indent_size=0, result=[])\
+ \ %}\n{% for token in source_tokens %}\n {% if token== '#INDENT#' %}\n \
+ \ {% set ns.indent_size = ns.indent_size + 1 %}\n {% set ns.result\
+ \ = ns.result + [indent * ns.indent_size] %}\n {% elif token == '#NEWLINE#'\
+ \ %}\n {% set ns.result = ns.result + [\"\\n\"] %}\n {% elif token\
+ \ == '#UNINDENT#' %}\n {% set ns.indent_size = ns.indent_size - 1 %}\n\
+ \ {% else %}\n {% if not loop.first and loop.previtem == '#NEWLINE#'\
+ \ %}\n {% set ns.result = ns.result + [indent * ns.indent_size]\
+ \ %}\n {% endif %}\n {% set ns.result = ns.result + [token\
+ \ | replace('\\\\n', '\\n'), \" \"] %}\n {% endif %}\n{% endfor %}\n{{ns.result\
+ \ | join(\"\") | replace(\" . \", \".\") | replace(\" , \", \", \") | replace(\"\
+ ( \", \"(\") | replace(\" )\", \")\") | replace(\"[ \", \"[\") | replace(\"\
+ \ ]\", \"]\")}}\n\nIs there a bug in the code above?\n|||\n{{ {True: \"Yes\"\
+ , False: \"No\"}[has_bug] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: bug detection
+ reference: ''
+ 6b154f4f-50a5-4890-b0fc-a0118061ce0c: !Template
+ answer_choices: '{% if has_bug %} {% set nss = namespace(choices=[]) %} {%
+ for i in repair_candidates %} {% set nss.choices = nss.choices + [source_tokens[(i
+ | int)]] %} {% endfor %} {{nss.choices | unique | join(" ||| ")}} {%
+ endif %}'
+ id: 6b154f4f-50a5-4890-b0fc-a0118061ce0c
+ jinja: "{% set mask = '' %}\n{% set indent = ' ' %}\n{% set ns = namespace(indent_size=0,\
+ \ result=[]) %}\n\n{% if has_bug %}\n {% for token in source_tokens %}\n\
+ \ {% if loop.index0 == error_location %}\n {% set token\
+ \ = mask %}\n {% endif%}\n {% if token== '#INDENT#' %}\n \
+ \ {% set ns.indent_size = ns.indent_size + 1 %}\n {% set ns.result\
+ \ = ns.result + [indent * ns.indent_size] %}\n {% elif token == '#NEWLINE#'\
+ \ %}\n {% set ns.result = ns.result + [\"\\n\"] %}\n {%\
+ \ elif token == '#UNINDENT#' %}\n {% set ns.indent_size = ns.indent_size\
+ \ - 1 %}\n {% else %}\n {% if not loop.first and loop.previtem\
+ \ == '#NEWLINE#' %}\n {% set ns.result = ns.result + [indent\
+ \ * ns.indent_size] %}\n {% endif %}\n {% set ns.result\
+ \ = ns.result + [token | replace('\\\\n', '\\n'), \" \"] %}\n {% endif\
+ \ %}\n {% endfor %}\n {{ns.result | join(\"\") | replace(\" . \", \".\"\
+ ) | replace(\" , \", \", \") | replace(\"( \", \"(\") | replace(\" )\", \"\
+ )\") | replace(\"[ \", \"[\") | replace(\" ]\", \"]\")}}\n \n Given the\
+ \ code above, what is a proper replacement for {{mask}}? Choose among: {{answer_choices\
+ \ | join(\", \")}}\n |||\n {{source_tokens[repair_targets[0]]}}\n{% endif\
+ \ %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: identifier prediction with choices
+ reference: ''
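
Every great_code template above is built around the same detokenization loop: the dataset's `source_tokens` stream interleaves real tokens with `#INDENT#`, `#UNINDENT#`, and `#NEWLINE#` control tokens, which the jinja reconstructs into laid-out Python before tightening the spacing around punctuation; the bug-repair templates then layer `error_location` and `repair_targets` bookkeeping on top. The loop is much easier to follow in Python. This is a sketch of what the jinja does, assuming only the control-token vocabulary visible in the templates, not a drop-in replacement for them:

```python
def detokenize(source_tokens, indent="    "):
    """Rebuild source text from great_code-style tokens, as the jinja loops do."""
    depth, out, prev = 0, [], None
    for tok in source_tokens:
        if tok == "#INDENT#":
            depth += 1
            out.append(indent * depth)
        elif tok == "#NEWLINE#":
            out.append("\n")
        elif tok == "#UNINDENT#":
            depth -= 1
        else:
            if prev == "#NEWLINE#":  # re-indent at the start of each line
                out.append(indent * depth)
            out.append(tok.replace("\\n", "\n") + " ")
        prev = tok
    text = "".join(out)
    # The same cosmetic replacements as the templates' replace(...) chains.
    for before, after in ((" . ", "."), (" , ", ", "), ("( ", "("),
                          (" )", ")"), ("[ ", "["), (" ]", "]")):
        text = text.replace(before, after)
    return text
```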
diff --git a/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml b/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff5ff1da52b3402ce84c14d32c074e68799a01aa
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml
@@ -0,0 +1,197 @@
+dataset: guardian_authorship
+subset: cross_genre_1
+templates:
+ 026e1ef2-c765-4262-b7b3-a087f38907db: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 026e1ef2-c765-4262-b7b3-a087f38907db
+ jinja: 'Who could have authored this article based on the writing style? The answer
+ options are {{ answer_choices | join(", ") }}.
+
+
+ {{article}} |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: writing_style
+ reference: ''
+ 12982397-c0c3-49a9-b3ac-38735908428b: !Template
+ answer_choices: null
+ id: 12982397-c0c3-49a9-b3ac-38735908428b
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} on the topic of {{\n[\n \"\
+ Politics\",\n \"Society\",\n \"UK\",\n \"World\",\n \"Books\"\n] [topic]\n\
+ }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author_topic
+ reference: ''
+ 1b9fb5f9-6d2a-45ad-8ad4-dc199ee181b6: !Template
+ answer_choices: null
+ id: 1b9fb5f9-6d2a-45ad-8ad4-dc199ee181b6
+ jinja: "Generate an article on the topic of {{[\n \"Politics\",\n \"Society\"\
+ ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_topic
+ reference: ''
+ 45e1aa31-6e76-4072-a6ee-ba99785999fc: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 45e1aa31-6e76-4072-a6ee-ba99785999fc
+ jinja: 'Given the answer options of {{answer_choices[:-1] | join(", ")}} and {{answer_choices[-1]}},
+ identify the author of the passage below, which is related to {{ ["Politics",
+ "Society", "UK", "World", "Books"][topic] }}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_affirmative
+ reference: ''
+ 68bfa6a4-a89c-4be2-aa0b-cce1103e3ecf: !Template
+ answer_choices: Politics ||| Society ||| UK ||| World ||| Books
+ id: 68bfa6a4-a89c-4be2-aa0b-cce1103e3ecf
+ jinja: "What is the topic of this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: topic
+ reference: ''
+ 8d8e432e-8594-4e89-8b03-e11f0cebcf7b: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 8d8e432e-8594-4e89-8b03-e11f0cebcf7b
+ jinja: 'Identify the author of the passage below by choosing from the author list
+ of {{answer_choices|join(", ")}}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_affirmative
+ reference: ''
+ 8e852323-3aba-4c57-a428-004bbef4185e: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 8e852323-3aba-4c57-a428-004bbef4185e
+ jinja: "You are in an examination, which requires you to associate the passage\
+ \ below to the author. The topic is about {{ [\"Politics\", \"Society\", \"\
+ UK\", \"World\", \"Books\"][topic] }}, and the possible authors are one of the\
+ \ following: {{answer_choices | join(\", \")}}. What is the answer?\n\nPassage:\
+ \ {{ article }} \n|||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_exam
+ reference: ''
+ dc46136b-69d1-484e-9b5f-accfb4ba22df: !Template
+ answer_choices: null
+ id: dc46136b-69d1-484e-9b5f-accfb4ba22df
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author
+ reference: ''
+ e885498a-04f9-4db9-bc01-d1324803315a: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: e885498a-04f9-4db9-bc01-d1324803315a
+ jinja: "Who wrote this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article
+ reference: ''
+ f6ff1e76-4148-450c-ba0c-157a5b4c2383: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: f6ff1e76-4148-450c-ba0c-157a5b4c2383
+ jinja: "Who is the author of the passage below? Choose from the following list:\
+ \ {{ answer_choices | join(\", \")}}. \n\nHint: The topic is related to {{ [\"\
+ Politics\", \"Society\", \"UK\", \"World\", \"Books\"][topic] }}.\n\n{{article}}\n\
+ |||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_hint
+ reference: ''
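The four `guardian_authorship` files in this diff share the same template set. As a sanity check of the format, here is a minimal sketch of how the `who_wrote_article` entry above turns one example into an (input, target) pair — plain `jinja2` rather than promptsource's own rendering pipeline, with invented field values:

```python
# Minimal sketch, bypassing promptsource: render one template with plain jinja2.
from jinja2 import Environment

env = Environment()

# `answer_choices` is stored as a "|||"-separated string and exposed to the
# template as a list (here, the 13 Guardian columnists).
raw_choices = (
    "Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan Freedland ||| "
    "Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston ||| "
    "Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe Williams"
)
answer_choices = [c.strip() for c in raw_choices.split("|||")]

# The `who_wrote_article` jinja string, with the YAML escaping removed.
jinja_src = (
    'Who wrote this article? The answer options are {{answer_choices|join(", ")}}.'
    "\n\n{{article}}\n\n|||\n\n{{ answer_choices[author] }}"
)

example = {"article": "A sample opinion column ...", "author": 3}  # invented values
rendered = env.from_string(jinja_src).render(answer_choices=answer_choices, **example)

# "|||" separates the model input from the target.
prompt, _, target = rendered.partition("|||")
print(prompt.strip())
print(target.strip())  # -> Jonathan Freedland
```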
diff --git a/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f2f8f00f44789ce030b8aad25efa96d533750108
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml
@@ -0,0 +1,197 @@
+dataset: guardian_authorship
+subset: cross_topic_1
+templates:
+ 18cea428-59ae-4db1-b2ee-6c44fb39dc71: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 18cea428-59ae-4db1-b2ee-6c44fb39dc71
+ jinja: 'Who could have authored this article based on the writing style? The answer
+ options are {{ answer_choices | join(", ") }}.
+
+
+ {{article}} |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: writing_style
+ reference: ''
+ 3b4cc95c-f88c-4b51-add5-32ffdebfdfc6: !Template
+ answer_choices: null
+ id: 3b4cc95c-f88c-4b51-add5-32ffdebfdfc6
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} on the topic of {{\n[\n \"\
+ Politics\",\n \"Society\",\n \"UK\",\n \"World\",\n \"Books\"\n] [topic]\n\
+ }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author_topic
+ reference: ''
+ a19222b2-6edd-479b-a30c-96d2497216e5: !Template
+ answer_choices: null
+ id: a19222b2-6edd-479b-a30c-96d2497216e5
+ jinja: "Generate an article on the topic of {{[\n \"Politics\",\n \"Society\"\
+ ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_topic
+ reference: ''
+ b6a0012c-f10b-464e-9531-8a9d56057d0f: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: b6a0012c-f10b-464e-9531-8a9d56057d0f
+ jinja: 'Given the answer options of {{answer_choices[:-1] | join(", ")}} and {{answer_choices[-1]}},
+ identify the author of the passage below, which is related to {{ ["Politics",
+ "Society", "UK", "World", "Books"][topic] }}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_affirmative
+ reference: ''
+ cde2012f-4c80-4aa0-90f6-2db7138b534e: !Template
+ answer_choices: Politics ||| Society ||| UK ||| World ||| Books
+ id: cde2012f-4c80-4aa0-90f6-2db7138b534e
+ jinja: "What is the topic of this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: topic
+ reference: ''
+ d617f1c6-114f-4fd7-81d4-7b7e12f353b0: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: d617f1c6-114f-4fd7-81d4-7b7e12f353b0
+ jinja: 'Identify the author of the passage below by choosing from the author list
+ of {{answer_choices|join(", ")}}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_affirmative
+ reference: ''
+ e8066eb0-f476-4f7a-9c5a-eb90dceefc3d: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: e8066eb0-f476-4f7a-9c5a-eb90dceefc3d
+ jinja: "You are in an examination, which requires you to associate the passage\
+ \ below to the author. The topic is about {{ [\"Politics\", \"Society\", \"\
+ UK\", \"World\", \"Books\"][topic] }}, and the possible authors are one of the\
+ \ following: {{answer_choices | join(\", \")}}. What is the answer?\n\nPassage:\
+ \ {{ article }} \n|||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_exam
+ reference: ''
+ ec933b25-e4a2-49f6-b9a0-1675d3541e27: !Template
+ answer_choices: null
+ id: ec933b25-e4a2-49f6-b9a0-1675d3541e27
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author
+ reference: ''
+ f22055a0-478e-4ace-9d0b-82986ad77919: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: f22055a0-478e-4ace-9d0b-82986ad77919
+ jinja: "Who wrote this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article
+ reference: ''
+ f289839f-7fdb-49d7-ab66-dacd6e583e04: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: f289839f-7fdb-49d7-ab66-dacd6e583e04
+ jinja: "Who is the author of the passage below? Choose from the following list:\
+ \ {{ answer_choices | join(\", \")}}. \n\nHint: The topic is related to {{ [\"\
+ Politics\", \"Society\", \"UK\", \"World\", \"Books\"][topic] }}.\n\n{{article}}\n\
+ |||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_hint
+ reference: ''
diff --git a/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..603306f6597cb023794ad3567c5dfefdce843da5
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml
@@ -0,0 +1,197 @@
+dataset: guardian_authorship
+subset: cross_topic_4
+templates:
+ 3951d79c-408b-4895-8226-3033d8784d2c: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 3951d79c-408b-4895-8226-3033d8784d2c
+ jinja: 'Who could have authored this article based on the writing style? The answer
+ options are {{ answer_choices | join(", ") }}.
+
+
+ {{article}} |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: writing_style
+ reference: ''
+ 4998d29d-7042-439b-8346-c4f93bd11cbc: !Template
+ answer_choices: null
+ id: 4998d29d-7042-439b-8346-c4f93bd11cbc
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} on the topic of {{\n[\n \"\
+ Politics\",\n \"Society\",\n \"UK\",\n \"World\",\n \"Books\"\n] [topic]\n\
+ }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author_topic
+ reference: ''
+ 4c3d501e-1ccf-4ce4-b6c6-9af13f0f5429: !Template
+ answer_choices: null
+ id: 4c3d501e-1ccf-4ce4-b6c6-9af13f0f5429
+ jinja: "Generate an article on the topic of {{[\n \"Politics\",\n \"Society\"\
+ ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_topic
+ reference: ''
+ 4ef96141-81c1-488c-92b9-5d35a3a12afa: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 4ef96141-81c1-488c-92b9-5d35a3a12afa
+ jinja: 'Given the answer options of {{answer_choices[:-1] | join(", ")}} and {{answer_choices[-1]}},
+ identify the author of the passage below, which is related to {{ ["Politics",
+ "Society", "UK", "World", "Books"][topic] }}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_affirmative
+ reference: ''
+ 54e1f0ac-1e17-43bb-85ee-3f852fcccb10: !Template
+ answer_choices: Politics ||| Society ||| UK ||| World ||| Books
+ id: 54e1f0ac-1e17-43bb-85ee-3f852fcccb10
+ jinja: "What is the topic of this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: topic
+ reference: ''
+ 66de6739-738e-4397-8c15-49ea1e1a6c6c: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 66de6739-738e-4397-8c15-49ea1e1a6c6c
+ jinja: 'Identify the author of the passage below by choosing from the author list
+ of {{answer_choices|join(", ")}}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_affirmative
+ reference: ''
+ 93d06e87-f328-415d-8fda-f4732165736d: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 93d06e87-f328-415d-8fda-f4732165736d
+ jinja: "You are in an examination, which requires you to associate the passage\
+ \ below to the author. The topic is about {{ [\"Politics\", \"Society\", \"\
+ UK\", \"World\", \"Books\"][topic] }}, and the possible authors are one of the\
+ \ following: {{answer_choices | join(\", \")}}. What is the answer?\n\nPassage:\
+ \ {{ article }} \n|||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_exam
+ reference: ''
+ b092f3f0-467c-4268-b450-b3c416824d56: !Template
+ answer_choices: null
+ id: b092f3f0-467c-4268-b450-b3c416824d56
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author
+ reference: ''
+ b89bb96c-e3c7-4e8a-beab-658800526864: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: b89bb96c-e3c7-4e8a-beab-658800526864
+ jinja: "Who wrote this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article
+ reference: ''
+ d16d1263-17e7-411a-b28a-207a95d79afc: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: d16d1263-17e7-411a-b28a-207a95d79afc
+ jinja: "Who is the author of the passage below? Choose from the following list:\
+ \ {{ answer_choices | join(\", \")}}. \n\nHint: The topic is related to {{ [\"\
+ Politics\", \"Society\", \"UK\", \"World\", \"Books\"][topic] }}.\n\n{{article}}\n\
+ |||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_hint
+ reference: ''
diff --git a/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44fae27b62e5f7908f39fac1510bf6f53c9587d9
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml
@@ -0,0 +1,197 @@
+dataset: guardian_authorship
+subset: cross_topic_7
+templates:
+ 53685e15-3901-41e0-a431-042333fedc5d: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 53685e15-3901-41e0-a431-042333fedc5d
+ jinja: 'Who could have authored this article based on the writing style? The answer
+ options are {{ answer_choices | join(", ") }}.
+
+
+ {{article}} |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: writing_style
+ reference: ''
+ 6752a104-6037-4c8d-9cc3-7b88b97e5142: !Template
+ answer_choices: null
+ id: 6752a104-6037-4c8d-9cc3-7b88b97e5142
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} on the topic of {{\n[\n \"\
+ Politics\",\n \"Society\",\n \"UK\",\n \"World\",\n \"Books\"\n] [topic]\n\
+ }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author_topic
+ reference: ''
+ 794dff9a-dd24-4e67-9cb9-c67773b3d09d: !Template
+ answer_choices: null
+ id: 794dff9a-dd24-4e67-9cb9-c67773b3d09d
+ jinja: "Generate an article on the topic of {{[\n \"Politics\",\n \"Society\"\
+ ,\n \"UK\",\n \"World\",\n \"Books\"\n][topic] }}. |||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_topic
+ reference: ''
+ 96a5ad6e-0d4e-4fc8-9429-79ef7e444e96: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 96a5ad6e-0d4e-4fc8-9429-79ef7e444e96
+ jinja: 'Given the answer options of {{answer_choices[:-1] | join(", ")}} and {{answer_choices[-1]}},
+ identify the author of the passage below, which is related to {{ ["Politics",
+ "Society", "UK", "World", "Books"][topic] }}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_affirmative
+ reference: ''
+ 9dcd1f54-5178-41a1-945e-96105b470e32: !Template
+ answer_choices: Politics ||| Society ||| UK ||| World ||| Books
+ id: 9dcd1f54-5178-41a1-945e-96105b470e32
+ jinja: "What is the topic of this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n|||\n{{answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: topic
+ reference: ''
+ 9ffa7dc2-c8e5-4794-8d2c-671b68b007fc: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: 9ffa7dc2-c8e5-4794-8d2c-671b68b007fc
+ jinja: 'Identify the author of the passage below by choosing from the author list
+ of {{answer_choices|join(", ")}}.
+
+
+ {{article}}
+
+ |||
+
+ {{ answer_choices[author] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_affirmative
+ reference: ''
+ adce32b8-d92a-4b29-908e-93fe86051dca: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: adce32b8-d92a-4b29-908e-93fe86051dca
+ jinja: "You are in an examination, which requires you to associate the passage\
+ \ below to the author. The topic is about {{ [\"Politics\", \"Society\", \"\
+ UK\", \"World\", \"Books\"][topic] }}, and the possible authors are one of the\
+ \ following: {{answer_choices | join(\", \")}}. What is the answer?\n\nPassage:\
+ \ {{ article }} \n|||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_exam
+ reference: ''
+ d89e8c33-35c4-4c0c-a6c9-52460ed20f7f: !Template
+ answer_choices: null
+ id: d89e8c33-35c4-4c0c-a6c9-52460ed20f7f
+ jinja: "Generate an article based on the writing style of {{\n[\n \"Catherine\
+ \ Bennett\",\n \"George Monbiot\",\n \"Hugo Young\",\n \"Jonathan Freedland\"\
+ ,\n \"Martin Kettle\",\n \"Mary Riddell\",\n \"Nick Cohen\",\n \"Peter Preston\"\
+ ,\n \"Polly Toynbee\",\n \"Roy Hattersley\",\n \"Simon Hoggart\",\n \"Will\
+ \ Hutton\",\n \"Zoe Williams\"\n] [author]\n}} .\n|||\n{{article}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: article_from_author
+ reference: ''
+ db77ee6b-d077-4831-82ca-596c9b5d3f39: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: db77ee6b-d077-4831-82ca-596c9b5d3f39
+ jinja: "Who wrote this article? The answer options are {{answer_choices|join(\"\
+ , \")}}.\n\n{{article}} \n\n|||\n\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article
+ reference: ''
+ e6b9c224-1632-40da-8c69-76986da7015d: !Template
+ answer_choices: Catherine Bennett ||| George Monbiot ||| Hugo Young ||| Jonathan
+ Freedland ||| Martin Kettle ||| Mary Riddell ||| Nick Cohen ||| Peter Preston
+ ||| Polly Toynbee ||| Roy Hattersley ||| Simon Hoggart ||| Will Hutton ||| Zoe
+ Williams
+ id: e6b9c224-1632-40da-8c69-76986da7015d
+ jinja: "Who is the author of the passage below? Choose from the following list:\
+ \ {{ answer_choices | join(\", \")}}. \n\nHint: The topic is related to {{ [\"\
+ Politics\", \"Society\", \"UK\", \"World\", \"Books\"][topic] }}.\n\n{{article}}\n\
+ |||\n{{ answer_choices[author] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: who_wrote_article_with_topic_hint
+ reference: ''
diff --git a/promptsource/templates/gutenberg_time/templates.yaml b/promptsource/templates/gutenberg_time/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fbc9e9700d039736d7bf7aed270595df5cad1512
--- /dev/null
+++ b/promptsource/templates/gutenberg_time/templates.yaml
@@ -0,0 +1,138 @@
+dataset: gutenberg_time
+templates:
+ 06dce7dd-ae32-4acb-a1c8-6a01303b577b: !Template
+ answer_choices: null
+ id: 06dce7dd-ae32-4acb-a1c8-6a01303b577b
+ jinja: 'Given the following text, what time reference is reported in the text?
+
+
+ {{tok_context}}
+
+ |||
+
+ {{time_phrase}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: asking_the_time_reference
+ reference: Asking the time reference before the text
+ 197f6f12-d2d8-483c-a250-17082d3ce831: !Template
+ answer_choices: 0 ||| 1 ||| 2 ||| 3 ||| 4 ||| 5 ||| 6 ||| 7 ||| 8 ||| 9 ||| 10
+ ||| 11 ||| 12 ||| 13 ||| 14 ||| 15 ||| 16 ||| 17 ||| 18 ||| 19 ||| 20 ||| 21
+ ||| 22 ||| 23
+ id: 197f6f12-d2d8-483c-a250-17082d3ce831
+ jinja: 'Answer the following question using the passage below: what hour does
+ the phrase "{{time_phrase}}" refer to? Give the integer answer in 24-hour format.
+
+
+ {{tok_context}}
+
+ |||
+
+ {% set idx = hour_reference | int %}
+
+ {{answer_choices[idx]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asking_the_hour_affirmative
+ reference: ''
+ 3446ae6a-67a0-4bb0-8148-506648097843: !Template
+ answer_choices: A.M. ||| P.M.
+ id: 3446ae6a-67a0-4bb0-8148-506648097843
+ jinja: 'Given the passage below, is the phrase "{{time_phrase}}" {{answer_choices[0]}}
+ or {{answer_choices[1]}}?
+
+
+ {{tok_context}}
+
+ |||
+
+ {% set hour = hour_reference | int %}
+
+ {{answer_choices[0] if hour < 12 else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asking_AM_PM_interrogative
+ reference: ''
+ 4618dd9b-d11b-48f3-a359-f51d8593c80d: !Template
+ answer_choices: A.M. ||| P.M.
+ id: 4618dd9b-d11b-48f3-a359-f51d8593c80d
+ jinja: "There's a time phrase of \"{{time_phrase}}\" in the passage below. Determine\
+ \ whether the time the phrase is referring to is \u201C{{answer_choices[0]}}\u201D\
+ \ or \u201C{{answer_choices[1]}}\u201D.\n\n{{tok_context}}\n|||\n{% set hour\
+ \ = hour_reference | int %}\n{{answer_choices[0] if hour < 12 else answer_choices[1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asking_AM_PM_affirmative
+ reference: ''
+ 4efa58a3-a38b-4bcd-8597-687a7b7f56f8: !Template
+ answer_choices: 0 ||| 1 ||| 2 ||| 3 ||| 4 ||| 5 ||| 6 ||| 7 ||| 8 ||| 9 ||| 10
+ ||| 11 ||| 12 ||| 13 ||| 14 ||| 15 ||| 16 ||| 17 ||| 18 ||| 19 ||| 20 ||| 21
+ ||| 22 ||| 23
+ id: 4efa58a3-a38b-4bcd-8597-687a7b7f56f8
+ jinja: 'Given the following text, what hour (between 0 and 23) does the phrase
+ "{{time_phrase}}" indicate?
+
+
+ {{tok_context}}
+
+ |||
+
+ {% set idx = hour_reference | int %}
+
+ {{answer_choices[idx]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asking_the_hour_interrogative
+ reference: Ask for the time reported in the text, explicitly mentioning the phrase,
+ asking the question before the text
+ 8e86db06-1ef6-4120-9cbf-1d083eb8af7f: !Template
+ answer_choices: 0 ||| 1 ||| 2 ||| 3 ||| 4 ||| 5 ||| 6 ||| 7 ||| 8 ||| 9 ||| 10
+ ||| 11 ||| 12 ||| 13 ||| 14 ||| 15 ||| 16 ||| 17 ||| 18 ||| 19 ||| 20 ||| 21
+ ||| 22 ||| 23
+ id: 8e86db06-1ef6-4120-9cbf-1d083eb8af7f
+ jinja: 'There''s a time phrase of "{{time_phrase}}" in the passage below. What
+ hour does it refer to? Give the integer value between 0 and 23.
+
+
+ {{tok_context}}
+
+ |||
+
+ {% set idx = hour_reference | int %}
+
+ {{answer_choices[idx]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asking_the_hour_refer
+ reference: ''
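All of the `gutenberg_time` templates above derive their target from the string field `hour_reference` via the `| int` filter. The label logic, written out in plain Python with an invented hour value:

```python
# Plain-Python equivalent of the Jinja label logic in the templates above.
hour_reference = "17"  # invented; the dataset stores the hour as a string

hour_choices = [str(h) for h in range(24)]  # answer_choices "0 ||| 1 ||| ... ||| 23"
am_pm_choices = ["A.M.", "P.M."]            # answer_choices "A.M. ||| P.M."

hour = int(hour_reference)                  # {% set hour = hour_reference | int %}
print(hour_choices[hour])                                   # asking_the_hour_* target: "17"
print(am_pm_choices[0] if hour < 12 else am_pm_choices[1])  # asking_AM_PM_* target: "P.M."
```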
diff --git a/promptsource/templates/hans/templates.yaml b/promptsource/templates/hans/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3406a03f721ce59ae5914b879516c0e81e82c76c
--- /dev/null
+++ b/promptsource/templates/hans/templates.yaml
@@ -0,0 +1,144 @@
+dataset: hans
+templates:
+ 03fc899d-aa53-4bbd-8808-d390b2a30f86: !Template
+ answer_choices: Yes ||| No
+ id: 03fc899d-aa53-4bbd-8808-d390b2a30f86
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes\
+ \ or no? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 2084c370-6052-4840-89b6-b35ad70fdf31: !Template
+ answer_choices: Yes ||| No
+ id: 2084c370-6052-4840-89b6-b35ad70fdf31
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes
+ or no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ 559dec8c-5ecc-4ff6-9765-7358e5b675d3: !Template
+ answer_choices: Yes ||| No
+ id: 559dec8c-5ecc-4ff6-9765-7358e5b675d3
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes or no? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 591a436a-588d-4356-9c3c-7f2ddbb3ba55: !Template
+ answer_choices: Yes ||| No
+ id: 591a436a-588d-4356-9c3c-7f2ddbb3ba55
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes or no? |||
+ {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 6ed3823e-5ebb-4398-8366-273047d970f0: !Template
+ answer_choices: Yes ||| No
+ id: 6ed3823e-5ebb-4398-8366-273047d970f0
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes or
+ no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ b12b3a20-3cc2-42a8-899e-4ef71a72e484: !Template
+ answer_choices: Yes ||| No
+ id: b12b3a20-3cc2-42a8-899e-4ef71a72e484
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes or no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ c5508a95-1f23-47b9-aed4-0eca8380f71b: !Template
+ answer_choices: Yes ||| No
+ id: c5508a95-1f23-47b9-aed4-0eca8380f71b
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, is "{{hypothesis}}" definitely correct? Yes or no? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ d6fad9e1-d882-4d06-8f7f-ce400268df5f: !Template
+ answer_choices: Yes ||| No
+ id: d6fad9e1-d882-4d06-8f7f-ce400268df5f
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes or no?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ e86994a7-2649-4535-acce-57e5aed8d390: !Template
+ answer_choices: True ||| False
+ id: e86994a7-2649-4535-acce-57e5aed8d390
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True or False? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Same as reported in Figure G31 of the GPT-3 paper.
+ ffbc8068-e791-4277-b342-1d7e0e80f825: !Template
+ answer_choices: Yes ||| No
+ id: ffbc8068-e791-4277-b342-1d7e0e80f825
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes or no? |||
+ {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
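Every HANS template above indexes a two-item choice list with the dataset label; this relies on HANS using label 0 for entailment and label 1 for non-entailment. A minimal sketch with an invented example pair:

```python
# Sketch of the label-to-target mapping shared by the HANS templates above.
answer_choices = ["Yes", "No"]  # from answer_choices "Yes ||| No"

# Invented example in the style of HANS's lexical-overlap heuristic.
example = {
    "premise": "The doctors visited the lawyer.",
    "hypothesis": "The lawyer visited the doctors.",
    "label": 1,  # 0 = entailment, 1 = non-entailment
}

prompt = (
    f'{example["premise"]} Question: Does this imply that '
    f'"{example["hypothesis"]}"? Yes or no?'
)
target = answer_choices[example["label"]]
print(prompt)
print(target)  # -> No
```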
diff --git a/promptsource/templates/hate_speech18/templates.yaml b/promptsource/templates/hate_speech18/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e94a2aa452dbb05c5d564e0fcb765601854044a2
--- /dev/null
+++ b/promptsource/templates/hate_speech18/templates.yaml
@@ -0,0 +1,20 @@
+dataset: hate_speech18
+templates:
+ 3266f9d4-9c80-4e17-a8a6-1fe44ca8f3bf: !Template
+ answer_choices: noHate ||| hate
+ id: 3266f9d4-9c80-4e17-a8a6-1fe44ca8f3bf
+ jinja: '{% if label in [0, 1] %}
+
+ {{text}} Does the sentence express {{answer_choices[1]}} speech or {{answer_choices[0]}}
+ speech? ||| {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: hate_or_noHate_sentiment_analysis
+ reference: ''
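The `{% if label in [0, 1] %}` guard above is load-bearing: `hate_speech18` carries labels beyond noHate/hate, and wrapping the template this way makes it render to an empty string for those examples so they can be skipped. A sketch in plain jinja2 with invented field values:

```python
# Sketch of the label guard: out-of-scope labels render to an empty string.
from jinja2 import Environment

src = (
    "{% if label in [0, 1] %}"
    "{{text}} Does the sentence express {{answer_choices[1]}} speech or "
    "{{answer_choices[0]}} speech? ||| {{answer_choices[label]}}"
    "{% endif %}"
)
tmpl = Environment().from_string(src)
choices = ["noHate", "hate"]

print(repr(tmpl.render(text="Some forum post.", label=1, answer_choices=choices)))
# non-empty string -> usable (input, target) pair
print(repr(tmpl.render(text="Some forum post.", label=2, answer_choices=choices)))
# '' -> example is filtered out
```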
diff --git a/promptsource/templates/head_qa/en/templates.yaml b/promptsource/templates/head_qa/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..669dd2b0e096047d52a5028fa7983a54a59786ee
--- /dev/null
+++ b/promptsource/templates/head_qa/en/templates.yaml
@@ -0,0 +1,203 @@
+dataset: head_qa
+subset: en
+templates:
+ 375b86a3-a869-4473-920c-c00ea789e943: !Template
+ answer_choices: '{{ answers | map(attribute="aid") | join("|||") }}'
+ id: 375b86a3-a869-4473-920c-c00ea789e943
+ jinja: 'Answer/complete the following paragraph about {{category}}:
+
+
+ {{qtext}}
+
+
+ Which one is the correct answer?
+
+
+ {% for answer in answers %}
+
+ {{answer["aid"] | string}}. {{answer["atext"]}}
+
+ {% endfor %}
+
+
+ |||
+
+
+ {{ra | string}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: multiple_choice_q_and_a_index_with_context_en
+ reference: Pose a multi-choice question using the index as an answer and the category
+ as context
+ task_template: true
+ 749a5c3f-c10e-4a4a-aa35-d31698bb1104: !Template
+ answer_choices: '{{ answers | map(attribute="atext") | join("|||") }}'
+ id: 749a5c3f-c10e-4a4a-aa35-d31698bb1104
+ jinja: 'Answer/complete the following paragraph:
+
+
+ {{qtext}}
+
+
+ What is the correct answer?
+
+ - {{ answers | map(attribute="atext")| join("\n- ") }}
+
+
+ |||
+
+
+ {% for answer in answers if answer["aid"]==ra -%}
+
+ {{answer["atext"]}}
+
+ {%- endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: multiple_choice_q_and_a_en
+ reference: Pose a multi-choice question
+ task_template: true
+ c830f4cc-128c-4644-9e19-4c99782f70bb: !Template
+ answer_choices: '{{ answers | map(attribute="aid") | join("|||") }}'
+ id: c830f4cc-128c-4644-9e19-4c99782f70bb
+ jinja: 'Answer/complete the following paragraph:
+
+
+ {{qtext}}
+
+
+ Which one is the correct answer?
+
+
+ {% for answer in answers %}
+
+ {{answer["aid"] | string}}. {{answer["atext"]}}
+
+ {% endfor %}
+
+
+ |||
+
+
+ {{ra | string}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: multiple_choice_q_and_a_index_en
+ reference: Pose a multi-choice question using the index as the answer
+ task_template: true
+ df12d7e1-2168-46e0-9400-c3a7ca27b42c: !Template
+ answer_choices: '{{ answers | map(attribute="atext") | join("|||") }}'
+ id: df12d7e1-2168-46e0-9400-c3a7ca27b42c
+ jinja: 'Answer/complete the following paragraph about {{category}}:
+
+
+ {{qtext}}
+
+
+ What is the correct answer?
+
+ - {{ answers | map(attribute="atext")| join("\n- ") }}
+
+
+ |||
+
+
+ {% for answer in answers if answer["aid"]==ra -%}
+
+ {{answer["atext"]}}
+
+ {%- endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: multiple_choice_q_and_a_with_context_en
+ reference: Pose a multi-choice question using category information as context
+ task_template: true
+ e0cb8056-22b4-4878-8164-a79cfc5d3a62: !Template
+ answer_choices: '{{ answers | map(attribute="atext") | join("|||") }}'
+ id: e0cb8056-22b4-4878-8164-a79cfc5d3a62
+ jinja: 'Given this list of statements about {{category}}: {{ answers | map(attribute="atext")
+ | map("lower") | map("trim", ".") | join(", ") }}.
+
+
+ Which one is the most appropriate answer/completion for the paragraph that follows?
+
+
+ {{qtext}}
+
+
+ |||
+
+
+ {% for answer in answers if answer["aid"]==ra -%}
+
+ {{answer["atext"]}}
+
+ {%- endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: multiple_choice_a_and_q_with_context_en
+ reference: Pose a multi-choice question presenting the answers first using category
+ as context
+ task_template: true
+ e4f4e194-a78b-433b-ac48-dabf6244be35: !Template
+ answer_choices: '{{ answers | map(attribute="atext") | join("|||") }}'
+ id: e4f4e194-a78b-433b-ac48-dabf6244be35
+ jinja: 'Given this list of statements: {{ answers | map(attribute="atext") | map("lower")
+ | map("trim", ".") | join(", ") }}.
+
+
+ Which one is the most appropriate answer/completion for the paragraph that follows?
+
+
+ {{qtext}}
+
+
+ |||
+
+
+ {% for answer in answers if answer["aid"]==ra -%}
+
+ {{answer["atext"]}}
+
+ {%- endfor %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: multiple_choice_a_and_q_en
+ reference: Pose a multi-choice question presenting the answers first
+ task_template: true
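Unlike the fixed choice lists elsewhere in this diff, the `head_qa` `answer_choices` entries are themselves Jinja expressions: they are rendered against the example first and only then split on `|||`. A sketch with a fabricated two-option question:

```python
# Sketch: answer_choices as a Jinja expression, rendered before splitting.
from jinja2 import Environment

env = Environment()
example = {  # fabricated head_qa-style example
    "qtext": "The molecule that carries amino acids to the ribosome is:",
    "answers": [
        {"aid": 1, "atext": "Transfer RNA."},
        {"aid": 2, "atext": "Messenger RNA."},
    ],
    "ra": 1,  # aid of the right answer
}

choices_src = '{{ answers | map(attribute="atext") | join("|||") }}'
rendered = env.from_string(choices_src).render(**example)
answer_choices = [c.strip() for c in rendered.split("|||")]
print(answer_choices)  # -> ['Transfer RNA.', 'Messenger RNA.']

# The target side of multiple_choice_q_and_a_en keeps the answer whose aid == ra:
target = next(a["atext"] for a in example["answers"] if a["aid"] == example["ra"])
print(target)  # -> Transfer RNA.
```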
diff --git a/promptsource/templates/health_fact/templates.yaml b/promptsource/templates/health_fact/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a563a7d4168a520c078e71ea770bfdf4ccdf1e7
--- /dev/null
+++ b/promptsource/templates/health_fact/templates.yaml
@@ -0,0 +1,105 @@
+dataset: health_fact
+templates:
+ 041ffdd5-88b0-41df-9e22-8c7a7bc0ce24: !Template
+ answer_choices: null
+ id: 041ffdd5-88b0-41df-9e22-8c7a7bc0ce24
+ jinja: "After reading:\n\n {{main_text }}\n\nI {{[\"do not believe\", \"do not\
+ \ think it is completely true that\", \"believe\", \"do not think it has been\
+ \ proven yet that\"][label]}}:\n\n{{claim}}\n\nis true because of: |||\n\n{{explanation}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: claim_explanation_generation_after_reading
+ reference: Given a claim and a belief on its veracity, generate an explanation
+ of that belief
+ 23a2ef7f-2032-4771-bf39-94b840aee763: !Template
+ answer_choices: definitely false ||| likely false ||| definitely true ||| not
+ proven
+ id: 23a2ef7f-2032-4771-bf39-94b840aee763
+ jinja: "After reading:\n\n {{main_text }}\n\nI believe :\n\n{{claim}}\n\nis \n\
+ |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim_veracity_classification_after_reading_I_believe
+ reference: ''
+ 277f3961-5e9b-4cd5-a13c-f822f6541c76: !Template
+ answer_choices: False ||| A mixture of true and false ||| True ||| Unproven
+ id: 277f3961-5e9b-4cd5-a13c-f822f6541c76
+ jinja: "The claim is: \n{{claim}}\n\nand the explanation is: \n{{explanation}}.\
+ \ \n\nGiven the above, this claim is: |||\n {{answer_choices[label]}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim_explanation_classification
+ reference: Read the claim and rely on explanation and main text
+ 5000d89e-a93e-4b96-9ad4-b93924e1066b: !Template
+ answer_choices: null
+ id: 5000d89e-a93e-4b96-9ad4-b93924e1066b
+ jinja: 'I {{["could not conclude", "could not say for sure", "could conclude",
+ "do not think it has been proven yet"][label]}}:
+
+
+ {{claim}}
+
+
+ from:
+
+
+ {{main_text}}
+
+
+ because of: |||
+
+
+ {{explanation}}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: claim_explanation_generation_I_because
+ reference: Given a claim and a belief on its veracity, generate an explanation
+ of that belief
+ ce294c51-275a-47f0-9106-2dc3104f1083: !Template
+ answer_choices: definitely false ||| likely false ||| definitely true ||| not
+ proven
+ id: ce294c51-275a-47f0-9106-2dc3104f1083
+ jinja: 'Tell me whether the following claim is {{answer_choices[0]}}, {{answer_choices[1]}},
+ {{answer_choices[2]}}, or {{answer_choices[3]}} after reading the passage.
+
+
+ Claim: {{claim}}
+
+
+ Passage: {{main_text }}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim_veracity_classification_tell_me
+ reference: ''
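The generation templates above pick their verbalizer by indexing an inline Jinja list with the four-way label, in the same order as the classification templates' `answer_choices` (false, mixture, true, unproven). The plain-Python equivalent, with an invented claim:

```python
# Plain-Python equivalent of the inline-list verbalizer used above.
verbalizers = [
    "do not believe",                            # label 0: definitely false
    "do not think it is completely true that",   # label 1: a mixture
    "believe",                                   # label 2: definitely true
    "do not think it has been proven yet that",  # label 3: unproven
]
label = 2
claim = "Vitamin D supplements shorten colds."  # invented
print(f"I {verbalizers[label]}: {claim} is true because of:")
```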
diff --git a/promptsource/templates/hellaswag/templates.yaml b/promptsource/templates/hellaswag/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..09d540d85b05ae97a291b6fe9380806b051518d2
--- /dev/null
+++ b/promptsource/templates/hellaswag/templates.yaml
@@ -0,0 +1,258 @@
+dataset: hellaswag
+templates:
+ 00caa8cb-7f67-43bc-9e90-fc1d5d329432: !Template
+ answer_choices: '{{endings | join(" ||| ")}}'
+ id: 00caa8cb-7f67-43bc-9e90-fc1d5d329432
+ jinja: 'Complete the description with an appropriate ending:
+
+ First, {{ ctx_a.lower() }} Then, {{ ctx_b.lower() }} ...
+
+
+ (a) {{ answer_choices[0] }}
+
+
+ (b) {{ answer_choices[1] }}
+
+
+ (c) {{ answer_choices[2] }}
+
+
+ (d) {{ answer_choices[3] }}
+
+ |||
+
+ {{ answer_choices[label | int()] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: complete_first_then
+ reference: ''
+ 196a016d-bd25-4387-90b9-53197fd43b1e: !Template
+ answer_choices: null
+ id: 196a016d-bd25-4387-90b9-53197fd43b1e
+ jinja: "What is the topic of the sentence: {{ctx}} \n|||\n{{activity_label}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Topic without the ending answer
+ reference: Generate the topic given only the starting sentence
+ 1fd44f45-d0e6-41ad-a01f-737f4c53645b: !Template
+ answer_choices: '{{endings | join(" ||| ")}}'
+ id: 1fd44f45-d0e6-41ad-a01f-737f4c53645b
+ jinja: 'Complete the sentence: {{ctx}}
+
+ |||
+
+ {{answer_choices[label | int()]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Open-ended completion
+ reference: Template for open-ended common sense completion
+ 4ebf22a1-2d23-426c-a083-b43fc8567687: !Template
+ answer_choices: null
+ id: 4ebf22a1-2d23-426c-a083-b43fc8567687
+ jinja: "{{ctx}} {{endings[label | int()]}}\nCan you identify the topic of the\
+ \ paragraph? \n|||\n{{activity_label}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Topic of the context
+ reference: List the activity label as the topic from the sentence
+ 52fbd075-46cb-49fb-a41b-00a0f4a60285: !Template
+ answer_choices: '{{endings | join(" ||| ") }}'
+ id: 52fbd075-46cb-49fb-a41b-00a0f4a60285
+ jinja: '{% set prompts = [
+
+ ''Can you pick the correct ending for the sentence: '',
+
+ ''The task is to generate the ending for the sentence: '',
+
+ ''How does this sentence end? '',
+
+ ''From the list of endings described below, what ending makes the most sense
+ for the sentence '',]
+
+ %}
+
+ {{prompts | choice}}
+
+ {{ctx}}
+
+
+ (a) {{answer_choices[0]}}
+
+
+ (b) {{answer_choices[1]}}
+
+
+ (c) {{answer_choices[2]}}
+
+
+ (d) {{answer_choices[3]}}
+
+ |||
+
+ {{answer_choices [label | int()]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Randomized prompts template
+ reference: Original task template with randomized prompt template
+ 663470b8-3fab-449c-84ab-6c4738da51b3: !Template
+ answer_choices: Yes ||| No
+ id: 663470b8-3fab-449c-84ab-6c4738da51b3
+ jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the following description:\
+ \ {{ ctx_a }}\nIs the following an appropriate continuation?\n{{ ctx_b }} {{\
+ \ endings[instance] }}\nYes or No?\n||| \n{% if label == instance | string()\
+ \ %}\n{{answer_choices[0]}}\n{% else %} \n{{answer_choices[1]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Appropriate continuation - Yes or No
+ reference: 'The template checks if the two contexts are valid continuations. '
+ 6e9f8d7d-9016-45bc-b997-c42aeb2dc944: !Template
+ answer_choices: '{{endings | join("|||")}}'
+ id: 6e9f8d7d-9016-45bc-b997-c42aeb2dc944
+ jinja: 'How does this sentence end?
+
+ {{ctx}}
+
+
+ (a) {{answer_choices[0]}}
+
+
+ (b) {{answer_choices[1]}}
+
+
+ (c) {{answer_choices[2]}}
+
+
+ (d) {{answer_choices[3]}}
+
+
+ Hint: the topic of the sentence is {{activity_label}}
+
+ |||
+
+ {{answer_choices [label | int()]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Predict ending with hint
+ reference: Predict the ending with the activity label as the hint
+ a8ab00ee-78ad-465b-bbf0-9cd3d242dd7e: !Template
+ answer_choices: null
+ id: a8ab00ee-78ad-465b-bbf0-9cd3d242dd7e
+ jinja: 'How would you start the sentence:
+
+ {{endings[label | int()]}}
+
+ |||
+
+ {{ctx}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Open-ended start
+ reference: Template asks the model to generate a premise or start for the ending
+ d95b81c4-5db7-44c1-926e-c7222c896a32: !Template
+ answer_choices: Yes ||| No
+ id: d95b81c4-5db7-44c1-926e-c7222c896a32
+ jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the following text:\
+ \ {{ ctx_b }} {{ endings[instance] }}\nIs it an appropriate continuation of\
+ \ the following text: \n{{ ctx_a }} ?\nYes or No?\n||| \n{% if label == instance\
+ \ | string() %}\n{{answer_choices[0]}}\n{% else %} \n{{answer_choices[1]}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Reversed appropriate continuation - Yes or No
+ reference: The template randomly selects a continuation and checks if the continuation
+ is appropriate for the given premise.
+ db8b1c25-f2db-4604-b8fc-f93d69d6fae7: !Template
+ answer_choices: Ending 1 ||| Ending 2 ||| Ending 3 ||| Ending 4
+ id: db8b1c25-f2db-4604-b8fc-f93d69d6fae7
+ jinja: '{{ ctx }}...
+
+ How does the description likely end?
+
+
+ Ending 1: {{ endings[0] }}
+
+
+ Ending 2: {{ endings[1] }}
+
+
+ Ending 3: {{ endings[2] }}
+
+
+ Ending 4: {{ endings[3] }}
+
+ ||| {{ answer_choices[label | int()] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: how_ends
+ reference: ''
+ def77598-682f-48de-b187-55db4d390f07: !Template
+ answer_choices: Ending 1 ||| Ending 2 ||| Ending 3 ||| Ending 4
+ id: def77598-682f-48de-b187-55db4d390f07
+ jinja: "If a description of a situation begins like this: {{ ctx }}... Then how\n\
+ does it continue? \n\nEnding 1: {{ endings[0] }}\n\nEnding 2: {{ endings[1]\
+ \ }}\n\nEnding 3: {{ endings[2] }}\n\nEnding 4: {{ endings[3] }}\n|||{{answer_choices[label\
+ \ | int()] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: if_begins_how_continues
+ reference: Returns an "Ending N" string as the target (a paraphrase of how_ends)
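The "Randomized prompts template" above pipes a list of instructions through a `choice` filter. That filter is not built into Jinja2; promptsource registers its own. A sketch that approximates the behavior with `random.choice` (an assumption about its behavior, not the actual registration code):

```python
# Approximation of the custom `choice` filter used by the template above.
import random

from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice  # assumed behavior: pick one list element

src = (
    "{% set prompts = ['Can you pick the correct ending for the sentence: ', "
    "'How does this sentence end? '] %}"
    "{{ prompts | choice }}{{ ctx }}"
)
print(env.from_string(src).render(ctx="A man is standing on a ladder..."))
```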
diff --git a/promptsource/templates/hlgd/templates.yaml b/promptsource/templates/hlgd/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a52f043782368a592389b08a7043ab792304c5b7
--- /dev/null
+++ b/promptsource/templates/hlgd/templates.yaml
@@ -0,0 +1,232 @@
+dataset: hlgd
+templates:
+ 06c785aa-2c21-4842-a27c-64c86c98c052: !Template
+ answer_choices: yes ||| no
+ id: 06c785aa-2c21-4842-a27c-64c86c98c052
+ jinja: "On {{date_a}}, the article \"{{headline_a}}\" is published.\nOn {{date_b}},\
+ \ the article \"{{headline_b}}\" is published. \nAre they related to the same\
+ \ event? Answer {{ answer_choices[0] }} or {{ answer_choices[1] }}.\n|||\n{%\
+ \ if label %}\n{{ answer_choices[0] }}\n{% else %}\n{{ answer_choices[1] }}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_with_time_interrogative_related
+ reference: ''
+ 147ad380-5ce4-4900-b5ec-f01a63bb3653: !Template
+ answer_choices: yes ||| no
+ id: 147ad380-5ce4-4900-b5ec-f01a63bb3653
+ jinja: "Do the following headlines talk about the same event? \n{{headline_a}}\n\
+ {{headline_b}}\n|||\n{% if label %}\n{{ answer_choices[0] }}\n{% else %}\n{{\
+ \ answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_interrogative_talk
+ reference: ''
+ 2a2a9754-b6cd-40a7-a920-b75d9116d431: !Template
+ answer_choices: yes ||| no
+ id: 2a2a9754-b6cd-40a7-a920-b75d9116d431
+ jinja: 'Barbara just finished reading an article titled "{{headline_a}}". She
+ then came across another article "{{headline_b}}". Is the second article describing
+ the same event as the first article?
+
+ |||
+
+ {% if label %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_read
+ reference: ''
+ 36154717-e2bc-4414-bfe6-8ed70ebf292d: !Template
+ answer_choices: null
+ id: 36154717-e2bc-4414-bfe6-8ed70ebf292d
+ jinja: "Given the headline: \"{{headline_a}}\", what would be another headline\
+ \ for the same event? \n|||\n{% if label %}\n{{headline_b}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_another_headline
+ reference: ''
+ 3c655244-779c-4a34-9ab0-722bcdc8567b: !Template
+ answer_choices: same event ||| different event
+ id: 3c655244-779c-4a34-9ab0-722bcdc8567b
+ jinja: "Which one of the following choices \"{{ answer_choices[0] }}\" or \"{{\
+ \ answer_choices[1] }}\" best describes the relationship between these two pieces\
+ \ of news? \nNews Title A: {{headline_a}}\nNews Title B: {{headline_b}}\n|||\n\
+ {% if label %}\n{{ answer_choices[0] }}\n{% else %}\n{{ answer_choices[1] }}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_describe_rel
+ reference: ''
+ 4d97b99c-e438-403e-a864-ffbbc193bf63: !Template
+ answer_choices: yes ||| no
+ id: 4d97b99c-e438-403e-a864-ffbbc193bf63
+ jinja: "Headline 1: \"{{headline_a}}\" \nHeadline 2: \"{{headline_b}}\"\nDo they\
+ \ refer to the same event? \n|||\n{% if label %}\n{{ answer_choices[0] }}\n\
+ {% else %}\n{{ answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_refer
+ reference: ''
+ 7588a4a1-bff3-4543-aaa3-e3b679b6b97b: !Template
+ answer_choices: agree ||| disagree
+ id: 7588a4a1-bff3-4543-aaa3-e3b679b6b97b
+ jinja: 'Both headlines "{{headline_a}}" and "{{headline_b}}" are discussing the
+ same event. Do you {{ answer_choices[0] }} or {{ answer_choices[1] }}?
+
+ |||
+
+ {% if label %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_discuss
+ reference: ''
+ 811556ef-26cb-4aae-b59d-8b06fdc546a5: !Template
+ answer_choices: yes ||| no
+ id: 811556ef-26cb-4aae-b59d-8b06fdc546a5
+ jinja: 'Can "{{headline_a}}" replace "{{headline_b}}"? Here, a headline is replaceable
+ by another headline if the latter headline describes the same event as the former.
+
+ |||
+
+ {% if label %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_replace
+ reference: ''
+ f680d28f-ba41-4a30-a099-88be61751e7f: !Template
+ answer_choices: yes ||| no
+ id: f680d28f-ba41-4a30-a099-88be61751e7f
+ jinja: 'The editor asks James to create a headline that describes the same event
+ as "{{headline_a}}". James comes up with "{{headline_b}}". Does the new headline
+ satisfy the editor''s requirement? Answer {{ answer_choices[0] }} or {{ answer_choices[1]
+ }}.
+
+ |||
+
+ {% if label %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_editor_asks
+ reference: ''
+ feccc382-21c7-464f-b322-b03dde3adfd2: !Template
+ answer_choices: yes ||| no
+ id: feccc382-21c7-464f-b322-b03dde3adfd2
+ jinja: "On {{date_a}}, the article \"{{headline_a}}\" is published.\nOn {{date_b}},\
+ \ the article \"{{headline_b}}\" is published. \nAnswer {{ answer_choices[0]\
+ \ }} if the second article's headline is a paraphrase of the first; otherwise,\
+ \ answer {{ answer_choices[1] }}.\n|||\n{% if label %}\n{{ answer_choices[0]\
+ \ }}\n{% else %}\n{{ answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_with_time_affirmative
+ reference: ''
+ ff3d165e-3c52-42f4-96e0-df125605a379: !Template
+ answer_choices: yes ||| no
+ id: ff3d165e-3c52-42f4-96e0-df125605a379
+ jinja: 'Does the headline "{{headline_a}}" published on {{date_a}} talk about
+ the same event as "{{headline_b}}" published on {{date_b}} ? Answer {{ answer_choices[0]
+ }} or {{ answer_choices[1] }}.
+
+ |||
+
+ {% if label %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_same_event_with_time_interrogative_talk
+ reference: ''
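A note on the template format used throughout these files: inside each `jinja` field, `|||` separates the rendered prompt from the rendered target, and the `answer_choices` string is itself a `|||`-separated list that is exposed to the template as the `answer_choices` variable. Below is a minimal sketch of how one of the `is_same_event` templates above renders, using plain `jinja2` and an invented record; the real app routes these fields through its own Template class, so treat this as an approximation:

    from jinja2 import Template

    # jinja and answer_choices fields as in the entry above.
    jinja_src = (
        "Do the following headlines talk about the same event? \n"
        "{{headline_a}}\n{{headline_b}}\n|||\n"
        "{% if label %}\n{{ answer_choices[0] }}\n"
        "{% else %}\n{{ answer_choices[1] }}\n{% endif %}"
    )
    answer_choices = [c.strip() for c in "yes ||| no".split("|||")]

    # Hypothetical record, not drawn from the real dataset.
    example = {"headline_a": "Storm hits the coast",
               "headline_b": "Coastal storm makes landfall",
               "label": 1}

    rendered = Template(jinja_src).render(answer_choices=answer_choices, **example)
    prompt, target = (part.strip() for part in rendered.split("|||"))
    print(prompt)  # the two headlines plus the question
    print(target)  # "yes", since label is truthy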
diff --git a/promptsource/templates/hotpot_qa/distractor/templates.yaml b/promptsource/templates/hotpot_qa/distractor/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6f0cfebc25d1d31fcc221e6b67fb9786e52223f7
--- /dev/null
+++ b/promptsource/templates/hotpot_qa/distractor/templates.yaml
@@ -0,0 +1,126 @@
+dataset: hotpot_qa
+subset: distractor
+templates:
+ 0eff7d44-d78a-47b5-9526-8c38f74be939: !Template
+ answer_choices: null
+ id: 0eff7d44-d78a-47b5-9526-8c38f74be939
+ jinja: "In the paragraphs below, what sentence(s) support the answer of \"{{answer}}\"\
+ \ to the question \"{{question}}\"?\n\nInformation:\n{% for sents in context.sentences\
+ \ %}\n - {{sents | join(\"\")}}\n{% endfor %}\n\n|||\n{%- for paragraph in\
+ \ supporting_facts.title -%}\n{% set outer_loop = loop %}\n{%- for title in\
+ \ context.title -%}\n{%- if title==paragraph %}\n{{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+ \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - COQA F1
+ - Other
+ original_task: true
+ name: generate_explanations_interrogative
+ reference: ''
+ 20242fae-2b56-43db-ae50-734c5ca10c5c: !Template
+ answer_choices: null
+ id: 20242fae-2b56-43db-ae50-734c5ca10c5c
+ jinja: "Select sentences from the paragraphs below that explain the question-answer\
+ \ pair. \"{{question}} {{answer}}\"\n\nInformation:\n{% for sents in context.sentences\
+ \ %}\n - {{sents | join(\"\")}}\n{% endfor %}\n\n|||\n{%- for paragraph in\
+ \ supporting_facts.title -%}\n{% set outer_loop = loop %}\n{%- for title in\
+ \ context.title -%}\n{%- if title==paragraph %}\n{{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+ \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - Other
+ - ROUGE
+ original_task: true
+ name: generate_explanations_affirmative
+ reference: 'Given information, question, and its answer, list the sentences from
+ information that explain the answer '
+ 690f571f-5113-426c-8aec-bdbbf21f99ae: !Template
+ answer_choices: null
+ id: 690f571f-5113-426c-8aec-bdbbf21f99ae
+ jinja: "{{question}} \n\nHint: use the information from the paragraphs below to\
+ \ answer the question.\n\n{% for sents in context.sentences %}\n - {{sents\
+ \ | join(\"\")}}\n{% endfor %}\n||| \n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: generate_answer_interrogative
+ reference: ''
+ 6e33c684-725d-49a2-8da3-f9d0b2bb60a0: !Template
+ answer_choices: null
+ id: 6e33c684-725d-49a2-8da3-f9d0b2bb60a0
+ jinja: "What is the question that begets the answer of \"{{answer}}\"?\n\nInformation:\n\
+ {% for sents in context.sentences %}\n - {{sents | join(\"\")}}\n{% endfor\
+ \ %}\n||| \n{{question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: Given information and answer, generate question.
+ 9aab7543-e491-403f-a77b-63a57ef3316f: !Template
+ answer_choices: null
+ id: 9aab7543-e491-403f-a77b-63a57ef3316f
+ jinja: "{{question}} Answer the question and give supporting facts from the paragraphs\
+ \ below. Give your response in the following format:\nAnswer: ... \nExplanations:\n\
+ - ...\n- ...\n\nParagraphs:\n{% for sents in context.sentences %}\n - {{sents\
+ \ | join(\"\")}}\n{% endfor %}\n\n|||\n{{answer}}\n\nExplanations:\n{%- for\
+ \ paragraph in supporting_facts.title -%}\n{% set outer_loop = loop %}\n{%-\
+ \ for title in context.title -%}\n{%- if title==paragraph %}\n- {{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+ \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: generate_answer_and_explanations
+ reference: Given information and question, answer it and list the sentences from
+ information that explain the answer.
+ ea62fe03-8871-4322-8b5c-c060f8d41923: !Template
+ answer_choices: null
+ id: ea62fe03-8871-4322-8b5c-c060f8d41923
+ jinja: "Generate titles (separated by semi-colons) for each of the paragraphs\
+ \ below:\n{% for sents in context.sentences %}\n - {{sents | join(\"\")}}\n\
+ {% endfor %}\n||| \n{{context.title | join(\"; \")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_title_affirmative
+ reference: Given a list of paragraphs, generate a string of titles (separated
+ by semi-colons) for each of them.
+ f14adb21-34ba-4641-b9ce-dfbd0ae9744c: !Template
+ answer_choices: null
+ id: f14adb21-34ba-4641-b9ce-dfbd0ae9744c
+ jinja: "Answer the following question, \"{{question}}\", using the information\
+ \ provided below.\n\n{% for sents in context.sentences %}\n - {{sents | join(\"\
+ \")}}\n{% endfor %}\n||| \n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: generate_answer_affirmative
+ reference: Given information and question, generate answer.
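The supporting-facts targets above all share one dense Jinja fragment: an outer loop over `supporting_facts.title` (its `loop` saved as `outer_loop`), and an inner loop that matches each fact's title against `context.title` to recover the paragraph index before selecting the sentence at `sent_id`. The same lookup reads roughly as follows in plain Python, over a hypothetical record shaped like hotpot_qa's parallel-list schema:

    # Hypothetical record mirroring hotpot_qa's parallel-list layout.
    context = {
        "title": ["Paris", "Berlin"],
        "sentences": [
            ["Paris is the capital of France.", "It lies on the Seine."],
            ["Berlin is the capital of Germany.", "It is on the Spree."],
        ],
    }
    supporting_facts = {"title": ["Berlin", "Paris"], "sent_id": [0, 1]}

    # For each supporting fact, locate its paragraph by title, then take the
    # sentence indexed by the fact's sent_id within that paragraph.
    supporting_sentences = []
    for fact_idx, fact_title in enumerate(supporting_facts["title"]):
        for para_idx, title in enumerate(context["title"]):
            if title == fact_title:
                sent_id = supporting_facts["sent_id"][fact_idx]
                supporting_sentences.append(context["sentences"][para_idx][sent_id])

    print(supporting_sentences)
    # ['Berlin is the capital of Germany.', 'It lies on the Seine.']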
diff --git a/promptsource/templates/hotpot_qa/fullwiki/templates.yaml b/promptsource/templates/hotpot_qa/fullwiki/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9e83330754575231a6ddcadd17578a8bc7078e67
--- /dev/null
+++ b/promptsource/templates/hotpot_qa/fullwiki/templates.yaml
@@ -0,0 +1,144 @@
+dataset: hotpot_qa
+subset: fullwiki
+templates:
+ 0b5bab65-4109-4a80-91fd-f26af330b558: !Template
+ answer_choices: null
+ id: 0b5bab65-4109-4a80-91fd-f26af330b558
+ jinja: "Answer the following question, \"{{question}}\", using the information\
+ \ provided below.\n\n{% for sents in context.sentences %}\n - {{sents | join(\"\
+ \")}}\n{% endfor %}\n||| \n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: generate_answer_affirmative
+ reference: Given information and question, generate answer.
+ 465625a7-52be-431f-a07d-8a0fb1bcfe8b: !Template
+ answer_choices: null
+ id: 465625a7-52be-431f-a07d-8a0fb1bcfe8b
+ jinja: "{{question}} Answer the question and give supporting facts from the paragraphs\
+ \ below. Give your response in the following format:\nAnswer: ... \nExplanations:\n\
+ - ...\n- ...\n\nParagraphs:\n{% for sents in context.sentences %}\n - {{sents\
+ \ | join(\"\")}}\n{% endfor %}\n\n|||\n{{answer}}\n\nExplanations:\n{%- for\
+ \ paragraph in supporting_facts.title -%}\n{% set outer_loop = loop %}\n{%-\
+ \ for title in context.title -%}\n{%- if title==paragraph %}\n- {{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+ \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: generate_answer_and_explanations
+ reference: Given information and question, answer it and list the sentences from
+ information that explain the answer.
+ 736cf572-0299-48cc-86e2-821527f2b796: !Template
+ answer_choices: null
+ id: 736cf572-0299-48cc-86e2-821527f2b796
+ jinja: "In the paragraphs below, what sentence(s) support the answer of \"{{answer}}\"\
+ \ to the question \"{{question}}\"?\n\nInformation:\n{% for sents in context.sentences\
+ \ %}\n - {{sents | join(\"\")}}\n{% endfor %}\n\n|||\n{%- for paragraph in\
+ \ supporting_facts.title -%}\n{% set outer_loop = loop %}\n{%- for title in\
+ \ context.title -%}\n{%- if title==paragraph %}\n{{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+ \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - COQA F1
+ - Other
+ original_task: true
+ name: generate_explanations_interrogative
+ reference: ''
+ 8b7b3f27-c235-4a1c-907d-3f37e5f94d93: !Template
+ answer_choices: comparison ||| bridge
+ id: 8b7b3f27-c235-4a1c-907d-3f37e5f94d93
+ jinja: 'What is the type of the question "{{question}}": {{ answer_choices[0].capitalize()
+ }} or {{answer_choices[1]}}?
+
+ |||
+
+ {{type}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: classify_question_type
+ reference: Given question, classify its type.
+ 928f7bd2-2e4b-413e-aa1e-ffb5e00b2ff5: !Template
+ answer_choices: null
+ id: 928f7bd2-2e4b-413e-aa1e-ffb5e00b2ff5
+ jinja: "Generate titles (separated by semi-colons) for each of the paragraphs\
+ \ below:\n{% for sents in context.sentences %}\n - {{sents | join(\"\")}}\n\
+ {% endfor %}\n||| \n{{context.title | join(\"; \")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_title_affirmative
+ reference: Given a list of paragraphs, generate a string of titles (separated
+ by semi-colons) for each of them.
+ 9a121de7-1162-48cd-8277-31862d6dfb16: !Template
+ answer_choices: null
+ id: 9a121de7-1162-48cd-8277-31862d6dfb16
+ jinja: "What is the question that begets the answer of \"{{answer}}\"?\n\nInformation:\n\
+ {% for sents in context.sentences %}\n - {{sents | join(\"\")}}\n{% endfor\
+ \ %}\n||| \n{{question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: Given information and answer, generate question.
+ c6664543-0f44-4486-a37c-9db39ef87680: !Template
+ answer_choices: null
+ id: c6664543-0f44-4486-a37c-9db39ef87680
+ jinja: "Select sentences from the paragraphs below that explain the question-answer\
+ \ pair. \"{{question}} {{answer}}\"\n\nInformation:\n{% for sents in context.sentences\
+ \ %}\n - {{sents | join(\"\")}}\n{% endfor %}\n\n|||\n{%- for paragraph in\
+ \ supporting_facts.title -%}\n{% set outer_loop = loop %}\n{%- for title in\
+ \ context.title -%}\n{%- if title==paragraph %}\n{{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+ \ }}\n{%- endif -%}\n{%- endfor -%}\n{%- endfor -%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - Other
+ - ROUGE
+ original_task: true
+ name: generate_explanations_affirmative
+ reference: 'Given information, question, and its answer, list the sentences from
+ information that explain the answer '
+ f899ab35-1ac8-4223-8f0c-19437e95df7b: !Template
+ answer_choices: null
+ id: f899ab35-1ac8-4223-8f0c-19437e95df7b
+ jinja: "{{question}} \n\nHint: use the information from the paragraphs below to\
+ \ answer the question.\n\n{% for sents in context.sentences %}\n - {{sents\
+ \ | join(\"\")}}\n{% endfor %}\n||| \n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: generate_answer_interrogative
+ reference: ''
diff --git a/promptsource/templates/humicroedit/subtask-1/templates.yaml b/promptsource/templates/humicroedit/subtask-1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7451e1a8ad6bca738e750c0eca250b5e0b15f7dd
--- /dev/null
+++ b/promptsource/templates/humicroedit/subtask-1/templates.yaml
@@ -0,0 +1,148 @@
+dataset: humicroedit
+subset: subtask-1
+templates:
+ 692750f4-b4a2-4344-bc4d-e05daef47c25: !Template
+ answer_choices: null
+ id: 692750f4-b4a2-4344-bc4d-e05daef47c25
+ jinja: 'I need to know how funny it is to replace "{{ original[original.index("<")+1:original.index("/>")]
+ }}" with "{{ edit }}" in the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+ original[original.index("<")+1:original.index("/>")]) }} ".
+
+
+ Question: Can you give me a number from {{"0.0 to 3.0"}} that denotes how funny
+ it is, where {{"0.0"}} means not funny and {{"3.0"}} means funny?
+
+
+ |||
+
+ {{ (((5 * meanGrade) | round) / 5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: know_funniness_original_sent_edit_word
+ reference: ''
+ 6c6c7354-fcd5-4b0d-8672-671c639c25f5: !Template
+ answer_choices: null
+ id: 6c6c7354-fcd5-4b0d-8672-671c639c25f5
+ jinja: 'I need to know how funny the edited sentence is compared to the original
+ sentence:
+
+ Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+ original[original.index("<")+1:original.index("/>")]) }}
+
+ Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+ edit) }}
+
+ Question: Can you give me a number from {{"0.0 to 3.0"}} that denotes how funny
+ it is, where {{"0.0"}} means not funny and {{"3.0"}} means funny?
+
+ |||
+
+ {{ (((5 * meanGrade) | round) / 5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: know_funniness_original_sent_edited_sent
+ reference: ''
+ 759a11e7-5933-41a1-b803-f352eb385d28: !Template
+ answer_choices: null
+ id: 759a11e7-5933-41a1-b803-f352eb385d28
+ jinja: 'Please give a score between 0 (not funny) and 3 (very funny) denoting
+ the funniness of replacing "{{ original[original.index("<")+1:original.index("/>")]
+ }}" with "{{ edit }}" in the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+ original[original.index("<")+1:original.index("/>")]) }} ".
+
+
+ |||
+
+
+ {{ (((5 * meanGrade) | round) / 5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: score_original_sent_edit_word
+ reference: ''
+ 90ac629a-f670-4c43-bbf8-a9ef9021c0b3: !Template
+ answer_choices: null
+ id: 90ac629a-f670-4c43-bbf8-a9ef9021c0b3
+ jinja: "I need to assign a score from {{\"0.0 to 3.0\"}} that denotes how funny\
+ \ it is to replace \"{{ original[original.index(\"<\")+1:original.index(\"/>\"\
+ )] }}\" with \"{{ edit }}\" in the sentence \"{{ original.replace(original[original.index(\"\
+ <\"):original.index(\">\")+1], original[original.index(\"<\")+1:original.index(\"\
+ />\")]) }} \". \nWhat score should I assign? A low score means not funny whereas\
+ \ a high score means very funny.\n||| \n{{ (((5 * meanGrade) | round) / 5) }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: score_original_sent_edit_word_low_high
+ reference: ''
+ a08cab27-06fb-4c96-b6b1-eb0533fe9b25: !Template
+ answer_choices: null
+ id: a08cab27-06fb-4c96-b6b1-eb0533fe9b25
+ jinja: 'How funny is the following edited sentence compared to the original sentence?
+
+ Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+ original[original.index("<")+1:original.index("/>")]) }}
+
+ Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+ edit) }}
+
+ Your answer should be something like {{"1.5"}}, where {{"0.0 means not funny
+ and 3.0 means funny"}}.
+
+ |||
+
+ {{ (((5 * meanGrade) | round) / 5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: example_score_original_sent_edited_sent_interrogative
+ reference: ''
+ ac6a9fa1-0f23-4ee9-9bec-c6f9f8daf7a9: !Template
+ answer_choices: null
+ id: ac6a9fa1-0f23-4ee9-9bec-c6f9f8daf7a9
+ jinja: 'Give your best shot to rate how funny the following edited sentence is
+ compared to the original sentence:
+
+ Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+ original[original.index("<")+1:original.index("/>")]) }}
+
+ Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+ edit) }}
+
+ Use a scale of {{"0.0"}}, which denotes not funny, to {{"3.0"}}, which means
+ really hilarious.
+
+
+ |||
+
+ {{ (((5 * meanGrade) | round) / 5) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: best_shot_rate_original_sent_edited_sent
+ reference: ''
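Two idioms recur in every subtask-1 template above. First, the replaceable word is wrapped as `<word/>` inside `original`, so string slicing with `index("<")` and `index("/>")` extracts it, and `replace` swaps the whole tag for either the original word or the edit. Second, the target `(((5 * meanGrade) | round) / 5)` rounds the mean funniness grade to the nearest multiple of 0.2. A sketch with an invented headline:

    # Hypothetical humicroedit-style record: the editable word is tagged <.../>.
    original = "Police arrest <suspect/> after chase"
    edit = "kangaroo"
    mean_grade = 1.73

    tagged = original[original.index("<") + 1 : original.index("/>")]  # "suspect"
    span = original[original.index("<") : original.index(">") + 1]     # "<suspect/>"

    original_sentence = original.replace(span, tagged)  # tag stripped
    edited_sentence = original.replace(span, edit)      # edit substituted

    # (((5 * meanGrade) | round) / 5): round to the nearest 0.2 step.
    target = round(5 * mean_grade) / 5

    print(original_sentence)  # Police arrest suspect after chase
    print(edited_sentence)    # Police arrest kangaroo after chase
    print(target)             # 1.8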
diff --git a/promptsource/templates/humicroedit/subtask-2/templates.yaml b/promptsource/templates/humicroedit/subtask-2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24241c1ec5f7f69a1eed94b41e895a2cfec7323e
--- /dev/null
+++ b/promptsource/templates/humicroedit/subtask-2/templates.yaml
@@ -0,0 +1,209 @@
+dataset: humicroedit
+subset: subtask-2
+templates:
+ 437942d0-f1e0-4b17-83d0-01b19e54ec51: !Template
+ answer_choices: C ||| A ||| B
+ id: 437942d0-f1e0-4b17-83d0-01b19e54ec51
+ jinja: 'Given an original sentence "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+ original1[original1.index("<")+1:original1.index("/>")]) }}", is it more humorous
+ to replace "{{ original1[original1.index("<")+1:original1.index("/>")] }}" with
+ "{{ edit1 }}", or to replace "{{ original2[original2.index("<")+1:original2.index("/>")]
+ }}" with "{{ edit2 }}", or are both equally hilarious?
+
+ {{ answer_choices[1] }}. replace "{{ original1[original1.index("<")+1:original1.index("/>")]
+ }}" with "{{ edit1 }}"
+
+ {{ answer_choices[2] }}. replace "{{ original2[original2.index("<")+1:original2.index("/>")]
+ }}" with "{{ edit2 }}"
+
+ {{ answer_choices[0] }}. both equally hilarious
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: original_sent_edit_words_qa_id
+ reference: ''
+ 49c71a8a-97af-465c-af04-36f08884e568: !Template
+ answer_choices: C ||| A ||| B
+ id: 49c71a8a-97af-465c-af04-36f08884e568
+ jinja: "Original sentence: {{ original1.replace(original1[original1.index(\"<\"\
+ ):original1.index(\">\")+1], original1[original1.index(\"<\")+1:original1.index(\"\
+ />\")]) }}.\nEdited sentence A: {{ original1.replace(original1[original1.index(\"\
+ <\"):original1.index(\">\")+1], edit1) }}.\nEdited sentence B: {{ original2.replace(original2[original2.index(\"\
+ <\"):original2.index(\">\")+1], edit2) }}.\nThere are two edited sentences based\
+ \ on the original sentence. Which is more humorous ({{answer_choices[1]}} or\
+ \ {{answer_choices[2]}}), or are they equally humorous? \n{{answer_choices[1]}}.\
+ \ Edited sentence A\n{{answer_choices[2]}}. Edited sentence B\n{{answer_choices[0]}}.\
+ \ Equal\n|||\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: original_sent_edited_sentences_qa_id
+ reference: ''
+ 6d576e77-df98-47cd-b92e-c87a56190be4: !Template
+ answer_choices: C ||| A ||| B
+ id: 6d576e77-df98-47cd-b92e-c87a56190be4
+ jinja: 'Given an original sentence "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+ original1[original1.index("<")+1:original1.index("/>")]) }}", we have two replacement
+ strategies. The first is to replace "{{ original1[original1.index("<")+1:original1.index("/>")]
+ }}" with "{{ edit1 }}", and the second is to replace "{{ original2[original2.index("<")+1:original2.index("/>")]
+ }}" with "{{ edit2 }}".
+
+ Is the first strategy more humorous or the second, or are they equally funny?
+
+ {{ answer_choices[1] }}. The first strategy
+
+ {{ answer_choices[2] }}. The second strategy
+
+ {{ answer_choices[0] }}. Both are equally funny
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: original_sent_edit_words_qa_strategy_id
+ reference: ''
+ 794ee65f-df0a-4448-8eac-20f757a8918d: !Template
+ answer_choices: C ||| A ||| B
+ id: 794ee65f-df0a-4448-8eac-20f757a8918d
+ jinja: 'There are two sentences:
+
+ Sentence1: {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+ edit1) }}
+
+ Sentence2: {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+ edit2) }}
+
+ Which sentence is more humorous?
+
+ {{ answer_choices[1] }}. Sentence 1
+
+ {{ answer_choices[2] }}. Sentence 2
+
+ {{ answer_choices[0] }}. Equal
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: only_edited_sentences_QA_id
+ reference: ''
+ 88054771-74d2-481f-91f1-c078a2bda5b9: !Template
+ answer_choices: equal ||| A ||| B
+ id: 88054771-74d2-481f-91f1-c078a2bda5b9
+ jinja: 'Which of the following sentences is more humorous? If they are equally
+ funny, please answer "{{ answer_choices[0] }}".
+
+ {{ answer_choices[1] }}. {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+ edit1) }}
+
+ {{ answer_choices[2] }}. {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+ edit2) }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: only_edited_sentences_id
+ reference: ''
+ 8e5f09ae-27bc-4b34-b20e-6bc6672a2c1a: !Template
+ answer_choices: Equal ||| Sentence 1 ||| Sentence 2
+ id: 8e5f09ae-27bc-4b34-b20e-6bc6672a2c1a
+ jinja: 'There are two sentences:
+
+ Sentence1: {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+ edit1) }}
+
+ Sentence2: {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+ edit2) }}
+
+ Which sentence is more humorous?
+
+ - {{ answer_choices[1] }}
+
+ - {{ answer_choices[2] }}
+
+ - {{ answer_choices[0] }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: only_edited_sentences_QA_text
+ reference: ''
+ b9e3fe90-d328-44a8-bb6e-212f600a2050: !Template
+ answer_choices: Equal ||| First ||| Second
+ id: b9e3fe90-d328-44a8-bb6e-212f600a2050
+ jinja: "Given an original sentence \"{{ original1.replace(original1[original1.index(\"\
+ <\"):original1.index(\">\")+1], original1[original1.index(\"<\")+1:original1.index(\"\
+ />\")]) }}\", we have two replacement strategies:\nThe first is to replace \"\
+ {{ original1[original1.index(\"<\")+1:original1.index(\"/>\")] }}\" with \"\
+ {{ edit1 }}\".\nThe second is to replace \"{{ original2[original2.index(\"<\"\
+ )+1:original2.index(\"/>\")] }}\" with \"{{ edit2 }}\".\nWhich strategy is more\
+ \ humorous (respond with \"{{answer_choices[1]}}\" or \"{{answer_choices[2]}}\"\
+ ) or equally funny (if so, respond with \"{{answer_choices[0]}}\")? \n|||\n\
+ {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: original_sent_edit_words_qa_strategy
+ reference: ''
+ ec92a63f-7d82-48f0-a9e4-8e99dd5a0bb0: !Template
+ answer_choices: Equally funny ||| First ||| Second
+ id: ec92a63f-7d82-48f0-a9e4-8e99dd5a0bb0
+ jinja: "Given an original sentence \"{{ original1.replace(original1[original1.index(\"\
+ <\"):original1.index(\">\")+1], original1[original1.index(\"<\")+1:original1.index(\"\
+ />\")]) }}\", we have two edited sentences. The first is \"{{ original1.replace(original1[original1.index(\"\
+ <\"):original1.index(\">\")+1], edit1) }}\", and the second is \"{{ original2.replace(original2[original2.index(\"\
+ <\"):original2.index(\">\")+1], edit2) }}\". \n\nWhich edited sentence is more\
+ \ humorous (answer with \"{{answer_choices[1]}}\" or \"{{answer_choices[2]}}\"\
+ ) or equally humorous (if so, answer \"{{answer_choices[0]}}\")?\n|||\n{{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: original_sent_edited_sentences_qa
+ reference: ''
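The answer_choices ordering `C ||| A ||| B` in this file looks odd until it is paired with the label encoding: the target `answer_choices[label]` assumes label 0 marks the two edits as equally funny, while labels 1 and 2 pick the first or second edit, so slot 0 must hold the "equal" option (C, "Equal", or "Equally funny", depending on the template). A quick check of that assumed mapping:

    # Assumed subtask-2 label convention: 0 = equal, 1 = first edit funnier,
    # 2 = second edit funnier. Slot 0 of the choice list is the "equal" option.
    answer_choices = [c.strip() for c in "C ||| A ||| B".split("|||")]

    for label, meaning in [(0, "equally funny"),
                           (1, "first edit funnier"),
                           (2, "second edit funnier")]:
        print(label, "->", answer_choices[label], f"({meaning})")
    # 0 -> C (equally funny)
    # 1 -> A (first edit funnier)
    # 2 -> B (second edit funnier)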
diff --git a/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml b/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..848939d011fdaa05dfd1a6bd74791834df49226a
--- /dev/null
+++ b/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml
@@ -0,0 +1,147 @@
+dataset: hyperpartisan_news_detection
+subset: byarticle
+templates:
+ 06fbb182-0077-4355-b7cc-b4d0d2e98c08: !Template
+ answer_choices: 'True ||| False '
+ id: 06fbb182-0077-4355-b7cc-b4d0d2e98c08
+ jinja: 'Consider this news article text:
+
+
+ "{{text}}"
+
+
+ Does it follow hyperpartisan argumentation? "{{answer_choices[0]}}" or "{{answer_choices[1]}}"?
+
+
+ |||
+
+
+ {{answer_choices[0] if hyperpartisan else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider_does_it_follow_a_hyperpartisan_argumentation
+ reference: Does this text exhibit prejudice towards a group or cause?
+ 0e988588-f3f1-4c70-80ce-dc5660cde6dc: !Template
+ answer_choices: True ||| False
+ id: 0e988588-f3f1-4c70-80ce-dc5660cde6dc
+ jinja: '"{{text}}"
+
+
+ The news article text above follows hyperpartisan argumentation. "{{answer_choices[0]}}"
+ or "{{answer_choices[1]}}"?
+
+
+ |||
+
+
+ {{answer_choices[0] if hyperpartisan else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: follows_hyperpartisan_argumentation
+ reference: Does this text exhibit prejudice towards a group or cause?
+ 545567e7-97cf-4600-bf1e-94f213d2f0a4: !Template
+ answer_choices: 'True ||| False '
+ id: 545567e7-97cf-4600-bf1e-94f213d2f0a4
+ jinja: '"{{text}}"
+
+
+ We must consume the news article above with caution as it exhibits prejudiced
+ allegiance towards one group or cause. "{{answer_choices[0]}}" or "{{answer_choices[1]}}"?
+
+
+ |||
+
+
+ {{answer_choices[0] if hyperpartisan else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consume_with_caution
+ reference: Does this text exhibit prejudice towards a group or cause?
+ 5a612b28-923b-4302-b959-290bf8453166: !Template
+ answer_choices: 'True ||| False '
+ id: 5a612b28-923b-4302-b959-290bf8453166
+ jinja: '"{{text}}"
+
+
+ The news article above takes an extreme left-wing or right-wing standpoint.
+ "{{answer_choices[0]}}" or "{{answer_choices[1]}}"?
+
+
+ |||
+
+
+ {{answer_choices[0] if hyperpartisan else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: extreme_left_wing_or_right_wing
+ reference: Does this text exhibit prejudice towards a group or cause?
+ 8ab54a01-2728-4ac2-8ee9-79016434454f: !Template
+ answer_choices: 'True ||| False '
+ id: 8ab54a01-2728-4ac2-8ee9-79016434454f
+ jinja: '"{{text}}"
+
+
+ The publisher of the news piece above has reported strongly in favor of one
+ political side and seems to have ignored the other side. "{{answer_choices[0]}}"
+ or "{{answer_choices[1]}}"?
+
+
+ |||
+
+
+ {{answer_choices[0] if hyperpartisan else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: publisher_has_reported_strongly
+ reference: Does this text exhibit prejudice towards a group or cause?
+ d4f7f589-995a-473e-b87b-68b9a0fea0d8: !Template
+ answer_choices: 'True ||| False '
+ id: d4f7f589-995a-473e-b87b-68b9a0fea0d8
+ jinja: 'Consider this news piece:
+
+
+ "{{text}}"
+
+
+ It exhibits extreme one-sidedness to a single group of individuals or cause.
+ "{{answer_choices[0]}}" or "{{answer_choices[1]}}"?
+
+
+ |||
+
+
+ {{answer_choices[0] if hyperpartisan else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider_it_exhibits_extreme_one_sidedness
+ reference: Does this text exhibit prejudice towards a group or cause?
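All six byarticle targets use the same one-line Jinja inline conditional on the boolean `hyperpartisan` field rather than an `{% if %}` block. A minimal check of that expression:

    from jinja2 import Template

    target_src = "{{answer_choices[0] if hyperpartisan else answer_choices[1]}}"
    answer_choices = [c.strip() for c in "True ||| False".split("|||")]

    for flag in (True, False):
        rendered = Template(target_src).render(
            answer_choices=answer_choices, hyperpartisan=flag)
        print(flag, "->", rendered)
    # True -> True
    # False -> False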
diff --git a/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml b/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b428431b0852884cbffb71050b01c377362d2091
--- /dev/null
+++ b/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml
@@ -0,0 +1,138 @@
+dataset: hyperpartisan_news_detection
+subset: bypublisher
+templates:
+ 43db0412-e7a0-4976-8b97-6da598092ed8: !Template
+ answer_choices: right ||| right-center ||| least ||| left-center ||| left
+ id: 43db0412-e7a0-4976-8b97-6da598092ed8
+ jinja: '"{{text}}"
+
+
+ How would you describe the political leaning of the publisher who reported the
+ news piece above? Please choose one of these options:
+
+
+ {{ ", ".join(answer_choices) }}
+
+
+ |||
+
+
+ {{ answer_choices[bias] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: how_would_you_describe_the_political_leaning
+ reference: Classify the partisan bias of the provided text
+ 62fc329c-188c-43da-98b7-aa6580cdef17: !Template
+ answer_choices: right ||| right-center ||| least ||| left-center ||| left
+ id: 62fc329c-188c-43da-98b7-aa6580cdef17
+ jinja: '"{{text}}"
+
+
+ The news piece above gives the readers a biased view of political news. How
+ would you describe the bias?
+
+
+ Choices: {{ ", ".join(answer_choices) }}?
+
+
+ |||
+
+
+ {{ answer_choices[bias] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: gives_the_readers_a_biased_view
+ reference: Classify the partisan bias of the provided text
+ 6b26ad81-4777-4323-98de-e7956cedc1ef: !Template
+ answer_choices: right ||| right-center ||| least ||| left-center ||| left
+ id: 6b26ad81-4777-4323-98de-e7956cedc1ef
+ jinja: '"{{text}}"
+
+
+ The news piece above offers a biased view of political news. Which of the following
+ options do you agree with?
+
+
+ {{ ", ".join(answer_choices) }}?
+
+
+ |||
+
+
+ {{ answer_choices[bias] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: offers_a_biased_view
+ reference: Classify the partisan bias of the provided text
+ 7512e26a-0130-438a-ac39-dde3a4274fcf: !Template
+ answer_choices: right ||| right-center ||| least ||| left-center ||| left
+ id: 7512e26a-0130-438a-ac39-dde3a4274fcf
+ jinja: '"{{text}}"
+
+
+ Based on the news piece above, which of the following options best captures
+ the publisher''s political position?
+
+
+ Options: {{ ", ".join(answer_choices) }}
+
+
+ |||
+
+
+ {{ answer_choices[bias] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_best_captures
+ reference: Classify the partisan bias of the provided text
+ 8cc1b595-29b0-49bc-8323-73fa489d936c: !Template
+ answer_choices: right ||| right-center ||| least ||| left-center ||| left
+ id: 8cc1b595-29b0-49bc-8323-73fa489d936c
+ jinja: 'Consider this news piece:
+
+
+ "{{text}}"
+
+
+ Its publisher exhibits a political bias through their reporting of the news.
+ Which of these options do you agree with regarding this bias?
+
+
+ Options: {{ ", ".join(answer_choices) }}
+
+
+ |||
+
+
+ {{ answer_choices[bias] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider_its_publisher_exhibits
+ reference: Classify the partisan bias of the provided text
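The bypublisher templates differ from byarticle in two ways: the options are printed via `{{ ", ".join(answer_choices) }}`, and the target indexes the five-way list directly with the integer `bias` field. The sketch below assumes, as the `right ||| right-center ||| least ||| left-center ||| left` ordering suggests, that `bias` is a class index into exactly this sequence:

    choices_src = "right ||| right-center ||| least ||| left-center ||| left"
    answer_choices = [c.strip() for c in choices_src.split("|||")]

    print(", ".join(answer_choices))  # the option list shown in each prompt

    bias = 3                          # hypothetical label
    print(answer_choices[bias])       # left-center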
diff --git a/promptsource/templates/imdb/templates.yaml b/promptsource/templates/imdb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..92d2c93a45b6fb0ac918199e6253251da5e92855
--- /dev/null
+++ b/promptsource/templates/imdb/templates.yaml
@@ -0,0 +1,155 @@
+dataset: imdb
+templates:
+ 02ff2949-0f45-4d97-941e-6fa4c0afbc2d: !Template
+ answer_choices: negative ||| positive
+ id: 02ff2949-0f45-4d97-941e-6fa4c0afbc2d
+ jinja: The following movie review expresses what sentiment? {{text}} ||| {{ answer_choices
+ [label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Movie Expressed Sentiment 2
+ reference: ''
+ 2351d12a-e630-4d19-8b41-e199266e38f7: !Template
+ answer_choices: bad ||| good
+ id: 2351d12a-e630-4d19-8b41-e199266e38f7
+ jinja: '{{text}} Did the reviewer find this movie {{"good or bad"}}? ||| {{ answer_choices
+ [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Opinion bad good choices
+ reference: ''
+ 5f372fb1-795a-47b6-8ddf-c4fd1579e76a: !Template
+ answer_choices: negative ||| positive
+ id: 5f372fb1-795a-47b6-8ddf-c4fd1579e76a
+ jinja: "{{text}} \nIs this review {{\"positive or negative\"}}? ||| \n{{answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: 'Sentiment with choices '
+ reference: ''
+ 866474a5-1498-46b7-bfee-ac0c5160707f: !Template
+ answer_choices: negative ||| positive
+ id: 866474a5-1498-46b7-bfee-ac0c5160707f
+ jinja: '{{text}} How does the viewer feel about the movie? ||| {{ answer_choices
+ [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Sentiment Feeling
+ reference: ''
+ 96538f30-f2c1-430e-8fc6-936a16966d9c: !Template
+ answer_choices: negative ||| positive
+ id: 96538f30-f2c1-430e-8fc6-936a16966d9c
+ jinja: '{{text}} What sentiment does the writer express for the movie? ||| {{
+ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Writer Expressed Sentiment
+ reference: ''
+ af51297c-38a3-4d6c-a8b5-04b1243d7443: !Template
+ answer_choices: negative ||| positive
+ id: af51297c-38a3-4d6c-a8b5-04b1243d7443
+ jinja: '{{text}} The sentiment expressed for the movie is ||| {{ answer_choices
+ [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Movie Expressed Sentiment
+ reference: ''
+ b93b74ac-fe95-40b4-9610-318b46ab820f: !Template
+ answer_choices: negative ||| positive
+ id: b93b74ac-fe95-40b4-9610-318b46ab820f
+ jinja: '{{text}} What is the sentiment expressed in this text? ||| {{ answer_choices
+ [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Text Expressed Sentiment
+ reference: ''
+ b9b5d79d-f0b3-4bec-a724-f585db3e93ff: !Template
+ answer_choices: negative ||| positive
+ id: b9b5d79d-f0b3-4bec-a724-f585db3e93ff
+ jinja: '{{text}} This is definitely not a ||| {{ answer_choices [1-label]}} review.'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Negation template for positive and negative
+ reference: ''
+ bd82ba0f-01d4-4fa1-bf8d-07e392c00cd9: !Template
+ answer_choices: No ||| Yes
+ id: bd82ba0f-01d4-4fa1-bf8d-07e392c00cd9
+ jinja: '{{text}} Did the reviewer enjoy the movie? ||| {{ answer_choices [label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Enjoyment Yes No
+ reference: ''
+ c70d1687-2421-49a2-9553-91b8bac4cfbe: !Template
+ answer_choices: negative ||| positive
+ id: c70d1687-2421-49a2-9553-91b8bac4cfbe
+ jinja: '{{text}} What is the sentiment expressed by the reviewer for the movie?
+ ||| {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Expressed Sentiment
+ reference: ''
+ e01970ab-42c0-4e6e-a08f-4940d889ef37: !Template
+ answer_choices: They didn't like it! ||| They loved it
+ id: e01970ab-42c0-4e6e-a08f-4940d889ef37
+ jinja: '{{text}} How does the reviewer feel about the movie? ||| {{ answer_choices
+ [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Enjoyment
+ reference: ''
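Most imdb targets are plain `answer_choices[label]` lookups; the one exception is the negation template, whose `answer_choices[1-label]` deliberately flips the index so that the completion names the class the review is not:

    # "This is definitely not a ___ review.": 1 - label flips the index, so a
    # positive review (label 1) is completed with "negative" and vice versa.
    answer_choices = ["negative", "positive"]
    for label in (0, 1):
        print(f"label={label}: definitely not a {answer_choices[1 - label]} review")
    # label=0: definitely not a positive review
    # label=1: definitely not a negative review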
diff --git a/promptsource/templates/jfleg/templates.yaml b/promptsource/templates/jfleg/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c03bcebbdc5739d6a8df7b732e2ff178a0ded3e8
--- /dev/null
+++ b/promptsource/templates/jfleg/templates.yaml
@@ -0,0 +1,163 @@
+dataset: jfleg
+templates:
+ 18d3362c-74e1-4cda-9b16-001948d9196b: !Template
+ answer_choices: null
+ id: 18d3362c-74e1-4cda-9b16-001948d9196b
+ jinja: 'I am correcting the grammar exercises of my students. How should the following
+ sentence be rewritten?
+
+
+ {{sentence}}
+
+
+ |||
+
+
+
+ {{corrections | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: how_to_rewrite
+ reference: ''
+ 491f0d06-2acf-4977-8ac5-c20b9bd17cce: !Template
+ answer_choices: null
+ id: 491f0d06-2acf-4977-8ac5-c20b9bd17cce
+ jinja: 'A text needs to be fluent and native-sounding. The following sentence
+ is not.
+
+
+ Sentence: {{sentence}}
+
+
+ However, an improved version of the same sentence is:
+
+
+ |||
+
+
+ {{ corrections | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: improved_to_be_fluent
+ reference: ''
+ 52a79f92-f24c-45e7-89a4-a9dc8f6e0ecf: !Template
+ answer_choices: null
+ id: 52a79f92-f24c-45e7-89a4-a9dc8f6e0ecf
+ jinja: 'A native English speaker would find the following sentence hard to understand
+ because of grammatical errors.
+
+
+ Sentence: {{sentence}}
+
+
+ A correct version of the same sentence could be:
+
+
+ |||
+
+
+ {{ corrections | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: native_english_speaker
+ reference: ''
+ 5ba0d7ab-d732-4c47-b5df-be0ae4feb1cd: !Template
+ answer_choices: null
+ id: 5ba0d7ab-d732-4c47-b5df-be0ae4feb1cd
+ jinja: 'The English language has specific rules that need to be followed. After
+ fixing the grammatical errors, the sentence "{{sentence}}" can be corrected
+ to
+
+
+ |||
+
+
+ {{ corrections | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: rules_to_follow
+ reference: ''
+ 68e68d2f-ed55-4f0e-b512-22166fd5206b: !Template
+ answer_choices: null
+ id: 68e68d2f-ed55-4f0e-b512-22166fd5206b
+ jinja: '{{sentence}}
+
+
+ According to academic writing rules, the sentence above contains grammatical
+ mistakes. There can be many ways of correcting the above sentence. Output one
+ possible corrected sentence.
+
+
+ |||
+
+
+ {{ corrections | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: academic_writing_rules
+ reference: ''
+ 8461a024-9a16-4aef-94a9-d69c8f12a590: !Template
+ answer_choices: null
+ id: 8461a024-9a16-4aef-94a9-d69c8f12a590
+ jinja: 'The following sentence is hard to read because it is grammatically incorrect.
+
+
+ Sentence: {{sentence}}
+
+
+ Correct the sentence above to become fluent and free of grammatical mistakes.
+
+
+ |||
+
+
+ {{ corrections | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: hard_to_read
+ reference: Correcting the grammar
+ e9d603d4-cf1a-404e-8d82-cd29a04e82d3: !Template
+ answer_choices: null
+ id: e9d603d4-cf1a-404e-8d82-cd29a04e82d3
+ jinja: "As an English Teacher, I find it hard to read sentences with grammatical\
+ \ errors such as the following:\n\n{{sentence}}\n\nA corrected sentence would\
+ \ be \n\n|||\n\n{{ corrections | choice }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: english_teacher
+ reference: ''
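Every jfleg target is `{{ corrections | choice }}`. `choice` is not a built-in Jinja2 filter; promptsource registers its own, which picks one element of a list, so each render samples one of the several reference corrections. A sketch with a hand-registered stand-in filter and invented data:

    import random
    from jinja2 import Environment

    env = Environment()
    env.filters["choice"] = random.choice  # stand-in for the app's custom filter

    template = env.from_string("{{ corrections | choice }}")

    # Hypothetical jfleg-style record: multiple equally valid corrections.
    corrections = ["He goes to school every day.",
                   "He attends school every day."]
    print(template.render(corrections=corrections))  # one of the two, at random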
diff --git a/promptsource/templates/jigsaw_unintended_bias/templates.yaml b/promptsource/templates/jigsaw_unintended_bias/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cae2735f15fa4ec164c292d3aa339488c0254b62
--- /dev/null
+++ b/promptsource/templates/jigsaw_unintended_bias/templates.yaml
@@ -0,0 +1,211 @@
+dataset: jigsaw_unintended_bias
+templates:
+ 17f730bd-fc7e-419c-84cc-e5c795268d02: !Template
+ answer_choices: null
+ id: 17f730bd-fc7e-419c-84cc-e5c795268d02
+ jinja: 'Online comments can sometimes be toxic. For instance, they might be insulting,
+ threatening, or racist, among other issues. If 0 corresponds to a very civil comment,
+ and 100 to a highly toxic one, how would you grade the following comment?
+
+ "{{comment_text}}"
+
+ |||
+
+ {{(target * 100.0) | round(0) }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: online_comment
+ reference: ''
+ 1add6d25-346e-4bf5-aa58-8dc184018f9e: !Template
+ answer_choices: keep ||| remove
+ id: 1add6d25-346e-4bf5-aa58-8dc184018f9e
+ jinja: 'You are the moderator of the comments section on a website. You want to
+ remove uncivil comments. Would you {{answer_choices[1]}} or {{answer_choices[0]}}
+ the following comment?
+
+ "{{comment_text}}"
+
+ |||
+
+ {{answer_choices[target | round | int]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: moderator
+ reference: ''
+ 355aa574-61c2-4b94-9f1a-9347fd7c6444: !Template
+ answer_choices: null
+ id: 355aa574-61c2-4b94-9f1a-9347fd7c6444
+ jinja: "{{toxicity_annotator_count}} annotators where tasked with assessing the\
+ \ toxicity of the following comment:\n\n\"{{comment_text}}\"\n\nOn a scale of\
+ \ 0 to 1, what do you think their average prediction was?\n\n||| \n\n{{ target\
+ \ | round(1) }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: annotators_rated
+ reference: ''
+ 3f5fe7e8-0808-4792-94e5-f5d8fff7c89d: !Template
+ answer_choices: null
+ id: 3f5fe7e8-0808-4792-94e5-f5d8fff7c89d
+ jinja: How toxic is this comment on a scale from 0 to 5? "{{comment_text}}"
+ ||| {{(target * 5.0) | round(1) }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: modified_scale
+ reference: ''
+ 48ef9835-a99b-45f5-8ff4-96b3b2422855: !Template
+ answer_choices: null
+ id: 48ef9835-a99b-45f5-8ff4-96b3b2422855
+ jinja: 'You were hired to evaluate the toxicity of online comments. You find the
+ following comment:
+
+ "{{comment_text}}"
+
+ How would you grade its toxicity from 0 to 1?
+
+ |||
+
+ {{target | round(1) }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: hired_to_review
+ reference: ''
+ 5e06b59c-9888-47b1-a982-b90bced2d6a2: !Template
+ answer_choices: civil ||| toxic
+ id: 5e06b59c-9888-47b1-a982-b90bced2d6a2
+ jinja: 'You are browsing the comment section of a website. You read the following
+ comment:
+
+ "{{comment_text}}"
+
+ Would you classify that comment as {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ |||
+
+ {{answer_choices[target | round | int]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: browsing
+ reference: ''
+ 68962b6a-2ed3-48d5-ba7c-9f023e314592: !Template
+ answer_choices: civil ||| toxic
+ id: 68962b6a-2ed3-48d5-ba7c-9f023e314592
+ jinja: "Someone posted the following comment online:\n\n\"{{comment_text}}\"\n\
+ \nWould you classify it as {{answer_choices[0]}} or {{answer_choices[1]}}?\n\
+ \n||| \n\n{{answer_choices[target | round | int]}}\n\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: binary_pred
+ reference: ''
+ 71508d0b-7fd9-4810-933c-c3c59f0c5328: !Template
+ answer_choices: null
+ id: 71508d0b-7fd9-4810-933c-c3c59f0c5328
+ jinja: 'On a scale of 0 to 1, how threatening is the comment below?
+
+ {{comment_text}}
+
+ |||
+
+ {{threat | round(1)}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: threatening
+ reference: ''
+ 71508d0b-7fd9-4810-933c-c3c59f0c5329: !Template
+ answer_choices: no ||| yes
+ id: 71508d0b-7fd9-4810-933c-c3c59f0c5329
+ jinja: '"{{comment_text}}" Someone posted this comment online. Do you find it
+ toxic?
+
+ |||
+
+ {{answer_choices[target | round | int]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: yes_or_no
+ reference: ''
+ 71508d1b-7fd9-4810-933c-c3c59f0c5329: !Template
+ answer_choices: no ||| yes
+ id: 71508d1b-7fd9-4810-933c-c3c59f0c5329
+ jinja: '"{{comment_text}}"
+
+ Should this online comment be removed for its toxicity? {{answer_choices[1]}}
+ or {{answer_choices[0]}}?
+
+ |||
+
+ {{answer_choices[target | round | int]}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: yes_or_no_remove
+ reference: ''
+ a9cf1274-4167-4647-8058-986b4eeff1a0: !Template
+ answer_choices: null
+ id: a9cf1274-4167-4647-8058-986b4eeff1a0
+ jinja: 'How toxic is the comment below on a scale of 0 to 1?
+
+
+ "{{comment_text}}"
+
+
+ |||
+
+ {{target | round(1)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: how_toxic
+ reference: ''
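The jigsaw targets treat the continuous `target` score three ways: rescaled (`(target * 100.0) | round(0)`), rounded to one decimal (`target | round(1)`), or binarized via `answer_choices[target | round | int]`, which rounds to the nearest integer so that scores near 1 pick the second choice. The binarization traced in plain Python:

    # answer_choices[target | round | int]: round the toxicity score to the
    # nearest integer and use it as a choice index.
    answer_choices = ["civil", "toxic"]
    for target in (0.12, 0.49, 0.51, 0.87):
        print(target, "->", answer_choices[int(round(target))])
    # 0.12 -> civil
    # 0.49 -> civil
    # 0.51 -> toxic
    # 0.87 -> toxic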
diff --git a/promptsource/templates/kelm/templates.yaml b/promptsource/templates/kelm/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6d723749b1ac2c0effd45f7cadaf62046425d6c
--- /dev/null
+++ b/promptsource/templates/kelm/templates.yaml
@@ -0,0 +1,110 @@
+dataset: kelm
+templates:
+ 3381175a-b93e-4d1e-a7f2-428c5d2c7c2b: !Template
+ answer_choices: null
+ id: 3381175a-b93e-4d1e-a7f2-428c5d2c7c2b
+ jinja: "Given facts from a knowledge base encoded in the form \"subject relation\
+ \ object, relation object, ...\" below (the subject and object are entities\
+ \ that are involved in a relationship defined by the relation), what would be\
+ \ a natural language sentence that uses all facts provided as input?\n\n{{ triple\
+ \ }} \n|||\n{{ sentence }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: kb_to_sentence_uses_all_facts
+ reference: Convert a KB triple of the form (subject, relation, object) to a natural
+ language sentence
+ 4d674e43-c569-4f0c-9b5c-436f430da92a: !Template
+ answer_choices: null
+ id: 4d674e43-c569-4f0c-9b5c-436f430da92a
+ jinja: "Given a sentence below, generate knowledge base triples in the form of\
+ \ \"subject relation object, relation object, ...\". The subject and object\
+ \ are entities that are involved in a relationship defined by the relation.\n\
+ \nSentence: {{sentence}} \n|||\n{{triple}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: sentence_to_kb
+ reference: Given a sentence, generate a string of the form (subject, relation,
+ object)
+ 510324d1-1c45-4747-8cd1-ea817355f895: !Template
+ answer_choices: null
+ id: 510324d1-1c45-4747-8cd1-ea817355f895
+ jinja: "How would you rephrase the following information in the format of \"subject\
+ \ relation object, relation object, ...\" (the subject and object are entities\
+ \ that are involved in a relationship defined by the relation) into something\
+ \ easier to understand? Give your response in a complete sentence.\n\nInformation:\
+ \ {{ triple }} \n\n|||\n\n{{ sentence }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: kb_to_sentence_easier_to_understand
+ reference: ''
+ 55909592-633d-4cef-97ff-058c86eea28f: !Template
+ answer_choices: null
+ id: 55909592-633d-4cef-97ff-058c86eea28f
+ jinja: 'How would you combine the following facts of the form "subject relation
+ object, relation object, ..." (the subject and object are entities that are
+ involved in a relationship defined by the relation) into a sentence?
+
+
+ "{{ triple }}" |||
+
+ {{ sentence }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: kb_to_sentence_combine
+ reference: Convert a KB triple of the form (subject, relation, object) to a natural
+ language sentence
+ 7478edee-5950-4ca2-8878-9c5a98925952: !Template
+ answer_choices: null
+ id: 7478edee-5950-4ca2-8878-9c5a98925952
+ jinja: "Given knowledge base triples in the format of \"subject relation object,\
+ \ relation object, ...\" (the subject and object are entities that are involved\
+ \ in a relationship defined by the relation), generate a natural language sentence.\n\
+ \nTriple: {{ triple }} \n|||\n{{ sentence }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: kb_to_sentence_affirmative
+ reference: Convert a KB triple of the form (subject, relation, object) to a natural
+ language sentence
+ d72c07d6-9c16-4520-8891-4bfc6a7a956f: !Template
+ answer_choices: null
+ id: d72c07d6-9c16-4520-8891-4bfc6a7a956f
+ jinja: "I am going through my notes about facts, and my notes are written in the\
+ \ form of \"subject relation object, relation object, ...\". The subject and\
+ \ object are entities that are involved in a relationship defined by the relation.\
+ \ I want to convert the facts into a sentence to include in my written report.\
+ \ What is the sentence?\n\nFacts: {{ triple }} \n\n|||\n\n{{ sentence }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: kb_to_sentence_from_notes
+ reference: ''
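All six kelm templates pivot on the same pair of fields and differ only in which direction they map. An invented record (not drawn from the real corpus) to make the "subject relation object, relation object, ..." linearization concrete:

    # Hypothetical KELM-style record: one subject may carry several
    # "relation object" pairs in a single linearized string.
    record = {
        "triple": "Inferno author Dan Brown, publication date 2013",
        "sentence": "Inferno is a 2013 novel written by Dan Brown.",
    }

    # kb_to_sentence_* templates prompt with "triple" and target "sentence";
    # sentence_to_kb runs the same pair in the opposite direction.
    print("input :", record["triple"])
    print("target:", record["sentence"])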
diff --git a/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml b/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..934b7e027e5e0a77b76542e4945516947bb82307
--- /dev/null
+++ b/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml
@@ -0,0 +1,114 @@
+dataset: kilt_tasks
+subset: hotpotqa
+templates:
+ 1a123f3a-0507-41b9-904f-b18d9ce2b79e: !Template
+ answer_choices: null
+ id: 1a123f3a-0507-41b9-904f-b18d9ce2b79e
+ jinja: '{% if output %}
+
+ Here''s a complex question that requires someone to reason about the input.
+ Can you answer it?
+
+ {{input}}
+
+ |||
+
+ {{output | map(attribute="answer") | list | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: complex_question
+ reference: ''
+ 5531ce47-35ff-4bce-943d-5b2b86c44352: !Template
+ answer_choices: null
+ id: 5531ce47-35ff-4bce-943d-5b2b86c44352
+ jinja: '{% if output %}
+
+ Combine facts and answer this: {{input}}
+
+ |||
+
+ {{output | map(attribute="answer") | list | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: combining_facts
+ reference: ''
+ 5ce9d659-4df8-4afd-a6e1-3e542df0035a: !Template
+ answer_choices: null
+ id: 5ce9d659-4df8-4afd-a6e1-3e542df0035a
+ jinja: '{% if output %}
+
+ Formulate an answer to this elaborate question: {{input}}
+
+ |||
+
+ {{output | map(attribute="answer") | list | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: formulate
+ reference: ''
+ 9211f663-51f9-428e-ba27-158480eee083: !Template
+ answer_choices: null
+ id: 9211f663-51f9-428e-ba27-158480eee083
+ jinja: '{% if output %}
+
+ FINAL EXAM
+
+
+ Question 1. {{input}}
+
+ |||
+
+ {{output | map(attribute="answer") | list | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: final_exam
+ reference: ''
+ ac0545a1-9363-4c17-aada-f0eedf5a24b2: !Template
+ answer_choices: null
+ id: ac0545a1-9363-4c17-aada-f0eedf5a24b2
+ jinja: '{% if output %}
+
+ {{input}}
+
+ |||
+
+ {{output | map(attribute="answer") | list | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: straightforward_qa
+ reference: ''
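In kilt_tasks, `output` is a list of answer/provenance dicts, which is why every template here is wrapped in `{% if output %}` (answerless examples render to an empty string) and why the target is the chain `output | map(attribute="answer") | list | choice`: extract each dict's answer, then sample one. A sketch, again treating `choice` as the custom random-choice filter:

    import random
    from jinja2 import Environment

    env = Environment()
    env.filters["choice"] = random.choice  # stand-in for the custom filter

    target_src = '{{ output | map(attribute="answer") | list | choice }}'

    # Hypothetical record shaped like a kilt_tasks output list.
    output = [{"answer": "Alan Turing"}, {"answer": "Turing"}]
    print(env.from_string(target_src).render(output=output))  # one of the two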
diff --git a/promptsource/templates/kilt_tasks/nq/templates.yaml b/promptsource/templates/kilt_tasks/nq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18bf44b0e241f3bccc433d4ba1db89710ae0c210
--- /dev/null
+++ b/promptsource/templates/kilt_tasks/nq/templates.yaml
@@ -0,0 +1,177 @@
+dataset: kilt_tasks
+subset: nq
+templates:
+ 294fd8f3-c7e0-4b3c-abd3-64527f8f71b1: !Template
+ answer_choices: null
+ id: 294fd8f3-c7e0-4b3c-abd3-64527f8f71b1
+ jinja: '{% if output %}
+
+ The goal is to predict an English answer string for an input English question.
+ All questions can be answered using the contents of English Wikipedia.
+
+ Question: {{input}}
+
+ Answer:
+
+ |||
+
+ {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+ }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: formal_description
+ reference: Copied from the nq_open dataset description.
+ 44f247e1-9d7e-43b9-af4b-6202fd16d0c0: !Template
+ answer_choices: null
+ id: 44f247e1-9d7e-43b9-af4b-6202fd16d0c0
+ jinja: '{% if output %}
+
+ Search query: {{input}}
+
+ Response:
+
+ |||
+
+ {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+ }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: search_query

+ reference: ''
+ 485046dc-0835-4b42-b330-e0ca2ee7f7a1: !Template
+ answer_choices: null
+ id: 485046dc-0835-4b42-b330-e0ca2ee7f7a1
+ jinja: '{% if output %}
+
+ Question : {{input}}
+
+ Answer :
+
+ |||
+
+ {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|join('',
+ '') }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: false
+ name: question_with_multiple_answer
+ reference: Plain question with multiple answers
+ a5e73119-b3d5-408f-a954-56951ea070f3: !Template
+ answer_choices: null
+ id: a5e73119-b3d5-408f-a954-56951ea070f3
+ jinja: '{% if output %}
+
+ Guess a question that has the answer "{{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto",
+ "")|list|choice }}"
+
+ |||
+
+ {{input}}?
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: guess_question
+ reference: Guess a question. It shows whether the model can identify the entity
+ in the question.
+ d5fabd3e-4d2e-45bc-888a-0f3a7ea48c85: !Template
+ answer_choices: null
+ id: d5fabd3e-4d2e-45bc-888a-0f3a7ea48c85
+ jinja: '{% if output %}
+
+ Question : {{input}}
+
+ Answer :
+
+ |||
+
+ {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+ }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question_answer
+ reference: Plain Question
+ e1ae7863-b30e-4a1a-8c4f-74b8baee5ba9: !Template
+ answer_choices: null
+ id: e1ae7863-b30e-4a1a-8c4f-74b8baee5ba9
+ jinja: '{% if output %}
+
+ I''ve always wondered: {{input}}
+
+ |||
+
+ {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+ }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: first_person_context
+ reference: Ask a question in first person
+ f63f6b1e-ef1f-4cd8-bb6e-aaf24fed8936: !Template
+ answer_choices: null
+ id: f63f6b1e-ef1f-4cd8-bb6e-aaf24fed8936
+ jinja: '{% if output %}
+
+ Answer the following question.
+
+ {{input}}
+
+ |||
+
+ {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+ }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question_with_instruction
+ reference: Instruction before question.
diff --git a/promptsource/templates/lama/trex/templates.yaml b/promptsource/templates/lama/trex/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30dfe500b943aadb67b36e60e71e0e0d68156c3b
--- /dev/null
+++ b/promptsource/templates/lama/trex/templates.yaml
@@ -0,0 +1,81 @@
+dataset: lama
+subset: trex
+templates:
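+# Background for the `replace` chains below (hedged; based on the LAMA T-REx schema):
+# the `template` field stores a relation pattern with "[X]" (subject) and "[Y]"
+# (object) placeholders, e.g.
+#   template = "[X] was born in [Y] ."
+#   {{ template | replace("[X]", sub_surface) | replace("[Y]", obj_surface) }}
+#     -> "Barack Obama was born in Hawaii ."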
+ 27c2da31-bd1a-48d4-9e34-c530e42c9f00: !Template
+ answer_choices: null
+ id: 27c2da31-bd1a-48d4-9e34-c530e42c9f00
+ jinja: '{{masked_sentence}} Fill the mask with the missing word. ||| {{obj_label}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fill_mask
+ reference: ''
+ 52469d4c-6c46-4686-a36b-7af1801e1eec: !Template
+ answer_choices: null
+ id: 52469d4c-6c46-4686-a36b-7af1801e1eec
+ jinja: 'Given the following paragraph: {{ masked_sentence | replace("[MASK]",obj_label)}},
+ what is the relationship between {{obj_label}} and {{sub_label}}?
+
+ ||| {{ template | replace("[X]",sub_label) | replace("[Y]", obj_surface)}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - BLEU
+ - Other
+ original_task: false
+ name: find_relation
+ reference: ''
+ 8cb6ee9d-bcf7-4d82-9acf-b93072c7384b: !Template
+ answer_choices: null
+ id: 8cb6ee9d-bcf7-4d82-9acf-b93072c7384b
+ jinja: 'Replace the mask with the correct word: {{masked_sentence}} ||| {{obj_label}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: replace_mask
+ reference: ''
+ cc07e0dc-b970-4f9d-b76a-05e72a86490e: !Template
+ answer_choices: null
+ id: cc07e0dc-b970-4f9d-b76a-05e72a86490e
+ jinja: "write the negation of the following statements : {{ template | replace(\"\
+ [X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} \n||| {{ template_negated\
+ \ | replace(\"[X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - BLEU
+ - Other
+ original_task: false
+ name: write_negation
+ reference: ''
+ e054c5b2-56fd-451a-aba5-fcd105e70bce: !Template
+ answer_choices: null
+ id: e054c5b2-56fd-451a-aba5-fcd105e70bce
+ jinja: "Negate the following statement : {{ template | replace(\"[X]\",sub_surface)\
+ \ | replace(\"[Y]\", obj_surface)}} \n||| {{ template_negated | replace(\"\
+ [X]\",sub_surface) | replace(\"[Y]\", obj_surface)}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - BLEU
+ - Other
+ original_task: false
+ name: negate_sentence
+ reference: ''
diff --git a/promptsource/templates/lambada/templates.yaml b/promptsource/templates/lambada/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..371f6d37fb1e3fb9dec930a870425a5e3fd79307
--- /dev/null
+++ b/promptsource/templates/lambada/templates.yaml
@@ -0,0 +1,82 @@
+dataset: lambada
+templates:
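+# All templates in this file share one answer_choices expression (a mechanical
+# reading of the code, not a claim about LAMBADA itself): text.split()[:-1] | unique
+# are the deduplicated context words, and text.split()[-1] is the final word to
+# predict, e.g.
+#   text = "the cat sat on the mat"
+#   choices -> "the ||| cat ||| sat ||| on"   target -> "mat"
+# Note the gold final word is not guaranteed to appear among these choices.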
+ 3747e80a-4182-44eb-944b-dee40095bb17: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 3747e80a-4182-44eb-944b-dee40095bb17
+ jinja: 'Please predict the next word after the following chunk of text.
+
+
+ {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: please next word
+ reference: ''
+ 506765b8-17c0-4946-bbb0-b28288caacb3: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 506765b8-17c0-4946-bbb0-b28288caacb3
+ jinja: '{{ text.split()[:-1] | join('' '') }} ____.
+
+
+ Fill in the ____: ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the ____
+ reference: ''
+ 948664d5-2ea2-4245-b656-9283948dd5cd: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: 948664d5-2ea2-4245-b656-9283948dd5cd
+ jinja: '{{ text.split()[:-1] | join('' '') }}...
+
+
+ What comes after the ellipses? ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: ellipses
+ reference: ''
+ acfe374c-60ce-4354-b285-e7b0717cffe5: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: acfe374c-60ce-4354-b285-e7b0717cffe5
+ jinja: 'This story got cut short. What comes next?
+
+
+ {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what comes next
+ reference: ''
+ d5707bd9-d3cc-4535-b4c1-5c2aee8cb8c7: !Template
+ answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+ id: d5707bd9-d3cc-4535-b4c1-5c2aee8cb8c7
+ jinja: 'Fill in the blank:
+
+
+ {{ text.split()[:-1] | join('' '') }} ____. ||| {{ text.split()[-1] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Brown et al.
diff --git a/promptsource/templates/liar/templates.yaml b/promptsource/templates/liar/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4f443eb98313c2386d6b15526531498d9b12a8ba
--- /dev/null
+++ b/promptsource/templates/liar/templates.yaml
@@ -0,0 +1,152 @@
+dataset: liar
+templates:
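+# Reminder for the indexed templates below (assuming the LIAR class-label order used
+# by the HF dataset): `label` is an integer, so with
+#   answer_choices: False ||| Half true ||| Mostly true ||| True ||| Barely true ||| Pants on fire
+# {{ answer_choices[label] }} maps label 0 -> "False", 1 -> "Half true", and so on.
+# Free-text fields such as `subject` are comma-separated (e.g. "economy,jobs"),
+# hence the subject.split(",")[0] in the last template.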
+ 25af7532-2351-4883-843c-fd5e28ab8668: !Template
+ answer_choices: Moderate ||| activist ||| business-leader ||| columnist ||| constitution-party
+ ||| county-commissioner ||| democrat ||| democratic-farmer-labor ||| education-official
+ ||| government-body ||| green ||| independent ||| journalist ||| labor-leader
+ ||| liberal-party-canada ||| libertarian ||| newsmaker ||| ocean-state-tea-party-action
+ ||| organization ||| republican ||| state-official ||| talk-show-host ||| tea-party-member
+ id: 25af7532-2351-4883-843c-fd5e28ab8668
+ jinja: '{% if party_affiliation != "none" %}
+
+ What is the party affiliation of the speaker of the following statement?
+
+ Choose between: {{answer_choices|join(", ")}}
+
+
+ Statement: {{statement}} |||
+
+
+ {{party_affiliation}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Guess affiliation
+ reference: ''
+ a22bff2e-ddb8-4c2c-b7a7-fa6847648fd5: !Template
+ answer_choices: null
+ id: a22bff2e-ddb8-4c2c-b7a7-fa6847648fd5
+ jinja: '{% if job_title != "" %}
+
+ Given the following statement:
+
+
+ {{statement}}
+
+
+ and the following speaker: {{speaker.replace("-", " ")}}, predict the speaker''s
+ job title. |||
+
+ {{job_title}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Given statement and speaker guess job title
+ reference: ''
+ b3618fb1-ad56-47bc-b6ff-393b9c24992b: !Template
+ answer_choices: null
+ id: b3618fb1-ad56-47bc-b6ff-393b9c24992b
+ jinja: '{% if job_title != "" %}
+
+ Guess the context in which the statement below was made by the speaker {{speaker
+ | replace(''-'', '' '')}}, who is a {{job_title}}.
+
+
+ {{statement}} |||
+
+ {{context}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Given statement, speaker and job title guess the context
+ reference: ''
+ d153b4d3-d0a2-4768-854a-582440943c64: !Template
+ answer_choices: False ||| Half true ||| Mostly true ||| True ||| Barely true |||
+ Pants on fire
+ id: d153b4d3-d0a2-4768-854a-582440943c64
+ jinja: 'Here''s a statement:
+
+
+ {{statement}}
+
+
+ In which of the following categories ({{answer_choices | join(", ")}}) would
+ you place it based on its truthfulness? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Given statement guess category
+ reference: ''
+ f61239e3-06fc-4397-93ce-f3fca7340428: !Template
+ answer_choices: marriage ||| census ||| job-accomplishments ||| public-health
+ ||| retirement ||| homeland-security ||| population ||| death-penalty ||| government-efficiency
+ ||| hunger ||| obama-birth-certificate ||| disability ||| message-machine-2012
+ ||| baseball ||| welfare ||| education ||| deficit ||| sexuality ||| congressional-rules
+ ||| campaign-advertising ||| city-budget ||| space ||| corporations ||| corrections-and-updates
+ ||| legal-issues ||| islam ||| bush-administration ||| occupy-wall-street |||
+ gas-prices ||| small-business ||| fires ||| after-the-fact ||| pensions |||
+ recreation ||| cap-and-trade ||| children ||| wealth ||| fake-news ||| science
+ ||| federal-budget ||| sotomayor-nomination ||| afghanistan ||| supreme-court
+ ||| nuclear ||| gambling ||| nightlife ||| technology ||| civil-rights ||| patriotism
+ ||| taxes ||| financial-regulation ||| terrorism ||| families ||| consumer-safety
+ ||| message-machine-2014 ||| food-safety ||| weather ||| tourism ||| states
+ ||| jobs ||| redistricting ||| human-rights ||| ebola ||| drugs ||| social-security
+ ||| climate-change ||| message-machine ||| economy ||| israel ||| transportation
+ ||| religion ||| oil-spill ||| gays-and-lesbians ||| transparency ||| county-budget
+ ||| market-regulation ||| animals ||| candidates-biography ||| autism ||| china
+ ||| workers ||| medicaid ||| immigration ||| florida ||| state-finances |||
+ new-hampshire-2012 ||| abortion ||| abc-news-week ||| military ||| history |||
+ crime ||| food ||| criminal-justice ||| stimulus ||| privacy ||| urban ||| state-budget
+ ||| sports ||| polls ||| Alcohol ||| women ||| infrastructure ||| bipartisanship
+ ||| diversity ||| housing ||| elections ||| city-government ||| congress |||
+ ethics ||| campaign-finance ||| iraq ||| labor ||| guns ||| kagan-nomination
+ ||| homeless ||| health-care ||| veterans ||| county-government ||| poverty
+ ||| government-regulation ||| natural-disasters ||| agriculture ||| medicare
+ ||| pop-culture ||| environment ||| lottery ||| income ||| trade ||| debt |||
+ energy ||| pundits ||| voting-record ||| water ||| florida-amendments ||| unions
+ ||| public-service ||| marijuana ||| foreign-policy ||| public-safety ||| bankruptcy
+ ||| debates
+ id: f61239e3-06fc-4397-93ce-f3fca7340428
+ jinja: 'Given the following statement:
+
+
+ {{statement}}
+
+
+ Under what subject would you categorize it? |||
+
+ {{subject.split(",")[0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Given statement guess subject
+ reference: ''
diff --git a/promptsource/templates/limit/templates.yaml b/promptsource/templates/limit/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce014677753fb22c09dd724338b117eeae7a1950
--- /dev/null
+++ b/promptsource/templates/limit/templates.yaml
@@ -0,0 +1,289 @@
+dataset: limit
+templates:
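+# Shape assumption behind these templates (hedged): `motion_entities` is a list of
+# records with "entity" and "start_index" keys, so
+#   motion_entities = [{"entity": "ball", "start_index": 12}, {"entity": "dog", "start_index": 0}]
+#   sort(attribute="start_index") | first  ->  the "dog" record (earliest mention)
+#   map(attribute="entity") | join(", ")   ->  "ball, dog" (original order, unsorted)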
+ 0898caf1-f3e4-493f-a838-84a678176c14: !Template
+ answer_choices: null
+ id: 0898caf1-f3e4-493f-a838-84a678176c14
+ jinja: '{{sentence}}
+
+
+ What is the last entity in motion mentioned in the sentence, if any? Otherwise,
+ respond with "No entity in motion".
+
+ |||
+
+ {% if (motion_entities | length) > 0 %}
+
+ {{ (motion_entities | sort(attribute="start_index") | last)["entity"] }}
+
+ {% else %}
+
+ {{"No entity in motion"}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: last_moving_entity
+ reference: ''
+ 3b88c578-db77-4fd0-ad50-c78a39197ce5: !Template
+ answer_choices: Yes ||| No
+ id: 3b88c578-db77-4fd0-ad50-c78a39197ce5
+ jinja: '{{sentence}}
+
+
+ Are there any entities in motion in the sentence?
+
+
+ |||
+
+
+ {% if motion == "yes" %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: any_entity
+ reference: Asking if there is any entity in motion in the text
+ 3f1689a9-b255-4d8d-b780-062ca2f83596: !Template
+ answer_choices: null
+ id: 3f1689a9-b255-4d8d-b780-062ca2f83596
+ jinja: '{{sentence}}
+
+
+ What are the entities in motion in the previous sentence? Return {{"''No entity''"}}
+ if you can''t find any. If there are multiple entities, use a comma to join
+ them.
+
+
+ |||
+
+ {% if (motion_entities | length) == 0 %}
+
+ {{ "No entity" }}
+
+ {% else %}
+
+ {{motion_entities | map(attribute="entity") | join(", ")}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_entities_question
+ reference: ''
+ 74c9962e-3ec2-4f06-ace4-fcac6f506076: !Template
+ answer_choices: Yes ||| No
+ id: 74c9962e-3ec2-4f06-ace4-fcac6f506076
+ jinja: "Extract: {{sentence}}\n\nIs there more than one mention of a moving entity\
+ \ in the extract? \n\n|||\n{% if (motion_entities | length) > 1 %}\n{{ answer_choices[0]\
+ \ }}\n{% else %}\n{{ answer_choices[1] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: more_than_one
+ reference: ''
+ 766ab346-6fa6-4496-915f-65e7b06ab8ac: !Template
+ answer_choices: null
+ id: 766ab346-6fa6-4496-915f-65e7b06ab8ac
+ jinja: '{{sentence}}
+
+ How many moving entities are mentioned in the sentence above?
+
+ |||
+
+ {{motion_entities | length}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: count_entities
+ reference: ''
+ 957deab1-7570-4cbf-a31f-55bfad5212a7: !Template
+ answer_choices: null
+ id: 957deab1-7570-4cbf-a31f-55bfad5212a7
+ jinja: "List out the entities in motion in the following sentence (if there are\
+ \ multiple entities, use a comma to join them). Respond {{\"'No entity'\"}}\
+ \ if you can't find any. \n\n{{sentence}}\n\n|||\n\n{% if (motion_entities |\
+ \ length) == 0 %}\n{{\"No entity\"}}\n{% else %}\n{{motion_entities | map(attribute=\"\
+ entity\") | join(\", \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_entities_list_out
+ reference: ''
+ 9cbb6dce-6463-4785-8130-fbe21216eb69: !Template
+ answer_choices: null
+ id: 9cbb6dce-6463-4785-8130-fbe21216eb69
+ jinja: "Sam has watched a video described as \"{{sentence}}\". What are the entities\
+ \ moving in the video? \n\nList the entities separated by commas. Return {{\"\
+ 'No entity'\"}} if there isn't any.\n\n|||\n{% if (motion_entities | length)\
+ \ == 0 %}\n{{ \"No entity\" }}\n{% else %}\n{{motion_entities | map(attribute=\"\
+ entity\") | join(\", \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_entities_moving_in_video
+ reference: ''
+ af2203ba-d176-4981-82bd-088ef0c39214: !Template
+ answer_choices: null
+ id: af2203ba-d176-4981-82bd-088ef0c39214
+ jinja: '{{sentence}}
+
+
+ Name the first entity in motion mentioned in the sentence, if any. Otherwise,
+ respond with "No entity in motion".
+
+
+ |||
+
+
+ {% if (motion_entities | length) > 0 %}
+
+ {{ (motion_entities | sort(attribute="start_index") | first)["entity"] }}
+
+ {% else %}
+
+ {{"No entity in motion"}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: first_moving_entity
+ reference: ''
+ b847d63c-0b52-4b6e-a62f-12e47439ce54: !Template
+ answer_choices: null
+ id: b847d63c-0b52-4b6e-a62f-12e47439ce54
+ jinja: 'Count the number of moving entities in the following sentence.
+
+ {{sentence}}
+
+ |||
+
+ {{motion_entities | length}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: count_entities_affirm
+ reference: ''
+ e5482b0d-ed6e-44de-a6e9-b64cdd1e2013: !Template
+ answer_choices: Yes ||| No
+ id: e5482b0d-ed6e-44de-a6e9-b64cdd1e2013
+ jinja: 'Is there any reference to movement in the following sentence?
+
+
+ {{sentence}}
+
+
+ |||
+
+
+ {% if motion == "yes" %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: any_entity_reference
+ reference: Indirectly asking whether there are moving entities
+ e8fca13b-7063-4ebc-9a4d-c124398cacf4: !Template
+ answer_choices: null
+ id: e8fca13b-7063-4ebc-9a4d-c124398cacf4
+ jinja: 'Extract: {{sentence}}
+
+
+ Can you find all mentions of moving entities in the extract? Return {{"''No
+ entity''"}} if you can''t find any. If there are multiple entities, use a comma
+ to join them.
+
+
+ |||
+
+ {% if (motion_entities | length) == 0 %}
+
+ {{ "No entity" }}
+
+ {% else %}
+
+ {{motion_entities | map(attribute="entity") | join(", ")}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_entities_extract
+ reference: ''
+ ebfec558-fce8-4935-8f28-648c184a3f1a: !Template
+ answer_choices: null
+ id: ebfec558-fce8-4935-8f28-648c184a3f1a
+ jinja: "What are the dynamic entities in the following sentence (if there are\
+ \ multiple entities, use a comma to join them)? Dynamic entities refer to objects\
+ \ in motion. Respond {{\"'No entity'\"}} if you can't find any. \n\n{{sentence}}\n\
+ \n|||\n\n{% if (motion_entities | length) == 0 %}\n{{\"No entity\"}}\n{% else\
+ \ %}\n{{motion_entities | map(attribute=\"entity\") | join(\", \")}}\n{% endif\
+ \ %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: find_entities_dynamic
+ reference: ''
diff --git a/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6b6041242bd15e6167ce8060aec419169bee3824
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml
@@ -0,0 +1,200 @@
+dataset: math_dataset
+subset: algebra__linear_1d
+templates:
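+# The slice arithmetic below leans on a fixed surface form for `question` (an
+# assumption worth verifying against raw examples): "Solve <equation> for <var>."
+#   question       = "Solve 24 = 1601*c - 1605*c for c."
+#   question[6:-7] = "24 = 1601*c - 1605*c"   # drops "Solve " (6 chars) and " for c." (7 chars)
+#   question[-2]   = "c"                      # the variable name, just before the final "."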
+ 10a6ab6c-51f1-45cf-9176-54764bb6b612: !Template
+ answer_choices: null
+ id: 10a6ab6c-51f1-45cf-9176-54764bb6b612
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after
+ reference: Simple question after expression
+ 17f77fc1-fd9b-4386-94e5-92e2aa9ef457: !Template
+ answer_choices: null
+ id: 17f77fc1-fd9b-4386-94e5-92e2aa9ef457
+ jinja: 'How do I solve the following algebraic equation: {{question[6:-7]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_v2
+ reference: Variant of simple_template_question_one_sentence
+ 1e769483-a2e1-4829-8bf4-72a160477093: !Template
+ answer_choices: null
+ id: 1e769483-a2e1-4829-8bf4-72a160477093
+ jinja: '{{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question_extracting_variable_name
+ reference: Provide the solution with no question, extracting the variable name
+ 674b8811-faaf-45cf-ae5d-bdd40050273c: !Template
+ answer_choices: null
+ id: 674b8811-faaf-45cf-ae5d-bdd40050273c
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after_extracting_variable_name
+ reference: Simple question after expression extracting variable name
+ 77893b3c-d16e-4a6d-a171-aa21697b8bb7: !Template
+ answer_choices: null
+ id: 77893b3c-d16e-4a6d-a171-aa21697b8bb7
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before_extracting_variable_name
+ reference: Simple question before expression extracting variable name
+ 93201d89-2e2f-4d9b-b58c-535263be302e: !Template
+ answer_choices: null
+ id: 93201d89-2e2f-4d9b-b58c-535263be302e
+ jinja: 'If {{question[6:-7]}}, what is the value of {{question[-2]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extract_variable_name_v2
+ reference: Variant of simple_template_question_one_sentence_extract_variable_name
+ 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f4: !Template
+ answer_choices: null
+ id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f4
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before
+ reference: Simple question before expression
+ af188809-1264-45b6-ba7f-dffc66abb964: !Template
+ answer_choices: null
+ id: af188809-1264-45b6-ba7f-dffc66abb964
+ jinja: 'If {{question[6:-7]}}, what is the value of {{question[-2]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extracting_variable_name
+ reference: Put the question with the algebra equation in one sentence and extract the
+ variable name
+ e4f26ee0-c02c-4355-a242-c2b213b8761b: !Template
+ answer_choices: null
+ id: e4f26ee0-c02c-4355-a242-c2b213b8761b
+ jinja: '{{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question
+ reference: Provide the solution with no question
+ e9706771-441c-40a1-866c-c8ef7ecd98ac: !Template
+ answer_choices: null
+ id: e9706771-441c-40a1-866c-c8ef7ecd98ac
+ jinja: 'How do I solve the following algebraic equation: {{question[6:-7]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence
+ reference: Put the question with the algebra equation in one sentence.
diff --git a/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ae4bf7bf40e24c4883a44998df96c34d3e48254d
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml
@@ -0,0 +1,204 @@
+dataset: math_dataset
+subset: algebra__linear_1d_composed
+templates:
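+# Composed questions prepend context before the final ask (assumed form:
+# "<preamble> Solve <equation> for <var>."), which is why these templates split
+# on 'Solve' instead of slicing from index 6:
+#   question.split('Solve')[0]        -> "<preamble> "
+#   question.split('Solve')[1][1:-7]  -> "<equation>"  # strips the leading space and " for <var>."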
+ 0cb3708a-0004-431b-88fa-2a66b284acd5: !Template
+ answer_choices: null
+ id: 0cb3708a-0004-431b-88fa-2a66b284acd5
+ jinja: '{{question.split(''Solve'')[0]}}If {{question.split(''Solve'')[1][1:-7]}},
+ what is the value of {{question[-2]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extract_variable_name_v2
+ reference: Variant of simple_template_question_one_sentence_extract_variable_name
+ 10a6ab6c-51f1-45cf-9176-54764bb6b613: !Template
+ answer_choices: null
+ id: 10a6ab6c-51f1-45cf-9176-54764bb6b613
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after
+ reference: Simple question after expression
+ 1e769483-a2e1-4829-8bf4-72a160477094: !Template
+ answer_choices: null
+ id: 1e769483-a2e1-4829-8bf4-72a160477094
+ jinja: '{{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question_extracting_variable_name
+ reference: Provide the solution with no question, extracting the variable name
+ 49a55302-2696-4b74-8d99-8310b14504ed: !Template
+ answer_choices: null
+ id: 49a55302-2696-4b74-8d99-8310b14504ed
+ jinja: '{{question.split(''Solve'')[0]}}How do I solve the following algebraic
+ equation: {{question.split(''Solve'')[1][1:-1]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence
+ reference: Put the question with the algebra equation in one sentence.
+ 5b77efd0-4207-4390-826e-6119292393fa: !Template
+ answer_choices: null
+ id: 5b77efd0-4207-4390-826e-6119292393fa
+ jinja: '{{question.split(''Solve'')[0]}}If {{question.split(''Solve'')[1][1:-7]}},
+ what is the value of {{question[-2]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extracting_variable_name
+ reference: Put the question with the algebra equation in one sentence and extract the
+ variable name
+ 67e864ae-191e-424d-b056-a027d4eb8feb: !Template
+ answer_choices: null
+ id: 67e864ae-191e-424d-b056-a027d4eb8feb
+ jinja: '{{question.split(''Solve'')[0]}}How do I solve the following algebraic
+ equation: {{question.split(''Solve'')[1][1:-1]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_v2
+ reference: Variant of simple_template_question_one_sentence
+ 77893b3c-d16e-4a6d-a171-aa21697b8bb8: !Template
+ answer_choices: null
+ id: 77893b3c-d16e-4a6d-a171-aa21697b8bb8
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before_extracting_variable_name
+ reference: Simple question before expression extracting variable name
+ 7ac3e795-8b2e-4391-984f-e57ed2d1e3fc: !Template
+ answer_choices: null
+ id: 7ac3e795-8b2e-4391-984f-e57ed2d1e3fc
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after_extracting_variable_name
+ reference: Simple question after expression extracting variable name
+ 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f5: !Template
+ answer_choices: null
+ id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f5
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before
+ reference: Simple question before expression
+ e4f26ee0-c02c-4355-a242-c2b213b8761c: !Template
+ answer_choices: null
+ id: e4f26ee0-c02c-4355-a242-c2b213b8761c
+ jinja: '{{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question
+ reference: Provide the solution with no question
diff --git a/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..599e87b3403d9e64140be0e1137fa55854d9952b
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml
@@ -0,0 +1,200 @@
+dataset: math_dataset
+subset: algebra__linear_2d
+templates:
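+# Same fixed-form assumption as the 1d file, but 2d questions carry two equations
+# and two unknowns, which appears to be why the "How do I solve" variants use
+# question[6:-1] (keeping the "for <var>" clause that disambiguates the target
+# variable) instead of question[6:-7].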
+ 10a6ab6c-51f1-45cf-9176-54764bb6b614: !Template
+ answer_choices: null
+ id: 10a6ab6c-51f1-45cf-9176-54764bb6b614
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after
+ reference: Simple question after expression
+ 14bf9b3f-5d82-4045-a1fc-4b81b1de5b37: !Template
+ answer_choices: null
+ id: 14bf9b3f-5d82-4045-a1fc-4b81b1de5b37
+ jinja: 'If {{question[6:-7]}}, what is the value of {{question[-2]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extracting_variable_name
+ reference: Put the question with the algebra equation in one sentence and extract the
+ variable name
+ 1e769483-a2e1-4829-8bf4-72a160477095: !Template
+ answer_choices: null
+ id: 1e769483-a2e1-4829-8bf4-72a160477095
+ jinja: '{{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question_extracting_variable_name
+ reference: Provide the solution with no question, extracting the variable name
+ 674b8811-faaf-45cf-ae5d-bdd40050273e: !Template
+ answer_choices: null
+ id: 674b8811-faaf-45cf-ae5d-bdd40050273e
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after_extracting_variable_name
+ reference: Simple question after expression extracting variable name
+ 75e14243-26ed-4138-aaae-81502ad57d12: !Template
+ answer_choices: null
+ id: 75e14243-26ed-4138-aaae-81502ad57d12
+ jinja: 'How do I solve the following algebraic equation: {{question[6:-1]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_v2
+ reference: Variant of simple_template_question_one_sentence
+ 77893b3c-d16e-4a6d-a171-aa21697b8bb9: !Template
+ answer_choices: null
+ id: 77893b3c-d16e-4a6d-a171-aa21697b8bb9
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before_extracting_variable_name
+ reference: Simple question before expression extracting variable name
+ 78c9a83a-9eee-4b06-8bf0-9cd728e4ba03: !Template
+ answer_choices: null
+ id: 78c9a83a-9eee-4b06-8bf0-9cd728e4ba03
+ jinja: 'If {{question[6:-7]}}, what is the value of {{question[-2]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extract_variable_name_v2
+ reference: Variant of simple_template_question_one_sentence_extract_variable_name
+ 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f6: !Template
+ answer_choices: null
+ id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f6
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before
+ reference: Simple question before expression
+ bf8aed94-c709-4f59-b688-2b77927f11f3: !Template
+ answer_choices: null
+ id: bf8aed94-c709-4f59-b688-2b77927f11f3
+ jinja: 'How do I solve the following algebraic equation: {{question[6:-1]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence
+ reference: Put the question with the algebra equation in one sentence.
+ e4f26ee0-c02c-4355-a242-c2b213b8761d: !Template
+ answer_choices: null
+ id: e4f26ee0-c02c-4355-a242-c2b213b8761d
+ jinja: '{{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question
+ reference: Provide the solution with no question
diff --git a/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab37043cd87dfe5e793d203fdab542db8acbb160
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml
@@ -0,0 +1,204 @@
+dataset: math_dataset
+subset: algebra__linear_2d_composed
+templates:
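+# As in algebra__linear_1d_composed, these templates split on 'Solve' to separate
+# the preamble from the equation clause. The [6:-7] offsets in the "what is the
+# value" variants assume a fixed prefix inside the split substring; worth
+# double-checking against raw examples, since the 1d composed file uses [1:-7].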
+ 10a6ab6c-51f1-45cf-9176-54764bb6b615: !Template
+ answer_choices: null
+ id: 10a6ab6c-51f1-45cf-9176-54764bb6b615
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after
+ reference: Simple question after expression
+ 1e769483-a2e1-4829-8bf4-72a160477096: !Template
+ answer_choices: null
+ id: 1e769483-a2e1-4829-8bf4-72a160477096
+ jinja: '{{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question_extracting_variable_name
+ reference: Provide the solution with no question, extracting the variable name
+ 5235b176-e12e-48e5-97e8-71f3b23500f0: !Template
+ answer_choices: null
+ id: 5235b176-e12e-48e5-97e8-71f3b23500f0
+ jinja: '{{question.split(''Solve'')[0]}}If {{question.split(''Solve'')[1][6:-7]}},
+ what is the value of {{question[-2]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extract_variable_name_v2
+ reference: Variant of simple_template_question_one_sentence_extract_variable_name
+ 674b8811-faaf-45cf-ae5d-bdd40050273f: !Template
+ answer_choices: null
+ id: 674b8811-faaf-45cf-ae5d-bdd40050273f
+ jinja: '{{question}}
+
+ What is the solution to the previous algebraic expression?
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_after_extracting_variable_name
+ reference: Simple question after expression extracting variable name
+ 77893b3c-d16e-4a6d-a171-aa21697b8bc7: !Template
+ answer_choices: null
+ id: 77893b3c-d16e-4a6d-a171-aa21697b8bc7
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ {% set variable_name = question[-2] %}
+
+ {{variable_name}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before_extracting_variable_name
+ reference: Simple question before expression extracting variable name
+ 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f7: !Template
+ answer_choices: null
+ id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f7
+ jinja: 'What is the solution to the following algebraic expression?
+
+ {{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_before
+ reference: Simple question before expression
+ 9bb96e5c-691a-4f36-999b-0e777f338e1c: !Template
+ answer_choices: null
+ id: 9bb96e5c-691a-4f36-999b-0e777f338e1c
+ jinja: '{{question.split(''Solve'')[0]}}How do I solve the following algebraic
+ equation: {{question.split(''Solve'')[1][1:-1]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence
+ reference: Put the question with the algebra equation in one sentence.
+ a9969d3b-ad3e-4bb2-b798-8f04fae60ae6: !Template
+ answer_choices: null
+ id: a9969d3b-ad3e-4bb2-b798-8f04fae60ae6
+ jinja: '{{question.split(''Solve'')[0]}}If {{question.split(''Solve'')[1][6:-7]}},
+ what is the value of {{question[-2]}}?
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_extracting_variable_name
+ reference: Put the question with the algebra equation in one sentence and extract the
+ variable name
+ e4f26ee0-c02c-4355-a242-c2b213b8761e: !Template
+ answer_choices: null
+ id: e4f26ee0-c02c-4355-a242-c2b213b8761e
+ jinja: '{{question}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: no_question
+ reference: Provide the solution with no question
+ ef18f49f-e97a-41ea-9b76-3422111a23eb: !Template
+ answer_choices: null
+ id: ef18f49f-e97a-41ea-9b76-3422111a23eb
+ jinja: '{{question.split(''Solve'')[0]}}How do I solve the following algebraic
+ equation: {{question.split(''Solve'')[1][1:-1]}}?
+
+
+ {{question[-2]}}=
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_template_question_one_sentence_v2
+ reference: Variant of simple_template_question_one_sentence
diff --git a/promptsource/templates/math_qa/templates.yaml b/promptsource/templates/math_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c1e4f5492532a44b233f306a76a8d4466b874f1c
--- /dev/null
+++ b/promptsource/templates/math_qa/templates.yaml
@@ -0,0 +1,101 @@
+dataset: math_qa
+templates:
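+# Field assumptions for this file (hedged, per the math_qa schema): `options` is a
+# single preformatted string such as
+#   "a ) 38 , b ) 27.625 , c ) 30 , d ) 34 , e ) 27"
+# and `correct` is the bare letter ("a".."e"), which is why answer_choices lists
+# the letters rather than the option texts.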
+ 091ba88e-d208-4a3a-ada7-d9698aeb5568: !Template
+ answer_choices: a ||| b ||| c ||| d ||| e
+ id: 091ba88e-d208-4a3a-ada7-d9698aeb5568
+ jinja: 'You will receive full credit if you solve the following word problem:
+
+ {{Problem}}
+
+
+ {{options}}|||
+
+ {{correct}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_correct_variant
+ reference: Variant of choose_correct.
+ 6312d599-8ca4-4bc8-a76f-81f2e36727bd: !Template
+ answer_choices: 'a ||| b ||| c ||| d ||| e '
+ id: 6312d599-8ca4-4bc8-a76f-81f2e36727bd
+ jinja: 'Given the following problem:
+
+ {{Problem}}
+
+ ===
+
+ and the following options, select the correct option
+
+ {{options}}|||
+
+ {{correct}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_correct_og
+ reference: The template asks the model to choose the correct answer.
+ 8c4c81cc-ca54-45fc-a69a-4b97a5f2b465: !Template
+ answer_choices: a ||| b ||| c ||| d ||| e
+ id: 8c4c81cc-ca54-45fc-a69a-4b97a5f2b465
+ jinja: "Solve the problem by choosing the correct answer: \n{{Problem}}\n\n{{options}}|||\n\
+ {{correct}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_the_correct
+ reference: The template picks the correct answer choice.
+ a313a5f8-53cd-4b76-abb6-fea2ac4e9ef4: !Template
+ answer_choices: a ||| b ||| c ||| d ||| e
+ id: a313a5f8-53cd-4b76-abb6-fea2ac4e9ef4
+ jinja: "One of the five choices are correctly answers the math problem. Can you\
+ \ choose the right one? \n\n{{options}}\n\nProblem: {{Problem}}\n|||\n{{correct}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: first_choice_then_problem
+ reference: First give the list of choices and then describe the problem
+ a3c2ec72-4af5-42aa-9e8e-ef475fa7c039: !Template
+ answer_choices: general ||| physics ||| gain ||| geometry ||| probability |||
+ other
+ id: a3c2ec72-4af5-42aa-9e8e-ef475fa7c039
+ jinja: "Given the problem below, in what category would you classify it?\n===\n\
+ {{Problem}} \n\nCategories:\n{{answer_choices | join(\"\\n\")}}\n|||\n{{category}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: problem_set_type
+ reference: The template asks the model to predict the category of the problem.
+ eb89c860-5849-461a-9081-3bd466f5642c: !Template
+ answer_choices: a ||| b ||| c ||| d ||| e
+ id: eb89c860-5849-461a-9081-3bd466f5642c
+ jinja: "Solve this advanced GRE problem: \n{{Problem}}\n\n{{options}}|||\n{{correct}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: gre_problem
+ reference: Template uses "GRE" as a prefix/template.
diff --git a/promptsource/templates/mc_taco/templates.yaml b/promptsource/templates/mc_taco/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e101ac9de4d23cbb358795591b154e0202c4b813
--- /dev/null
+++ b/promptsource/templates/mc_taco/templates.yaml
@@ -0,0 +1,245 @@
+dataset: mc_taco
+templates:
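+# Label conventions assumed below (per the mc_taco schema): `label` is binary with
+# 0 = implausible and 1 = plausible, so "No ||| Yes" indexes correctly, and
+# `category` runs 0..4 in the order Event Duration, Event Ordering, Frequency,
+# Typical Time, Stationarity.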
+ 1b27afce-9748-44bd-9d82-9db4b815c292: !Template
+ answer_choices: No ||| Yes
+ id: 1b27afce-9748-44bd-9d82-9db4b815c292
+ jinja: 'Given the context,
+
+
+ {{sentence}}
+
+
+ observe the following QA pair and check if the answer is plausible:
+
+
+ Question: {{question}}
+
+
+ Answer: {{answer}} |||
+
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: observe_check_plausible_yes_no
+ reference: Basic Context, QA Pair, ask for plausibility
+ 38ab730f-1ed8-4362-99e1-c0d305aa056e: !Template
+ answer_choices: plausible ||| implausible
+ id: 38ab730f-1ed8-4362-99e1-c0d305aa056e
+ jinja: "I've been grappling with the temporal accuracy of this answer for a while:\n\
+ \nQ: \"{{question}}\"\n\nI have the following information: \"{{sentence}}\"\n\
+ \nA: \"{{answer}}\" \n\nThis answer is definitely not ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plausible_negated
+ reference: Context, QA Pair, Negation
+ 5bc98cb7-350e-471e-b986-ad52a46f403c: !Template
+ answer_choices: Event Duration ||| Event Ordering ||| Frequency ||| Typical Time
+ ||| Stationarity
+ id: 5bc98cb7-350e-471e-b986-ad52a46f403c
+ jinja: 'There are five temporal categories: {{"Event Duration"}}, {{"Event Ordering"}},
+ {{"Frequency"}}, {{"Typical Time"}}, {{"Stationarity"}}.
+
+
+ Out of the above temporal categories, which one does the question "{{question}}"
+ belong to? |||
+
+ {{answer_choices[category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: temporal_categories_with_choices
+ reference: Temporal categories as part of the prompt
+ 5e5cedef-b943-439a-a75a-1140478b0620: !Template
+ answer_choices: null
+ id: 5e5cedef-b943-439a-a75a-1140478b0620
+ jinja: '{% if label %}
+
+ I have the following passage:
+
+
+ {{sentence}}
+
+
+ My query is: "{{question}}"
+
+
+ I want an answer that is "temporally plausible". |||
+
+
+ {{answer}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_answer_from_question_and_context
+ reference: Generate answer from question+context (if plausible)
+ 8423a3fa-adcf-4d36-b639-774bd13ac3fe: !Template
+ answer_choices: No ||| Yes
+ id: 8423a3fa-adcf-4d36-b639-774bd13ac3fe
+ jinja: 'Here''s what happened: {{sentence}}
+
+
+ I asked my friend {{question}}
+
+
+ and they said {{answer}}
+
+
+ Should I believe them?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asked_my_friend
+ reference: ''
+ 89aaa7f3-d409-4a27-acd5-a207b431b12c: !Template
+ answer_choices: No ||| Yes
+ id: 89aaa7f3-d409-4a27-acd5-a207b431b12c
+ jinja: 'Given the context, the question, and the candidate answer, the task is
+ to determine whether the candidate answer is plausible ("yes") or not ("no").
+
+
+ Context: {{sentence}}
+
+
+ Question: {{question}}
+
+
+ Candidate answer: {{answer}}
+
+ |||
+
+ {{answer_choices[label]}}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: formal_description
+ reference: Taken from the description of the dataset.
+ a2896c7d-d443-4a3a-847c-9896a166a4b5: !Template
+ answer_choices: No ||| Yes
+ id: a2896c7d-d443-4a3a-847c-9896a166a4b5
+ jinja: 'Given the context,
+
+
+ {{sentence}}
+
+
+ and the question,
+
+
+ {{question}}
+
+
+ is the following answer believable?
+
+
+ {{answer}} |||
+
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: believable
+ reference: ''
+ b08c4c20-f8a2-4bdb-8a9b-235f782c7386: !Template
+ answer_choices: False ||| True
+ id: b08c4c20-f8a2-4bdb-8a9b-235f782c7386
+ jinja: 'True/False?
+
+
+ "{{answer}}" is a plausible answer to "{{question}}", given "{{sentence}}" |||
+
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plausible_true_false
+ reference: Context, QA Pair, T/F question
+ df9ad236-1385-48ea-b056-171aa3f8d0bd: !Template
+ answer_choices: Event Duration ||| Event Ordering ||| Frequency ||| Typical Time
+ ||| Stationarity
+ id: df9ad236-1385-48ea-b056-171aa3f8d0bd
+ jinja: 'Which temporal category does the question "{{question}}" belong to? |||
+
+
+ {{answer_choices[category]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: temporal_categories_no_choices
+ reference: Question provided, predict Temporal Category
+ fb4f8f70-c1cc-4004-97a5-cd131259d318: !Template
+ answer_choices: Yes ||| No
+ id: fb4f8f70-c1cc-4004-97a5-cd131259d318
+ jinja: 'Here''s what happened: {{sentence}}
+
+
+ I asked my friend {{question}}
+
+
+ and they said {{answer}}
+
+
+ Should I doubt them?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: asked_my_friend_doubt
+ reference: ''
diff --git a/promptsource/templates/mdd/task1_qa/templates.yaml b/promptsource/templates/mdd/task1_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4661dd67ffbb0f786aefe2e8ed02706f936a1385
--- /dev/null
+++ b/promptsource/templates/mdd/task1_qa/templates.yaml
@@ -0,0 +1,103 @@
+dataset: mdd
+subset: task1_qa
+templates:
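+# Why the split capitalization below: Jinja's `capitalize` lowercases everything
+# after the first letter, which would mangle proper nouns inside the question.
+# Applying it to utterance[0][0] only, then appending utterance[0][1:], uppercases
+# the first character and leaves the rest untouched, e.g.
+#   "what movies did Tom Hanks star in?" -> "What movies did Tom Hanks star in?"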
+ 0523ad87-64ab-4a9c-8772-56deda832ab4: !Template
+ answer_choices: null
+ id: 0523ad87-64ab-4a9c-8772-56deda832ab4
+ jinja: "Using the internet, answer the following question:\n\n{{ dialogue_turns.utterance[0][0]\
+ \ | capitalize }}{{ dialogue_turns.utterance[0][1:] }} \n|||\n{{dialogue_turns.utterance[1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: using_internet_answer
+ reference: Template asks the model to answer the question with a prefix "using
+ the internet"
+ 1aa44534-ddbf-4478-9682-50192183ab82: !Template
+ answer_choices: null
+ id: 1aa44534-ddbf-4478-9682-50192183ab82
+ jinja: 'Given the best answer(s), "{{ dialogue_turns.utterance[1] }}", generate
+ a movie-trivia question:
+
+ |||
+
+ {{ dialogue_turns.utterance[0][0] | capitalize }}{{ dialogue_turns.utterance[0][1:]
+ }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: prompt_generate_question
+ reference: Given the answer and a prompt, generate a question related to it.
+ 59b9d82e-b778-429c-a45c-a27d6abdf13a: !Template
+ answer_choices: null
+ id: 59b9d82e-b778-429c-a45c-a27d6abdf13a
+ jinja: "{{ dialogue_turns.utterance[0][0] | capitalize }}{{ dialogue_turns.utterance[0][1:]\
+ \ }} \n|||\n{{dialogue_turns.utterance[1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: question_answering
+ reference: Given a question, return the answer.
+ a0b665ab-001f-44f6-9094-d2d5ef60926c: !Template
+ answer_choices: null
+ id: a0b665ab-001f-44f6-9094-d2d5ef60926c
+ jinja: "Complete this movie-trivia-related dialogue between Speaker {{ dialogue_turns.speaker[0]\
+ \ }} and Speaker {{ dialogue_turns.speaker[1] }} by answering Speaker {{ dialogue_turns.speaker[0]\
+ \ }}'s question as Speaker {{ dialogue_turns.speaker[1] }}.\n\nSpeaker {{ dialogue_turns.speaker[0]\
+ \ }}: {{ dialogue_turns.utterance[0][0] | capitalize }}{{ dialogue_turns.utterance[0][1:]\
+ \ }} \n\nSpeaker {{ dialogue_turns.speaker[1] }}:\n|||\n{{dialogue_turns.utterance[1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: question_answering_speaker
+ reference: Given a question, return the answer with speaker information.
+ bedf40a1-630a-4aae-ad2f-cfc90f77fb9f: !Template
+ answer_choices: null
+ id: bedf40a1-630a-4aae-ad2f-cfc90f77fb9f
+ jinja: 'Generate a movie-trivia question for the answer(s): {{ dialogue_turns.utterance[1]
+ }}
+
+ |||
+
+ {{ dialogue_turns.utterance[0][0] | capitalize }}{{ dialogue_turns.utterance[0][1:]
+ }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: Given the answer, generate a question related to it.
+ cba3d029-993d-4a95-a213-0e70efde6009: !Template
+ answer_choices: null
+ id: cba3d029-993d-4a95-a213-0e70efde6009
+ jinja: "Answer the following question:\n\n{{ dialogue_turns.utterance[0][0] |\
+ \ capitalize }}{{ dialogue_turns.utterance[0][1:] }} \n|||\n{{dialogue_turns.utterance[1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: prompt_question_answering
+ reference: Given a question and a prompt, return the answer.
diff --git a/promptsource/templates/mdd/task2_recs/templates.yaml b/promptsource/templates/mdd/task2_recs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9837c2cfc2eea46e1e8ecdd06eeb1d587cb1932
--- /dev/null
+++ b/promptsource/templates/mdd/task2_recs/templates.yaml
@@ -0,0 +1,56 @@
+dataset: mdd
+subset: task2_recs
+templates:
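+# The literal-list `| choice` in the last template of this file picks one
+# conversational frame at random per render, e.g.
+#   {{ ["Someone said:", "He said:", "She said:"] | choice }}  ->  "She said:"
+# so the same example yields varied phrasings across renders.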
+ 3596d528-12c6-440b-bdc0-d61076b108c5: !Template
+ answer_choices: null
+ id: 3596d528-12c6-440b-bdc0-d61076b108c5
+ jinja: "Complete this movie-trivia-related dialogue between Speaker {{ dialogue_turns.speaker[0]\
+ \ }} and Speaker {{ dialogue_turns.speaker[1] }} by answering Speaker {{ dialogue_turns.speaker[0]\
+ \ }}'s question as Speaker {{ dialogue_turns.speaker[1] }}.\n\nSpeaker {{dialogue_turns.speaker[0]}}:\
+ \ {{dialogue_turns.utterance[0]}}\n\nSpeaker {{dialogue_turns.speaker[1]}}:\
+ \ \n|||\n{{dialogue_turns.utterance[1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: recommend_movies_speaker
+ reference: Given likes, recommend a movie with speaker information.
+ 6f0eb61c-d9f9-4e52-a317-3d7b8049eb9b: !Template
+ answer_choices: null
+ id: 6f0eb61c-d9f9-4e52-a317-3d7b8049eb9b
+ jinja: '{{dialogue_turns.utterance[0]}}
+
+ |||
+
+ {{dialogue_turns.utterance[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: recommend_movies
+ reference: Given the likes, recommend a movie.
+ 8948a52c-a422-4858-bbf7-19790597d278: !Template
+ answer_choices: null
+ id: 8948a52c-a422-4858-bbf7-19790597d278
+ jinja: '{{ ["Someone said:", "He said:", "She said:", "They said:", "A friend
+ asked me:", "A colleague asked me:"] | choice }} {{ dialogue_turns.utterance[0]
+ }}
+
+ |||
+
+ {{dialogue_turns.utterance[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: recommend_movies_dialogue
+ reference: Given the likes, recommend a movie as a dialogue
diff --git a/promptsource/templates/mdd/task3_qarecs/templates.yaml b/promptsource/templates/mdd/task3_qarecs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a323105bc22bcc1ba40761c30697c612e11e2117
--- /dev/null
+++ b/promptsource/templates/mdd/task3_qarecs/templates.yaml
@@ -0,0 +1,227 @@
+dataset: mdd
+subset: task3_qarecs
+templates:
+ 1614890b-362c-4ee8-850d-841cf511d169: !Template
+ answer_choices: null
+ id: 1614890b-362c-4ee8-850d-841cf511d169
+ jinja: '{% if dialogue_turns.utterance|length==6%}
+
+ Complete this movie-trivia-related dialogue between Speaker {{ dialogue_turns.speaker[0]
+ }} and Speaker {{ dialogue_turns.speaker[1] }} by answering Speaker {{ dialogue_turns.speaker[0]
+ }}''s question as Speaker {{ dialogue_turns.speaker[1] }}.
+
+
+ Speaker {{ dialogue_turns.speaker[0] }}: {{dialogue_turns.utterance[0]}}
+
+
+ Speaker {{ dialogue_turns.speaker[1] }}: {{dialogue_turns.utterance[1]}}
+
+
+ Speaker {{ dialogue_turns.speaker[2] }}: {{dialogue_turns.utterance[2]}}
+
+
+ Speaker {{ dialogue_turns.speaker[3] }}:
+
+ |||
+
+ {{dialogue_turns.utterance[3]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: next_utterance_4_for_6
+ reference: Given the first three turns, generate the next utterance for dialogues
+ of length 6.
+ 3e5a19e5-aa33-467a-bcd2-f84d99f32759: !Template
+ answer_choices: null
+ id: 3e5a19e5-aa33-467a-bcd2-f84d99f32759
+ jinja: '{{ ["Someone said:", "He said:", "She said:", "They said:", "A friend
+ asked me:", "A colleague asked me:"] | choice }} "{{dialogue_turns.utterance[0]}}"
+ Which movie will you recommend?
+
+ |||
+
+ {{dialogue_turns.utterance[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: recommend_movie_first_round
+ reference: Given likes, recommend a movie.
+ 76888d6e-76fa-47db-a8b3-9980f082df51: !Template
+ answer_choices: null
+ id: 76888d6e-76fa-47db-a8b3-9980f082df51
+ jinja: ' {% set context_init = ["", "Someone said:", "He said:", "She said:",
+ "They asked:", "A friend asked me:", "A colleague asked me:"]|choice %} {% set
+ pronoun = "he" %}
+
+ {% if dialogue_turns.utterance|length==6 %}
+
+ {% if "He" in context_init %}
+
+ {% set pronoun = "he" %}
+
+ {% elif "She" in context_init %}
+
+ {% set pronoun = "she" %}
+
+ {% elif "They" in context_init or "Someone" in context_init%}
+
+ {% set pronoun = "they" %}
+
+ {% elif "colleague" in context_init or "friend" in context_init %}
+
+ {% set pronoun = ["he","she","they"]|choice %} {%endif%}
+
+ {{context_init}}{% if context_init=="" %}{{dialogue_turns.utterance[4]}}|||{{dialogue_turns.utterance[5]}}
+
+ {% else %} "{{dialogue_turns.utterance[4]}}". Which movie do you think {{pronoun}}
+ will like?|||{{dialogue_turns.utterance[5]}}{% endif %}
+
+ {% else %}
+
+ |||
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: recommend_movie_second_round
+ reference: Given a single preference, recommend a movie. Only works for dialogues
+ with 6 utterances.
+ 91f33bcf-3c0e-49e6-ae86-28f77e224734: !Template
+ answer_choices: null
+ id: 91f33bcf-3c0e-49e6-ae86-28f77e224734
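+ # The Jinja below picks a random framing phrase, then derives matching pronouns
+ # from it; dialogues that do not have exactly 6 utterances render an empty
+ # prompt/target pair (presumably so they can be filtered out downstream).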
+ jinja: ' {% set context_init= ["I am aware", "Someone is aware", "He is aware",
+ "She is aware", "They are aware", "A friend is aware", "A colleague is aware",
+ "A person is aware", "I know", "Someone knows", "He knows", "She knows", "They
+ know", "A friend knows", "A colleague knows", "A person knows"]|choice %}
+
+ {% set pronoun = "he" %} {% set pronoun_2 = "him" %} {% set choice_idx = 0 %}
+
+ {% if dialogue_turns.utterance|length==6 %}
+
+ {% if "He" in context_init %}
+
+ {% set pronoun = "he" %}
+
+ {% set pronoun_2 = "him" %}
+
+ {% elif "I" in context_init %}
+
+ {% set pronoun = "I" %}
+
+ {% set pronoun_2 = "me" %}
+
+ {% elif "She" in context_init %}
+
+ {% set pronoun = "she" %}
+
+ {% set pronoun_2 = "her" %}
+
+ {% elif "They" in context_init or "Someone" in context_init or "person" in context_init%}
+
+ {% set pronoun = "they" %}
+
+ {% set pronoun_2 = "them" %}
+
+ {% elif "colleague" in context_init or "friend" in context_init %}
+
+ {% set choice_idx = range(3)|list|choice %}
+
+ {% set pronoun = ["he","she","they"][choice_idx] %}
+
+ {% set pronoun_2 = ["him","her","them"][choice_idx] %}
+
+ {%endif%}
+
+ {{context_init}} that the movie {{dialogue_turns.utterance[1]}} is related
+ to {{dialogue_turns.utterance[3]}}.
+
+ Also, {% if pronoun!="I" %}{{pronoun}} said: "{{dialogue_turns.utterance[4]}}".
+ Can you recommend a movie for {{pronoun_2}} please?|||{{dialogue_turns.utterance[5]}}{%else%}{{dialogue_turns.utterance[4]}}|||{{dialogue_turns.utterance[5]}}{%
+ endif %}
+
+ {% else %}
+
+ |||
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: recommend_movie_second_round_with_context
+ reference: Given a previous suggestion, some movie description, and a preference,
+ recommend a movie.
+ de1179b3-b6d6-4acf-9b0a-82cb2fa9d58f: !Template
+ answer_choices: null
+ id: de1179b3-b6d6-4acf-9b0a-82cb2fa9d58f
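+ # `speaker[-1]` / `utterance[-1]` below index the final turn, so the template
+ # works for both 4- and 6-utterance dialogues (the {% if %} adds turns 4-5 when
+ # they are present).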
+ jinja: 'Complete this movie-trivia-related dialogue between Speaker {{ dialogue_turns.speaker[0]
+ }} and Speaker {{ dialogue_turns.speaker[1] }} by answering Speaker {{ dialogue_turns.speaker[0]
+ }}''s question as Speaker {{ dialogue_turns.speaker[1] }}.
+
+
+ Speaker {{ dialogue_turns.speaker[0] }}: {{dialogue_turns.utterance[0]}}
+
+
+ Speaker {{ dialogue_turns.speaker[1] }}: {{dialogue_turns.utterance[1]}}
+
+
+ Speaker {{ dialogue_turns.speaker[2] }}: {{dialogue_turns.utterance[2]}}
+
+
+ {% if dialogue_turns.utterance|length==6 %}Speaker {{ dialogue_turns.speaker[3]
+ }}: {{dialogue_turns.utterance[3]}}
+
+
+ Speaker {{ dialogue_turns.speaker[4] }}: {{dialogue_turns.utterance[4]}}
+
+
+ {% endif %}Speaker {{ dialogue_turns.speaker[-1] }}:|||
+
+ {{dialogue_turns.utterance[-1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: next_utterance_4_and_6
+ reference: Given the preceding turns, return the next utterance.
+ e37a6f9c-344c-4b85-a41f-85bb84bab934: !Template
+ answer_choices: null
+ id: e37a6f9c-344c-4b85-a41f-85bb84bab934
+ jinja: 'Answer the following question about the movie {{dialogue_turns.utterance[1]}}:
+
+
+ {{dialogue_turns.utterance[2]}}
+
+ |||
+
+ {{dialogue_turns.utterance[3]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: qa_about_movie
+ reference: Given the movie name and a question, answer the question.
diff --git a/promptsource/templates/medal/templates.yaml b/promptsource/templates/medal/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ddb431afd485e12bbcfef10affec0a6ee194d94
--- /dev/null
+++ b/promptsource/templates/medal/templates.yaml
@@ -0,0 +1,132 @@
+dataset: medal
+templates:
+ 389edf89-c645-4502-b524-d338089a6145: !Template
+ answer_choices: null
+ id: 389edf89-c645-4502-b524-d338089a6145
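+ # In medal, `location` lists the token offsets of the abbreviations in `text`
+ # and `label` their expansions, so `tokenised_text[acronym]` below recovers the
+ # abbreviation's surface form (a reading based on the dataset's schema).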
+ jinja: '{% set tokenised_text = text.split('' '') %}
+
+ {% set acronym = location[0] %}
+
+ Based on the text below, how would you disambiguate the abbreviation "{{ tokenised_text[acronym]
+ }}"?
+
+
+
+ {{text}}
+
+
+ |||
+
+ {{ label[0] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: disambiguate_abbrev_text_after
+ reference: ''
+ 38d3122d-45e0-46d7-b61d-77971b4366f9: !Template
+ answer_choices: null
+ id: 38d3122d-45e0-46d7-b61d-77971b4366f9
+ jinja: "{% set tokenised_text = text.split(' ') %}\n{% set acronym = location[0]\
+ \ %}\n\n{{tokenised_text[0:location[0]]|join(' ') }} {{ label[0]}} {{tokenised_text[location[0]+1:tokenised_text|length]|join('\
+ \ ') }} \n\nI am a doctor, and I need to replace the phrase \"{{ label[0] }}\"\
+ \ with its correct medical abbreviation. Can you suggest to me the acronym based\
+ \ on the PubMed abstract above?\n\n|||\n{{ tokenised_text[acronym] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: create_abbrev_doctor
+ reference: ''
+ 474fc22a-f7ab-4785-8d54-1ace15e6081a: !Template
+ answer_choices: null
+ id: 474fc22a-f7ab-4785-8d54-1ace15e6081a
+ jinja: '{% set tokenised_text = text.split('' '') %}
+
+ {% set acronym = location[0] %}
+
+ As a layperson, I would like to understand the meaning of the abbreviation "{{
+ tokenised_text[acronym] }}". Could you disambiguate it for me based on the text
+ below?
+
+
+
+ {{text}}
+
+
+ |||
+
+ {{ label[0] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: disambiguate_abbrev_layperson
+ reference: ''
+ 5e3772f9-4891-47ac-b5c6-5e86be17a600: !Template
+ answer_choices: null
+ id: 5e3772f9-4891-47ac-b5c6-5e86be17a600
+ jinja: "{% set tokenised_text = text.split(' ') %}\n{% set acronym = location[0]\
+ \ %}\n\n{{tokenised_text[0:location[0]]|join(' ') }} {{ label[0]}} {{tokenised_text[location[0]+1:tokenised_text|length]|join('\
+ \ ') }} \n\nGiven the PubMed abstract above, what could be the abbreviation\
+ \ for the token: \"{{ label[0] }}\"?\n\n|||\n{{ tokenised_text[acronym] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: create_abbrev_before
+ reference: ''
+ 6f18eefa-660b-47d9-8093-fc628c3c38a0: !Template
+ answer_choices: null
+ id: 6f18eefa-660b-47d9-8093-fc628c3c38a0
+ jinja: "{% set tokenised_text = text.split(' ') %}\n{% set acronym = location[0]\
+ \ %}\nBased on the text below, how would you abbreviate the token: \"{{ label[0]\
+ \ }}\"?\n\n{{tokenised_text[0:location[0]]|join(' ') }} {{ label[0]}} {{tokenised_text[location[0]+1:tokenised_text|length]|join('\
+ \ ') }} \n\n|||\n{{ tokenised_text[acronym] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: create_abbrev_after
+ reference: ''
+ 767d7896-e29b-4dfb-b82c-a18b356edfd5: !Template
+ answer_choices: null
+ id: 767d7896-e29b-4dfb-b82c-a18b356edfd5
+ jinja: '{% set tokenised_text = text.split('' '') %}
+
+ {% set acronym = location[0] %}
+
+
+ {{text}}
+
+
+ Please write what the abbreviation "{{ tokenised_text[acronym] }}" means in
+ the text above.
+
+ |||
+
+ {{ label[0] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: disambiguate_abbrev_text_before
+ reference: ''
diff --git a/promptsource/templates/medical_questions_pairs/templates.yaml b/promptsource/templates/medical_questions_pairs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..99951fe3e4eddd6d128d47f05758903c8a8f433b
--- /dev/null
+++ b/promptsource/templates/medical_questions_pairs/templates.yaml
@@ -0,0 +1,155 @@
+dataset: medical_questions_pairs
+templates:
+ 18c92f97-0655-4f67-aca1-69f8e4fbb11e: !Template
+ answer_choices: different ||| paraphrase
+ id: 18c92f97-0655-4f67-aca1-69f8e4fbb11e
+ jinja: 'In the context of healthcare questionnaires, it is often necessary to
+ find out if two questions are paraphrases of one another. Given the following
+ question:
+
+
+ Question 1: {{question_1}}
+
+
+ Is the following question a {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+
+ Question 2: {{question_2}}
+
+
+
+ |||
+
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: in_the_context
+ reference: ''
+ 316f2ff7-45f8-4997-9c5f-dfe5fb7f9808: !Template
+ answer_choices: False ||| True
+ id: 316f2ff7-45f8-4997-9c5f-dfe5fb7f9808
+ jinja: "Question 1: {{question_1}}\n\nand\n\nQuestion 2: {{question_2}}\n\n Is\
+ \ it {{answer_choices[1]}} or {{answer_choices[0]}} that the two questions above\
+ \ are paraphrases of each other?\n\n|||\n\n {{answer_choices[label]}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: true_or_false
+ reference: ''
+ 535ed335-de9d-41df-a026-28487c832bfa: !Template
+ answer_choices: no ||| yes
+ id: 535ed335-de9d-41df-a026-28487c832bfa
+ jinja: 'Question: In the context of healthcare, do the following questions mean
+ the same thing?
+
+
+ Question 1: {{question_1}}
+
+
+ Question 2: {{question_2}}
+
+
+ {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: yes_or_no_in_the_context
+ reference: template about question pattern
+ 681dc0d2-a771-41ae-aa00-d1f59ab01197: !Template
+ answer_choices: not duplicates ||| duplicates
+ id: 681dc0d2-a771-41ae-aa00-d1f59ab01197
+ jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\n Pick one\
+ \ of the following options:\n Questions are {{answer_choices[1]}} or {{answer_choices[0]}}\n\
+ \n ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: duplicates
+ reference: ''
+ 7be2b267-8d5c-466b-9fd4-1fbbae442938: !Template
+ answer_choices: No ||| Yes
+ id: 7be2b267-8d5c-466b-9fd4-1fbbae442938
+ jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\nQuestion:\
+ \ Is Question 1 asking the same question as Question 2? {{answer_choices[1]}}\
+ \ or {{answer_choices[0]}}?\n\n\n ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: same_question
+ reference: context of healthcare
+ 854ebbe0-8968-4967-a346-4e4d6f98cf73: !Template
+ answer_choices: False ||| True
+ id: 854ebbe0-8968-4967-a346-4e4d6f98cf73
+ jinja: "Question 1: {{question_1}}\n\nOne possible way of paraphrasing the same\
+ \ question is: \n\nQuestion 2: {{question_2}}\n\n{{answer_choices[1]}} or {{answer_choices[0]}}?\n\
+ \n ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: possible_way
+ reference: ''
+ b388913a-9b0d-43a1-8bf9-83319ebf38b2: !Template
+ answer_choices: related question ||| rewrite
+ id: b388913a-9b0d-43a1-8bf9-83319ebf38b2
+ jinja: 'Original question: {{question_1}}
+
+ Given this question, doctors were asked to either:
+
+ - Rewrite the question so that it kept the same intent
+
+ - Create a related question for which the original answer is unrelated or wrong
+
+ Is the following question a {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+ New question: {{question_2}} ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: rewrite_or_related
+ reference: Using knowledge of dataset creation
+ c8df74ce-0ae7-4e70-9322-aaf9921ae3b1: !Template
+ answer_choices: dissimilar ||| similar
+ id: c8df74ce-0ae7-4e70-9322-aaf9921ae3b1
+ jinja: "These two questions are either {{answer_choices[1]}} or {{answer_choices[0]}}.\
+ \ \n\n{{question_1}} \n\nand\n\n{{question_2}}\n\nWhich is it? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: similar_dissimilar
+ reference: ''
diff --git a/promptsource/templates/meta_woz/dialogues/templates.yaml b/promptsource/templates/meta_woz/dialogues/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6dfea4dfafefd688f7cd42ecd5218a90fb8f6b4d
--- /dev/null
+++ b/promptsource/templates/meta_woz/dialogues/templates.yaml
@@ -0,0 +1,178 @@
+dataset: meta_woz
+subset: dialogues
+templates:
+ 4b64c6e9-0aa0-431f-85b5-8367daa4773a: !Template
+ answer_choices: null
+ id: 4b64c6e9-0aa0-431f-85b5-8367daa4773a
+ jinja: "What does this conversation between a Chatbot and a client talk about\
+ \ ? \n{% for utterance in turns %}\n{{[\"Client\", \"Chatbot\"][loop.index %\
+ \ 2]}}: {{utterance}}\n{% endfor %}\n|||\n{{domain.replace('_', ' ') | lower\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: predict_domain_chatbot_human_chat
+ reference: ''
+ 56151136-82a4-455b-98ed-aea6ee1c273d: !Template
+ answer_choices: null
+ id: 56151136-82a4-455b-98ed-aea6ee1c273d
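+ # Truncates the dialogue at a random point (at least two turns in) and asks for
+ # the next utterance. The `namespace` object carries the cut-off index out of
+ # the for loop, since a plain `set` inside a Jinja loop does not escape it.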
+ jinja: '{% set count = namespace(value=0) %}
+
+ {% for i in range(range(2, turns|length) | random() - 1) %}
+
+ {{["AI Assistant", "Client"][i% 2]}}: {{turns[i]}}
+
+ {% set count.value= i + 1 %}
+
+ {% endfor %}
+
+ {{["AI Assistant", "Client"][ count.value % 2]}}:
+
+ |||
+
+ {{turns[count.value] }}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: predict_random_stop
+ reference: ''
+ 5e90f705-9d63-4917-acc9-3baabc6ee5e9: !Template
+ answer_choices: null
+ id: 5e90f705-9d63-4917-acc9-3baabc6ee5e9
+ jinja: "{% set rand_index= namespace(value=range(turns|length)|random()) %}\n\
+ {% for utterance in turns %}\n{% if loop.index0 == rand_index.value %}\n{{[\"\
+ Chatbot\", \"Human\"][loop.index0 % 2]}}: (blank)\n{% else %}\n{{[\"Chatbot\"\
+ , \"Human\"][loop.index0 % 2]}}: {{utterance}}\n{% endif %}\n{% endfor %}\n\
+ Fill in the blank.\n|||\n{{turns[rand_index.value] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: predict_blank_response_random
+ reference: ''
+ 77a9e854-08ef-4f2d-86f9-ed077f18b39d: !Template
+ answer_choices: null
+ id: 77a9e854-08ef-4f2d-86f9-ed077f18b39d
+ jinja: "This conversation is between an AI assistant and a human. What is the\
+ \ human inquiring about? \n{% for utterance in turns %}\n{{[\"Human\", \"AI\
+ \ assistant\"][loop.index % 2]}}: {{utterance}}\n{% endfor %}\n|||\n{{domain.replace('_',\
+ \ ' ') | lower }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: predict_domain_ai_human_question
+ reference: ''
+ 9150f1e0-8b9e-4f24-bc58-1cbb230cb8d9: !Template
+ answer_choices: null
+ id: 9150f1e0-8b9e-4f24-bc58-1cbb230cb8d9
+ jinja: "This conversation is between an AI assistant and a human. what does it\
+ \ talk about ? \n{% for utterance in turns %}\n{{[\"Human\", \" AI assistant\"\
+ ][loop.index % 2]}}: {{utterance}}\n{% endfor %}\n|||\n{{domain.replace('_',\
+ \ ' ') | lower }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: predict_domain_ai_human_chat
+ reference: ''
+ a02a980d-630f-4845-8b4d-b0eb072110e9: !Template
+ answer_choices: null
+ id: a02a980d-630f-4845-8b4d-b0eb072110e9
+ jinja: '{% for utterance in turns[:-1] %}
+
+ {{["User", "Bot"][loop.index % 2]}}: {{utterance}}
+
+ {% endfor %}
+
+ {{["User", "Bot"][ turns | length % 2]}}:
+
+ |||
+
+ {{turns[-1]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: predict_last_statement_user_bot
+ reference: ''
+ a4a8c7a1-1747-4560-8365-e998b1d2cfdf: !Template
+ answer_choices: null
+ id: a4a8c7a1-1747-4560-8365-e998b1d2cfdf
+ jinja: '{% for utterance in turns[:-1] %}
+
+ {{["Human", "Chatbot"][loop.index % 2]}}: {{utterance}}
+
+ {% endfor %}
+
+ {{["Human", "Chatbot"][ turns | length % 2]}}:
+
+ |||
+
+ {{turns[-1]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: predict_last_statement_human_chatbot
+ reference: ''
+ eba8d3ba-88f8-4257-a252-b02eafcfc463: !Template
+ answer_choices: null
+ id: eba8d3ba-88f8-4257-a252-b02eafcfc463
+ jinja: '{% for utterance in turns[:-1] %}
+
+ {{["Client", "AI Assistant"][loop.index % 2]}}: {{utterance}}
+
+ {% endfor %}
+
+ {{["Client", "AI Assistant"][ turns | length % 2]}}:
+
+ |||
+
+ {{turns[-1]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: predict_last_statement_client_ai
+ reference: ''
diff --git a/promptsource/templates/mocha/templates.yaml b/promptsource/templates/mocha/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3871dad625dd6f31d210126d9259386e75ac0992
--- /dev/null
+++ b/promptsource/templates/mocha/templates.yaml
@@ -0,0 +1,170 @@
+dataset: mocha
+templates:
+ 1c390ee6-fab9-4b16-8028-2649fca56866: !Template
+ answer_choices: null
+ id: 1c390ee6-fab9-4b16-8028-2649fca56866
+ jinja: "On a scale of 1.0 (completely different) to 5 (identical), how similar\
+ \ are these two sentences \"{{candidate}}\" and \"{{reference}}\"? \nThese sentences\
+ \ answer the question \"{{ question }}\" given the context \"{{ context }}\".\
+ \n|||\n{{ score }}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: score_candidate_with_question_context_interrogative
+ reference: Similarity measure between candidate and reference answers (in a regression
+ manner)
+ 2816084e-0193-4284-9a4f-9de4ae03e9d6: !Template
+ answer_choices: null
+ id: 2816084e-0193-4284-9a4f-9de4ae03e9d6
+ jinja: "Given the passage and the answers given below, generate a relevant question.\n\
+ \nPassage: {{ context }}\n\nAnswer 1 (Correct): {{ reference }}\n\nAnswer 2:\
+ \ {{ candidate }}\n{% if candidate2 %}\nAnswer 3: {{ candidate2 }}\n{% endif\
+ \ %} \n|||\n{{ question }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: Given passage and the answers, generate a question for the gold answer.
+ 31e49d18-800f-4d16-8d84-86509db30499: !Template
+ answer_choices: Similar ||| Not similar
+ id: 31e49d18-800f-4d16-8d84-86509db30499
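+ # `score` is a 1-5 similarity judgment: values above 3 map to "Similar", values
+ # below 3 to "Not similar", and the ambiguous midpoint of 3 yields an empty
+ # target so those examples are effectively skipped.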
+ jinja: "Person A: {{ question }}\n\nPerson B: {{ reference }}\n\nPerson C: {{\
+ \ candidate }}\n\nDoes Person B give a similar answer to Person C? Answer \"\
+ {{ answer_choices[0] }}\" or \"{{ answer_choices[1] }}\".\n\n|||\n{% if score\
+ \ != 3 %}\n{{ [answer_choices[1], answer_choices[0]][score > 3] }} \n{% endif\
+ \ %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: classify_similarity_candidate_with_ques
+ reference: Similarity measure between candidate and reference answers (in a classification
+ manner)
+ 46e52ca4-7203-4e92-a0ac-c412494903c9: !Template
+ answer_choices: null
+ id: 46e52ca4-7203-4e92-a0ac-c412494903c9
+ jinja: 'Given these two sentences "{{candidate}}" and "{{reference}}", return
+ a value on a scale of 1.0 (completely different) to 5.0 (identical) indicating
+ their similarity.
+
+ These sentences answer the following question about the given context.
+
+ Question: {{ question }}
+
+ Context: {{ context }}
+
+ |||
+
+ {{ score }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: score_candidate_with_question_context_affirmative
+ reference: ''
+ 5098f807-5558-4d19-af12-7bb87cbc59f0: !Template
+ answer_choices: null
+ id: 5098f807-5558-4d19-af12-7bb87cbc59f0
+ jinja: 'Give the similarity measure (on a scale of 1.0 to 5.0) for answers A and
+ B. A value of 1.0 means completely different, whereas a value of 5.0 means identical.
+
+
+ Question: {{ question }}
+
+
+ Answer A: "{{reference}}"
+
+
+ Answer B: "{{candidate}}"
+
+ |||
+
+ {{ score }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: score_candidate_with_question
+ reference: Similarity measure between candidate and reference answers (in a regression
+ manner)
+ 6570aa7f-de3d-489e-8565-72fb535b1f10: !Template
+ answer_choices: null
+ id: 6570aa7f-de3d-489e-8565-72fb535b1f10
+ jinja: "How similar are Sentence A and B? Output the result value between 1.0\
+ \ (completely different) and 5.0 (identical).\n\nA: \"{{candidate}}\"\n\nB:\
+ \ \"{{reference}}\" \n|||\n{{ score }}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: score_candidate_no_ques_no_context_interrogative
+ reference: Similarity measure between candidate and reference answers (in a regression
+ manner)
+ 7ebdd3bc-4896-425b-b8c2-3e4ea3944de8: !Template
+ answer_choices: null
+ id: 7ebdd3bc-4896-425b-b8c2-3e4ea3944de8
+ jinja: '{{ context }}
+
+
+ Given the passage above, what is the answer to the question "{{ question }}"
+
+ |||
+
+ {{ reference }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_correct_answer_with_noisy_candidates
+ reference: Given the passage and the question, generate the correct answer.
+ d64dec9f-94c3-4cd5-9900-2a6ea8f03416: !Template
+ answer_choices: null
+ id: d64dec9f-94c3-4cd5-9900-2a6ea8f03416
+ jinja: 'Output the similarity value between 1.0 (completely different) and 5.0
+ (identical) for Sentence A and B.
+
+
+ A: "{{candidate}}"
+
+
+ B: "{{reference}}"
+
+ |||
+
+ {{ score }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: score_candidate_no_ques_no_context_affirmative
+ reference: ''
diff --git a/promptsource/templates/movie_rationales/templates.yaml b/promptsource/templates/movie_rationales/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dd333c98e56961cf3c83b243f3e884f6dc111bfd
--- /dev/null
+++ b/promptsource/templates/movie_rationales/templates.yaml
@@ -0,0 +1,103 @@
+dataset: movie_rationales
+templates:
+ 3ea71512-c48a-4898-8e29-6169a7a00752: !Template
+ answer_choices: Negative ||| Positive
+ id: 3ea71512-c48a-4898-8e29-6169a7a00752
+ jinja: "Review: {{review}} \n===\nIs this review {{answer_choices[0]}} or {{answer_choices[1]}}?\
+ \ |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Standard binary sentiment analysis
+ reference: Standard binary sentiment analysis
+ 5aaa7d8b-631a-4972-aeca-20a4e0518a60: !Template
+ answer_choices: Negative ||| Positive
+ id: 5aaa7d8b-631a-4972-aeca-20a4e0518a60
+ jinja: 'Evidences:
+
+ - {{ evidences | join("\n- ") }}
+
+ ===
+
+ Based on these review excerpts, is the review {{answer_choices[0]}} or
+ {{answer_choices[1]}}? ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Evidences sentiment classification
+ reference: Only taking the evidences as input
+ b953c90c-722a-487e-ab8d-c83ae45de139: !Template
+ answer_choices: Negative ||| Positive
+ id: b953c90c-722a-487e-ab8d-c83ae45de139
+ jinja: 'Review: {{review}}
+
+
+ Highlighted extracts:
+
+ - {{ evidences | join("\n- ") }}
+
+ ===
+
+ Based on this review and the highlighted extracts from the review, decide whether
+ this review is {{answer_choices[0]}} or {{answer_choices[1]}}. ||| {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Evidences + review
+ reference: Classification based both on evidences and review
+ e517bce9-5820-4f20-ad86-b2e3db9e6731: !Template
+ answer_choices: null
+ id: e517bce9-5820-4f20-ad86-b2e3db9e6731
+ jinja: 'Review: {{review}}
+
+ ===
+
+ This review is {% if label == 1 %}positive{% else %}negative{% endif %}. Extract
+ from it the passages that indicate this. |||
+
+ - {{ evidences | join("\n- ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Generate evidences
+ reference: From the review, extract the spans of text that indicate whether the
+ review is positive or negative.
+ f11ea73a-3a03-43d8-90d8-4da3905161c2: !Template
+ answer_choices: Negative ||| Positive
+ id: f11ea73a-3a03-43d8-90d8-4da3905161c2
+ jinja: 'Review: {{review}}
+
+ ====
+
+ Is this review {{answer_choices[0]}} or {{answer_choices[1]}}? Extract from
+ the review the passages that prove this choice. |||
+
+ {{answer_choices[label]}}
+
+ - {{ evidences | join("\n- ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Generate evidences and sentiment
+ reference: From the review, determine whether it is negative or positive and extract
+ the passages supporting this choice
diff --git a/promptsource/templates/multi_news/templates.yaml b/promptsource/templates/multi_news/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2cea0f9845d8779872e3913186ce139ba0547e26
--- /dev/null
+++ b/promptsource/templates/multi_news/templates.yaml
@@ -0,0 +1,189 @@
+dataset: multi_news
+templates:
+ 12269bd1-1c3a-4865-9702-892782b593d9: !Template
+ answer_choices: null
+ id: 12269bd1-1c3a-4865-9702-892782b593d9
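+ # The hex string below appears to be a sentinel substituted for multi_news's
+ # original "|||||" article separator, which would collide with the "|||"
+ # prompt/target delimiter; `summary[2:]` strips the leading "– " that the
+ # dataset prepends to every summary.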
+ jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+ "") | list %}
+
+ {% if document != "" %}
+
+ What are the key points across these news articles:
+
+ {% for doc in docs %}
+
+
+ Article: {{doc}}
+
+ {% endfor %}
+
+ |||
+
+ {{summary[2:]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: what are the key points
+ reference: ''
+ 940d0ce4-c1ef-4453-a47b-1abaaf811160: !Template
+ answer_choices: null
+ id: 940d0ce4-c1ef-4453-a47b-1abaaf811160
+ jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+ "") | list %}
+
+ {% if document != "" %}
+
+ Synthesize these documents into a single one:
+
+ {% for doc in docs %}
+
+
+ - {{doc}}
+
+ {% endfor %}
+
+ |||
+
+ {{summary[2:]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: synthesize
+ reference: ''
+ 9ab370ad-2b89-4d2a-bb40-ccc31accefad: !Template
+ answer_choices: null
+ id: 9ab370ad-2b89-4d2a-bb40-ccc31accefad
+ jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+ "") | list %}
+
+ {% if document != "" %}
+
+ I want to edit the following articles into a more concise summary:
+
+ {% for doc in docs %}
+
+
+ Article: {{doc}}
+
+ {% endfor %}
+
+ |||
+
+ {{summary[2:]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: summary scenario
+ reference: ''
+ b15485f5-2bd9-4ed4-98ce-4b241a341f99: !Template
+ answer_choices: null
+ id: b15485f5-2bd9-4ed4-98ce-4b241a341f99
+ jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+ "") | list %}
+
+ {% if document != "" %}
+
+ Write a summary of the following articles:
+
+ {% for doc in docs %}
+
+
+ Document: {{doc}}
+
+ {% endfor %}
+
+ |||
+
+ {{summary[2:]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: summarize
+ reference: ''
+ bc910e51-c0a9-473c-aa85-adcab21b9ba9: !Template
+ answer_choices: null
+ id: bc910e51-c0a9-473c-aa85-adcab21b9ba9
+ jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+ "") | list%}
+
+ {% if document != "" %}
+
+ Write an expanded news article with plausible details from the following summary:
+
+ {{summary[2:]}}
+
+ |||
+
+ {{docs | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: expand (reverse task)
+ reference: ''
+ d5a4bb2a-634a-4e9a-9f1f-b0803894ca0f: !Template
+ answer_choices: null
+ id: d5a4bb2a-634a-4e9a-9f1f-b0803894ca0f
+ jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+ "") | list %}
+
+ {% if document != "" %}
+
+ I''m trying to distill these articles down into one:
+
+ {% for doc in docs %}
+
+
+ Article: {{doc}}
+
+ {% endfor %}
+
+ |||
+
+ {{summary[2:]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: distill
+ reference: ''
diff --git a/promptsource/templates/multi_nli/templates.yaml b/promptsource/templates/multi_nli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e6de4e00b5339372e88fc24b4e2ff4b9d765b8d4
--- /dev/null
+++ b/promptsource/templates/multi_nli/templates.yaml
@@ -0,0 +1,221 @@
+dataset: multi_nli
+templates:
+ 001bd025-1fcb-4c4b-b5dd-d8bb83f82d13: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 001bd025-1fcb-4c4b-b5dd-d8bb83f82d13
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ 0e75e339-433b-459e-830e-557d7a07611b: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 0e75e339-433b-459e-830e-557d7a07611b
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 133081b4-2cab-4e4e-9844-783f672b3f88: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 133081b4-2cab-4e4e-9844-783f672b3f88
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ 58aceb5e-4fe5-43ae-9687-09bdfab3ac9f: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 58aceb5e-4fe5-43ae-9687-09bdfab3ac9f
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 592682cb-0a33-4110-bc6a-903ac2f50deb: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: 592682cb-0a33-4110-bc6a-903ac2f50deb
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ 6eef4333-fb4a-433b-aed5-cd00ec48b01f: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 6eef4333-fb4a-433b-aed5-cd00ec48b01f
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ 6f4e6af1-cac4-4674-8d69-8d922516abe0: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: 6f4e6af1-cac4-4674-8d69-8d922516abe0
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ 9a8a7fc4-952a-4f8d-81ce-7d0199e6a4fc: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 9a8a7fc4-952a-4f8d-81ce-7d0199e6a4fc
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ a2838b52-b225-4574-b417-9f56d5015c09: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: a2838b52-b225-4574-b417-9f56d5015c09
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ acac14f2-bfbd-4235-a813-3ee031b7dc32: !Template
+ answer_choices: True ||| Neither ||| False
+ id: acac14f2-bfbd-4235-a813-3ee031b7dc32
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ is no task identifying tokens like "anli R1: ".'
+ be5843b5-83bc-4ffd-8f06-5d01321ff709: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: be5843b5-83bc-4ffd-8f06-5d01321ff709
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ ca3e9c3f-f0c4-4325-9a61-ba21ddd70464: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: ca3e9c3f-f0c4-4325-9a61-ba21ddd70464
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ ebe5e520-6441-48ec-88a2-b5f427d460c5: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: ebe5e520-6441-48ec-88a2-b5f427d460c5
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ eec653b4-b4e5-47f1-8077-7d5c4df072c7: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: eec653b4-b4e5-47f1-8077-7d5c4df072c7
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ f7ddd15e-6af2-429e-8551-3465fb541e9f: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: f7ddd15e-6af2-429e-8551-3465fb541e9f
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
diff --git a/promptsource/templates/multi_x_science_sum/templates.yaml b/promptsource/templates/multi_x_science_sum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ad69ad16127074b80f929d82415e6dc2016a6d5c
--- /dev/null
+++ b/promptsource/templates/multi_x_science_sum/templates.yaml
@@ -0,0 +1,168 @@
+dataset: multi_x_science_sum
+templates:
+ 2bca0197-e3d4-4870-bd95-178411e52e09: !Template
+ answer_choices: null
+ id: 2bca0197-e3d4-4870-bd95-178411e52e09
+ jinja: 'Write the related work section of a paper which references articles with
+ the following abstracts:
+
+
+ {% for abs in ref_abstract["abstract"] %}
+
+
+ {{ref_abstract["cite_N"][loop.index-1]}}: {{abs}}
+
+
+ {% endfor %} |||
+
+
+ {{related_work}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: generate_related_work_from_ref_abstracts
+ reference: https://arxiv.org/pdf/2010.14235.pdf
+ 3bd082cb-4e28-4eb7-9fa2-dd03f1f86219: !Template
+ answer_choices: null
+ id: 3bd082cb-4e28-4eb7-9fa2-dd03f1f86219
+ jinja: 'Given the abstract of a paper, provide some text for its related work
+ section, for readers who want to learn more.
+
+ {{abstract}} |||
+
+ {{related_work}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: generate_related_work_from_abstract
+ reference: ''
+ 52ffecee-4298-4611-a3dd-cc0ee3856259: !Template
+ answer_choices: null
+ id: 52ffecee-4298-4611-a3dd-cc0ee3856259
+ jinja: 'I am reading a paper with the given abstract.
+
+ {{abstract}}
+
+ Following are the abstracts for the articles that the paper references.
+
+ {% for abs in ref_abstract["abstract"] %}
+
+ {{ref_abstract["cite_N"][loop.index-1]}}: {{abs}}
+
+ {% endfor %}
+
+ Can you use the above information to come up with appropriate text for the
+ related work section of the paper? |||
+
+ {{related_work}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: given_abst_and_ref_generate_related_work
+ reference: ''
+ af4d550e-54b8-471e-97af-2b2c50a1382e: !Template
+ answer_choices: null
+ id: af4d550e-54b8-471e-97af-2b2c50a1382e
+ jinja: 'What would be the abstract of a paper with the following related work
+ section?
+
+ Related work:
+
+ {{related_work}} |||
+
+ {{abstract}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: false
+ name: generate_abstract_from_related_work
+ reference: ''
+ b3040734-3670-4d6f-8e9d-0dbfbbb59abf: !Template
+ answer_choices: null
+ id: b3040734-3670-4d6f-8e9d-0dbfbbb59abf
+ jinja: 'For a paper with the given abstract that references other papers with the
+ following abstracts, write the related work section:
+
+ {{abstract}}
+
+ Reference Abstracts:
+
+ {% for abs in ref_abstract["abstract"] %}
+
+ {{ref_abstract["cite_N"][loop.index-1]}}: {{abs}}
+
+ {% endfor %} |||
+
+ {{related_work}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: generate_related_work_from_abst_and_ref
+ reference: https://arxiv.org/pdf/2010.14235.pdf
+ b9eb67b4-415b-4a38-a90a-9dee3ae385d7: !Template
+ answer_choices: null
+ id: b9eb67b4-415b-4a38-a90a-9dee3ae385d7
+ jinja: 'Given the abstract of a research work along with the abstracts of some
+ references, generate the text of the related work section.
+
+ {{abstract}}
+
+ Reference Abstracts:
+
+ {% for abs in ref_abstract["abstract"] %}
+
+ {{ref_abstract["cite_N"][loop.index-1]}}: {{abs}}
+
+ {% endfor %} |||
+
+ {{related_work}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: generate_related_work_from_abstract_and_related_abstract
+ reference: ''
+ f59d7e5a-5982-467f-b451-91154e311666: !Template
+ answer_choices: null
+ id: f59d7e5a-5982-467f-b451-91154e311666
+ jinja: 'Following are the abstracts of some related work. Can you use them to
+ generate the paper''s abstract?
+
+ {% for abs in ref_abstract["abstract"] %}
+
+ {{ref_abstract["cite_N"][loop.index-1]}}: {{abs}}
+
+ {% endfor %} |||
+
+ {{abstract}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: false
+ name: generate_abstract_from_reference
+ reference: ''
diff --git a/promptsource/templates/mwsc/templates.yaml b/promptsource/templates/mwsc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5dfc0a52aa903fb9cc72ed43a6487c10bb8e2e86
--- /dev/null
+++ b/promptsource/templates/mwsc/templates.yaml
@@ -0,0 +1,74 @@
+dataset: mwsc
+templates:
+ 66c3e53a-2f2f-4ab4-b17b-ca42535d4ea1: !Template
+ answer_choices: '{{options | join(" ||| ")}}'
+ id: 66c3e53a-2f2f-4ab4-b17b-ca42535d4ea1
+ jinja: '{{ question|trim(''?'') }} in the sentence "{{ sentence|trim(''.'') }}"?
+ ||| {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: in-the-sentence-question-first
+ reference: ''
+ 8d4f3463-d64b-43be-b0ed-2455cb99e017: !Template
+ answer_choices: '{{options | join(" ||| ")}}'
+ id: 8d4f3463-d64b-43be-b0ed-2455cb99e017
+ jinja: If I were to say "{{sentence}}" and then ask you "{{ question }}", what
+ do you think is the correct answer out of "{{ options|join('" and "')}}"? |||
+ {{ answer }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what-think
+ reference: ''
+ a37a2745-c815-4f3a-8f78-3da2fceae7fe: !Template
+ answer_choices: '{{options | join(" ||| ")}}'
+ id: a37a2745-c815-4f3a-8f78-3da2fceae7fe
+ jinja: In the sentence "{{ sentence|trim('.') }}", {{ question[0]|lower }}{{ question[1:]
+ }} ||| {{ answer }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: in-the-sentence
+ reference: ''
+ ad4b74f6-6b2f-40a8-8189-4ada58d64fd4: !Template
+ answer_choices: '{{options | join(" ||| ")}}'
+ id: ad4b74f6-6b2f-40a8-8189-4ada58d64fd4
+ jinja: '{{sentence}} {{ question }} Was it "{{options|join(''" or "'')}}"? |||
+ {{ answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: options-or
+ reference: ''
+ f0e01268-c83b-4785-b593-48eb4f9173cd: !Template
+ answer_choices: Yes ||| No
+ id: f0e01268-c83b-4785-b593-48eb4f9173cd
+ jinja: '{{ sentence }} Would "{{ options[0] }}" be correct if I were to ask you
+ {{question[0]|lower }}{{ question[1:] }} ||| {% if answer == options[0] %} {{answer_choices[0]}}
+ {% else %} {{answer_choices[1]}} {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is-correct
+ reference: ''
diff --git a/promptsource/templates/narrativeqa/templates.yaml b/promptsource/templates/narrativeqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f20511d413c9a8780788e31ee1fa29e6463c6bb0
--- /dev/null
+++ b/promptsource/templates/narrativeqa/templates.yaml
@@ -0,0 +1,165 @@
+dataset: narrativeqa
+templates:
+ 3b53c95c-022b-4a51-946a-6c88b962892a: !Template
+ answer_choices: null
+ id: 3b53c95c-022b-4a51-946a-6c88b962892a
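+ # narrativeqa provides several reference answers per question; the target side
+ # below uses `map(attribute="text") | list | choice` to render one at random.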
+ jinja: 'Synopsis: {{ document.summary.text }}
+
+
+ Answer the question.
+
+
+ {{ question.text }}
+
+
+ Full text: {{ document.text }} |||
+
+ {{answers | map(attribute="text") | list | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: answer_using_summary_full_text
+ reference: Text + Summary
+ 62df1726-be83-4403-9483-732da1174cf7: !Template
+ answer_choices: null
+ id: 62df1726-be83-4403-9483-732da1174cf7
+ jinja: '{{ document.text }}
+
+
+ Using the above text, answer the following question.
+
+
+ {{ question.text }} |||
+
+ {{answers | map(attribute=''text'') | list | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: given_text_answer_question
+ reference: Text only
+ 87cb2e86-0764-412a-ba2d-fe3172997a25: !Template
+ answer_choices: null
+ id: 87cb2e86-0764-412a-ba2d-fe3172997a25
+ jinja: '{{ document.text }}
+
+
+ Summarize the given document. |||
+
+ {{ document.summary.text }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: summarize_text
+ reference: Text summarization
+ adfa9f6d-5268-472f-b435-c4558f199961: !Template
+ answer_choices: null
+ id: adfa9f6d-5268-472f-b435-c4558f199961
+ jinja: "Full text: {{ document.text }}\n\nQuestion: {{ question.text }} \n\nAnswer\
+ \ the question using the full text. \n|||\n{{answers | map(attribute=\"text\"\
+ ) | list | choice }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: full_text_question_answer
+ reference: Text only
+ c1170dbc-8b83-4797-aa32-ccafcfddad9e: !Template
+ answer_choices: null
+ id: c1170dbc-8b83-4797-aa32-ccafcfddad9e
+ jinja: 'Answer the question using the information given in the synopsis below.
+
+
+ {{ document.summary.text }}
+
+
+ Question: {{ question.text }} |||
+
+ {{answers | map(attribute="text") | list | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: given_summary_answer_text
+ reference: Summary only
+ ce20a6d2-df6b-4279-bde9-6543ef23ecff: !Template
+ answer_choices: null
+ id: ce20a6d2-df6b-4279-bde9-6543ef23ecff
+ jinja: 'Below is the summary of a document.
+
+
+ {{ document.summary.text }}
+
+
+ What is the answer to the following query?
+
+
+ {{ question.text }} |||
+
+ {{answers | map(attribute="text") | list | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: summary_answer_query
+ reference: Summary only
+ e750f922-2e1b-45c8-98de-3ada1fbde16b: !Template
+ answer_choices: null
+ id: e750f922-2e1b-45c8-98de-3ada1fbde16b
+ jinja: '{{ document.text }}
+
+
+ Can you briefly recapitulate the above document? |||
+
+ {{ document.summary.text }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: recapitulate_given_text
+ reference: Text Summarization
+ f6bedb53-fc87-47f3-94b2-07adb0de2e42: !Template
+ answer_choices: null
+ id: f6bedb53-fc87-47f3-94b2-07adb0de2e42
+ jinja: "State the main points mentioned in the below text.\n\n{{ document.text\
+ \ }}\n |||\n{{ document.summary.text }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: state_main_points
+ reference: Text Summarization
diff --git a/promptsource/templates/ncbi_disease/templates.yaml b/promptsource/templates/ncbi_disease/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6318a4842defb548d038af99e09a30a7bb11b343
--- /dev/null
+++ b/promptsource/templates/ncbi_disease/templates.yaml
@@ -0,0 +1,306 @@
+dataset: ncbi_disease
+templates:
+ 04458e59-37f1-48dc-bb20-823e836a8c44: !Template
+ answer_choices: null
+ id: 04458e59-37f1-48dc-bb20-823e836a8c44
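+ # The target side walks `ner_tags` (0 = outside, 1 = beginning, 2 = inside a
+ # Disease span), capitalizes the first token of each mention, and joins distinct
+ # mentions with commas. The same construction recurs in the other extraction
+ # templates in this file.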
+ jinja: 'What are the diseases mentioned in the following text?
+
+ {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+ == 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_question_asking_response_as_text
+ reference: Simple question asking for the diseases present in a text; the response
+ is free text.
+ 4e96f535-07d4-4c71-8816-3c1cb1900090: !Template
+ answer_choices: null
+ id: 4e96f535-07d4-4c71-8816-3c1cb1900090
+ jinja: 'Identify the names of diseases mentioned in the following text (if no
+ diseases are mentioned, output {{"None"}}):
+
+ {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+ == 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: identify_diseases_names_mentioned
+ reference: Ask the model to identify diseases present in the text
+ 5a693a8f-07a2-4d88-ab3a-337b1029d9a2: !Template
+ answer_choices: No ||| Yes
+ id: 5a693a8f-07a2-4d88-ab3a-337b1029d9a2
+ jinja: 'Are there diseases mentioned in the following text?
+
+ {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ |||
+
+ {% set vars = {''no_disease'': True} %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ vars.update({''no_disease'': False}) | default("", True) }}
+
+ {% endif %}
+
+ {% endfor %}
+
+
+ {{answer_choices[0] if vars.no_disease else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_asking_diseases_presence
+ reference: Ask for disease presence in the text
+ 699d4b6c-2910-4f78-88e6-d73a190a7dc5: !Template
+ answer_choices: null
+ id: 699d4b6c-2910-4f78-88e6-d73a190a7dc5
+ jinja: '{{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ List the diseases mentioned in the previous text.
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+ == 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: list_diseases_mentioned_after_text
+ reference: Ask the model to list diseases present in the text
+ ecd88889-84fa-4e28-ac0f-3bc1564e838b: !Template
+ answer_choices: null
+ id: ecd88889-84fa-4e28-ac0f-3bc1564e838b
+ jinja: 'List the diseases mentioned in the following text (write {{"None"}} if
+ no disease is mentioned):
+
+ {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+ == 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: list_diseases_mentioned
+ reference: Ask the model to list diseases present in the text
+ f4cad387-e558-4087-854f-0991f7aafca8: !Template
+ answer_choices: null
+ id: f4cad387-e558-4087-854f-0991f7aafca8
+ jinja: '{{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ What are the diseases mentioned in the previous text?
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+ == 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: simple_question_asking_response_as_text_after_text
+    reference: Simple question asking for a text response, posing the question after
+      the text
+ f91d18a6-6581-4379-8b46-06a9ef44b401: !Template
+ answer_choices: No ||| Yes
+ id: f91d18a6-6581-4379-8b46-06a9ef44b401
+ jinja: '{{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+ Are there diseases mentioned in the previous text?
+
+ |||
+
+ {% set vars = {''no_disease'': True} %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ vars.update({''no_disease'': False}) | default("", True) }}
+
+ {% endif %}
+
+ {% endfor %}
+
+
+ {{answer_choices[0] if vars.no_disease else answer_choices[1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: question_asking_diseases_presence_after_text
+ reference: Asking for disease presence after the text
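Note: the six ncbi_disease templates above all share the same Jinja pattern for turning token-level NER tags into a comma-separated list of disease mentions. A minimal Python sketch of the equivalent logic, assuming the dataset's tag scheme (0 = outside, 1 = start of a mention, 2 = continuation):

```python
def extract_diseases(tokens, ner_tags):
    """Python equivalent of the Jinja loop used in the ncbi_disease templates."""
    parts = []
    disease_started = False
    for token, tag in zip(tokens, ner_tags):
        if tag > 0:
            disease_started = True
            parts.append(" ")
            # Capitalize the first token of each mention, as the templates do.
            parts.append(token[0].upper() + token[1:] if tag == 1 else token)
        elif disease_started:
            # A mention just ended; separate it from the next one.
            disease_started = False
            parts.append(",")
    return "".join(parts).strip().strip(",") or "None"

# extract_diseases(["adenomatous", "polyposis", "coli", "is", "rare", "."],
#                  [1, 2, 2, 0, 0, 0])
# -> "Adenomatous polyposis coli"
```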
diff --git a/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml b/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..10293c2afb60ad8b60a3b05838a73b1f9c3f8f7f
--- /dev/null
+++ b/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml
@@ -0,0 +1,51 @@
+dataset: neural_code_search
+subset: evaluation_dataset
+templates:
+ 30858249-c732-46a6-85b5-466fe964c4d4: !Template
+ answer_choices: null
+ id: 30858249-c732-46a6-85b5-466fe964c4d4
+ jinja: 'Description:
+
+ {{ question }}
+
+
+ Implementation:
+
+ |||
+
+ {{ answer }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: generate code given a description
+ reference: ''
+ 34f4095d-0ce0-42d5-8070-1626dd51b987: !Template
+ answer_choices: null
+ id: 34f4095d-0ce0-42d5-8070-1626dd51b987
+ jinja: 'Given the following code:
+
+ {{ answer }}
+
+ Describe it:
+
+ |||
+
+ {{ question }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate a description given code
+ reference: ''
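Note: these YAML files are consumed through promptsource's `DatasetTemplates` API, and applying a template to a dataset example yields the input and target split on `|||`. A short usage sketch, assuming the standard promptsource API and that this subset exposes a `train` split:

```python
from datasets import load_dataset
from promptsource.templates import DatasetTemplates

dataset = load_dataset("neural_code_search", "evaluation_dataset")
prompts = DatasetTemplates("neural_code_search/evaluation_dataset")

template = prompts["generate code given a description"]
input_text, target = template.apply(dataset["train"][0])  # split name assumed
```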
diff --git a/promptsource/templates/newspop/templates.yaml b/promptsource/templates/newspop/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2c240dc0d559c8618ccd5192cca20c7d3e09bbe
--- /dev/null
+++ b/promptsource/templates/newspop/templates.yaml
@@ -0,0 +1,124 @@
+dataset: newspop
+templates:
+ 3f264cef-d7f0-4f62-81f6-fb2292ab59dd: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: 3f264cef-d7f0-4f62-81f6-fb2292ab59dd
+ jinja: "Title: {{title}} \nHeadline: {{headline}}\n\nIs this article about {{answer_choices[0]}},\
+ \ {{answer_choices[1]}}, {{answer_choices[2]}} or {{answer_choices[3]}}?\n|||\n\
+ \n{{topic|capitalize}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: after_text_question
+ reference: ''
+ 5053ada5-7fd3-491f-981e-be64ba35aa39: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: 5053ada5-7fd3-491f-981e-be64ba35aa39
+ jinja: "What is the following article about?\n\nTitle: {{title}} \nHeadline: {{headline}}\n\
+ \n|||\n\n{{topic|capitalize}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: before_text_question_no_choice
+ reference: ''
+ 590017ce-8204-400d-a172-93da2337aa6f: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: 590017ce-8204-400d-a172-93da2337aa6f
+ jinja: "Title: {{title}} \nHeadline: {{headline}}\n\nWhat is this news about?\n\
+ |||\n\n{{topic|capitalize}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: after_text_question_no_choice
+ reference: ''
+ 71d4d30d-7340-4ad4-bbfe-d587361c3ad8: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: 71d4d30d-7340-4ad4-bbfe-d587361c3ad8
+ jinja: "{{title}}\n{{headline}}\n\nTopic: \n\n|||\n\n{{topic|capitalize}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: topic_no_choice_in_template
+ reference: ''
+ 8fdfb019-2f82-4f94-a703-355b40ed9de2: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: 8fdfb019-2f82-4f94-a703-355b40ed9de2
+ jinja: "Title: {{title}} \n\nIs this article about {{answer_choices[0]}}, {{answer_choices[1]}},\
+ \ {{answer_choices[2]}} or {{answer_choices[3]}}?\n|||\n\n{{topic|capitalize}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: after_text_question_title_only
+ reference: ''
+ b2b833b7-e431-40b8-869e-a675daa7e392: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: b2b833b7-e431-40b8-869e-a675daa7e392
+ jinja: 'Headline: {{headline}}
+
+
+ Is this article about {{answer_choices[0]}}, {{answer_choices[1]}}, {{answer_choices[2]}}
+ or {{answer_choices[3]}}?
+
+ |||
+
+
+ {{topic|capitalize}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: after_text_question_headline_only
+ reference: ''
+ e4cadaf7-5330-418c-bf8e-4897a39467f5: !Template
+ answer_choices: null
+ id: e4cadaf7-5330-418c-bf8e-4897a39467f5
+ jinja: "Summarize the headline to a title:\n {{headline}}\n\nThe title is:\n\n\
+ |||\n\n{{title}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: headline_to_title
+    reference: Summarize the headline into a title.
+ f8c33f6b-86a8-46bb-b26f-9a38a91207e3: !Template
+ answer_choices: Economy ||| Microsoft ||| Obama ||| Palestine
+ id: f8c33f6b-86a8-46bb-b26f-9a38a91207e3
+ jinja: "Is the following article about {{answer_choices[0]}}, {{answer_choices[1]}},\
+ \ {{answer_choices[2]}} or {{answer_choices[3]}}?\n\nTitle: {{title}} \nHeadline:\
+ \ {{headline}}\n\n|||\n\n{{topic|capitalize}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: before_text_question
+ reference: ''
diff --git a/promptsource/templates/nlu_evaluation_data/templates.yaml b/promptsource/templates/nlu_evaluation_data/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f99cc205ac98f571a833a7c8a26503bbae6f1b2a
--- /dev/null
+++ b/promptsource/templates/nlu_evaluation_data/templates.yaml
@@ -0,0 +1,211 @@
+dataset: nlu_evaluation_data
+templates:
+ 0fed9f2f-a53f-48c7-a625-dbd39cfb809c: !Template
+ answer_choices: alarm query ||| alarm remove ||| alarm set ||| audio volume down
+ ||| audio volume mute ||| audio volume other ||| audio volume up ||| calendar
+ query ||| calendar remove ||| calendar set ||| cooking query ||| cooking recipe
+ ||| date time convert ||| date time query ||| email add contact ||| email query
+ ||| email query contact ||| email send email ||| general affirm ||| general
+ command stop ||| general confirm ||| general don't care ||| general explain
+ ||| general greet ||| general joke ||| general negate ||| general praise |||
+ general quirky ||| general repeat ||| internet of things cleaning ||| internet
+ of things coffee ||| internet of things hue light change ||| internet of things
+ hue light dim ||| internet of things hue light off ||| internet of things hue
+ light on ||| internet of things hue light up ||| internet of things wemo off
+ ||| internet of things wemo on ||| lists create or add ||| lists query ||| lists
+ remove ||| music dislikeness ||| music likeness ||| music query ||| music settings
+ ||| news query ||| play audiobook ||| play game ||| play music ||| play podcasts
+ ||| play radio ||| question answer currency ||| question answer definition |||
+ question answer factoid ||| question answer maths ||| question answer stock
+ ||| recommendation events ||| recommendation locations ||| recommendation movies
+ ||| social post ||| social query ||| takeaway order ||| takeaway query ||| transport
+ query ||| transport taxi ||| transport ticket ||| transport traffic ||| weather
+ query
+ id: 0fed9f2f-a53f-48c7-a625-dbd39cfb809c
+ jinja: 'What is the user who made this request trying to do?
+
+ Choices are {{answer_choices[0:-1] | join('', '') }} or {{answer_choices[-1]}}.
+
+ Request: {{text}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what_does_user_want_choices
+ reference: ''
+ 23659b4a-c740-46d5-adb9-1261026dc1ce: !Template
+ answer_choices: alarm ||| audio ||| calendar ||| cooking ||| datetime ||| email
+ ||| general ||| iot ||| lists ||| music ||| news ||| play ||| qa ||| recommendation
+ ||| social ||| takeaway ||| transport ||| weather
+ id: 23659b4a-c740-46d5-adb9-1261026dc1ce
+ jinja: 'What service does the following piece of text talk about?
+
+ Choices are {{ answer_choices[0:-1] | join(", ")}} or {{answer_choices[-1]}}.
+
+ {{text}}
+
+ |||
+
+ {{answer_choices[answer_choices.index(scenario)]}} {# Hack to make sure answer
+ is actually in answer_choices #}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: what_service_talk_about_choices
+ reference: ''
+ 586b9200-ab35-4559-8b0d-20cb5d874fe2: !Template
+ answer_choices: alarm query ||| alarm remove ||| alarm set ||| audio volume down
+ ||| audio volume mute ||| audio volume other ||| audio volume up ||| calendar
+ query ||| calendar remove ||| calendar set ||| cooking query ||| cooking recipe
+ ||| date time convert ||| date time query ||| email add contact ||| email query
+ ||| email query contact ||| email send email ||| general affirm ||| general
+ command stop ||| general confirm ||| general don't care ||| general explain
+ ||| general greet ||| general joke ||| general negate ||| general praise |||
+ general quirky ||| general repeat ||| internet of things cleaning ||| internet
+ of things coffee ||| internet of things hue light change ||| internet of things
+ hue light dim ||| internet of things hue light off ||| internet of things hue
+ light on ||| internet of things hue light up ||| internet of things wemo off
+ ||| internet of things wemo on ||| lists create or add ||| lists query ||| lists
+ remove ||| music dislikeness ||| music likeness ||| music query ||| music settings
+ ||| news query ||| play audiobook ||| play game ||| play music ||| play podcasts
+ ||| play radio ||| question answer currency ||| question answer definition |||
+ question answer factoid ||| question answer maths ||| question answer stock
+ ||| recommendation events ||| recommendation locations ||| recommendation movies
+ ||| social post ||| social query ||| takeaway order ||| takeaway query ||| transport
+ query ||| transport taxi ||| transport ticket ||| transport traffic ||| weather
+ query
+ id: 586b9200-ab35-4559-8b0d-20cb5d874fe2
+ jinja: 'Classify this request based on the user''s intent:
+
+ {{text}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_intent
+ reference: ''
+ bba54daf-a2ed-4bcb-ad63-8ad900fae3a7: !Template
+ answer_choices: null
+ id: bba54daf-a2ed-4bcb-ad63-8ad900fae3a7
+ jinja: "What would be a typical query for activating the following service ? \n\
+ {{scenario}}\n|||\n{{text}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: false
+ name: intent_to_query
+ reference: ''
+ c1a4b9b6-09fa-41ca-820c-45c088a429b5: !Template
+ answer_choices: alarm query ||| alarm remove ||| alarm set ||| audio volume down
+ ||| audio volume mute ||| audio volume other ||| audio volume up ||| calendar
+ query ||| calendar remove ||| calendar set ||| cooking query ||| cooking recipe
+ ||| date time convert ||| date time query ||| email add contact ||| email query
+ ||| email query contact ||| email send email ||| general affirm ||| general
+ command stop ||| general confirm ||| general don't care ||| general explain
+ ||| general greet ||| general joke ||| general negate ||| general praise |||
+ general quirky ||| general repeat ||| internet of things cleaning ||| internet
+ of things coffee ||| internet of things hue light change ||| internet of things
+ hue light dim ||| internet of things hue light off ||| internet of things hue
+ light on ||| internet of things hue light up ||| internet of things wemo off
+ ||| internet of things wemo on ||| lists create or add ||| lists query ||| lists
+ remove ||| music dislikeness ||| music likeness ||| music query ||| music settings
+ ||| news query ||| play audiobook ||| play game ||| play music ||| play podcasts
+ ||| play radio ||| question answer currency ||| question answer definition |||
+ question answer factoid ||| question answer maths ||| question answer stock
+ ||| recommendation events ||| recommendation locations ||| recommendation movies
+ ||| social post ||| social query ||| takeaway order ||| takeaway query ||| transport
+ query ||| transport taxi ||| transport ticket ||| transport traffic ||| weather
+ query
+ id: c1a4b9b6-09fa-41ca-820c-45c088a429b5
+ jinja: "Classify this request into one of the following intents: \n{{answer_choices[0:-1]\
+ \ | join(', ') }} or {{answer_choices[-1]}}.\n{{text}}\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_intent_choices
+ reference: ''
+ cbc45b59-04c7-490e-93d4-e923f6d3c8e1: !Template
+ answer_choices: alarm query ||| alarm remove ||| alarm set ||| audio volume down
+ ||| audio volume mute ||| audio volume other ||| audio volume up ||| calendar
+ query ||| calendar remove ||| calendar set ||| cooking query ||| cooking recipe
+ ||| date time convert ||| date time query ||| email add contact ||| email query
+ ||| email query contact ||| email send email ||| general affirm ||| general
+ command stop ||| general confirm ||| general don't care ||| general explain
+ ||| general greet ||| general joke ||| general negate ||| general praise |||
+ general quirky ||| general repeat ||| internet of things cleaning ||| internet
+ of things coffee ||| internet of things hue light change ||| internet of things
+ hue light dim ||| internet of things hue light off ||| internet of things hue
+ light on ||| internet of things hue light up ||| internet of things wemo off
+ ||| internet of things wemo on ||| lists create or add ||| lists query ||| lists
+ remove ||| music dislikeness ||| music likeness ||| music query ||| music settings
+ ||| news query ||| play audiobook ||| play game ||| play music ||| play podcasts
+ ||| play radio ||| question answer currency ||| question answer definition |||
+ question answer factoid ||| question answer maths ||| question answer stock
+ ||| recommendation events ||| recommendation locations ||| recommendation movies
+ ||| social post ||| social query ||| takeaway order ||| takeaway query ||| transport
+ query ||| transport taxi ||| transport ticket ||| transport traffic ||| weather
+ query
+ id: cbc45b59-04c7-490e-93d4-e923f6d3c8e1
+ jinja: 'What is the user who made this request trying to do?
+
+ Request: {{text}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what_does_user_want
+ reference: ''
+ ce869e8f-7975-4d5c-bd91-a23624c17285: !Template
+ answer_choices: alarm ||| audio ||| calendar ||| cooking ||| datetime ||| email
+ ||| general ||| iot ||| lists ||| music ||| news ||| play ||| qa ||| recommendation
+ ||| social ||| takeaway ||| transport ||| weather
+ id: ce869e8f-7975-4d5c-bd91-a23624c17285
+ jinja: 'What service does the following piece of text talk about?
+
+ {{text}}
+
+ |||
+
+ {{answer_choices[answer_choices.index(scenario)]}} {# Hack to make sure answer
+ is actually in answer_choices #}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: what_service_talk_about
+ reference: ''
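Note: the `{{answer_choices[answer_choices.index(scenario)]}}` construction in the two `what_service_*` templates looks like a no-op, but `.index()` raises if `scenario` ever falls outside the declared choices, so a dataset/template mismatch fails loudly instead of silently emitting an unlisted target. Illustrative values:

```python
answer_choices = ["alarm", "audio", "calendar"]

scenario = "audio"
answer_choices[answer_choices.index(scenario)]  # "audio" -- identical to scenario

scenario = "podcasts"              # not a declared choice
answer_choices.index(scenario)     # raises ValueError, surfacing the mismatch
```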
diff --git a/promptsource/templates/nq_open/templates.yaml b/promptsource/templates/nq_open/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b82c4644fcad05801bd61b4c3eaae3adfb27a5b
--- /dev/null
+++ b/promptsource/templates/nq_open/templates.yaml
@@ -0,0 +1,121 @@
+dataset: nq_open
+templates:
+ 05b8ac63-5aa1-4ce7-8257-ade0fca889ae: !Template
+ answer_choices: null
+ id: 05b8ac63-5aa1-4ce7-8257-ade0fca889ae
+ jinja: 'The goal is to predict an English answer string for an input English question.
+ All questions can be answered using the contents of English Wikipedia.
+
+ Question: {{question}}
+
+ Answer:
+
+ |||
+
+ {{answer|choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: formal_description
+ reference: Copied from the dataset description.
+ 0b23fe26-c659-4a84-834f-f19622d11412: !Template
+ answer_choices: null
+ id: 0b23fe26-c659-4a84-834f-f19622d11412
+  jinja: 'Question: {{question}}
+
+    Answer:
+
+ |||
+
+
+ {{answer | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question_answer
+ reference: Plain Question
+ 35113036-4cb4-4db5-a92e-d208e1b48b7c: !Template
+ answer_choices: null
+ id: 35113036-4cb4-4db5-a92e-d208e1b48b7c
+ jinja: 'Guess a question that has the answer "{{answer|choice}}"
+
+ |||
+
+ {{question}}?'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: guess_question
+    reference: Guess a question. It shows whether the model can recover the entity
+      in the question.
+ 5762f138-a3bf-4614-8dff-dcae7b5bd4a4: !Template
+ answer_choices: null
+ id: 5762f138-a3bf-4614-8dff-dcae7b5bd4a4
+ jinja: 'I''ve always wondered: {{question}}
+
+ |||
+
+ {{answer|choice}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: first_person_context
+ reference: Ask a question in first person
+ cd157288-0211-46a8-a00c-ba0e07980e37: !Template
+ answer_choices: null
+ id: cd157288-0211-46a8-a00c-ba0e07980e37
+ jinja: 'Search query: {{question}}
+
+ Response:
+
+ |||
+
+ {{answer|choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: search query
+ reference: ''
+ cf937d15-48e0-4ae3-a4eb-9098cccc58ce: !Template
+ answer_choices: null
+ id: cf937d15-48e0-4ae3-a4eb-9098cccc58ce
+ jinja: 'Answer the following question.
+
+ {{question}}
+
+ |||
+
+ {{answer|choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question_with_instruction
+ reference: Instruction before question.
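Note: nq_open's `answer` field is a list of acceptable answers, and the `|choice` filter used throughout this file is promptsource's custom Jinja filter that samples one of them per render. A minimal sketch of how such a filter can be registered (the exact promptsource internals may differ):

```python
import random
from jinja2 import Environment

env = Environment()
env.filters["choice"] = lambda seq: random.choice(seq)

template = env.from_string("Question: {{question}}\nAnswer:\n|||\n{{answer|choice}}")
rendered = template.render(question="who won the 2018 world cup",
                           answer=["France", "the French national team"])
input_text, target = (part.strip() for part in rendered.split("|||"))
```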
diff --git a/promptsource/templates/numer_sense/templates.yaml b/promptsource/templates/numer_sense/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e68ad9930c54c069c0960a8fd193dbbbf8d1aa3
--- /dev/null
+++ b/promptsource/templates/numer_sense/templates.yaml
@@ -0,0 +1,112 @@
+dataset: numer_sense
+templates:
+ 1f959d92-dca8-4647-9840-69391dfbd000: !Template
+ answer_choices: nine ||| three ||| four ||| zero ||| two ||| six ||| eight |||
+ one ||| five ||| ten ||| no ||| seven
+ id: 1f959d92-dca8-4647-9840-69391dfbd000
+ jinja: "Fill in the blank in the following sentence using world knowledge:\n\n\
+    {{sentence | replace(\"<mask>\", \"__________\")}}\n\nChoose from the following\
+ \ options:\n\n{{ ', '.join(answer_choices) }}\n\n||| \n\n{{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill_in_the_blank_with_choices_after
+ reference: ''
+ 4e9da2b8-2502-44a7-a7da-ae62f2d554c9: !Template
+ answer_choices: null
+ id: 4e9da2b8-2502-44a7-a7da-ae62f2d554c9
+  jinja: 'The following sentence needs to be filled in with a word that is either
+    a number word or "no". Using common sense and world knowledge, fill in the blank.
+
+
+    {{sentence | replace("<mask>", "__________")}}
+
+
+ Which is it?
+
+
+ |||
+
+
+ {{target}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill_in_the_blank_with_instruction
+ reference: ''
+ 5d8e8d21-8059-4373-bbf2-a25cbe1e6960: !Template
+ answer_choices: nine ||| three ||| four ||| zero ||| two ||| six ||| eight |||
+ one ||| five ||| ten ||| no ||| seven
+ id: 5d8e8d21-8059-4373-bbf2-a25cbe1e6960
+ jinja: 'Using common sense reasoning of the world and only the following options,
+ how would you fill in the blank?:
+
+
+ {{ '', ''.join(answer_choices) }}
+
+
+    {{sentence | replace("<mask>", "__________")}}
+
+
+ |||
+
+
+ {{target}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill_in_the_blank_with_choices_before
+ reference: with all the given options
+ cacee36c-e2b7-458e-9d51-6fcfd83842b4: !Template
+ answer_choices: null
+ id: cacee36c-e2b7-458e-9d51-6fcfd83842b4
+ jinja: 'Fill in the blanks:
+
+
+    {{sentence | replace("<mask>", "__________")}}
+
+
+ The correct answer is:
+
+
+ |||
+
+
+ {{target}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill_in_the_blank_before_sentence
+    reference: Replaces the mask token with a blank to fill in.
+ fc76beb7-c258-412f-a623-42fc8d2331b6: !Template
+ answer_choices: nine ||| three ||| four ||| zero ||| two ||| six ||| eight |||
+ one ||| five ||| ten ||| no ||| seven
+ id: fc76beb7-c258-412f-a623-42fc8d2331b6
+ jinja: "{{sentence | replace(\"\", \"__________\")}}\n\nUsing only the following\
+ \ options, what answer would make the most sense in the blank above?\n\n{{ ',\
+ \ '.join(answer_choices) }}\n\n||| \n\n{{target}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill_in_the_blank_with_instruction_and_choices
+    reference: Simple missing-word prompt.
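Note: all five numer_sense templates blank out the probe's `<mask>` token with the same `replace` filter. A quick check of what it produces (the example sentence is illustrative):

```python
from jinja2 import Environment

t = Environment().from_string('{{sentence | replace("<mask>", "__________")}}')
print(t.render(sentence="A bicycle has <mask> wheels."))
# A bicycle has __________ wheels.
```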
diff --git a/promptsource/templates/onestop_english/templates.yaml b/promptsource/templates/onestop_english/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a17c13da6cf7fc482c060e5731d56fec1676fcff
--- /dev/null
+++ b/promptsource/templates/onestop_english/templates.yaml
@@ -0,0 +1,141 @@
+dataset: onestop_english
+templates:
+ 2807f792-45a6-4139-8386-7cdc98651e53: !Template
+ answer_choices: Elementary ||| Intermediate ||| Advanced
+ id: 2807f792-45a6-4139-8386-7cdc98651e53
+  jinja: 'For English as a Second Language (ESL) learners, would the text passage
+ below be at {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} level for
+ reading and simplifying?
+
+
+ "{{text}}"
+
+
+ |||
+
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: esl_context
+ reference: ''
+ 553f2bbd-269c-4c4f-bc12-3825f155844d: !Template
+ answer_choices: Elementary ||| Intermediate ||| Advanced
+ id: 553f2bbd-269c-4c4f-bc12-3825f155844d
+ jinja: 'Consider the following text passage: {{text}}
+
+
+ How would you rate the difficulty level of the passage above for automatic readability
+ assessment? {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} level?
+
+
+ |||
+
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: ara_context
+ reference: ''
+ b0b076b8-b72b-4b71-8dd0-0b20877e001c: !Template
+ answer_choices: Elementary ||| Intermediate ||| Advanced
+ id: b0b076b8-b72b-4b71-8dd0-0b20877e001c
+ jinja: 'Look at the first three sentences of a story shown here: {{text.strip().split(''\n'')[:3]|join("\n")}}
+
+
+
+ Based on these three sentences alone, can you predict whether the entire story
+ is on the {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} level?
+
+
+
+
+
+ |||
+
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: determine_reading_level_from_the_first_three_sentences
+ reference: ''
+ de75ccb8-c0ba-4510-abf8-649b42019cd5: !Template
+ answer_choices: Elementary ||| Intermediate ||| Advanced
+ id: de75ccb8-c0ba-4510-abf8-649b42019cd5
+  jinja: 'If the text passage below were to be presented to someone learning English
+    as their second language, how would they likely rate the difficulty level
+ of the text in terms of reading and simplifying? {{"Elementary"}}, {{"Intermediate"}}
+ or {{"Advanced"}} level?
+
+
+ "{{text}}"
+
+
+
+ |||
+
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: esl_variation
+ reference: ''
+ e1289be8-7e81-4a85-bfb3-225fd31749a7: !Template
+ answer_choices: Elementary ||| Intermediate ||| Advanced
+ id: e1289be8-7e81-4a85-bfb3-225fd31749a7
+ jinja: 'How would you assess the reading difficulty of the text passage below?
+ Choose from one of {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} levels.
+
+
+ "{{text}}"
+
+
+ |||
+
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: assess
+ reference: ''
+ fdc96a76-6415-437b-bf61-ef6d1d1b8645: !Template
+ answer_choices: Elementary ||| Intermediate ||| Advanced
+ id: fdc96a76-6415-437b-bf61-ef6d1d1b8645
+ jinja: "Consider the following text passage: \n\n\"{{text}}\"\n\nHow would you\
+ \ assess the difficulty level for the task of simplifying the text passage above?\
+ \ Would it be at the {{\"Elementary\"}}, {{\"Intermediate\"}} or {{\"Advanced\"\
+ }} level?\n\n|||\n\n{{ answer_choices [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: ats
+ reference: ''
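Note: `determine_reading_level_from_the_first_three_sentences` selects the first three newline-separated lines, not sentence-segmented text. The Jinja expression behaves like:

```python
text = "Line one.\nLine two.\nLine three.\nLine four."
print("\n".join(text.strip().split("\n")[:3]))
# Line one.
# Line two.
# Line three.
```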
diff --git a/promptsource/templates/openai_humaneval/templates.yaml b/promptsource/templates/openai_humaneval/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..08e9e6d0bb2a98d219f6d47893f398597a08fdc0
--- /dev/null
+++ b/promptsource/templates/openai_humaneval/templates.yaml
@@ -0,0 +1,43 @@
+dataset: openai_humaneval
+templates:
+ 4a108b1c-7514-488f-99ed-3ca5da70e103: !Template
+ answer_choices: null
+ id: 4a108b1c-7514-488f-99ed-3ca5da70e103
+ jinja: '{{ prompt }}
+
+ Given the following docstring, what is the function body?
+
+ |||
+
+ {{ canonical_solution }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: function body generation
+ reference: ''
+ 9c85c898-70fe-4a51-be37-5111be357762: !Template
+ answer_choices: null
+ id: 9c85c898-70fe-4a51-be37-5111be357762
+ jinja: "{% set ns = namespace(tests=[])%}\n{% set lines = test.split('\\n') %}\n\
+ {% set test_ = \"\" %}\n{% set args = \"\" %}\n{% set return_val = \"\" %}\n\
+ \n{% for line in lines %}\n {% if line.strip().startswith('assert') and \"\
+ ==\" in line.strip() %}\n {% set ns.tests = ns.tests + [line.split('assert')[1]]\
+ \ %}\n {% endif %}\n{% endfor %}\n{% if (ns.tests | length) > 0 %}\n {%\
+ \ set test_ = ns.tests | choice %}\n\n {% set return_val = test_.split(\"\
+ ==\")[1].split(\", \\\"\")[0].strip() %}\n {% set args = (test_.split('(')[1:]\
+ \ | join(\"\")).split(\"==\")[0].strip() %}\n {{ prompt }}\n {{ canonical_solution\
+ \ }}\n {{entry_point}}({{args}} =\n |||\n {{ return_val }}\n{% endif\
+ \ %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: function call return value generation
+ reference: ''
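Note: the `function call return value generation` template parses the HumanEval test suite inside Jinja: it collects `assert f(args) == expected` lines, samples one, and splits it into a call prefix and the expected value. A Python sketch of the same parsing:

```python
import random

def make_call_example(test: str, entry_point: str):
    """Equivalent of the Jinja logic in 'function call return value generation'."""
    tests = [line.split("assert")[1]
             for line in test.split("\n")
             if line.strip().startswith("assert") and "==" in line.strip()]
    if not tests:
        return None
    picked = random.choice(tests)
    expected = picked.split("==")[1].split(', "')[0].strip()  # drop assert messages
    args = "".join(picked.split("(")[1:]).split("==")[0].strip()
    return f"{entry_point}({args} =", expected

# test = 'def check(candidate):\n    assert candidate(2, 3) == 5\n'
# make_call_example(test, "add") -> ('add(2, 3) =', '5')
```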
diff --git a/promptsource/templates/openbookqa/additional/templates.yaml b/promptsource/templates/openbookqa/additional/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a6bbda464de51a4ff7223614932eb3e05f93865b
--- /dev/null
+++ b/promptsource/templates/openbookqa/additional/templates.yaml
@@ -0,0 +1,157 @@
+dataset: openbookqa
+subset: additional
+templates:
+ 39af6992-b4d0-4b37-8a28-55ac16d38944: !Template
+ answer_choices: null
+ id: 39af6992-b4d0-4b37-8a28-55ac16d38944
+ jinja: '{{question_stem}}
+
+ - {{ choices["text"] | join("\n- ") }}
+
+
+ Which is the correct answer?
+
+ |||
+
+ {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_correct_inverse
+ reference: Giving options before asking question
+ 6744fbdf-3bb6-4fd4-8dd5-64748fa7b44b: !Template
+ answer_choices: null
+ id: 6744fbdf-3bb6-4fd4-8dd5-64748fa7b44b
+ jinja: '{{question_stem}}
+
+
+ Choices:
+
+ - {{ choices["text"] | join("\n- ") }}
+
+ |||
+
+ {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choices
+ reference: ''
+ 7482300b-30c0-479f-9635-2bb6eec315fd: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 7482300b-30c0-479f-9635-2bb6eec315fd
+ jinja: '{{question_stem}}
+
+ {% for k in range(choices["text"] | length) %}
+
+ {{'' -> ''.join([["A", "B", "C", "D"][k], choices["text"][k]])}}
+
+ {% endfor %}
+
+    Is the right answer {{"A, B, C or D"}}?
+
+ |||
+
+ {{answerKey}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_using_id
+ reference: Using the index (A, B, C, D) for the answer
+ 87c7b3ed-d3fd-4ff1-bb45-293660998dde: !Template
+ answer_choices: null
+ id: 87c7b3ed-d3fd-4ff1-bb45-293660998dde
+ jinja: '{{question_stem}}
+
+
+ Choose an answer from this list:
+
+ - {{ choices["text"] | join("\n- ") }}
+
+ |||
+
+ {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_an_answer_with_options
+ reference: choose an answer from a list
+ 92575e39-b256-413b-9c59-c96479ffd1a3: !Template
+ answer_choices: null
+ id: 92575e39-b256-413b-9c59-c96479ffd1a3
+ jinja: '{{question_stem}}
+
+
+ Which is the correct answer?
+
+ - {{ choices["text"] | join("\n- ") }}
+
+ |||
+
+ {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_correct
+ reference: ''
+ 96b9fe26-8d82-444a-9489-7c00512d4a59: !Template
+ answer_choices: null
+ id: 96b9fe26-8d82-444a-9489-7c00512d4a59
+ jinja: '{{question_stem}}
+
+ - {{ choices["text"] | join("\n- ") }}
+
+ |||
+
+ {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: only_options
+ reference: Listing the options right after the question
+ ab352cd9-dd1e-4f9c-a1eb-e7aca7447e3a: !Template
+ answer_choices: null
+ id: ab352cd9-dd1e-4f9c-a1eb-e7aca7447e3a
+ jinja: '{{question_stem}}
+
+
+ Pick the right answer from the list:
+
+ - {{ choices["text"] | join("\n- ") }}
+
+ |||
+
+ {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_answer_with_options
+ reference: ''
diff --git a/promptsource/templates/openbookqa/main/templates.yaml b/promptsource/templates/openbookqa/main/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7b5a5cb17171b4586488ffaf5b9b2a18f5c0ea39
--- /dev/null
+++ b/promptsource/templates/openbookqa/main/templates.yaml
@@ -0,0 +1,157 @@
+dataset: openbookqa
+subset: main
+templates:
+ 0206de6a-22da-4558-9b75-40c558ba60be: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 0206de6a-22da-4558-9b75-40c558ba60be
+ jinja: '{{question_stem}}
+
+
+ Choose an answer from this list:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_an_answer_with_options
+ reference: choose an answer from a list
+ 0dfe6c27-9716-455d-92a8-63ada1eb949b: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 0dfe6c27-9716-455d-92a8-63ada1eb949b
+ jinja: '{{question_stem}}
+
+
+ Which is the correct answer?
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_correct
+ reference: ''
+ 90260bf9-caf1-4847-b0a7-c76bc015acbf: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 90260bf9-caf1-4847-b0a7-c76bc015acbf
+ jinja: '{{question_stem}}
+
+ {% for k in range(choices["text"] | length) %}
+
+ {{'' -> ''.join([["A", "B", "C", "D"][k], choices["text"][k]])}}
+
+ {% endfor %}
+
+    Is the right answer {{"A, B, C or D"}}?
+
+ |||
+
+ {{answerKey}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_using_id
+ reference: Using the index (A, B, C, D) for the answer
+ 96e5065b-2876-4e4f-a33a-bb94c3505bb6: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 96e5065b-2876-4e4f-a33a-bb94c3505bb6
+ jinja: '{{question_stem}}
+
+
+ Choices:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choices
+ reference: ''
+ a4453d77-4cdd-44e5-9901-358f48631944: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: a4453d77-4cdd-44e5-9901-358f48631944
+ jinja: '{{question_stem}}
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: only_options
+ reference: Listing the options right after the question
+ c4814b92-9887-4b08-a4e2-1c7ca44345f7: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: c4814b92-9887-4b08-a4e2-1c7ca44345f7
+ jinja: '{{question_stem}}
+
+ - {{ answer_choices | join("\n- ") }}
+
+
+ Which is the correct answer?
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_correct_inverse
+ reference: Giving options before asking question
+ e9ca981e-0bda-4332-a101-41d5947df8f3: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: e9ca981e-0bda-4332-a101-41d5947df8f3
+ jinja: '{{question_stem}}
+
+
+ Pick the right answer from the list:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_answer_with_options
+ reference: ''
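Note: unlike openbookqa/additional above, where the prompt indexes `choices["text"]` directly, the main subset declares `answer_choices` as a Jinja expression (`{{choices.text | join("|||")}}`) that is rendered per example and split on `|||` before the prompt itself is rendered. A simplified two-stage sketch with plain jinja2 (not promptsource's exact code):

```python
from jinja2 import Environment

env = Environment()
example = {
    "question_stem": "Which is a renewable resource?",
    "choices": {"text": ["coal", "sunlight", "oil", "natural gas"]},
    "answerKey": "B",
}

# Stage 1: render the answer_choices template and split on the delimiter.
answer_choices = env.from_string('{{choices.text | join("|||")}}') \
                    .render(**example).split("|||")

# Stage 2: render the prompt with answer_choices exposed as a variable.
prompt = env.from_string(
    '{{question_stem}}\n- {{ answer_choices | join("\\n- ") }}\n|||\n'
    '{{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
)
print(prompt.render(answer_choices=answer_choices, **example))  # target: "sunlight"
```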
diff --git a/promptsource/templates/paws-x/en/templates.yaml b/promptsource/templates/paws-x/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..701870b9e7a00cac2dfe2af01bd96b01fa3e6ec3
--- /dev/null
+++ b/promptsource/templates/paws-x/en/templates.yaml
@@ -0,0 +1,171 @@
+dataset: paws-x
+subset: en
+templates:
+ 0be7cecd-b427-4ec9-9b0e-666d6dae00dd: !Template
+ answer_choices: No ||| Yes
+ id: 0be7cecd-b427-4ec9-9b0e-666d6dae00dd
+ jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+ Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: task_description-no-label
+ reference: Generalized prompt format, task_description-input.
+ 472fe5eb-b499-4952-a930-f72f4ca9eddd: !Template
+ answer_choices: No ||| Yes
+ id: 472fe5eb-b499-4952-a930-f72f4ca9eddd
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning
+ reference: Natural question
+ 4c8d4e4c-eae4-45f6-bdf0-d132ae198ddd: !Template
+ answer_choices: No ||| Yes
+ id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198ddd
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ ||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question-no-label
+ reference: Generalized prompt format, context-question without any label
+ 678400f8-1a5c-4a40-b5ef-abeaa41e20dd: !Template
+ answer_choices: No ||| Yes
+ id: 678400f8-1a5c-4a40-b5ef-abeaa41e20dd
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite-no-label
+ reference: Natural Question without label
+ 7c205a61-64d4-4673-bb8e-bfa77562eedd: !Template
+ answer_choices: No ||| Yes
+ id: 7c205a61-64d4-4673-bb8e-bfa77562eedd
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ Yes or No.\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question
+ reference: Generalized prompt format, context-question
+ 8c259e88-7646-4a50-a4ca-90393920f2dd: !Template
+ answer_choices: No ||| Yes
+ id: 8c259e88-7646-4a50-a4ca-90393920f2dd
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation
+ reference: Concatenation of sentence 1 and sentence 2
+ a3ee450f-0d02-47c3-aa0b-00c3f80539dd: !Template
+ answer_choices: null
+ id: a3ee450f-0d02-47c3-aa0b-00c3f80539dd
+ jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+ {{sentence2}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: paraphrase-task
+ reference: Create a generative paraphrase task
+ a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80dd: !Template
+ answer_choices: No ||| Yes
+ id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80dd
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation-no-label
+ reference: Concatenation of sentence 1 and sentence 2 without any label
+ d5239f5f-2014-47c9-a0c1-4896f76f82dd: !Template
+ answer_choices: No ||| Yes
+ id: d5239f5f-2014-47c9-a0c1-4896f76f82dd
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning-no-label
+ reference: Natural question without label
+ d9911dad-75fe-4506-9843-3a46ba5e49dd: !Template
+ answer_choices: False ||| True
+ id: d9911dad-75fe-4506-9843-3a46ba5e49dd
+ jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3
+ reference: ANLI prompt format from Table G7 in the GPT3 paper
+ dd52359b-dc56-4241-8179-c98c636f03dd: !Template
+ answer_choices: No ||| Yes
+ id: dd52359b-dc56-4241-8179-c98c636f03dd
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite
+ reference: Natural Question
+ f0866713-c59a-4c5d-a307-95e80a935fdd: !Template
+ answer_choices: No ||| Yes
+ id: f0866713-c59a-4c5d-a307-95e80a935fdd
+ jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3-no-label
+ reference: ANLI prompt format from Table G7 in the GPT3 paper. Additionally added
+ task information without any label.
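Note: the `paraphrase-task` template (repeated in each paws subset that follows) wraps its whole body in `{% if label == 1 %}`, so non-paraphrase pairs render to an empty string and are meant to be skipped by the consumer. A sketch of that convention (hypothetical helper, not promptsource's code):

```python
def apply_paraphrase_template(example):
    """Only paraphrase pairs (label == 1) yield a training instance."""
    if example["label"] != 1:
        return None  # the template renders empty; skip this example
    return f"Paraphrase the sentence: {example['sentence1']}", example["sentence2"]
```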
diff --git a/promptsource/templates/paws/labeled_final/templates.yaml b/promptsource/templates/paws/labeled_final/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..86a0e7a8cdab34518f7851f6f5e277aee4c6960e
--- /dev/null
+++ b/promptsource/templates/paws/labeled_final/templates.yaml
@@ -0,0 +1,171 @@
+dataset: paws
+subset: labeled_final
+templates:
+ 0be7cecd-b427-4ec9-9b0e-666d6dae0063: !Template
+ answer_choices: No ||| Yes
+ id: 0be7cecd-b427-4ec9-9b0e-666d6dae0063
+ jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+ Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: task_description-no-label
+ reference: Generalized prompt format, task_description-input.
+ 472fe5eb-b499-4952-a930-f72f4ca9ed43: !Template
+ answer_choices: No ||| Yes
+ id: 472fe5eb-b499-4952-a930-f72f4ca9ed43
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning
+ reference: Natural question
+ 4c8d4e4c-eae4-45f6-bdf0-d132ae198d09: !Template
+ answer_choices: No ||| Yes
+ id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198d09
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ ||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question-no-label
+ reference: Generalized prompt format, context-question without any label
+ 678400f8-1a5c-4a40-b5ef-abeaa41e20ec: !Template
+ answer_choices: No ||| Yes
+ id: 678400f8-1a5c-4a40-b5ef-abeaa41e20ec
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite-no-label
+ reference: Natural Question without label
+ 7c205a61-64d4-4673-bb8e-bfa77562eede: !Template
+ answer_choices: No ||| Yes
+ id: 7c205a61-64d4-4673-bb8e-bfa77562eede
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ Yes or No.\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question
+ reference: Generalized prompt format, context-question
+ 8c259e88-7646-4a50-a4ca-90393920f281: !Template
+ answer_choices: No ||| Yes
+ id: 8c259e88-7646-4a50-a4ca-90393920f281
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation
+ reference: Concatenation of sentence 1 and sentence 2
+ a3ee450f-0d02-47c3-aa0b-00c3f80539e9: !Template
+ answer_choices: null
+ id: a3ee450f-0d02-47c3-aa0b-00c3f80539e9
+ jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+ {{sentence2}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: paraphrase-task
+ reference: Create a generative paraphrase task
+ a6d9ec4e-acd4-46cd-9eeb-ae32e0ab8076: !Template
+ answer_choices: No ||| Yes
+ id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab8076
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation-no-label
+ reference: Concatenation of sentence 1 and sentence 2 without any label
+ d5239f5f-2014-47c9-a0c1-4896f76f82a4: !Template
+ answer_choices: No ||| Yes
+ id: d5239f5f-2014-47c9-a0c1-4896f76f82a4
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning-no-label
+ reference: Natural question without label
+ d9911dad-75fe-4506-9843-3a46ba5e49be: !Template
+ answer_choices: False ||| True
+ id: d9911dad-75fe-4506-9843-3a46ba5e49be
+ jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3
+ reference: ANLI prompt format from Table G7 in the GPT3 paper
+ dd52359b-dc56-4241-8179-c98c636f0335: !Template
+ answer_choices: No ||| Yes
+ id: dd52359b-dc56-4241-8179-c98c636f0335
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite
+ reference: Natural Question
+ f0866713-c59a-4c5d-a307-95e80a935f99: !Template
+ answer_choices: No ||| Yes
+ id: f0866713-c59a-4c5d-a307-95e80a935f99
+ jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3-no-label
+ reference: ANLI prompt format from Table G7 in the GPT3 paper. Additionally added
+ task information without any label.
diff --git a/promptsource/templates/paws/labeled_swap/templates.yaml b/promptsource/templates/paws/labeled_swap/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..802ee2591fcfd2eb579dfa9ba90a4cc19bee5b48
--- /dev/null
+++ b/promptsource/templates/paws/labeled_swap/templates.yaml
@@ -0,0 +1,171 @@
+dataset: paws
+subset: labeled_swap
+templates:
+ 0be7cecd-b427-4ec9-9b0e-666d6dae00aa: !Template
+ answer_choices: No ||| Yes
+ id: 0be7cecd-b427-4ec9-9b0e-666d6dae00aa
+ jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+ Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: task_description-no-label
+ reference: Generalized prompt format, task_description-input.
+ 472fe5eb-b499-4952-a930-f72f4ca9edaa: !Template
+ answer_choices: No ||| Yes
+ id: 472fe5eb-b499-4952-a930-f72f4ca9edaa
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning
+ reference: Natural question
+ 4c8d4e4c-eae4-45f6-bdf0-d132ae198daa: !Template
+ answer_choices: No ||| Yes
+ id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198daa
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ ||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question-no-label
+ reference: Generalized prompt format, context-question without any label
+ 678400f8-1a5c-4a40-b5ef-abeaa41e20aa: !Template
+ answer_choices: No ||| Yes
+ id: 678400f8-1a5c-4a40-b5ef-abeaa41e20aa
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite-no-label
+ reference: Natural Question without label
+ 7c205a61-64d4-4673-bb8e-bfa77562eeaa: !Template
+ answer_choices: No ||| Yes
+ id: 7c205a61-64d4-4673-bb8e-bfa77562eeaa
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ Yes or No.\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question
+ reference: Generalized prompt format, context-question
+ 8c259e88-7646-4a50-a4ca-90393920f2aa: !Template
+ answer_choices: No ||| Yes
+ id: 8c259e88-7646-4a50-a4ca-90393920f2aa
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation
+ reference: Concatenation of sentence 1 and sentence 2
+ a3ee450f-0d02-47c3-aa0b-00c3f80539aa: !Template
+ answer_choices: null
+ id: a3ee450f-0d02-47c3-aa0b-00c3f80539aa
+ jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+ {{sentence2}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: paraphrase-task
+ reference: Create a generative paraphrase task
+ a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80aa: !Template
+ answer_choices: No ||| Yes
+ id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80aa
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation-no-label
+ reference: Concatenation of sentence 1 and sentence 2 without any label
+ d5239f5f-2014-47c9-a0c1-4896f76f82aa: !Template
+ answer_choices: No ||| Yes
+ id: d5239f5f-2014-47c9-a0c1-4896f76f82aa
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning-no-label
+ reference: Natural question without label
+ d9911dad-75fe-4506-9843-3a46ba5e49aa: !Template
+ answer_choices: False ||| True
+ id: d9911dad-75fe-4506-9843-3a46ba5e49aa
+ jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3
+ reference: ANLI prompt format from Table G7 in the GPT3 paper
+ dd52359b-dc56-4241-8179-c98c636f03aa: !Template
+ answer_choices: No ||| Yes
+ id: dd52359b-dc56-4241-8179-c98c636f03aa
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite
+ reference: Natural Question
+ f0866713-c59a-4c5d-a307-95e80a935faa: !Template
+ answer_choices: No ||| Yes
+ id: f0866713-c59a-4c5d-a307-95e80a935faa
+ jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3-no-label
+ reference: ANLI prompt format from Table G7 in the GPT3 paper, with added task
+ information and no label.
diff --git a/promptsource/templates/paws/unlabeled_final/templates.yaml b/promptsource/templates/paws/unlabeled_final/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1110b23592b4b89f829a1885420e471292faa56
--- /dev/null
+++ b/promptsource/templates/paws/unlabeled_final/templates.yaml
@@ -0,0 +1,171 @@
+dataset: paws
+subset: unlabeled_final
+templates:
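+ # Mirrors the labeled_final prompt set; only the template UUIDs differ.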
+ 0be7cecd-b427-4ec9-9b0e-666d6dae00bb: !Template
+ answer_choices: No ||| Yes
+ id: 0be7cecd-b427-4ec9-9b0e-666d6dae00bb
+ jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+ Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: task_description-no-label
+ reference: Generalized prompt format, task_description-input.
+ 472fe5eb-b499-4952-a930-f72f4ca9edbb: !Template
+ answer_choices: No ||| Yes
+ id: 472fe5eb-b499-4952-a930-f72f4ca9edbb
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning
+ reference: Natural question
+ 4c8d4e4c-eae4-45f6-bdf0-d132ae198dbb: !Template
+ answer_choices: No ||| Yes
+ id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198dbb
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ ||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question-no-label
+ reference: Generalized prompt format, context-question without any label
+ 678400f8-1a5c-4a40-b5ef-abeaa41e20bb: !Template
+ answer_choices: No ||| Yes
+ id: 678400f8-1a5c-4a40-b5ef-abeaa41e20bb
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite-no-label
+ reference: Natural Question without label
+ 7c205a61-64d4-4673-bb8e-bfa77562eebb: !Template
+ answer_choices: No ||| Yes
+ id: 7c205a61-64d4-4673-bb8e-bfa77562eebb
+ jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+ Yes or No.\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context-question
+ reference: Generalized prompt format, context-question
+ 8c259e88-7646-4a50-a4ca-90393920f2bb: !Template
+ answer_choices: No ||| Yes
+ id: 8c259e88-7646-4a50-a4ca-90393920f2bb
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation
+ reference: Concatenation of sentence 1 and sentence 2
+ a3ee450f-0d02-47c3-aa0b-00c3f80539bb: !Template
+ answer_choices: null
+ id: a3ee450f-0d02-47c3-aa0b-00c3f80539bb
+ jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+ {{sentence2}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: paraphrase-task
+ reference: Create a generative paraphrase task
+ a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80bb: !Template
+ answer_choices: No ||| Yes
+ id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80bb
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+ \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Concatenation-no-label
+ reference: Concatenation of sentence 1 and sentence 2 without any label
+ d5239f5f-2014-47c9-a0c1-4896f76f82bb: !Template
+ answer_choices: No ||| Yes
+ id: d5239f5f-2014-47c9-a0c1-4896f76f82bb
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+ \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Meaning-no-label
+ reference: Natural question without label
+ d9911dad-75fe-4506-9843-3a46ba5e49bb: !Template
+ answer_choices: False ||| True
+ id: d9911dad-75fe-4506-9843-3a46ba5e49bb
+ jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3
+ reference: ANLI prompt format from Table G7 in the GPT3 paper
+ dd52359b-dc56-4241-8179-c98c636f03bb: !Template
+ answer_choices: No ||| Yes
+ id: dd52359b-dc56-4241-8179-c98c636f03bb
+ jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+ \ rewrite Sentence 1 to Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Rewrite
+ reference: Natural Question
+ f0866713-c59a-4c5d-a307-95e80a935fbb: !Template
+ answer_choices: No ||| Yes
+ id: f0866713-c59a-4c5d-a307-95e80a935fbb
+ jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: PAWS-ANLI GPT3-no-label
+ reference: ANLI prompt format from Table G7 in the GPT3 paper, with added task
+ information and no label.
diff --git a/promptsource/templates/piqa/templates.yaml b/promptsource/templates/piqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b9a607643d1d2743938938faa156b540ad482cd9
--- /dev/null
+++ b/promptsource/templates/piqa/templates.yaml
@@ -0,0 +1,265 @@
+dataset: piqa
+templates:
+ 16e97a16-c958-4956-bfba-279f88dafd5b: !Template
+ answer_choices: '{{sol1}} ||| {{sol2}}'
+ id: 16e97a16-c958-4956-bfba-279f88dafd5b
+ jinja: 'Goal: {{goal}}
+
+
+ Which is the correct ending?
+
+ - {{sol1}}
+
+ - {{sol2}}
+
+
+ Answer:
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what_is_the_correct_ending
+ reference: ''
+ 3f336295-c1f7-410a-8fc6-d2ed79487aa4: !Template
+ answer_choices: '{{sol1}} ||| {{sol2}}'
+ id: 3f336295-c1f7-410a-8fc6-d2ed79487aa4
+ jinja: '{{"Solution 1"}}: {{sol1}}
+
+ {{"Solution 2"}}: {{sol2}}
+
+
+ Goal: {{goal}}
+
+
+ Given the goal, what is the correct solution?
+
+
+ Answer by copying the correct solution
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_correct_choice_with_choice_given_before_goal
+ reference: ''
+ 44778818-7b73-4262-a294-c00fc32b6c2c: !Template
+ answer_choices: 1 ||| 2
+ id: 44778818-7b73-4262-a294-c00fc32b6c2c
+ jinja: 'Sentence: {{goal}}
+
+
+ Choice {{answer_choices[0]}}: {{sol1}}
+
+
+ Choice {{answer_choices[1]}}: {{sol2}}
+
+
+ What is the index of the correct choice for the ending of the sentence?
+
+
+ Answer:
+
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_correct_choice_index
+ reference: ''
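+ # In piqa, label indexes the correct solution: [sol1, sol2][label] is the right
+ # answer and [sol1, sol2][1 - label] the wrong one, as used in the next template.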
+ 5f4b4645-9438-4375-9062-083130e6d04e: !Template
+ answer_choices: null
+ id: 5f4b4645-9438-4375-9062-083130e6d04e
+ jinja: "Given a goal and a wrong solution, rewrite it to give a correct solution.\n\
+ Goal: {{goal}} \nSolution: {{[sol1, sol2][1 - label]}}\nCorrected solution:\n\
+ |||\n{{[sol1, sol2][label]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Correct the solution
+ reference: ''
+ 94c39589-7bfb-4c09-9337-672369459545: !Template
+ answer_choices: '{{sol1}} ||| {{sol2}}'
+ id: 94c39589-7bfb-4c09-9337-672369459545
+ jinja: 'Finish the following sentence with the best choice: {{goal}}
+
+
+ Choices:
+
+ - {{sol1}}
+
+ - {{sol2}}
+
+
+ Answer:
+
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: finish_sentence_with_correct_choice
+ reference: ''
+ 99565244-4eaf-4004-a28b-4362ba5bcac3: !Template
+ answer_choices: No ||| Yes
+ id: 99565244-4eaf-4004-a28b-4362ba5bcac3
+ jinja: '{{goal}} {{sol2}}
+
+ Does this phrase make sense?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Does this solution make sense? sol2
+ reference: ''
+ adfef248-f856-44fa-affd-e3223718854e: !Template
+ answer_choices: Solution 1 ||| Solution 2
+ id: adfef248-f856-44fa-affd-e3223718854e
+ jinja: 'Given a goal and 2 solutions, choose the most appropriate solution.
+
+ Goal: {{goal}}
+
+ - {{"Solution 1"}}: {{sol1}}
+
+ - {{"Solution 2"}}: {{sol2}}
+
+
+ Answer by returning either {{"Solution 1"}} or {{"Solution 2"}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose the most appropriate solution
+ reference: ''
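+ # solN[0].lower() + solN[1:] below lowercases only the first character so the
+ # solution reads as a continuation of the goal sentence.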
+ b5c69473-eedb-4c4f-a5fa-d4e266e43535: !Template
+ answer_choices: null
+ id: b5c69473-eedb-4c4f-a5fa-d4e266e43535
+ jinja: 'Given a sentence, correct it if it doesn''t make sense. If it makes sense,
+ just return it as the answer.
+
+ Input: {{goal}} {{sol2[0].lower() + sol2[1:]}}
+
+ Output:
+
+ |||
+
+ {{goal}} {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Correct the solution if false: from sol 2'
+ reference: ''
+ c8c45ef1-2ffc-43d7-8710-b98c2fc4f699: !Template
+ answer_choices: null
+ id: c8c45ef1-2ffc-43d7-8710-b98c2fc4f699
+ jinja: '{{goal}}
+
+ |||
+
+ {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: no prompt needed
+ reference: ''
+ f044def7-01c2-42de-b6ad-4e8c63ab2bf1: !Template
+ answer_choices: Yes ||| No
+ id: f044def7-01c2-42de-b6ad-4e8c63ab2bf1
+ jinja: 'Does this phrase make sense?
+
+ {{goal}} {{sol1[0].lower() + sol1[1:]}}
+
+ Answer with {{answer_choices[0]}} or {{answer_choices[1]}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Does this solution make sense? sol1
+ reference: ''
+ f42cd457-a14b-465a-a139-d7d2407a3bac: !Template
+ answer_choices: null
+ id: f42cd457-a14b-465a-a139-d7d2407a3bac
+ jinja: 'Sentence: {{goal}} {{sol1[0].lower() + sol1[1:]}}
+
+ If the sentence does not make sense, correct it so that it does make sense.
+ Otherwise, just copy it.
+
+ Answer:
+
+ |||
+
+ {{goal}} {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: 'Correct the solution if false: from sol 1'
+ reference: ''
diff --git a/promptsource/templates/poem_sentiment/templates.yaml b/promptsource/templates/poem_sentiment/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ae3da7cd7f3160233b62a4e3cdcb005cba471345
--- /dev/null
+++ b/promptsource/templates/poem_sentiment/templates.yaml
@@ -0,0 +1,119 @@
+dataset: poem_sentiment
+templates:
+ 2714baf0-5d19-4781-a60f-f44cd95935f7: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: 2714baf0-5d19-4781-a60f-f44cd95935f7
+ jinja: '{{verse_text}} Is the sentiment the poet expresses in the poem {{answer_choices[0]}},
+ {{answer_choices[1]}}, {{answer_choices[2]}} or {{answer_choices[3]}}? ||| {{
+ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: positive_or_negative_sentiment_variation_2
+ reference: ''
+ 574ab816-b0bc-4049-a5a5-dcf8f4280dc5: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: 574ab816-b0bc-4049-a5a5-dcf8f4280dc5
+ jinja: The following poem expresses what sentiment? {{verse_text}} ||| {{ answer_choices
+ [label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guess_sentiment_without_options_variation_2
+ reference: ''
+ 7801d04c-4f42-4411-a552-9614c8c3fd53: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: 7801d04c-4f42-4411-a552-9614c8c3fd53
+ jinja: '{{verse_text}} The sentiment expressed in the poem is ||| {{ answer_choices
+ [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guess_sentiment_without_options_variation_1
+ reference: ''
+ 9fa8eeb4-314b-4850-a28b-0f53bca006d8: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: 9fa8eeb4-314b-4850-a28b-0f53bca006d8
+ jinja: '{{verse_text}} What is the sentiment expressed in this poem? ||| {{ answer_choices
+ [label] }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: positive_or_negative_sentiment_variation_1
+ reference: ''
+ aecb3d13-ff68-4e60-a382-87191940bd5b: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: aecb3d13-ff68-4e60-a382-87191940bd5b
+ jinja: '{{verse_text}} The most appropriate word out of {{"negative"}}, {{"positive"}},
+ {{"neutral"}} and {{"mixed"}}, which express the poet''s sentiment is: ||| {{
+ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: most_appropriate_sentiment
+ reference: ''
+ ca15cecb-4ee6-4445-a0f4-6ef5cd519923: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: ca15cecb-4ee6-4445-a0f4-6ef5cd519923
+ jinja: "{{verse_text}} What sentiment does this poem express? \nOptions: {{answer_choices[0]}},\
+ \ {{answer_choices[1]}}, {{answer_choices[2]}}, {{answer_choices[3]}}.\nAnswer:\
+ \ ||| {{ answer_choices [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_answer_format
+ reference: ''
+ f87a7ba0-11f7-41f9-bee6-94d0ad6e597a: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: f87a7ba0-11f7-41f9-bee6-94d0ad6e597a
+ jinja: Does this poem express a {{"negative"}}, {{"positive"}}, {{"neutral"}}
+ or {{"mixed"}} sentiment? {{verse_text}} ||| {{ answer_choices [label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guess_sentiment_from_given_options
+ reference: ''
+ fdbebd3d-6517-4be1-8771-489e2de658ef: !Template
+ answer_choices: negative ||| positive ||| neutral ||| mixed
+ id: fdbebd3d-6517-4be1-8771-489e2de658ef
+ jinja: '{{verse_text}} What is the sentiment that the poet wants the readers to
+ feel through the verse mentioned above? ||| {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guess_sentiment_without_options
+ reference: ''
diff --git a/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml b/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..37f10efc37f973dd4c7724f2825e6142946e7959
--- /dev/null
+++ b/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml
@@ -0,0 +1,130 @@
+dataset: pubmed_qa
+subset: pqa_labeled
+templates:
+ 00584766-2415-4d10-ab76-bf86058faa07: !Template
+ answer_choices: null
+ id: 00584766-2415-4d10-ab76-bf86058faa07
+ jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\nAnd\
+ \ given keywords: {{context.meshes | join(\", \")}}.\n \nWhat is the question\
+ \ answered by the above research abstract? \n|||\n{{question}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Question Title with meshes
+ reference: ''
+ 00f58886-e04a-4efb-bf41-cfcbd00a5e7d: !Template
+ answer_choices: null
+ id: 00f58886-e04a-4efb-bf41-cfcbd00a5e7d
+ jinja: "Given a PubMed abstract: {{ context.contexts | join(\", \") }}\n\nAnswer\
+ \ this question: \"{{question}}\" \n||| \n{{long_answer}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: Question Answering (Long)
+ reference: Provide a long/verbose answer to the provided question
+ 0b630e04-02a8-46d6-b164-a41cd34042ff: !Template
+ answer_choices: null
+ id: 0b630e04-02a8-46d6-b164-a41cd34042ff
+ jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\n\nWhat\
+ \ is the question answered by the above research abstract? \n|||\n{{question}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Question Title
+ reference: Given abstract, generate title (which is in the form of a question)
+ 1e0a77f8-0eb4-40a1-814d-8a111df66e5e: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 1e0a77f8-0eb4-40a1-814d-8a111df66e5e
+ jinja: "Question: \"{{ question }}\" \n\nAnswer: \"{{ long_answer }}\"\n\nSummarize\
+ \ the above answer as YES, NO, or MAYBE. \n|||\n{{final_decision}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Long Answer to Final Decision
+ reference: Given a question, the full text of the relevant answer, summarize a
+ yes/no/maybe answer
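+ # The next template samples one abstract section at random and asks for its
+ # section header; context.labels holds the per-section headers.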
+ 21240f74-530a-47b7-a5d9-a6a13083b72e: !Template
+ answer_choices: '{{context.labels | join("|||")}}'
+ id: 21240f74-530a-47b7-a5d9-a6a13083b72e
+ jinja: "{% set n_sections = context.contexts | length %}\n{% set choice = range(0,\
+ \ n_sections) | random %}\n\n\"{{ context.contexts[choice] }}\"\n\nThe above\
+ \ text would most likely be found in which section of a biomedical paper: {{\
+ \ context.labels[:-1] | join(\", \") }} or {{ context.labels[-1] }} ? \n|||\n\
+ {{ context.labels[choice] }}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Context Section Type
+ reference: Assign text in an abstract to specific paper section headers
+ 45cb344c-bb36-492a-ace0-7cfc897e127a: !Template
+ answer_choices: null
+ id: 45cb344c-bb36-492a-ace0-7cfc897e127a
+ jinja: "Given a PubMed abstract:{{ context.contexts | join(\", \") }}\n\nWhat\
+ \ are the MeSH (Medical Subject Headings) terms for this? \n|||\n{{ context.meshes\
+ \ | join(\", \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: Medical Subject Headings
+ reference: Predict the set of MeSH terms for a given PubMed abstract
+ 48ee58bb-6a4a-4667-9d9c-69618408c6ce: !Template
+ answer_choices: null
+ id: 48ee58bb-6a4a-4667-9d9c-69618408c6ce
+ jinja: "Given a research abstract: {{ context.contexts | join(\", \") }}\nAnd\
+ \ given the long answer to a question: {{long_answer}}.\n \nWhat is the question\
+ \ asked by this research paper? \n|||\n{{question}} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Question Title with long answer
+ reference: ''
+ 91d481e5-fac6-4532-b013-5ac1235b6e1a: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 91d481e5-fac6-4532-b013-5ac1235b6e1a
+ jinja: "Given a PubMed abstract: {{ context.contexts | join(\", \") }}\n\nAnswer\
+ \ the question: \"{{question}}\" as YES, NO, MAYBE.\n||| \n{{final_decision}}\n\
+ \n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Question Answering (Short)
+ reference: Answer the following question using the provided abstract text
diff --git a/promptsource/templates/qa_srl/templates.yaml b/promptsource/templates/qa_srl/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73bedda45d42bdee6413997aaaa5dc8c59dc72fd
--- /dev/null
+++ b/promptsource/templates/qa_srl/templates.yaml
@@ -0,0 +1,138 @@
+dataset: qa_srl
+templates:
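+ # The question field is a list of slots padded with "_"; join(" ") followed by
+ # replace("_ ", "") renders it as a natural-language question.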
+ 3cb09519-032e-4e51-bb97-47aa18ab4367: !Template
+ answer_choices: null
+ id: 3cb09519-032e-4e51-bb97-47aa18ab4367
+ jinja: 'Generate a plausible question that has the following answers based on
+ the context:
+
+
+ Context: {{sentence}}
+
+
+ Answers: {{answers | join(", ")}} |||
+
+ {{question | join(" ") | replace("_ ", "")}} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: ''
+ b614f251-eb01-442f-8743-57d18314a0f8: !Template
+ answer_choices: null
+ id: b614f251-eb01-442f-8743-57d18314a0f8
+ jinja: 'The English teacher deconstructed an example sentence that contained the
+ verb "{{predicate}}": {{sentence}}
+
+
+ {{question | join(" ") | replace("_ ", "")}}|||
+
+ {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: deconstruct_sentence
+ reference: ''
+ bec2ce78-fb31-4529-8b13-240fa6c8bc88: !Template
+ answer_choices: null
+ id: bec2ce78-fb31-4529-8b13-240fa6c8bc88
+ jinja: 'Identify the predicate (the part of a sentence or clause containing a
+ verb and stating something about the subject) in this sentence:
+
+
+ {{sentence}} |||
+
+ {{predicate}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: identify_predicate
+ reference: ''
+ c0192db4-f672-4b36-94b2-b10ca7b6861a: !Template
+ answer_choices: null
+ id: c0192db4-f672-4b36-94b2-b10ca7b6861a
+ jinja: '{{sentence}}
+
+ {{question|join(" ")|replace("_ ", "")}} |||
+
+ {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: sentence_question_concatenation
+ reference: ''
+ dda098f9-74c4-4f9f-8052-20e692c72c92: !Template
+ answer_choices: null
+ id: dda098f9-74c4-4f9f-8052-20e692c72c92
+ jinja: 'Here''s a linguistic problem: you have to correctly identify the part
+ of the sentence that answers the following {{"W"}} question.
+
+ Sentence: {{sentence}}
+
+ Question: {{question | join(" ") | replace("_ ", "")}}|||
+
+ {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: linguistic_problem
+ reference: ''
+ e9ca90f5-f105-4515-b757-262dad590913: !Template
+ answer_choices: null
+ id: e9ca90f5-f105-4515-b757-262dad590913
+ jinja: 'Help me parse the structure of the following sentence constructed around
+ the verb "{{predicate}}": {{sentence}}
+
+
+ {{question | join(" ") | replace("_ ", "")}}|||
+
+ {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: parse_structure
+ reference: ''
+ ec16cb98-8c5b-4219-8b2d-acd8b6236c86: !Template
+ answer_choices: null
+ id: ec16cb98-8c5b-4219-8b2d-acd8b6236c86
+ jinja: '{{sentence}}
+
+
+ The previous sentence contains the verb "{{predicate}}". Answer this question
+ about it: {{question|join(" ")|replace("_ ", "")}}|||
+
+ {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: answer_question
+ reference: ''
diff --git a/promptsource/templates/qa_zre/templates.yaml b/promptsource/templates/qa_zre/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a2c2d8f20d3f61cc830b102aff13c7e64b3a664
--- /dev/null
+++ b/promptsource/templates/qa_zre/templates.yaml
@@ -0,0 +1,207 @@
+dataset: qa_zre
+templates:
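+ # "XXX" in each question is a placeholder for the subject entity; an empty
+ # answers list marks the example as unanswerable.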
+ 2d6b6ec6-4cba-4a07-a0d1-f6b7cb103281: !Template
+ answer_choices: null
+ id: 2d6b6ec6-4cba-4a07-a0d1-f6b7cb103281
+ jinja: 'The following question is asking about a specific relation. What is this
+ relation?
+
+ Question: {{question}} |||
+
+ {{relation}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: extract_relation
+ reference: https://www.aclweb.org/anthology/K17-1034.pdf
+ 5a970b88-53a0-4148-b45e-7ac410df263f: !Template
+ answer_choices: null
+ id: 5a970b88-53a0-4148-b45e-7ac410df263f
+ jinja: 'Based on the context below, please answer the question: "{{question.replace("XXX",subject)}}".
+ If the context is not sufficient to answer, please write "unanswerable" instead.
+
+ Context: {{context}}
+
+ |||
+
+ {% if answers|length > 0 %}
+
+ {{answers|choice}}
+
+ {% else %}
+
+ unanswerable
+
+ {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: based_on_context
+ reference: ''
+ 6368de04-070a-4f67-a8bf-fd6d2c07d401: !Template
+ answer_choices: null
+ id: 6368de04-070a-4f67-a8bf-fd6d2c07d401
+ jinja: 'What does "XXX" represent in the following context-question pair?
+
+
+ {{context}}
+
+ {{question}} |||
+
+ {{subject}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: subject
+ reference: ''
+ 8f76743d-6486-4ae1-8bc8-ae644e3c54aa: !Template
+ answer_choices: null
+ id: 8f76743d-6486-4ae1-8bc8-ae644e3c54aa
+ jinja: 'Extract the appropriate relation from the following question about {{subject}}
+
+
+ {{question|replace("XXX",subject)}} |||
+
+ {{relation}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: relation2
+ reference: ''
+ b2195890-a3c5-4e33-be4a-5e53af75e6dd: !Template
+ answer_choices: null
+ id: b2195890-a3c5-4e33-be4a-5e53af75e6dd
+ jinja: '
+
+ You will find below a context and a question. Please answer the question or
+ write "unanswerable" if the question cannot be answered using the context.
+
+ Context: {{context}}
+
+ Question: {{question.replace("XXX",subject)}} |||
+
+ {% if answers|length > 0 %}
+
+ {{answers|choice}}
+
+ {% else %}
+
+ unanswerable
+
+ {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: qa_including_unanswerable
+ reference: ''
+ b2195890-a3c5-4e33-be4a-5e53af75e7dd: !Template
+ answer_choices: null
+ id: b2195890-a3c5-4e33-be4a-5e53af75e7dd
+ jinja: ' Question: {{question.replace("XXX",subject)}}
+
+ Context: {{context}}
+
+ Please answer the question above using a passage present in the context. If
+ no passage is a good answer for the question, please write "unanswerable" instead.
+
+ |||
+
+ {% if answers|length > 0 %}
+
+ {{answers|choice}}
+
+ {% else %}
+
+ unanswerable
+
+ {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: using_a_passage
+ reference: ''
+ b2195890-a3c5-4e33-be4a-5e53af75e8dd: !Template
+ answer_choices: null
+ id: b2195890-a3c5-4e33-be4a-5e53af75e8dd
+ jinja: ' Question: {{question.replace("XXX",subject)}}
+
+ Context: {{context}}
+
+ Please copy the span in the context that best answers the question. If there
+ is no such span, please output "unanswerable" instead.
+
+ |||
+
+ {% if answers|length > 0 %}
+
+ {{answers|choice}}
+
+ {% else %}
+
+ unanswerable
+
+ {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: copy_the_span
+ reference: ''
+ b2195890-a3c5-4e33-be4a-5e53af75e9dd: !Template
+ answer_choices: null
+ id: b2195890-a3c5-4e33-be4a-5e53af75e9dd
+ jinja: ' Question: {{question.replace("XXX",subject)}}
+
+ Context: {{context}}
+
+ The context above may contain an answer to the question. If it does, please
+ copy the span that best answers it. If it does not, write that the question
+ is "unanswerable" given the context.
+
+ |||
+
+ {% if answers|length > 0 %}
+
+ {{answers|choice}}
+
+ {% else %}
+
+ unanswerable
+
+ {% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: may_contain
+ reference: ''
diff --git a/promptsource/templates/qasc/templates.yaml b/promptsource/templates/qasc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d05a2235e6b42407656169ec52247acca3abb26
--- /dev/null
+++ b/promptsource/templates/qasc/templates.yaml
@@ -0,0 +1,141 @@
+dataset: qasc
+templates:
+ 3e1e6ca0-b95e-4e68-bb6a-cd47c8429658: !Template
+ answer_choices: Yes ||| No
+ id: 3e1e6ca0-b95e-4e68-bb6a-cd47c8429658
+ jinja: "If I tell you that {{combinedfact[0]|capitalize}}{{ combinedfact[1:]|trim('.')\
+ \ }}, and ask you the question \"{{ question[0]|lower }}{{ question[1:] }}\"\
+ , is the correct answer \"{{ choices.text[0][0]|lower}}{{ choices.text[0][1:]|trim('.')\
+ \ }}\"? \n\n||| \n\n{% if answerKey == choices.label[0] %} Yes {% else %} No\
+ \ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is_correct_1
+ reference: ''
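+ # The for/if loops below emit the answer text whose label matches answerKey;
+ # loop.index is 1-based, hence the loop.index - 1 offset.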
+ 40ef67db-dff3-4e7b-b167-f7e54b400c74: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 40ef67db-dff3-4e7b-b167-f7e54b400c74
+ jinja: "{{ fact1[0]|capitalize }}{{ fact1[1:]|trim|trim('.') }}, and {{fact2[0]|lower\
+ \ }}{{ fact2[1:]|trim|trim('.') }}. Given these facts, {{ question[0]|lower\
+ \ }}{{question[1:]|trim('?') }} among the following options:\n- {{answer_choices\
+ \ | join(\"\\n - \") }}\n\n||| \n\n{% for choice in choices.label %} {% if choice\
+ \ == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif %}{% endfor %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_with_separated_facts_1
+ reference: Question Answering with separated facts.
+ 5d63c186-e047-49dd-b5fd-c4a574f6f0e2: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 5d63c186-e047-49dd-b5fd-c4a574f6f0e2
+ jinja: "Fact 1: {{ fact1[0]|capitalize }}{{ fact1[1:]|trim|trim('.') }}.\n\nFact\
+ \ 2: {{fact2[0]|capitalize }}{{ fact2[1:]|trim|trim('.') }}.\n\nGiven the two\
+ \ facts above, {{ question[0]|lower }}{{question[1:]|trim('?') }}?\n\n||| \n\
+ \n{% for choice in choices.label %} {% if choice == answerKey %}{{ answer_choices[loop.index\
+ \ - 1] }}{% endif %}{% endfor %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_with_separated_facts_3
+ reference: Question Answering with separated facts.
+ 604dd379-21f1-4d30-af2f-22f9a8a97ceb: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 604dd379-21f1-4d30-af2f-22f9a8a97ceb
+ jinja: "You are presented with the question \"{{ question }}\" and the following\
+ \ answer choices: \n- {{answer_choices | join(\"\\n - \") }}\n\nNow knowing\
+ \ that {{ fact1[0]|lower }}{{ fact1[1:]|trim|trim('.') }} and {{fact2[0]|lower\
+ \ }}{{ fact2[1:]|trim|trim('.') }}, choose the best answer.\n\n||| \n\n{% for\
+ \ choice in choices.label %} {% if choice == answerKey %}{{ answer_choices[loop.index\
+ \ - 1] }}{% endif %}{% endfor %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_with_separated_facts_4
+ reference: Question Answering with separated facts.
+ 67cb1b35-e5d4-490b-beb8-dc9c0be9c298: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 67cb1b35-e5d4-490b-beb8-dc9c0be9c298
+ jinja: "You are presented with the quiz \"{{ question }}\" \n\nBut you don't know\
+ \ the answer, so you turn to your teacher to ask for hints. He says that \"\
+ {{ fact1[0]|lower }}{{ fact1[1:]|trim|trim('.') }}\" and \"{{fact2[0]|lower\
+ \ }}{{ fact2[1:]|trim|trim('.') }}\". \n\nSo, what's the best answer to the\
+ \ question?\n\n||| \n\n{% for choice in choices.label %} {% if choice == answerKey\
+ \ %}{{ answer_choices[loop.index - 1] }}{% endif %}{% endfor %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_with_separated_facts_5
+ reference: Question Answering with separated facts.
+ b5b61423-8655-408d-a8e6-81a5eaaac2aa: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: b5b61423-8655-408d-a8e6-81a5eaaac2aa
+ jinja: "If {{ combinedfact[0]|lower }}{{ combinedfact[1:]|trim|trim('.') }}, then\
+ \ {{ question[0]|lower }}{{question[1:]|trim|trim('?') }}?\n\nAnswer choices:\n\
+ - {{answer_choices | join(\"\\n - \") }}\n||| \n\n{% for choice in choices.label\
+ \ %} {% if choice == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif\
+ \ %}{% endfor %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: qa_with_combined_facts_1
+ reference: ''
+ c251edaf-a063-40fe-bd49-038843fcdb23: !Template
+ answer_choices: Yes ||| No
+ id: c251edaf-a063-40fe-bd49-038843fcdb23
+ jinja: "Do you think the right answer to the question \"{{ question[0]|lower }}{{\
+ \ question[1:] }}\" is \"{{ choices.text[1][0]|lower}}{{ choices.text[1][1:]|trim('.')\
+ \ }}\", given that\n {{combinedfact[0]|lower}}{{ combinedfact[1:]|trim('.')\
+ \ }}?\n ||| \n{% if answerKey == choices.label[0] %} Yes {% else %} No {% endif\
+ \ %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is_correct_2
+ reference: ''
+ c7cd51f1-ea49-4d6a-a422-46624333b7b1: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: c7cd51f1-ea49-4d6a-a422-46624333b7b1
+ jinja: "Fact 1: {{ fact1[0]|capitalize }}{{ fact1[1:]|trim|trim('.') }}.\n\nFact\
+ \ 2: {{fact2[0]|capitalize }}{{ fact2[1:]|trim|trim('.') }}.\n\nGiven the two\
+ \ facts above, answer the question \"{{ question }}\" with the following options:\
+ \ \n- {{answer_choices | join(\"\\n - \") }}\n\n||| \n\n{% for choice in choices.label\
+ \ %} {% if choice == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif\
+ \ %}{% endfor %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: qa_with_separated_facts_2
+ reference: Question Answering with separated facts.
diff --git a/promptsource/templates/qed/templates.yaml b/promptsource/templates/qed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9fab8e4ff4c865c7429cde3aa6c7285882f88c5
--- /dev/null
+++ b/promptsource/templates/qed/templates.yaml
@@ -0,0 +1,120 @@
+dataset: qed
+templates:
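+ # original_nq_answers is a list of answer dicts; "| choice" samples one, and
+ # its "string" field is the target.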
+ 292db39d-b9e9-4113-b59d-6c5b93133563: !Template
+ answer_choices: null
+ id: 292db39d-b9e9-4113-b59d-6c5b93133563
+ jinja: "Give a suitable title to the following passage:\n\n{{paragraph_text}}\
+ \ \n|||\n\n{{title_text}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title-prediction
+ reference: ''
+ 3578c1ee-8872-406f-be9f-b7e174aed92c: !Template
+ answer_choices: null
+ id: 3578c1ee-8872-406f-be9f-b7e174aed92c
+ jinja: '{% set chosen = original_nq_answers | choice %}
+
+ Question: {{question}}?
+
+ |||
+
+ {{ chosen["string"]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: closed-book-qa
+ reference: ''
+ 383d06fe-d562-4883-8d29-b727d4c3877b: !Template
+ answer_choices: null
+ id: 383d06fe-d562-4883-8d29-b727d4c3877b
+ jinja: "{% set chosen = original_nq_answers | choice %}\n{% if annotation['selected_sentence']['string']!=\"\
+ \" %}\nAnswer the following question given the hint.\n\nQuestion: {{question}}?\n\
+ \nHint: {{paragraph_text}} \n\n|||\n{{ chosen[\"string\"]}}\n{% endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: open-book-qa
+ reference: ''
+ 5b45c11d-bbea-45a1-a799-a77a56fe8e1d: !Template
+ answer_choices: null
+ id: 5b45c11d-bbea-45a1-a799-a77a56fe8e1d
+ jinja: 'Please extract the title from the given URL (Uniform Resource Locator).
+
+
+ URL: {{ url }}
+
+ |||
+
+ {{ title_text }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: extract-title-from-url
+ reference: ''
+ 7d3746b5-52e6-4ce1-b441-007f271f477b: !Template
+ answer_choices: null
+ id: 7d3746b5-52e6-4ce1-b441-007f271f477b
+ jinja: "I need to prepare for my upcoming test. Can you read the below passage\
+ \ and ask me a reasonable question? \n\n{{paragraph_text}} \n||| \n\n{{question}}?"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: question-forming
+ reference: ''
+ 86cffd6d-db04-4daf-ab30-f462fd1177e3: !Template
+ answer_choices: null
+ id: 86cffd6d-db04-4daf-ab30-f462fd1177e3
+ jinja: '{% if annotation["selected_sentence"]["string"] != "" %}
+
+ Read the following paragraph and question A, and answer question B:
+
+
+ Paragraph: {{ paragraph_text }}
+
+
+ Question A: {{ question }}?
+
+
+ Question B: What''s the most important sentence in the paragraph, in order
+ to answer Question A?
+
+ |||
+
+ {{ annotation["selected_sentence"]["string"] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: select-sentence
+ reference: ''
diff --git a/promptsource/templates/quac/templates.yaml b/promptsource/templates/quac/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d19e1a210ec9e63d43875945aecbf5e0f046792f
--- /dev/null
+++ b/promptsource/templates/quac/templates.yaml
@@ -0,0 +1,138 @@
+dataset: quac
+templates:
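+ # answers.texts[i] holds several reference answers per turn; "| choice" samples
+ # one, and the dataset token CANNOTANSWER is rewritten to "Cannot answer".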
+ 12c9d007-991c-49ed-82e2-13a7d3147881: !Template
+ answer_choices: null
+ id: 12c9d007-991c-49ed-82e2-13a7d3147881
+ jinja: "Given the partial dialogue : \n\nStudent: {{questions[0]}}\n\nTeacher:\
+ \ {{(answers.texts[0] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\")\
+ \ }}\n\nThe context: {{context}}\n\nAnswer the question: {{questions[1] }}\n\
+ |||\n{{(answers.texts[1] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\"\
+ ) }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: Answer Given Only First Dialogue
+ reference: ''
+ 1d4014c2-7cf7-45d1-8f85-a701b6a65118: !Template
+ answer_choices: null
+ id: 1d4014c2-7cf7-45d1-8f85-a701b6a65118
+ jinja: "Given the dialogue: \n{% for i in range(0, questions | length - 1)%}\n\
+ Student: {{questions[i]}}\n\nTeacher: {{(answers.texts[i] | choice).replace(\"\
+ CANNOTANSWER\",\"Cannot answer\") }}\n{% endfor %}\n\nThe context: {{context}}\n\
+ \nAnswer the question: {{questions | last }}\n|||\n{{(answers.texts | last |\
+ \ choice).replace(\"CANNOTANSWER\",\"Cannot answer\") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Answer Given Full Dialogue
+ reference: ''
+ 1f28f748-bc9e-4881-89ae-1d561abf2f2b: !Template
+ answer_choices: null
+ id: 1f28f748-bc9e-4881-89ae-1d561abf2f2b
+ jinja: 'This conversation happened between a teacher and a student:
+
+ {% for i in range(0, questions | length - 1) %}
+
+ Student: {{questions[i]}}
+
+
+ Teacher: {{(answers.texts[i] | choice).replace("CANNOTANSWER","Cannot answer")
+ }}
+
+ {% endfor %}
+
+
+ Use the article: {{context}} to answer the question: {{questions | last }}
+
+ |||
+
+ {{(answers.texts | last | choice).replace("CANNOTANSWER","Cannot answer") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Answer Conversation
+ reference: ''
+ 2c052ef8-adfa-497b-adb1-9e942ad998e0: !Template
+ answer_choices: null
+ id: 2c052ef8-adfa-497b-adb1-9e942ad998e0
+ jinja: "I read an article : {{context}} \n\nThen the following conversation occurred:\
+ \ \n{% for i in range(0, questions | length - 1) %}\nStudent: {{questions[i]}}\n\
+ \nTeacher: {{(answers.texts[i] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\"\
+ ) }}\n{% endfor %}\nUse both to answer the question: {{questions | last }}\n\
+ |||\n{{(answers.texts | last | choice).replace(\"CANNOTANSWER\",\"Cannot answer\"\
+ ) }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Context First
+ reference: ''
+ 70c85283-44f3-4d31-866c-02294aed7a59: !Template
+ answer_choices: null
+ id: 70c85283-44f3-4d31-866c-02294aed7a59
+ jinja: "Read the article: {{context}} \n\nThen answer the question: {{questions\
+ \ | last}}\n\nYou can use this dialogue to find the answer faster:\n{% for i\
+ \ in range(0, questions | length - 1)%}\nStudent: {{questions[i]}}\n\nTeacher:\
+ \ {{(answers.texts[i] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\")\
+ \ }}\n{% endfor %}\n|||\n{{(answers.texts | last | choice).replace(\"CANNOTANSWER\"\
+ ,\"Cannot answer\") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Use Dialogue as Hint
+ reference: ''
+ a62e17c6-8973-43a3-863a-40bb12a1c8cf: !Template
+ answer_choices: null
+ id: a62e17c6-8973-43a3-863a-40bb12a1c8cf
+ jinja: 'A student is asking a teacher about the following article:
+
+ {{context}}
+
+
+ This is a summary of their conversation:
+
+ {% for i in range(0, questions | length - 1)%}
+
+ Student: {{questions[i]}}
+
+
+ Teacher: {{(answers.texts[i] | choice).replace("CANNOTANSWER","Cannot answer")
+ }}
+
+ {% endfor %}
+
+
+ Use their conversation and the article to answer the question: {{questions
+ | last}}
+
+ |||
+
+ {{(answers.texts | last | choice).replace("CANNOTANSWER","Cannot answer") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Student Asking Teacher
+ reference: ''
diff --git a/promptsource/templates/quail/templates.yaml b/promptsource/templates/quail/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..79aef8c57a3f87a3279c6955377e12935ca24cce
--- /dev/null
+++ b/promptsource/templates/quail/templates.yaml
@@ -0,0 +1,335 @@
+dataset: quail
+templates:
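+ # Templates named *_id answer with a letter (A-D); *_text templates answer with
+ # the option string. correct_answer_id indexes answer_choices either way.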
+ 01870e5a-39d0-4485-a453-893d46c82736: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 01870e5a-39d0-4485-a453-893d46c82736
+ jinja: '{{ context }}
+
+ Question: {{ question }}
+
+ Options:
+
+ {% for k in range(answers | length) %}
+
+ {{''. ''.join([answer_choices[k], answers[k]])}}
+
+ {% endfor %}
+
+ ===
+
+ The correct answer is
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_question_answer_description_id
+ reference: ''
+ 1225d6c7-4d4c-46ab-9a65-a8fa87826906: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: 1225d6c7-4d4c-46ab-9a65-a8fa87826906
+ jinja: '{{ context }}
+
+ Question: {{ question }}
+
+ Options:
+
+ - {{ answer_choices | join(" \n - ") }}
+
+ ===
+
+ The correct answer is
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_question_answer_description_text
+ reference: ''
+ 38caa4e6-28b9-4476-8609-b66c83679fcc: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 38caa4e6-28b9-4476-8609-b66c83679fcc
+ jinja: 'Read the following context and choose the correct option to answer the
+ question.
+
+ Context: {{ context }}
+
+ Question: {{ question }}
+
+ Options:
+
+ {% for k in range(answers | length) %}
+
+ {{''. ''.join([answer_choices[k], answers[k]])}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_context_question_answer_id
+ reference: ''
+ 7186e352-adfa-4c16-8eda-d9fcccb6293e: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: 7186e352-adfa-4c16-8eda-d9fcccb6293e
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ Pick the correct answer from the following options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_question_description_answer_text
+ reference: ''
+ 773d1dad-ccc7-4f5d-936b-c43b2d3eedf7: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: 773d1dad-ccc7-4f5d-936b-c43b2d3eedf7
+ jinja: '{{ context }}
+
+ Question: {{ question }}
+
+ ===
+
+ The answer to the above question is
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: context_question_description_text
+ reference: ''
+ 7b0ce9fa-6aa0-4210-ab6c-1edd4b2f43df: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: 7b0ce9fa-6aa0-4210-ab6c-1edd4b2f43df
+ jinja: '{{ context }}
+
+ According to the above context, answer the following question.
+
+ {{ question }}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: context_description_question_text
+ reference: ''
+ 7c9c7cec-12c1-4005-a9a1-a027e472d949: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 7c9c7cec-12c1-4005-a9a1-a027e472d949
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ Pick the correct answer from the following options:
+
+ {% for k in range(answers | length) %}
+
+ {{''. ''.join([answer_choices[k], answers[k]])}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_question_description_answer_id
+ reference: ''
+ 80fe7668-d088-4432-98bd-9df022a62b5b: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 80fe7668-d088-4432-98bd-9df022a62b5b
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ {% for k in range(answers | length) %}
+
+ {{''. ''.join([answer_choices[k], answers[k]])}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: no_prompt_id
+ reference: ''
+ 88d0056d-e736-405f-85aa-155474fde51a: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 88d0056d-e736-405f-85aa-155474fde51a
+ jinja: '{{ context }}
+
+ According to the above context, choose the correct option to answer the following
+ question.
+
+ Question: {{ question }}
+
+ Options:
+
+ {% for k in range(answers | length) %}
+
+ {{''. ''.join([answer_choices[k], answers[k]])}}
+
+ {% endfor %}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_description_question_answer_id
+ reference: ''
+ a071e73e-5fda-45b5-8a6a-b56e477a6aee: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: a071e73e-5fda-45b5-8a6a-b56e477a6aee
+ jinja: 'Read the following context and answer the question.
+
+ Context: {{ context }}
+
+ Question: {{ question }}
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: description_context_question_text
+ reference: ''
+ cb57451d-2a1c-4db1-a352-9f50d835b327: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: cb57451d-2a1c-4db1-a352-9f50d835b327
+ jinja: '{{ context }}
+
+ {{ question }}
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: no_prompt_text
+ reference: ''
+ ea0ba07f-bb89-42dc-b1e8-4fe6008297b2: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: ea0ba07f-bb89-42dc-b1e8-4fe6008297b2
+ jinja: '{{ context }}
+
+ According to the above context, choose the correct option to answer the following
+ question.
+
+ Question: {{ question }}
+
+ Options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: context_description_question_answer_text
+ reference: ''
+ f374c2ca-952a-47ab-8420-cb5fb2c693d9: !Template
+ answer_choices: '{{answers | join("|||")}}'
+ id: f374c2ca-952a-47ab-8420-cb5fb2c693d9
+ jinja: 'Read the following context and choose the correct option to answer the
+ question.
+
+ Context: {{ context }}
+
+ Question: {{ question }}
+
+ Options:
+
+ - {{ answer_choices | join("\n- ") }}
+
+ |||
+
+ {{ answer_choices[correct_answer_id] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: description_context_question_answer_text
+ reference: ''
diff --git a/promptsource/templates/quarel/templates.yaml b/promptsource/templates/quarel/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..521693bd482f554ffe159c3a8e2329f6469b3b55
--- /dev/null
+++ b/promptsource/templates/quarel/templates.yaml
@@ -0,0 +1,103 @@
+dataset: quarel
+templates:
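+ # The answer choices are the two "world" entities being compared; answer_index
+ # selects the correct one.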
+ 5904fd73-b1ee-4f89-b7bc-b0fe8cc07c66: !Template
+ answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+ id: 5904fd73-b1ee-4f89-b7bc-b0fe8cc07c66
+ jinja: 'Question: {{question}}
+
+
+ Do not use {{"A"}} and {{"B"}} to answer the question but instead, choose between
+ "{{answer_choices[0]}}" and "{{answer_choices[1]}}".
+
+ |||
+
+ {{answer_choices[answer_index]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: do_not_use
+ reference: ''
+ 5b5f9d29-0ad5-4bb9-831a-11fcb115c10d: !Template
+ answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+ id: 5b5f9d29-0ad5-4bb9-831a-11fcb115c10d
+ jinja: 'Here''s a logic test: {{question}}
+
+
+ Choose the answer between "{{answer_choices[0]}}" and "{{answer_choices[1]}}".
+
+ |||
+
+ {{answer_choices[answer_index]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: logic_test
+ reference: ''
+ 63c58389-605a-42b9-85a6-a2586a954a92: !Template
+ answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+ id: 63c58389-605a-42b9-85a6-a2586a954a92
+ jinja: 'Here''s a short story: {{question}}.
+
+
+ What is the most sensible answer between "{{answer_choices[0]}}" and "{{answer_choices[1]}}"?
+
+ |||
+
+ {{answer_choices[answer_index]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: heres_a_story
+ reference: ''
+ 73a7adbb-41b1-4b4d-b378-d7e17d030a6f: !Template
+ answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+ id: 73a7adbb-41b1-4b4d-b378-d7e17d030a6f
+ jinja: 'Choose between "{{answer_choices[0]}}" and "{{answer_choices[1]}}".
+
+ Question: {{question}}
+
+ |||
+
+ {{answer_choices[answer_index]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: choose_between
+ reference: ''
+ 92013fab-5387-44d4-bf0f-e29a31bcafb6: !Template
+ answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+ id: 92013fab-5387-44d4-bf0f-e29a31bcafb6
+ jinja: 'I am testing my students'' logic.
+
+ What is the answer they should choose between "{{answer_choices[0]}}" and "{{answer_choices[1]}}"?
+
+ Logic test: {{question}}
+
+ |||
+
+ {{answer_choices[answer_index]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: testing_students
+ reference: ''
diff --git a/promptsource/templates/quartz/templates.yaml b/promptsource/templates/quartz/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..5e9ea1b445a18c08f1d59ea7cfca0e9cab85dab4
--- /dev/null
+++ b/promptsource/templates/quartz/templates.yaml
@@ -0,0 +1,138 @@
+dataset: quartz
+templates:
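+ # Questions either contain a "_____" blank (choices are spliced in) or read as
+ # open questions (choices get appended before the "?").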
+ 005b9776-2170-47f8-a5d2-03e83d0e55ae: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 005b9776-2170-47f8-a5d2-03e83d0e55ae
+ jinja: "Use information from the paragraph to answer the question.\n\nQuestion:\n\
+ \n{% if '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"_____\"\
+ , answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question |\
+ \ trim(\".?!\") }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif\
+ \ %}\n\nParagraph :\n\n{{ para }}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: use_info_from_question_paragraph
+ reference: ''
+ 01d6ae3e-87bb-456c-9722-92a214f6ff19: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 01d6ae3e-87bb-456c-9722-92a214f6ff19
+ jinja: "{{ para }}\n{% if '_____' in question %}\n{{ question | trim(\".?!\")\
+ \ | replace(\"_____\", answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else\
+ \ %}\n{{ question | trim(\".?!\")}} {{ answer_choices | join(\" or \") }}{{\
+ \ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: paragraph_question_plain_concat
+ reference: ''
+ 22e29cab-f57f-4af7-92fc-72b131a96878: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 22e29cab-f57f-4af7-92fc-72b131a96878
+ jinja: "Use information from the paragraph to answer the question.\n\nParagraph\
+ \ :\n\n{{ para }}\n\nQuestion:\n\n{% if '_____' in question %}\n{{ question\
+ \ | trim(\".?!\") | replace(\"_____\", answer_choices | join(\" or \")) }}{{\
+ \ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\") }} {{ answer_choices |\
+ \ join(\" or \") }}{{ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: use_info_from_paragraph_question
+ reference: ''
+ 2f6baa0c-3b69-48be-b195-cc00cb5c96fa: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 2f6baa0c-3b69-48be-b195-cc00cb5c96fa
+ jinja: "Answer the question based on the following text.\n\nQuestion:\n\n{% if\
+ \ '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"_____\", answer_choices\
+ \ | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\")\
+ \ }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif %}\n\nText:\n\
+ \n{{ para }}|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_question_based_on
+ reference: ''
+ 397bdb29-03e8-478b-9840-1bfe9d57d6fb: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 397bdb29-03e8-478b-9840-1bfe9d57d6fb
+ jinja: "Answer the question below:\n\n{% if '_____' in question %}\n{{ question\
+ \ | trim(\".?!\") | replace(\"_____\", answer_choices | join(\" or \")) }}{{\
+ \ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\") }} {{ answer_choices |\
+ \ join(\" or \") }}{{ \"?\" }} \n{% endif %}\n\nAssuming that:\n\n{{ para }}|||\n\
+ {{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_question_below
+ reference: ''
+ 39a67851-fdf5-4f0d-bda2-4902be3a6bff: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 39a67851-fdf5-4f0d-bda2-4902be3a6bff
+ jinja: "Read the passage below and choose the right answer to the following question\
+ \ (choices are {{ answer_choices | join(\" or \") }} ):\n\n{{ para }}\n\n{%\
+ \ if '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"_____\"\
+ , answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question |\
+ \ trim(\".?!\") }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif\
+ \ %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: read_passage_below_choose
+ reference: ''
+ 5c794ff0-32b9-43d4-b496-1a4d246ecfc0: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 5c794ff0-32b9-43d4-b496-1a4d246ecfc0
+ jinja: "{{ para }}\n\nHaving read the above passage, choose the right answer to\
+ \ the following question (choices are {{ answer_choices | join(\" or \") }}\
+ \ ):\n\n{% if '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"\
+ _____\", answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question\
+ \ | trim(\".?!\") }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif\
+ \ %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: having_read_above_passage
+ reference: ''
+ a8c6ae4c-4874-47d1-93ea-801b6e080a58: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: a8c6ae4c-4874-47d1-93ea-801b6e080a58
+ jinja: "Given the fact that:\n\n{{ para }}\n\nAnswer the question:\n\n{% if '_____'\
+ \ in question %}\n{{ question | trim(\".?!\") | replace(\"_____\", answer_choices\
+ \ | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\")\
+ \ }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: given_the_fact_answer_the_q
+ reference: ''
diff --git a/promptsource/templates/quora/templates.yaml b/promptsource/templates/quora/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4344878d18f5bc7f269ff1905b9b4ee9bea17907
--- /dev/null
+++ b/promptsource/templates/quora/templates.yaml
@@ -0,0 +1,89 @@
+dataset: quora
+templates:
+ 2c780ebe-f8e6-44f0-a804-0a3e53eb8cce: !Template
+ answer_choices: No ||| Yes
+ id: 2c780ebe-f8e6-44f0-a804-0a3e53eb8cce
+ jinja: Given the question "{{questions.text.0}}", would you consider "{{questions.text.1}}"
+ as a duplicate? ||| {{ answer_choices[is_duplicate] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: are_two_questions_duplicate
+ reference: ''
+ 3331355a-4d69-4060-ae9e-cdb951335ed2: !Template
+ answer_choices: No ||| Yes
+ id: 3331355a-4d69-4060-ae9e-cdb951335ed2
+ jinja: Is the following question "{{questions.text.0}}" the same as "{{questions.text.1}}"?
+ ||| {{ answer_choices[is_duplicate] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: are_two_questions_same
+ reference: ''
+ 397b1fb9-0cf1-455b-aaf2-efdb750014c5: !Template
+ answer_choices: null
+ id: 397b1fb9-0cf1-455b-aaf2-efdb750014c5
+ jinja: '{% if is_duplicate == true %} Paraphrase the following question: {%
+ if questions.text.0|length < questions.text.1|length %} {{questions.text.0}}
+ ||| {{questions.text.1}} {% else %} {{questions.text.1}} ||| {{questions.text.0}}
+ {% endif %}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: rephrase_given_question
+ reference: ''
+ 6de61945-992b-4191-9b3a-930e266769c9: !Template
+ answer_choices: True ||| False
+ id: 6de61945-992b-4191-9b3a-930e266769c9
+ jinja: The question "{{questions.text.0}}" is different from "{{questions.text.1}}".
+ {{"True"}} or {{"false"}}? ||| {{ answer_choices [is_duplicate] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: are_two_questions_different
+ reference: ''
+ 7c367d58-e34f-4899-9c09-64a6a00a04b1: !Template
+ answer_choices: False ||| True
+ id: 7c367d58-e34f-4899-9c09-64a6a00a04b1
+ jinja: The question "{{questions.text.0}}" is the same as "{{questions.text.1}}".
+ {{"True"}} or {{"false"}} ? ||| {{ answer_choices [is_duplicate] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: true_or_false
+ reference: ''
+ 7cc5ba2c-215d-4834-b41e-3ef717f6ac8c: !Template
+ answer_choices: No, they are different questions ||| Yes, they are the same question
+ id: 7cc5ba2c-215d-4834-b41e-3ef717f6ac8c
+ jinja: Two new questions asked on Quora are "{{questions.text.0}}" and "{{questions.text.1}}".
+ I feel like they have asked the same question. Am I correct? ||| {{answer_choices[is_duplicate]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: yes_or_no
+ reference: ''
diff --git a/promptsource/templates/quoref/templates.yaml b/promptsource/templates/quoref/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44586d480ce08ecd102e8ef3eb28751ecfd2aca5
--- /dev/null
+++ b/promptsource/templates/quoref/templates.yaml
@@ -0,0 +1,222 @@
+dataset: quoref
+templates:
+ 4120bebc-9c8f-44af-8d1a-a65e443ce010: !Template
+ answer_choices: null
+ id: 4120bebc-9c8f-44af-8d1a-a65e443ce010
+ jinja: 'The answer to the question: {{question}} is inside the article: {{context}},
+ can you guess it?
+
+
+ |||
+
+ {{answers.text | choice}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Guess Answer
+ reference: ''
+ 6f1d5031-1377-4b8a-9475-987b2275b8da: !Template
+ answer_choices: null
+ id: 6f1d5031-1377-4b8a-9475-987b2275b8da
+ jinja: 'Given the following context:
+
+
+ {{context}}
+
+
+ answer the following question:
+
+
+ {{question}} |||
+
+ {{answers.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Answer Question Given Context
+ reference: ''
+ 9493f80a-daf5-4c30-a9fc-7bc5bc61b5e9: !Template
+ answer_choices: null
+ id: 9493f80a-daf5-4c30-a9fc-7bc5bc61b5e9
+ jinja: "The following article contains an answer for the question: {{question}}\
+ , can you please find it?\n\n{{context}}|||\n{{answers.text | choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Find Answer
+ reference: ''
+ a3e5e25d-0a87-4cb8-89ab-3539fc4d23cb: !Template
+ answer_choices: null
+ id: a3e5e25d-0a87-4cb8-89ab-3539fc4d23cb
+ jinja: 'This article: {{context}} contains an answer for the question: {{question}},
+ what is it?
+
+ |||
+
+ {{answers.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Context Contains Answer
+ reference: ''
+ aa26aab2-d2e7-4560-b7eb-0cbcff7c0f31: !Template
+ answer_choices: null
+ id: aa26aab2-d2e7-4560-b7eb-0cbcff7c0f31
+ jinja: '{{question}}
+
+
+ Answer the above question based on the context below:
+
+
+ {{context}} |||
+
+ {{answers.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Given Context Answer Question
+ reference: ''
+ abdfa570-2de5-406c-9051-caa6a1362796: !Template
+ answer_choices: null
+ id: abdfa570-2de5-406c-9051-caa6a1362796
+ jinja: 'What is the answer for the question: {{question}} from the following
+ article?
+
+
+ {{context}}|||
+
+ {{answers.text | choice}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: What Is The Answer
+ reference: ''
+ b3ec0888-dd6f-466a-abd4-b2fbcacfdb8b: !Template
+ answer_choices: null
+ id: b3ec0888-dd6f-466a-abd4-b2fbcacfdb8b
+ jinja: 'I have a test where I am given the following article. What is an answer
+ for the question: {{question}}?
+
+
+ {{context}}|||
+
+ {{answers.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Answer Test
+ reference: ''
+ bf525757-8cde-4839-81fb-a85be3fd1192: !Template
+ answer_choices: null
+ id: bf525757-8cde-4839-81fb-a85be3fd1192
+ jinja: 'Given the below context:
+
+
+ {{context}}
+
+
+ Guess a valid title for it! |||
+
+ {{title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: false
+ name: Guess Title For Context
+ reference: ''
+ d055747f-7a32-4e12-aab1-fed35d42a445: !Template
+ answer_choices: null
+ id: d055747f-7a32-4e12-aab1-fed35d42a445
+ jinja: 'Found the following article online; use it to answer the question: {{question}}
+
+
+ {{context}}|||
+
+ {{answers.text | choice}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Found Context Online
+ reference: ''
+ d1abb8a0-03c4-41ef-865c-aa275278a0e4: !Template
+ answer_choices: null
+ id: d1abb8a0-03c4-41ef-865c-aa275278a0e4
+ jinja: 'A friend asked me to answer this question: {{question}}, using the article:
+ {{context}}. What would be the answer?
+
+
+ |||
+
+ {{answers.text | choice}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Answer Friend Question
+ reference: ''
+ fcbe0609-06ce-4cbd-91de-adc38966bcac: !Template
+ answer_choices: null
+ id: fcbe0609-06ce-4cbd-91de-adc38966bcac
+ jinja: 'Read the following paragraph and extract the answer for the question:
+ {{question}}
+
+
+ {{context}} |||
+
+ {{answers.text | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: 'Read And Extract'
+ reference: ''
diff --git a/promptsource/templates/race/all/templates.yaml b/promptsource/templates/race/all/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..028420ab641e85ab388e28823861c07b44e4e77e
--- /dev/null
+++ b/promptsource/templates/race/all/templates.yaml
@@ -0,0 +1,202 @@
+dataset: race
+subset: all
+templates:
+ 00ede994-778f-4d25-82dc-bae7ba9e115f: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 00ede994-778f-4d25-82dc-bae7ba9e115f
+ jinja: 'I''m taking a test and have to guess the right answer to the question
+ after the article.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Options: {{"A"}}: {{options.0}}
+
+ {{"B"}}: {{options.1}}
+
+ {{"C"}}: {{options.2}}
+
+ {{"D"}}: {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Taking a test
+ reference: ''
+ 02ed2a0c-b3a4-4b86-8524-e8961f042ae9: !Template
+ answer_choices: null
+ id: 02ed2a0c-b3a4-4b86-8524-e8961f042ae9
+ jinja: "Write a multi-choice question for the following article:\nArticle: {{article}}\n\
+ |||\nQuestion: \n{{question}}\nOptions:\n{{\"A\"}} {{options.0}}\n{{\"B\"}}\
+ \ {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\nAnswer:\n\
+ {{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Write a multi-choice question for the following article
+ reference: ''
+ 59b5c4e3-9539-449f-ac60-04e681c705b5: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 59b5c4e3-9539-449f-ac60-04e681c705b5
+ jinja: 'Read the following article and answer the question.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Read the article and answer the question (no option)
+ reference: ''
+ 81368f4b-817f-4c81-9db5-b86905bb975e: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 81368f4b-817f-4c81-9db5-b86905bb975e
+ jinja: 'Read the following article and select the best answer.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ - {{answer_choices | join("\n- ")}}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer (generate span)
+ reference: ''
+ b808b05e-0d2a-459e-b345-2d83cdb20216: !Template
+ answer_choices: Yes ||| No
+ id: b808b05e-0d2a-459e-b345-2d83cdb20216
+ jinja: '{% set candidate = ["A", "B", "C", "D"] | choice %}
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Yes or no, is the answer "{{ [options.0,options.1,options.2,options.3][{"A":0,"B":1,"C":2,"D":3}[candidate]]
+ }}"?
+
+ |||
+
+ {% if candidate == answer %}
+
+ Yes
+
+ {% else %}
+
+ No
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Is this the right answer
+ reference: ''
+ cce2b02b-6c47-4941-83d6-5ecb2dfedadc: !Template
+ answer_choices: null
+ id: cce2b02b-6c47-4941-83d6-5ecb2dfedadc
+ jinja: "Write a multi-choice question for the following article, with the given\
+ \ choices and answer:\nArticle: {{article}}\nOptions:\n{{\"A\"}} {{options.0}}\n\
+ {{\"B\"}} {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\n\
+ Answer:\n{{answer}} {{ [options.0,options.1,options.2,options.3][{\"A\":0,\"\
+ B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Write a multi-choice question (options given)
+ reference: ''
+ e5c4d6a3-ff68-4243-93aa-2629e72d0d70: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: e5c4d6a3-ff68-4243-93aa-2629e72d0d70
+ jinja: 'Read the article and select the best answer.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Options: {{"A"}}: {{options.0}}
+
+ {{"B"}}: {{options.1}}
+
+ {{"C"}}: {{options.2}}
+
+ {{"D"}}: {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer
+ reference: ''
+ f79ba457-3c44-455f-a6ed-9c5f50d0e886: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: f79ba457-3c44-455f-a6ed-9c5f50d0e886
+ jinja: '{{article}}
+
+ {{question}}
+
+ {{"A)"}} {{options.0}}
+
+ {{"B)"}} {{options.1}}
+
+ {{"C)"}} {{options.2}}
+
+ {{"D)"}} {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer (no instructions)
+ reference: ''
diff --git a/promptsource/templates/race/high/templates.yaml b/promptsource/templates/race/high/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..28c73e13aea2023d107a2d6348a51971db25e94d
--- /dev/null
+++ b/promptsource/templates/race/high/templates.yaml
@@ -0,0 +1,202 @@
+dataset: race
+subset: high
+templates:
+ 26fdd0e9-9066-478f-8b5b-03fc0477bf7a: !Template
+ answer_choices: Yes ||| No
+ id: 26fdd0e9-9066-478f-8b5b-03fc0477bf7a
+ jinja: '{% set candidate = ["A", "B", "C", "D"] | choice %}
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Yes or no, is the answer "{{ [options.0,options.1,options.2,options.3][{"A":0,"B":1,"C":2,"D":3}[candidate]]
+ }}"?
+
+ |||
+
+ {% if candidate == answer %}
+
+ Yes
+
+ {% else %}
+
+ No
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Is this the right answer
+ reference: ''
+ 46b64805-d02c-4aa3-a7c5-34503f3ad56d: !Template
+ answer_choices: null
+ id: 46b64805-d02c-4aa3-a7c5-34503f3ad56d
+ jinja: "Write a multi-choice question for the following article:\nArticle: {{article}}\n\
+ |||\nQuestion: \n{{question}}\nOptions:\n{{\"A\"}} {{options.0}}\n{{\"B\"}}\
+ \ {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\nAnswer:\n\
+ {{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Write a multi-choice question for the following article
+ reference: ''
+ 4ef4f2ee-6151-41b0-bdb7-e093cde8c42a: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 4ef4f2ee-6151-41b0-bdb7-e093cde8c42a
+ jinja: 'I''m taking a test and have to guess the right answer to the question
+ after the article.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Options: {{"A"}}: {{options.0}}
+
+ {{"B"}}: {{options.1}}
+
+ {{"C"}}: {{options.2}}
+
+ {{"D"}}: {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Taking a test
+ reference: ''
+ 5ed5f2e9-6cf3-4fc5-b9eb-246d9a4ee511: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 5ed5f2e9-6cf3-4fc5-b9eb-246d9a4ee511
+ jinja: 'Read the article and select the best answer.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Options: {{"A"}}: {{options.0}}
+
+ {{"B"}}: {{options.1}}
+
+ {{"C"}}: {{options.2}}
+
+ {{"D"}}: {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer
+ reference: ''
+ 691f84af-ca02-458d-8912-f661aefccd52: !Template
+ answer_choices: null
+ id: 691f84af-ca02-458d-8912-f661aefccd52
+ jinja: "Write a multi-choice question for the following article, with the given\
+ \ choices and answer:\nArticle: {{article}}\nOptions:\n{{\"A\"}} {{options.0}}\n\
+ {{\"B\"}} {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\n\
+ Answer:\n{{answer}} {{ [options.0,options.1,options.2,options.3][{\"A\":0,\"\
+ B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Write a multi-choice question (options given)
+ reference: ''
+ ab253338-5b02-46e8-9959-b66d1009c34a: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: ab253338-5b02-46e8-9959-b66d1009c34a
+ jinja: 'Read the following article and select the best answer.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ - {{answer_choices | join("\n- ")}}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer (generate span)
+ reference: ''
+ c8c9dcfd-69d3-4ccd-8aeb-2bdb98aba261: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: c8c9dcfd-69d3-4ccd-8aeb-2bdb98aba261
+ jinja: '{{article}}
+
+ {{question}}
+
+ {{"A)"}} {{options.0}}
+
+ {{"B)"}} {{options.1}}
+
+ {{"C)"}} {{options.2}}
+
+ {{"D)"}} {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer (no instructions)
+ reference: ''
+ e1b9d073-e18e-4940-9868-5b4a35617c35: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: e1b9d073-e18e-4940-9868-5b4a35617c35
+ jinja: 'Read the following article and answer the question.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Read the article and answer the question (no option)
+ reference: ''
diff --git a/promptsource/templates/race/middle/templates.yaml b/promptsource/templates/race/middle/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..311f506ba406f3f3a9b761699f856af6b5377ef7
--- /dev/null
+++ b/promptsource/templates/race/middle/templates.yaml
@@ -0,0 +1,202 @@
+dataset: race
+subset: middle
+templates:
+ 0a47d28c-7cf5-405d-b9ef-9b82c1a20002: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 0a47d28c-7cf5-405d-b9ef-9b82c1a20002
+ jinja: 'Read the article and select the best answer.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Options: {{"A"}}: {{options.0}}
+
+ {{"B"}}: {{options.1}}
+
+ {{"C"}}: {{options.2}}
+
+ {{"D"}}: {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer
+ reference: ''
+ 1a68b62e-404c-4037-baec-7e20cb4c3f6b: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 1a68b62e-404c-4037-baec-7e20cb4c3f6b
+ jinja: 'Read the following article and answer the question.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Answer:
+
+ |||
+
+ {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Read the article and answer the question (no option)
+ reference: ''
+ 2e7f5fff-518e-4100-90f9-cca094b11e95: !Template
+ answer_choices: Yes ||| No
+ id: 2e7f5fff-518e-4100-90f9-cca094b11e95
+ jinja: '{% set candidate = ["A", "B", "C", "D"] | choice %}
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Yes or no, is the answer "{{ [options.0,options.1,options.2,options.3][{"A":0,"B":1,"C":2,"D":3}[candidate]]
+ }}"?
+
+ |||
+
+ {% if candidate == answer %}
+
+ Yes
+
+ {% else %}
+
+ No
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Is this the right answer
+ reference: ''
+ 6f2e7b0d-9691-4e28-9666-6c4d478a1641: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: 6f2e7b0d-9691-4e28-9666-6c4d478a1641
+ jinja: '{{article}}
+
+ {{question}}
+
+ {{"A)"}} {{options.0}}
+
+ {{"B)"}} {{options.1}}
+
+ {{"C)"}} {{options.2}}
+
+ {{"D)"}} {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer (no instructions)
+ reference: ''
+ 9aacc46d-8863-4e02-9783-9ec931425759: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 9aacc46d-8863-4e02-9783-9ec931425759
+ jinja: 'Read the following article and select the best answer.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ - {{answer_choices | join("\n- ")}}
+
+ |||
+
+ {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Select the best answer (generate span)
+ reference: ''
+ 9aedaa07-b815-4a35-890b-6100f00706aa: !Template
+ answer_choices: null
+ id: 9aedaa07-b815-4a35-890b-6100f00706aa
+ jinja: "Write a multi-choice question for the following article, with the given\
+ \ choices and answer:\nArticle: {{article}}\nOptions:\n{{\"A\"}} {{options.0}}\n\
+ {{\"B\"}} {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\n\
+ Answer:\n{{answer}} {{ [options.0,options.1,options.2,options.3][{\"A\":0,\"\
+ B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Write a multi-choice question (options given)
+ reference: ''
+ af4869c4-35af-4644-86d9-27843ca4efd5: !Template
+ answer_choices: null
+ id: af4869c4-35af-4644-86d9-27843ca4efd5
+ jinja: "Write a multi-choice question for the following article:\nArticle: {{article}}\n\
+ |||\nQuestion: \n{{question}}\nOptions:\n{{\"A\"}} {{options.0}}\n{{\"B\"}}\
+ \ {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\nAnswer:\n\
+ {{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Write a multi-choice question for the following article
+ reference: ''
+ ebe34816-2a1f-42b3-a9ac-ce4d36633fdb: !Template
+ answer_choices: A ||| B ||| C ||| D
+ id: ebe34816-2a1f-42b3-a9ac-ce4d36633fdb
+ jinja: 'I''m taking a test and have to guess the right answer to the question
+ after the article.
+
+ Article: {{article}}
+
+ Question: {{question}}
+
+ Options: {{"A"}}: {{options.0}}
+
+ {{"B"}}: {{options.1}}
+
+ {{"C"}}: {{options.2}}
+
+ {{"D"}}: {{options.3}}
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Taking a test
+ reference: ''
diff --git a/promptsource/templates/riddle_sense/templates.yaml b/promptsource/templates/riddle_sense/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..48fea7b212df19192ed44b3d7ac4396f44f68578
--- /dev/null
+++ b/promptsource/templates/riddle_sense/templates.yaml
@@ -0,0 +1,126 @@
+dataset: riddle_sense
+templates:
+ 3df2405b-4a0e-4fcf-8600-b7f4843b945a: !Template
+ answer_choices: null
+ id: 3df2405b-4a0e-4fcf-8600-b7f4843b945a
+ jinja: 'Use the following options to predict a possible question for them:
+
+
+ {% for i in range(choices[''text'']|length) %}
+
+ - {{choices[''text''][i]}}
+
+ {% endfor %} |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer_to_question
+ reference: ''
+ 8b44338c-f635-47fc-86fb-3135ef2a76ae: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 8b44338c-f635-47fc-86fb-3135ef2a76ae
+ jinja: 'Given the options below, select the most suitable answer for the following
+ question:
+
+ {{question}}
+
+ Options:
+
+ - {{answer_choices | join("\n- ")}}|||
+
+ {% if answerKey != "" %}
+
+ {{answer_choices[choices["label"].index(answerKey)]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: most_suitable_answer
+ reference: ''
+ ac002734-5b1f-4478-9ffc-f16d9ca2f70e: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: ac002734-5b1f-4478-9ffc-f16d9ca2f70e
+ jinja: 'Answer the following question:
+
+ {{question}} |||
+
+ {% if answerKey != "" %}
+
+ {{ answer_choices[choices[''label''].index(answerKey)] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: answer_given_question_without_options
+ reference: ''
+ bf3f7bd2-91c6-455c-b9f7-42ce265fa2db: !Template
+ answer_choices: A ||| B ||| C ||| D ||| E
+ id: bf3f7bd2-91c6-455c-b9f7-42ce265fa2db
+ jinja: 'Given the following options, what do you think is the correct answer to
+ the question below:
+
+
+ {{question}}
+
+
+ Options:
+
+ {% for letter, t in zip(answer_choices, choices.text) %}
+
+ - {{letter}}: {{t}}
+
+ {% endfor %} |||
+
+ {% if answerKey != "" %}
+
+ {{answerKey}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_to_answer_index
+ reference: ''
+ bf7d7cbc-aa05-4aca-97ff-29eb34502019: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: bf7d7cbc-aa05-4aca-97ff-29eb34502019
+ jinja: '{{question}}
+
+
+ - {{answer_choices | join("\n- ")}} |||
+
+ {% if answerKey != "" %}
+
+ {{answer_choices[choices[''label''].index(answerKey)] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_answering
+ reference: ''
diff --git a/promptsource/templates/ropes/templates.yaml b/promptsource/templates/ropes/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93e82240bf117b2d3b6481dd550ed18dcbebf8ba
--- /dev/null
+++ b/promptsource/templates/ropes/templates.yaml
@@ -0,0 +1,280 @@
+dataset: ropes
+templates:
+ 0791ec30-6361-4e62-8dce-ca9cbf997acc: !Template
+ answer_choices: null
+ id: 0791ec30-6361-4e62-8dce-ca9cbf997acc
+ jinja: "{% if answers.text %}\nPlease answer correctly the following question\
+ \ related to the paragraph below. \n\n{{ question }}\n\n{{ situation }}\n\n\
+ Hint: {{ background }}\n|||\n{{ answers.text | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: prompt_beginning
+ reference: ''
+ 0909d72d-50c7-4cbb-bec4-1f891123717c: !Template
+ answer_choices: null
+ id: 0909d72d-50c7-4cbb-bec4-1f891123717c
+ jinja: "{% if answers.text %}\n{{ situation }}\n\nGiven the paragraph above, please\
+ \ correctly answer the following question:\n\n{{ question }}\n|||\n{{ answers.text\
+ \ | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: prompt_bottom_no_hint
+ reference: ''
+ 1e4944e7-4d5b-475c-8b04-4b523e96bc51: !Template
+ answer_choices: null
+ id: 1e4944e7-4d5b-475c-8b04-4b523e96bc51
+ jinja: '{% if answers.text %}
+
+ Background: {{ background }}
+
+
+ Paragraph: {{ situation }}
+
+
+ Given the paragraph above, please correctly answer the following question: {{
+ question }}
+
+ |||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: prompt_bottom_hint_beginning
+ reference: ''
+ 27fb16c6-a563-46ef-af73-42e15183824e: !Template
+ answer_choices: null
+ id: 27fb16c6-a563-46ef-af73-42e15183824e
+ jinja: '{% if answers.text %}
+
+ Given the background: {{background}}
+
+
+ and the situation: {{situation}}
+
+
+ Answer the following question: {{question}}|||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: given_background_situation
+ reference: ''
+ 31faf808-80ff-47af-ac49-d2cd7a7abcaf: !Template
+ answer_choices: null
+ id: 31faf808-80ff-47af-ac49-d2cd7a7abcaf
+ jinja: '{% if answers.text %}
+
+ {{ situation }}
+
+
+ {{ question }}
+
+
+ |||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: plain_no_background
+ reference: Task without background
+ 473f2c9c-9731-443c-a641-5e43770f7df6: !Template
+ answer_choices: null
+ id: 473f2c9c-9731-443c-a641-5e43770f7df6
+ jinja: '{% if answers.text %}
+
+ {{ situation }}
+
+
+ {{ question }}
+
+
+ Hint: {{ background }}
+
+ |||
+
+ {{ answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: plain_bottom_hint
+ reference: ''
+ a04f69ac-8122-4618-8426-185fc043feca: !Template
+ answer_choices: null
+ id: a04f69ac-8122-4618-8426-185fc043feca
+ jinja: '{% if answers.text %}
+
+ {{ background }}
+
+
+ {{ situation }}
+
+
+ {{ question }}
+
+ |||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: plain_background_situation
+ reference: ''
+ a17aefbb-c571-4127-8170-379e2ec83774: !Template
+ answer_choices: null
+ id: a17aefbb-c571-4127-8170-379e2ec83774
+ jinja: '{% if answers.text %}
+
+ I can use this background: {{background}}
+
+
+ Now, I have a new situation: {{situation}}
+
+
+ Answer this question please: {{question}}|||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: background_new_situation_answer
+ reference: ''
+ b6da4f12-5384-46f5-a74e-c703c19d1698: !Template
+ answer_choices: null
+ id: b6da4f12-5384-46f5-a74e-c703c19d1698
+ jinja: '{% if answers.text %}
+
+ You are given a new situation: {{situation}}
+
+
+ and a hint: {{background}}
+
+
+ Please answer this question: {{question}}|||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: background_situation_middle
+ reference: ''
+ cc747655-6472-4023-95e4-03cb85d5a1c5: !Template
+ answer_choices: null
+ id: cc747655-6472-4023-95e4-03cb85d5a1c5
+ jinja: '{% if answers.text %}
+
+ I have a new situation: {{situation}}
+
+
+ But I can use this background: {{background}}
+
+
+ What is an answer for this question: {{question}}|||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: new_situation_background_answer
+ reference: ''
+ cc8f3c6b-b800-4b47-b6ec-e8febfdaad6f: !Template
+ answer_choices: null
+ id: cc8f3c6b-b800-4b47-b6ec-e8febfdaad6f
+ jinja: "{% if answers.text %}\n{{ situation }}\n\nGiven the paragraph above, please\
+ \ correctly answer the following question:\n\n{{ question }}\n\nHint: {{ background\
+ \ }}\n|||\n{{ answers.text | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: prompt_mix
+ reference: ''
+ f62e0adb-ca74-4280-8ed3-8b53411d87ce: !Template
+ answer_choices: null
+ id: f62e0adb-ca74-4280-8ed3-8b53411d87ce
+ jinja: '{% if answers.text %}
+
+ I read this background article the other day: {{background}}
+
+
+ I am facing a new situation today: {{situation}}
+
+
+ Using the knowledge I acquired from the background article, how should I correctly
+ answer the following question regarding my new situation: {{question}}|||
+
+ {{ answers.text | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: read_background_situation
+ reference: ''
diff --git a/promptsource/templates/rotten_tomatoes/templates.yaml b/promptsource/templates/rotten_tomatoes/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb5fdd5a3b291a8a6fa1e0ae728c6eecb5439b7c
--- /dev/null
+++ b/promptsource/templates/rotten_tomatoes/templates.yaml
@@ -0,0 +1,142 @@
+dataset: rotten_tomatoes
+templates:
+ 10adbcf1-b839-4522-bd76-567f0c760474: !Template
+ answer_choices: bad ||| good
+ id: 10adbcf1-b839-4522-bd76-567f0c760474
+ jinja: '{{text}} Did the reviewer find this movie {{"good or bad"}}? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Opinion bad good choices
+ reference: ''
+ 162f7f89-4a93-42e9-9525-ba12e243ee48: !Template
+ answer_choices: negative ||| positive
+ id: 162f7f89-4a93-42e9-9525-ba12e243ee48
+ jinja: '{{text}} What is the sentiment expressed in this text? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Text Expressed Sentiment
+ reference: ''
+ 37ac89b8-09f8-443d-982c-980a86f26ea0: !Template
+ answer_choices: negative ||| positive
+ id: 37ac89b8-09f8-443d-982c-980a86f26ea0
+ jinja: "{{text}} \nIs this review {{\"positive or negative\"}}? ||| \n{{answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: 'Sentiment with choices'
+ reference: ''
+ 59e2aa7c-696f-4b85-87e9-688ea802d968: !Template
+ answer_choices: No ||| Yes
+ id: 59e2aa7c-696f-4b85-87e9-688ea802d968
+ jinja: '{{text}} Did the reviewer enjoy the movie? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Enjoyment Yes No
+ reference: ''
+ 7a8ccb1c-6737-4863-b08a-61d4a2839204: !Template
+ answer_choices: They didn't like it ||| They loved it
+ id: 7a8ccb1c-6737-4863-b08a-61d4a2839204
+ jinja: '{{text}} How does the reviewer feel about the movie? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Enjoyment
+ reference: ''
+ 94e190d5-2196-486e-908b-759f288eac6e: !Template
+ answer_choices: negative ||| positive
+ id: 94e190d5-2196-486e-908b-759f288eac6e
+ jinja: '{{text}} The sentiment expressed for the movie is ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Movie Expressed Sentiment
+ reference: ''
+ a8f6927e-7eca-4975-a93c-f520f8be480d: !Template
+ answer_choices: negative ||| positive
+ id: a8f6927e-7eca-4975-a93c-f520f8be480d
+ jinja: '{{text}} What sentiment does the writer express for the movie? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Writer Expressed Sentiment
+ reference: ''
+ b60cad41-6bca-422a-aef7-cb113fcc32b0: !Template
+ answer_choices: negative ||| positive
+ id: b60cad41-6bca-422a-aef7-cb113fcc32b0
+ jinja: The following movie review expresses what sentiment? {{text}} ||| {{
+ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Movie Expressed Sentiment 2
+ reference: ''
+ c75e322d-d6b4-4a28-b5a0-27fddfee694d: !Template
+ answer_choices: negative ||| positive
+ id: c75e322d-d6b4-4a28-b5a0-27fddfee694d
+ jinja: '{{text}} What is the sentiment expressed by the reviewer for the movie?
+ ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Expressed Sentiment
+ reference: ''
+ e05ec7b9-5a8d-4670-9723-0237c1bb1eca: !Template
+ answer_choices: negative ||| positive
+ id: e05ec7b9-5a8d-4670-9723-0237c1bb1eca
+ jinja: '{{text}} How does the viewer feel about the movie? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Reviewer Sentiment Feeling
+ reference: ''
diff --git a/promptsource/templates/samsum/templates.yaml b/promptsource/templates/samsum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bd7699c56b8cb1a627fbe3dad61b6e6f72e288b3
--- /dev/null
+++ b/promptsource/templates/samsum/templates.yaml
@@ -0,0 +1,111 @@
+dataset: samsum
+templates:
+ 01faf0cd-d9d8-4245-b86f-e7e13c2972ff: !Template
+ answer_choices: null
+ id: 01faf0cd-d9d8-4245-b86f-e7e13c2972ff
+ jinja: 'Summarize this dialogue: {{dialogue}} |||
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: 'Summarize this dialogue:'
+ reference: ''
+ 182a251f-2f76-4b36-8d2e-417f8d43f729: !Template
+ answer_choices: null
+ id: 182a251f-2f76-4b36-8d2e-417f8d43f729
+ jinja: '{{dialogue}}
+
+ Given the above dialogue, write a summary. |||
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: Given the above dialogue write a summary
+ reference: ''
+ 72eda731-894d-4260-9113-9e492822f80e: !Template
+ answer_choices: null
+ id: 72eda731-894d-4260-9113-9e492822f80e
+ jinja: 'Summarize: {{dialogue}}|||
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: 'Summarize:'
+ reference: ''
+ 7bd51f5b-5bac-429e-b8f9-dd6782b92a59: !Template
+ answer_choices: null
+ id: 7bd51f5b-5bac-429e-b8f9-dd6782b92a59
+ jinja: '{{dialogue}}
+
+ To sum up this dialogue:
+
+ |||{{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: To sum up this dialogue
+ reference: ''
+ 8d829dcb-ea64-457d-b025-f16e31c2834a: !Template
+ answer_choices: null
+ id: 8d829dcb-ea64-457d-b025-f16e31c2834a
+ jinja: 'Generate a summary for this dialogue:
+
+ {{dialogue}}
+
+ |||{{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: Generate a summary for this dialogue
+ reference: ''
+ 9f571a72-6813-4307-9aae-753ca0f737c5: !Template
+ answer_choices: null
+ id: 9f571a72-6813-4307-9aae-753ca0f737c5
+ jinja: 'Write a dialogue that matches this summary: {{summary}} |||
+
+ {{dialogue}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: false
+ name: Write a dialogue that matches this summary
+ reference: ''
+ bd891653-49b6-40bb-968f-8e6632c75659: !Template
+ answer_choices: null
+ id: bd891653-49b6-40bb-968f-8e6632c75659
+ jinja: "Sum up the following dialogue: \n{{dialogue}}\n|||{{summary}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ original_task: true
+ name: Sum up the following dialogue
+ reference: ''
diff --git a/promptsource/templates/scan/addprim_jump/templates.yaml b/promptsource/templates/scan/addprim_jump/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e2bb7eafdab5f27729326a6f7b27a2995cd97b5
--- /dev/null
+++ b/promptsource/templates/scan/addprim_jump/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: addprim_jump
+templates:
+ 4ed58037-5958-40c3-81dc-227b172ddb2a: !Template
+ answer_choices: null
+ id: 4ed58037-5958-40c3-81dc-227b172ddb2a
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 59ec5929-1c52-496b-9d26-59a4198bd4c8: !Template
+ answer_choices: null
+ id: 59ec5929-1c52-496b-9d26-59a4198bd4c8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 7a1b45e1-ce1e-44ff-a01c-5ce7a531fe09: !Template
+ answer_choices: null
+ id: 7a1b45e1-ce1e-44ff-a01c-5ce7a531fe09
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ 98bc213e-2add-4ec9-ac98-6f1519d4de51: !Template
+ answer_choices: null
+ id: 98bc213e-2add-4ec9-ac98-6f1519d4de51
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ bfee9123-ef19-4035-880a-775135f16a52: !Template
+ answer_choices: null
+ id: bfee9123-ef19-4035-880a-775135f16a52
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ c6ff8e1f-759b-4f8a-b0b8-85299a5ae315: !Template
+ answer_choices: null
+ id: c6ff8e1f-759b-4f8a-b0b8-85299a5ae315
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ ddec8813-b3c5-43d9-befa-732de1499266: !Template
+ answer_choices: null
+ id: ddec8813-b3c5-43d9-befa-732de1499266
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease correctly translate the\
+ \ following natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ e1ce9b02-6093-4a26-8b00-007fc3b5836d: !Template
+ answer_choices: null
+ id: e1ce9b02-6093-4a26-8b00-007fc3b5836d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ e7091f17-a7a8-4045-a1f9-57e9254cd71e: !Template
+ answer_choices: null
+ id: e7091f17-a7a8-4045-a1f9-57e9254cd71e
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ e8f6f438-c844-43fe-8097-b5322cbaa28b: !Template
+ answer_choices: null
+ id: e8f6f438-c844-43fe-8097-b5322cbaa28b
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ feb82e4b-e159-4780-a953-55152b5ec1d0: !Template
+ answer_choices: null
+ id: feb82e4b-e159-4780-a953-55152b5ec1d0
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
diff --git a/promptsource/templates/scan/addprim_turn_left/templates.yaml b/promptsource/templates/scan/addprim_turn_left/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f2c14a0e5575fc79ae69fb7692696b50ff293bfd
--- /dev/null
+++ b/promptsource/templates/scan/addprim_turn_left/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: addprim_turn_left
+templates:
+ 0f9b0271-e7e8-422a-b87f-d27d1d480bd0: !Template
+ answer_choices: null
+ id: 0f9b0271-e7e8-422a-b87f-d27d1d480bd0
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 35b1065a-97b5-4e89-9b1e-1673781959b1: !Template
+ answer_choices: null
+ id: 35b1065a-97b5-4e89-9b1e-1673781959b1
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ 64bcb057-b012-4daa-a59b-978607b8efe0: !Template
+ answer_choices: null
+ id: 64bcb057-b012-4daa-a59b-978607b8efe0
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 98c999bb-1963-4e38-a5b2-6ceafd09ccd8: !Template
+ answer_choices: null
+ id: 98c999bb-1963-4e38-a5b2-6ceafd09ccd8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ abeca7b2-dae2-4bc5-aef3-c1b9875c3250: !Template
+ answer_choices: null
+ id: abeca7b2-dae2-4bc5-aef3-c1b9875c3250
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ bb774c8c-22fe-43f9-ab00-bcebcd082831: !Template
+ answer_choices: null
+ id: bb774c8c-22fe-43f9-ab00-bcebcd082831
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ bdf18776-83f0-46a6-b26e-947dd8f0f28a: !Template
+ answer_choices: null
+ id: bdf18776-83f0-46a6-b26e-947dd8f0f28a
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ eccc891e-3969-493b-936e-44ec37bb9ffb: !Template
+ answer_choices: null
+ id: eccc891e-3969-493b-936e-44ec37bb9ffb
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ f18957a7-119b-4fa1-b83e-81b99a0c390e: !Template
+ answer_choices: null
+ id: f18957a7-119b-4fa1-b83e-81b99a0c390e
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ f92867b4-8000-4c78-9386-1eca8b749483: !Template
+ answer_choices: null
+ id: f92867b4-8000-4c78-9386-1eca8b749483
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ fdd1c915-12d4-4856-b2c0-ff0445774208: !Template
+ answer_choices: null
+ id: fdd1c915-12d4-4856-b2c0-ff0445774208
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
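
Every SCAN template in these files opens with the same Jinja preamble, which rewrites the dataset's raw action tokens (e.g. "I_TURN_RIGHT I_JUMP") into readable, comma-separated actions: each token is lowercased, the two-character "i_" prefix is stripped, and underscores become spaces. A minimal Python sketch of that transformation, assuming whitespace-separated tokens as in the templates above (the function name is ours, for illustration only):

    def humanize_scan_actions(actions: str) -> str:
        # Mirror of the Jinja preamble used by every SCAN template:
        # lowercase each token, drop the leading "i_" prefix (two
        # characters), and replace underscores with spaces.
        tokens = actions.split(" ")
        readable = [t.lower()[2:].replace("_", " ") for t in tokens]
        return ", ".join(readable)

    assert humanize_scan_actions("I_TURN_RIGHT I_JUMP") == "turn right, jump"

Note that the `| default("", True)` wrapped around each `append` call only swallows the `None` that `list.append` returns, so the loop mutates `lst_of_actions` without printing anything into the rendered prompt.
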
diff --git a/promptsource/templates/scan/filler_num0/templates.yaml b/promptsource/templates/scan/filler_num0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9682c558b658b50410bf7f4dd88f705a1aba62ab
--- /dev/null
+++ b/promptsource/templates/scan/filler_num0/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: filler_num0
+templates:
+ 08e8a530-bdc5-4498-9605-27b504474bb7: !Template
+ answer_choices: null
+ id: 08e8a530-bdc5-4498-9605-27b504474bb7
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ 1e3d371c-1efc-458a-8806-df56cdab15e4: !Template
+ answer_choices: null
+ id: 1e3d371c-1efc-458a-8806-df56cdab15e4
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 1ecef8c9-e7be-4f0c-994a-fb40fdf68196: !Template
+ answer_choices: null
+ id: 1ecef8c9-e7be-4f0c-994a-fb40fdf68196
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ 2d53b031-7b62-4f38-8297-deff915ab8f6: !Template
+ answer_choices: null
+ id: 2d53b031-7b62-4f38-8297-deff915ab8f6
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ 3fc3a069-9c0f-4491-95b3-6d5f8067241e: !Template
+ answer_choices: null
+ id: 3fc3a069-9c0f-4491-95b3-6d5f8067241e
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ 43699265-53bb-4362-a31d-5efc6cdce1ac: !Template
+ answer_choices: null
+ id: 43699265-53bb-4362-a31d-5efc6cdce1ac
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 7ba46593-7a96-4df2-b80c-47fa9a46c5bc: !Template
+ answer_choices: null
+ id: 7ba46593-7a96-4df2-b80c-47fa9a46c5bc
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ ba786fc1-70e7-4ebb-a7a4-746c34631ca4: !Template
+ answer_choices: null
+ id: ba786fc1-70e7-4ebb-a7a4-746c34631ca4
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ db6cd432-27e6-4cfb-b668-2d41d6dfbc2b: !Template
+ answer_choices: null
+ id: db6cd432-27e6-4cfb-b668-2d41d6dfbc2b
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ e2205509-4eab-4312-a5ce-5351c1f3cbb2: !Template
+ answer_choices: null
+ id: e2205509-4eab-4312-a5ce-5351c1f3cbb2
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ f711952e-1573-48fb-97c9-345aa2aac3f0: !Template
+ answer_choices: null
+ id: f711952e-1573-48fb-97c9-345aa2aac3f0
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
diff --git a/promptsource/templates/scan/filler_num1/templates.yaml b/promptsource/templates/scan/filler_num1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..392892dbcb9cf7829b56bd450f487b3c7384c684
--- /dev/null
+++ b/promptsource/templates/scan/filler_num1/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: filler_num1
+templates:
+ 21cbc219-c6ec-46d8-8cfc-e039e0429746: !Template
+ answer_choices: null
+ id: 21cbc219-c6ec-46d8-8cfc-e039e0429746
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ 237e411d-1f01-4b96-86e5-9023f93071fa: !Template
+ answer_choices: null
+ id: 237e411d-1f01-4b96-86e5-9023f93071fa
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ 23b9f23e-8c4d-4275-85f2-9cea58bb4e23: !Template
+ answer_choices: null
+ id: 23b9f23e-8c4d-4275-85f2-9cea58bb4e23
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ 316ad8b2-edca-4ca8-94d3-941fa4a46757: !Template
+ answer_choices: null
+ id: 316ad8b2-edca-4ca8-94d3-941fa4a46757
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 5a421d94-4059-4edf-a4ea-7dc862948976: !Template
+ answer_choices: null
+ id: 5a421d94-4059-4edf-a4ea-7dc862948976
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ 80a30953-99c4-44aa-b5bf-da4b35c81269: !Template
+ answer_choices: null
+ id: 80a30953-99c4-44aa-b5bf-da4b35c81269
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 9d0c5da0-4d60-4e66-b273-091c39dcc2b7: !Template
+ answer_choices: null
+ id: 9d0c5da0-4d60-4e66-b273-091c39dcc2b7
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ a3fd4bd7-511f-4dfb-a134-29317f4ee61d: !Template
+ answer_choices: null
+ id: a3fd4bd7-511f-4dfb-a134-29317f4ee61d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ a61db237-78a5-46f2-a791-43728b5e4be8: !Template
+ answer_choices: null
+ id: a61db237-78a5-46f2-a791-43728b5e4be8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ cce9fea7-b2e0-4fa2-8f14-c8caa7cc029d: !Template
+ answer_choices: null
+ id: cce9fea7-b2e0-4fa2-8f14-c8caa7cc029d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ d7037f77-f0b5-4c40-80ed-20dd637826f0: !Template
+ answer_choices: null
+ id: d7037f77-f0b5-4c40-80ed-20dd637826f0
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
diff --git a/promptsource/templates/scan/filler_num2/templates.yaml b/promptsource/templates/scan/filler_num2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0546af0c9a6e0d2bd7a6b5098989f6957ada1e30
--- /dev/null
+++ b/promptsource/templates/scan/filler_num2/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: filler_num2
+templates:
+ 04935a86-6dba-495e-a3fd-7542c8b3b22f: !Template
+ answer_choices: null
+ id: 04935a86-6dba-495e-a3fd-7542c8b3b22f
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 32089904-72d9-43ce-8c33-ebcf9e90a127: !Template
+ answer_choices: null
+ id: 32089904-72d9-43ce-8c33-ebcf9e90a127
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 5824dcb7-7b1f-4ed1-91e3-ede9d9c8f98f: !Template
+ answer_choices: null
+ id: 5824dcb7-7b1f-4ed1-91e3-ede9d9c8f98f
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ 68690e6d-761d-4cb1-aa7c-15aa652dea36: !Template
+ answer_choices: null
+ id: 68690e6d-761d-4cb1-aa7c-15aa652dea36
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ 75a99949-8380-4a4b-97b6-91eba8b8d53a: !Template
+ answer_choices: null
+ id: 75a99949-8380-4a4b-97b6-91eba8b8d53a
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ 880c1585-e840-447d-9c40-901ee4adfa33: !Template
+ answer_choices: null
+ id: 880c1585-e840-447d-9c40-901ee4adfa33
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 949850b3-734a-4bd1-9ee0-14246dad90f0: !Template
+ answer_choices: null
+ id: 949850b3-734a-4bd1-9ee0-14246dad90f0
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ b5d93ec7-aca3-4fb9-9183-df6d165d5ab6: !Template
+ answer_choices: null
+ id: b5d93ec7-aca3-4fb9-9183-df6d165d5ab6
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ c305cd03-a7d4-4142-8257-95c026e0dc43: !Template
+ answer_choices: null
+ id: c305cd03-a7d4-4142-8257-95c026e0dc43
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ d01645c4-91a2-482e-9d75-c62706cf2ec5: !Template
+ answer_choices: null
+ id: d01645c4-91a2-482e-9d75-c62706cf2ec5
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ f13effe3-74e3-4750-94d0-efe08aec117c: !Template
+ answer_choices: null
+ id: f13effe3-74e3-4750-94d0-efe08aec117c
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
diff --git a/promptsource/templates/scan/filler_num3/templates.yaml b/promptsource/templates/scan/filler_num3/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eb1c121700a3fac77d92b7ab63d7ab12bf5c3d2d
--- /dev/null
+++ b/promptsource/templates/scan/filler_num3/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: filler_num3
+templates:
+ 20da32af-4390-4491-9e74-93ae83aa342f: !Template
+ answer_choices: null
+ id: 20da32af-4390-4491-9e74-93ae83aa342f
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ 458d903a-ec23-4188-83df-b4bd73283c7c: !Template
+ answer_choices: null
+ id: 458d903a-ec23-4188-83df-b4bd73283c7c
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 54f4992e-84c9-4248-bf7e-b6c238d11ebb: !Template
+ answer_choices: null
+ id: 54f4992e-84c9-4248-bf7e-b6c238d11ebb
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 85279de9-cb89-4541-ba4e-40908a7ab3e8: !Template
+ answer_choices: null
+ id: 85279de9-cb89-4541-ba4e-40908a7ab3e8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 874cac0c-aad7-4633-966b-fe3a0fe2f8c1: !Template
+ answer_choices: null
+ id: 874cac0c-aad7-4633-966b-fe3a0fe2f8c1
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ aebade75-d5a6-4168-ba4e-77f9ae1ff305: !Template
+ answer_choices: null
+ id: aebade75-d5a6-4168-ba4e-77f9ae1ff305
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ c67daf32-83b4-46c5-90e9-3f9fbf3cd93d: !Template
+ answer_choices: null
+ id: c67daf32-83b4-46c5-90e9-3f9fbf3cd93d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ d8fbe77b-1f56-4cb6-bf6b-2d65e8a92a13: !Template
+ answer_choices: null
+ id: d8fbe77b-1f56-4cb6-bf6b-2d65e8a92a13
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ e06279ed-f955-4bc1-9851-87f1f3ac450a: !Template
+ answer_choices: null
+ id: e06279ed-f955-4bc1-9851-87f1f3ac450a
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ e8d73616-f0d3-42d8-a75c-ca3d869bc0a8: !Template
+ answer_choices: null
+ id: e8d73616-f0d3-42d8-a75c-ca3d869bc0a8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ ef5b3b92-d020-44c5-9b51-44dcc61c7c1f: !Template
+ answer_choices: null
+ id: ef5b3b92-d020-44c5-9b51-44dcc61c7c1f
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
diff --git a/promptsource/templates/scan/length/templates.yaml b/promptsource/templates/scan/length/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..743dd7a5af1a6e4d8562a861b5b6a7d367ce0c74
--- /dev/null
+++ b/promptsource/templates/scan/length/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: length
+templates:
+ 092a6124-819a-42b0-9b61-422ba52fc18b: !Template
+ answer_choices: null
+ id: 092a6124-819a-42b0-9b61-422ba52fc18b
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ 2b464d07-8fed-459e-9bb2-ff2e28960a81: !Template
+ answer_choices: null
+ id: 2b464d07-8fed-459e-9bb2-ff2e28960a81
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ 3293a0d0-1799-4e3d-9599-6dea7971e74b: !Template
+ answer_choices: null
+ id: 3293a0d0-1799-4e3d-9599-6dea7971e74b
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 87b849e0-c970-407a-bc55-51ebe43d6560: !Template
+ answer_choices: null
+ id: 87b849e0-c970-407a-bc55-51ebe43d6560
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 881f2df2-f8f4-4674-9942-bc046799e882: !Template
+ answer_choices: null
+ id: 881f2df2-f8f4-4674-9942-bc046799e882
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ 8e529668-61b1-46f3-b7fc-68b22622baef: !Template
+ answer_choices: null
+ id: 8e529668-61b1-46f3-b7fc-68b22622baef
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ 91ab2464-29eb-4b5a-9290-14f05ade456f: !Template
+ answer_choices: null
+ id: 91ab2464-29eb-4b5a-9290-14f05ade456f
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ b115b650-5eb4-4b53-ada2-572954296e20: !Template
+ answer_choices: null
+ id: b115b650-5eb4-4b53-ada2-572954296e20
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ b3d0f767-98b0-44b7-835c-78c1af7aea8e: !Template
+ answer_choices: null
+ id: b3d0f767-98b0-44b7-835c-78c1af7aea8e
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ c306f174-2977-4def-8263-d9109f6769ad: !Template
+ answer_choices: null
+ id: c306f174-2977-4def-8263-d9109f6769ad
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ d5c8d1ba-2601-4076-97fb-9c9d1ce3079d: !Template
+ answer_choices: null
+ id: d5c8d1ba-2601-4076-97fb-9c9d1ce3079d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
diff --git a/promptsource/templates/scan/simple/templates.yaml b/promptsource/templates/scan/simple/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8daf67d4e8d165157df8cab7a2f1426c5eda50f4
--- /dev/null
+++ b/promptsource/templates/scan/simple/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: simple
+templates:
+ 091ca6e2-b64a-436f-9bd9-9b870a703866: !Template
+ answer_choices: null
+ id: 091ca6e2-b64a-436f-9bd9-9b870a703866
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ 0bf757f7-bf69-4159-b056-f18a4d10e61d: !Template
+ answer_choices: null
+ id: 0bf757f7-bf69-4159-b056-f18a4d10e61d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 22929025-9103-495b-8891-11b7dbba119b: !Template
+ answer_choices: null
+ id: 22929025-9103-495b-8891-11b7dbba119b
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ 2f159636-37fd-4840-9423-0f20c90ac725: !Template
+ answer_choices: null
+ id: 2f159636-37fd-4840-9423-0f20c90ac725
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ 4b6636e1-a4dc-4b89-8287-a9e70cb9ec24: !Template
+ answer_choices: null
+ id: 4b6636e1-a4dc-4b89-8287-a9e70cb9ec24
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 94562077-a7b5-488c-a863-66e29bfe6dea: !Template
+ answer_choices: null
+ id: 94562077-a7b5-488c-a863-66e29bfe6dea
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ a78f15b7-52d5-4f67-b049-2b4ffb72ed9c: !Template
+ answer_choices: null
+ id: a78f15b7-52d5-4f67-b049-2b4ffb72ed9c
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ b03d9f9d-beb1-48ca-86f2-8c237aaa5769: !Template
+ answer_choices: null
+ id: b03d9f9d-beb1-48ca-86f2-8c237aaa5769
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ d6caf28e-6fec-456d-9082-163b132da6e8: !Template
+ answer_choices: null
+ id: d6caf28e-6fec-456d-9082-163b132da6e8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nPlease translate the following\
+ \ natural language commands into the corresponding sequence of\
+ \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ ddb73f1b-0421-4541-879d-87d6301632d8: !Template
+ answer_choices: null
+ id: ddb73f1b-0421-4541-879d-87d6301632d8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ df4303f3-cd40-4925-9e78-7afdfeea7d20: !Template
+ answer_choices: null
+ id: df4303f3-cd40-4925-9e78-7afdfeea7d20
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
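
As elsewhere in promptsource, the `|||` marker inside each jinja field separates the rendered prompt from its target. A hedged sketch of how one of the scan/simple templates added above could be rendered through the same TemplateCollection API used earlier in this PR; we assume here that a Template exposes an apply(example) method that renders the Jinja and splits on `|||` (an assumption, not shown in this diff):

    from promptsource.templates import TemplateCollection

    # Fetch the prompts for the scan/simple subset added in this file.
    simple_templates = TemplateCollection().get_dataset("scan", "simple")
    plain = next(t for t in simple_templates.templates.values() if t.name == "plain")

    example = {"commands": "jump twice", "actions": "I_JUMP I_JUMP"}
    prompt, target = plain.apply(example)  # assumed API: render, then split on "|||"
    # prompt ends with "Sequence of actions:"; target is "jump, jump"
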
diff --git a/promptsource/templates/scan/template_around_right/templates.yaml b/promptsource/templates/scan/template_around_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a107a738c53e5388e2f9597a5b816074e7f42727
--- /dev/null
+++ b/promptsource/templates/scan/template_around_right/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: template_around_right
+templates:
+ 104a5cae-b055-4459-8b2a-4e0f0bcaa2ca: !Template
+ answer_choices: null
+ id: 104a5cae-b055-4459-8b2a-4e0f0bcaa2ca
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 5d73bc47-a009-43be-82ce-47817ba06766: !Template
+ answer_choices: null
+ id: 5d73bc47-a009-43be-82ce-47817ba06766
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ 651eb35a-20be-438b-8424-19ff70c26659: !Template
+ answer_choices: null
+ id: 651eb35a-20be-438b-8424-19ff70c26659
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ 7ce69e4b-a0a4-46d1-8d87-624968861fc6: !Template
+ answer_choices: null
+ id: 7ce69e4b-a0a4-46d1-8d87-624968861fc6
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 8961d823-2624-4bdf-b185-2b753d1770ab: !Template
+ answer_choices: null
+ id: 8961d823-2624-4bdf-b185-2b753d1770ab
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ a31dc0dd-e6ad-4536-82b4-a2bd3b5d087f: !Template
+ answer_choices: null
+ id: a31dc0dd-e6ad-4536-82b4-a2bd3b5d087f
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ a89b3913-cdbb-476b-a982-98684de18b0a: !Template
+ answer_choices: null
+ id: a89b3913-cdbb-476b-a982-98684de18b0a
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ ad29480e-6cb2-407f-abdc-40c792fc86a2: !Template
+ answer_choices: null
+ id: ad29480e-6cb2-407f-abdc-40c792fc86a2
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ b1687f7c-8530-47d3-9e85-0bfe96c638d8: !Template
+ answer_choices: null
+ id: b1687f7c-8530-47d3-9e85-0bfe96c638d8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+    \ actions = lst_of_actions | join(\", \") %}\n\nPlease correctly translate the\
+    \ following natural language commands into the corresponding sequence of\
+    \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ da9efaf3-e1a7-4eeb-8452-0ff530ff8de6: !Template
+ answer_choices: null
+ id: da9efaf3-e1a7-4eeb-8452-0ff530ff8de6
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ e49db2dc-1601-4c95-b5ad-220cdbd3d2cc: !Template
+ answer_choices: null
+ id: e49db2dc-1601-4c95-b5ad-220cdbd3d2cc
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
diff --git a/promptsource/templates/scan/template_jump_around_right/templates.yaml b/promptsource/templates/scan/template_jump_around_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea20701ae7658201355e421b22f3a376f7efa002
--- /dev/null
+++ b/promptsource/templates/scan/template_jump_around_right/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: template_jump_around_right
+templates:
+ 075c4507-06f2-41e0-a54e-151e54fa9c38: !Template
+ answer_choices: null
+ id: 075c4507-06f2-41e0-a54e-151e54fa9c38
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ 16364e84-6f36-4adf-8a4b-68301736b375: !Template
+ answer_choices: null
+ id: 16364e84-6f36-4adf-8a4b-68301736b375
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ 44742fb9-9bc5-42cc-8263-2a2a5cb63973: !Template
+ answer_choices: null
+ id: 44742fb9-9bc5-42cc-8263-2a2a5cb63973
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+    \ actions = lst_of_actions | join(\", \") %}\n\nPlease correctly translate the\
+    \ following natural language commands into the corresponding sequence of\
+    \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ 7ea8ceab-b004-474e-85f0-9e667043c058: !Template
+ answer_choices: null
+ id: 7ea8ceab-b004-474e-85f0-9e667043c058
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 8b007906-672d-45fc-b3bb-a076f8566ff2: !Template
+ answer_choices: null
+ id: 8b007906-672d-45fc-b3bb-a076f8566ff2
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 8c3e1d93-5053-4601-bc14-21fee929297e: !Template
+ answer_choices: null
+ id: 8c3e1d93-5053-4601-bc14-21fee929297e
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ 95f0800e-f0d4-46f4-9add-518136d7b06c: !Template
+ answer_choices: null
+ id: 95f0800e-f0d4-46f4-9add-518136d7b06c
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ b3dd8e7b-f664-47d7-b89d-320ac1402fce: !Template
+ answer_choices: null
+ id: b3dd8e7b-f664-47d7-b89d-320ac1402fce
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ dcd4ca7f-277a-4bef-8927-9816613ae2cb: !Template
+ answer_choices: null
+ id: dcd4ca7f-277a-4bef-8927-9816613ae2cb
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ ef7bc2db-88d4-42cc-a1ac-d882acc908c5: !Template
+ answer_choices: null
+ id: ef7bc2db-88d4-42cc-a1ac-d882acc908c5
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ ef80b579-e10a-4c7d-819e-60926a765276: !Template
+ answer_choices: null
+ id: ef80b579-e10a-4c7d-819e-60926a765276
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
diff --git a/promptsource/templates/scan/template_opposite_right/templates.yaml b/promptsource/templates/scan/template_opposite_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..59ab5aecbebbe16b8e88a67ff59b248c0ef2eafc
--- /dev/null
+++ b/promptsource/templates/scan/template_opposite_right/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: template_opposite_right
+templates:
+ 0529c4b1-89a4-40ac-9217-942c6558eadb: !Template
+ answer_choices: null
+ id: 0529c4b1-89a4-40ac-9217-942c6558eadb
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ 0b9e5988-2bd9-4600-9c10-048e247faa63: !Template
+ answer_choices: null
+ id: 0b9e5988-2bd9-4600-9c10-048e247faa63
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 0c2d0c9b-3960-4851-be81-138530350f18: !Template
+ answer_choices: null
+ id: 0c2d0c9b-3960-4851-be81-138530350f18
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+    \ actions = lst_of_actions | join(\", \") %}\n\nPlease correctly translate the\
+    \ following natural language commands into the corresponding sequence of\
+    \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ 628069bc-5173-4252-bab4-075b4c23eca9: !Template
+ answer_choices: null
+ id: 628069bc-5173-4252-bab4-075b4c23eca9
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 6ac6bdec-862e-4bf7-9210-2504fb815c51: !Template
+ answer_choices: null
+ id: 6ac6bdec-862e-4bf7-9210-2504fb815c51
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 6e7ff3cf-fafb-46cc-ac0e-62222131c7b9: !Template
+ answer_choices: null
+ id: 6e7ff3cf-fafb-46cc-ac0e-62222131c7b9
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ c5bc9d44-a417-472e-bffa-65a716405be4: !Template
+ answer_choices: null
+ id: c5bc9d44-a417-472e-bffa-65a716405be4
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ d6b39e11-0384-4156-b9db-9e518f3324c5: !Template
+ answer_choices: null
+ id: d6b39e11-0384-4156-b9db-9e518f3324c5
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
+ e2d664cb-6ce4-4ece-8ed0-3da84ebd13ed: !Template
+ answer_choices: null
+ id: e2d664cb-6ce4-4ece-8ed0-3da84ebd13ed
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ eb3fc70c-292a-49fc-a423-858535f2eec8: !Template
+ answer_choices: null
+ id: eb3fc70c-292a-49fc-a423-858535f2eec8
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ f7b57399-2ac8-4926-872e-08fd79c0374a: !Template
+ answer_choices: null
+ id: f7b57399-2ac8-4926-872e-08fd79c0374a
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
diff --git a/promptsource/templates/scan/template_right/templates.yaml b/promptsource/templates/scan/template_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e51fa178090728ebdc1a7fb2794813cd9d990965
--- /dev/null
+++ b/promptsource/templates/scan/template_right/templates.yaml
@@ -0,0 +1,203 @@
+dataset: scan
+subset: template_right
+templates:
+ 0d711ef2-408a-4712-ad54-0edb8ab3d20d: !Template
+ answer_choices: null
+ id: 0d711ef2-408a-4712-ad54-0edb8ab3d20d
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, produce the corresponding correct sequence of actions. The\
+ \ actions should be comma-separated.\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_bottom
+ reference: ''
+ 1cde06dc-084a-4164-89be-28e7cc02e08b: !Template
+ answer_choices: null
+ id: 1cde06dc-084a-4164-89be-28e7cc02e08b
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, what\
+ \ is the corresponding correct sequence of actions (comma-separated)?\n\n{{\
+ \ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_beginning
+ reference: ''
+ 52c506c2-7758-4895-8824-55df54924481: !Template
+ answer_choices: null
+ id: 52c506c2-7758-4895-8824-55df54924481
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the following commands:\
+ \ {{ commands }}\n\nWhat is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_mix
+ reference: ''
+ 56fa2dc3-e8fb-44c9-9203-92658e266efd: !Template
+ answer_choices: null
+ id: 56fa2dc3-e8fb-44c9-9203-92658e266efd
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands below, please\
+ \ produce the corresponding correct sequence of actions. The actions should\
+ \ be comma-separated. A few examples of actions include: \"turn right\", \"\
+ walk\", \"run\", etc.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_beginning
+ reference: ''
+ 5c775d19-a5e9-4176-9a6a-09ce0db46eae: !Template
+ answer_choices: null
+ id: 5c775d19-a5e9-4176-9a6a-09ce0db46eae
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven the commands: {{ commands\
+ \ }}\n\nProduce the corresponding correct sequence of actions (comma-separated):\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmative_mix
+ reference: ''
+ 5d4e2bc8-1beb-46d7-8080-55583e28c31e: !Template
+ answer_choices: null
+ id: 5d4e2bc8-1beb-46d7-8080-55583e28c31e
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven some actions \"{{actions}}\"\
+ , translate them into natural language.\n|||\n{{commands}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: translate_opposite
+ reference: ''
+ 6cc540bf-bf6a-4fd2-98ee-61dd5daa1082: !Template
+ answer_choices: null
+ id: 6cc540bf-bf6a-4fd2-98ee-61dd5daa1082
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, what are the corresponding instructions in natural language?\n\n{{\
+ \ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: interrogative_opposite
+ reference: ''
+ 8ae12fff-c165-4c43-9de5-653e2527f9e2: !Template
+ answer_choices: null
+ id: 8ae12fff-c165-4c43-9de5-653e2527f9e2
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+    \ actions = lst_of_actions | join(\", \") %}\n\nPlease correctly translate the\
+    \ following natural language commands into the corresponding sequence of\
+    \ actions.\n\n{{ commands }}\n|||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: translate
+ reference: ''
+ d5f23e98-5e69-4147-b42e-35c1bddae219: !Template
+ answer_choices: null
+ id: d5f23e98-5e69-4147-b42e-35c1bddae219
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nNatural language commands: {{\
+ \ commands }}\n\nSequence of actions: ||| {{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plain
+ reference: ''
+ e1b69ba5-1b79-45b8-af0c-ba0926ed1f75: !Template
+ answer_choices: null
+ id: e1b69ba5-1b79-45b8-af0c-ba0926ed1f75
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\nGiven a sequence of actions\
+ \ below, please produce the corresponding instructions in natural language.\n\
+ \n{{ actions }}\n|||\n{{ commands }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: affirmative_opposite
+ reference: ''
+ ebeb845e-9492-4780-94e9-b89c79285396: !Template
+ answer_choices: null
+ id: ebeb845e-9492-4780-94e9-b89c79285396
+ jinja: "{% set scan_lst_of_actions = actions.split(' ') %}\n{% set lst_of_actions\
+ \ = [] %}\n{% for item in scan_lst_of_actions %}\n {{ lst_of_actions.append(item.lower()[2:]\
+ \ | replace(\"_\", \" \")) | default(\"\", True) }}\n{% endfor %}\n\n{% set\
+ \ actions = lst_of_actions | join(\", \") %}\n\n{{ commands }}\n\nGiven the\
+ \ commands above, what is the corresponding correct sequence of actions (comma-separated)?\n\
+ |||\n{{ actions }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: interrogative_bottom
+ reference: ''
diff --git a/promptsource/templates/scicite/templates.yaml b/promptsource/templates/scicite/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a6491b6a08f522750c92e8c24ee70b9733936b90
--- /dev/null
+++ b/promptsource/templates/scicite/templates.yaml
@@ -0,0 +1,130 @@
+dataset: scicite
+templates:
+ 113a4e9e-7f59-4963-89a3-c1c647acaf2b: !Template
+ answer_choices: A ||| C ||| B
+ id: 113a4e9e-7f59-4963-89a3-c1c647acaf2b
+ jinja: 'Consider the following citation from a scientific paper:
+
+ {{ string }}
+
+ {% if sectionName %} It came from a section titled: {{sectionName}}.
+
+ {% endif %}
+
+ Is this citation describing
+
+
+ A: a {{"method"}}
+
+
+ B: a {{"result"}}
+
+
+ C: {{"background"}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Classify intent w/section (select choice)
+ reference: ''
+ 359418b3-2425-4968-b428-ecb5d60b3b4e: !Template
+ answer_choices: method ||| background ||| result
+ id: 359418b3-2425-4968-b428-ecb5d60b3b4e
+ jinja: 'Is the following citation from a scientific paper describing a {{answer_choices[0]}},
+ a {{answer_choices[2]}}, or {{answer_choices[1]}}?
+
+ Citation: {{ string }}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Classify intent (choices first)
+ reference: ''
+ 36a77a57-31e5-48d3-a9b8-e8b8db5fe334: !Template
+ answer_choices: A ||| C ||| B
+ id: 36a77a57-31e5-48d3-a9b8-e8b8db5fe334
+ jinja: 'A scientific paper contained the following citation:
+
+ "{{ string }}"
+
+ Is this citation describing
+
+
+ A: a {{"method"}}
+
+
+ B: a {{"result"}}
+
+
+ C: {{"background"}}
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Classify intent (select choice)
+ reference: ''
+ b917ab5b-3e33-48ee-a319-ccca6af58cd5: !Template
+ answer_choices: method ||| background ||| result
+ id: b917ab5b-3e33-48ee-a319-ccca6af58cd5
+ jinja: 'The following is a citation taken from a scientific paper.
+
+ "{{ string }}"
+
+ Is this citation describing a {{answer_choices[0]}}, a {{answer_choices[2]}},
+ or {{answer_choices[1]}}?
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Classify intent
+ reference: ''
+ f63606d8-7168-4201-a2bc-e48a442540ac: !Template
+ answer_choices: method ||| background ||| result
+ id: f63606d8-7168-4201-a2bc-e48a442540ac
+ jinja: 'Citations can describe a {{answer_choices[0]}}, a {{answer_choices[2]}},
+ or {{answer_choices[1]}}.
+
+ What is the citation below describing?
+
+ "{{ string }}"
+
+ |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: can_describe
+ reference: ''
diff --git a/promptsource/templates/scientific_papers/arxiv/templates.yaml b/promptsource/templates/scientific_papers/arxiv/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57b56360cc42a6262e72c97143ed4c36da66f041
--- /dev/null
+++ b/promptsource/templates/scientific_papers/arxiv/templates.yaml
@@ -0,0 +1,123 @@
+dataset: scientific_papers
+subset: arxiv
+templates:
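+# Note: these templates assume the `article` and `abstract` fields store one
+# sentence per line, so `split('\n')[:3]` selects the first three sentences.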
+ 3dfaa906-d430-491f-945b-8d419e335e16: !Template
+ answer_choices: null
+ id: 3dfaa906-d430-491f-945b-8d419e335e16
+ jinja: "Suppose that you have an abstract for a scientific paper: {{abstract}}.\n\
+ And you have already written the first three sentences of the full article:\
+ \ {{article.strip().split('\\n')[:3]|join('\\n')}}. \nPlease generate the next\
+    \ two sentences of the article.\n|||\n{{article.strip().split('\\n')[3:5]|join(\"\
+ \\n\")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: write_sentences_4_and_5_from_sentences_1_2_3_and_abstract
+  reference: Given the abstract and the first three sentences, generate the next two sentences.
+ 6f268cad-7bdd-4ca2-a647-18ac04d0d422: !Template
+ answer_choices: null
+ id: 6f268cad-7bdd-4ca2-a647-18ac04d0d422
+ jinja: 'Write the first line of an abstract of a paper.
+
+ This paper starts with: {{ article.strip().split(''\n'')[:3]|join(''\n'') }}
+
+ |||
+
+ {{ abstract.strip().split(''\n'')[0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_line_abstract_from_first_three_lines_article
+  reference: Given the first three lines of an article, write the first line
+    of the abstract.
+ a13472ad-df38-469a-85a8-a4e1ed58bc87: !Template
+ answer_choices: null
+ id: a13472ad-df38-469a-85a8-a4e1ed58bc87
+ jinja: 'What would be the first line of a scientific article for the following
+ abstract: {{ abstract }}
+
+ |||
+
+ {{ article.strip().split(''\n'')[0] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_line_from_abstract
+ reference: Given the abstract, generate the first line of the article.
+ ab3e65ab-0935-497c-a6d0-61ad31e5a1a7: !Template
+ answer_choices: null
+ id: ab3e65ab-0935-497c-a6d0-61ad31e5a1a7
+ jinja: 'Write the first 100 words for a scientific article with the following
+ abstract: {{ abstract }}
+
+ |||
+
+ {{ article.strip().split('' '')[:100] |join('' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_100_words_for_article
+ reference: Given the abstract, generate the first 100 words of the article.
+ ed38e74f-2a5f-4b27-a1f9-0331c6ee8b29: !Template
+ answer_choices: null
+ id: ed38e74f-2a5f-4b27-a1f9-0331c6ee8b29
+ jinja: 'Write the section names for a scientific article with the abstract: {{
+ abstract }}
+
+ |||
+
+ {{ section_names.strip().split(''\n'')|join('', '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: get_section_names_from_abstract
+ reference: Given the abstract, get the section names.
+ fb501d80-9e93-4a7f-b66c-69b98ac0347f: !Template
+ answer_choices: null
+ id: fb501d80-9e93-4a7f-b66c-69b98ac0347f
+ jinja: 'Write the first three sentences of a scientific article for the following
+ abstract:
+
+ {{abstract}}
+
+ |||
+
+ {{article.strip().split(''\n'')[:3]|join("\n")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_three_sentences_of_an_article_from_abstract
+  reference: Given the abstract, generate the first three sentences of the article.
diff --git a/promptsource/templates/scientific_papers/pubmed/templates.yaml b/promptsource/templates/scientific_papers/pubmed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..04b40ada0c50577eea42c07a7ab6e0b724ad9b4c
--- /dev/null
+++ b/promptsource/templates/scientific_papers/pubmed/templates.yaml
@@ -0,0 +1,119 @@
+dataset: scientific_papers
+subset: pubmed
+templates:
+ 7b54d0d8-ea64-4828-bb1c-a12fd3162c3f: !Template
+ answer_choices: null
+ id: 7b54d0d8-ea64-4828-bb1c-a12fd3162c3f
+ jinja: 'Generate the section names for a scientific article with the abstract:
+ {{ abstract }}
+
+ |||
+
+ {{ section_names.strip().split(''\n'')|join('', '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: get_section_names_from_abstract
+ reference: Given the abstract, get the section names.
+ 7ef28be4-382c-46ee-9fd5-1c4d83aab433: !Template
+ answer_choices: null
+ id: 7ef28be4-382c-46ee-9fd5-1c4d83aab433
+ jinja: 'Write the first three sentences of a scientific article for the following
+ abstract:
+
+ {{abstract}}
+
+ |||
+
+ {{article.strip().split(''\n'')[:3]|join("\n")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_three_sentences_of_an_article_from_abstract
+  reference: Given the abstract, generate the first three sentences of the article.
+ a34723df-0c10-4553-8323-99c4cfb53544: !Template
+ answer_choices: null
+ id: a34723df-0c10-4553-8323-99c4cfb53544
+ jinja: 'What would be the first line of a scientific article for the following
+ abstract: {{ abstract }}
+
+ |||
+
+ {{ article.strip().split(''\n'')[0] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_line_from_abstract
+ reference: Given the abstract, generate the first line of the article.
+ a8992de1-443f-4f6a-983a-99e10a34b328: !Template
+ answer_choices: null
+ id: a8992de1-443f-4f6a-983a-99e10a34b328
+ jinja: 'Write the first 100 words for a scientific article with the following
+    abstract: {{ abstract }}
+
+ |||
+
+ {{ article.strip().split('' '')[:100] |join('' '')}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_100_words_for_article
+ reference: Given the abstract, generate the first 100 words of the article.
+ e5a41fa1-3e81-4c74-ad73-6aab5e4855c7: !Template
+ answer_choices: null
+ id: e5a41fa1-3e81-4c74-ad73-6aab5e4855c7
+ jinja: "Suppose that you have an abstract for a scientific paper: {{abstract}}\
+ \ \nand you have already written the first three sentences of the full article:\
+    \ {{article.strip().split('\\n')[:3]|join('\\n')}}. \nPlease generate the next two sentences\
+ \ of the article.\n|||\n{{article.strip().split('\\n')[3:5]|join(\"\\n\")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: write_sentences_4_and_5_from_sentences_1_2_3_and_abstract
+ reference: ''
+ ff694788-2e84-49e7-8df3-2665b8c687f1: !Template
+ answer_choices: null
+ id: ff694788-2e84-49e7-8df3-2665b8c687f1
+ jinja: 'Write the first line of an abstract of a paper which starts with: {{ article.strip().split(''\n'')[:3]|join(''\n'')
+ }}
+
+ |||
+
+ {{ abstract.strip().split(''\n'')[0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_first_line_abstract_from_first_three_lines_article
+  reference: Given the first three lines of an article, write the first line
+    of the abstract.
diff --git a/promptsource/templates/sciq/templates.yaml b/promptsource/templates/sciq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7b382501752db1737274430f71ce22798f324eb2
--- /dev/null
+++ b/promptsource/templates/sciq/templates.yaml
@@ -0,0 +1,104 @@
+dataset: sciq
+templates:
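+# Note: the multiple-choice templates below randomize option order by piping a
+# hard-coded list of the 24 orderings of [0, 1, 2, 3] through Jinja's `choice`
+# filter and indexing `answer_choices` with the draw; the correct answer is
+# always `answer_choices[3]`, i.e. `correct_answer`.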
+ 0af52ad2-2b12-4700-b664-cd26d2da6dc1: !Template
+ answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+ id: 0af52ad2-2b12-4700-b664-cd26d2da6dc1
+ jinja: 'Q: {{question}}
+
+
+
+ A:|||{{answer_choices[3]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Direct Question (Closed Book)
+ reference: ''
+ 15b0a989-84e4-4f1c-8ac1-12dbfa2ff42a: !Template
+ answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+ id: 15b0a989-84e4-4f1c-8ac1-12dbfa2ff42a
+ jinja: "{% set order = [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],\
+ \ [0, 3, 1, 2], [0, 3, 2, 1],\n [1, 0, 2, 3], [1,\
+ \ 0, 3, 2], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [1, 3, 2, 0],\n \
+    \                 [2, 1, 0, 3], [2, 1, 3, 0], [2, 0, 1, 3], [2, 0, 3,\
+ \ 1], [2, 3, 1, 0], [2, 3, 0, 1],\n [3, 1, 2, 0],\
+ \ [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [3, 0, 1, 2], [3, 0, 2, 1]] | choice\
+ \ %}\nQ: {{question}}\n\n\n Choices:\n\n- {{ answer_choices[order[0]] }}\n\n\
+ - {{ answer_choices[order[1]] }}\n\n- {{ answer_choices[order[2]] }}\n\n- {{\
+ \ answer_choices[order[3]] }}\n\nA:|||{{answer_choices[3]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Multiple Choice (Closed Book)
+ reference: Same multiple choice format but without the support text
+ 368e29fb-506d-4a4e-ac33-0af8d6e1729b: !Template
+ answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+ id: 368e29fb-506d-4a4e-ac33-0af8d6e1729b
+ jinja: "{% set order = [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],\
+ \ [0, 3, 1, 2], [0, 3, 2, 1],\n [1, 0, 2, 3], [1,\
+ \ 0, 3, 2], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [1, 3, 2, 0],\n \
+    \                 [2, 1, 0, 3], [2, 1, 3, 0], [2, 0, 1, 3], [2, 0, 3,\
+ \ 1], [2, 3, 1, 0], [2, 3, 0, 1],\n [3, 1, 2, 0],\
+ \ [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [3, 0, 1, 2], [3, 0, 2, 1]] | choice\
+ \ %}\nQ: {{question}}\n\n\nRead this paragraph and choose the correct option\
+ \ from the provided answers:\n\n{{support}}\n\n Choices:\n\n- {{ answer_choices[order[0]]\
+ \ }}\n\n- {{ answer_choices[order[1]] }}\n\n- {{ answer_choices[order[2]] }}\n\
+ \n- {{ answer_choices[order[3]] }}\n\n\nA:|||{{answer_choices[3]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Multiple Choice Question First
+ reference: Multiple choice question format
+ 63c22e8a-7029-4ce3-bd26-6ca6a1541563: !Template
+ answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+ id: 63c22e8a-7029-4ce3-bd26-6ca6a1541563
+ jinja: "{% set order = [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],\
+ \ [0, 3, 1, 2], [0, 3, 2, 1],\n [1, 0, 2, 3], [1,\
+ \ 0, 3, 2], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [1, 3, 2, 0],\n \
+    \                 [2, 1, 0, 3], [2, 1, 3, 0], [2, 0, 1, 3], [2, 0, 3,\
+ \ 1], [2, 3, 1, 0], [2, 3, 0, 1],\n [3, 1, 2, 0],\
+ \ [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [3, 0, 1, 2], [3, 0, 2, 1]] | choice\
+ \ %}\nAnswer the following question given this paragraph: \n\n{{support}}\n\n\
+ \nQ: {{question}}\n\n Choices:\n\n- {{ answer_choices[order[0]] }}\n\n- {{ answer_choices[order[1]]\
+ \ }}\n\n- {{ answer_choices[order[2]] }}\n\n- {{ answer_choices[order[3]] }}\n\
+ \nA:|||{{answer_choices[3]}}\n\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Multiple Choice
+ reference: Standard multiple choice format
+ d417fcfb-9f00-4186-95d8-e63609495164: !Template
+ answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+ id: d417fcfb-9f00-4186-95d8-e63609495164
+ jinja: "Answer the following question given this paragraph: \n\n{{support}}\n\n\
+ \nQ: {{question}}\n\n\nA:|||{{answer_choices[3]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Direct Question
+ reference: ''
diff --git a/promptsource/templates/scitail/snli_format/templates.yaml b/promptsource/templates/scitail/snli_format/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..311ca6cedf1a633fb99086ba694d473c0778a735
--- /dev/null
+++ b/promptsource/templates/scitail/snli_format/templates.yaml
@@ -0,0 +1,28 @@
+dataset: scitail
+subset: snli_format
+templates:
+ 90827988-2a8d-4ecb-b8c1-54ad6cd0ebfa: !Template
+ answer_choices: yes ||| no
+ id: 90827988-2a8d-4ecb-b8c1-54ad6cd0ebfa
+ jinja: 'Given that {{sentence1}} Does it follow that {{sentence2}}
+
+ {{ answer_choices | join('' or '') }}?
+
+ |||{% if gold_label == "entailment" %}
+
+ {{answer_choices[0]}}
+
+ {% else %}
+
+ {{answer_choices[1]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Another Yes/No Entailment Framing
+ reference: ''
diff --git a/promptsource/templates/scitail/tsv_format/templates.yaml b/promptsource/templates/scitail/tsv_format/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a5023b20442acec9c5146b0427a5d92e7c69088
--- /dev/null
+++ b/promptsource/templates/scitail/tsv_format/templates.yaml
@@ -0,0 +1,77 @@
+dataset: scitail
+subset: tsv_format
+templates:
+ 189ed384-c077-49ad-b606-ed08b66f8376: !Template
+ answer_choices: true ||| false
+ id: 189ed384-c077-49ad-b606-ed08b66f8376
+ jinja: "{{premise}} Therefore, we are licensed to say that {{hypothesis}} {{\
+ \ answer_choices | join(' or ') }}|||\n{% if label == \"entails\" %} \n{{answer_choices[0]}}\n\
+ {% else %}\n{{answer_choices[1]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "\u2026 Therefore, we're licensed to say that\u2026"
+ reference: ''
+ 1ff92b02-fefc-49e0-b676-9391fab8f193: !Template
+ answer_choices: neutral ||| entails
+ id: 1ff92b02-fefc-49e0-b676-9391fab8f193
+ jinja: Suppose {{premise}} Can we infer that {{hypothesis}}? ||| {{label}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "Suppose\u2026 Can we infer that\u2026"
+ reference: ''
+ 5aa53544-73a6-4486-b8c8-623345353fa7: !Template
+ answer_choices: yes ||| no
+ id: 5aa53544-73a6-4486-b8c8-623345353fa7
+ jinja: "{{premise}} Does the previous passage support the claim that {{hypothesis}}?\
+ \ |||{% if label == \"entails\" %} \n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "\u2026does the previous passage support the claim that"
+ reference: ''
+ 705fa099-0650-4de5-b72f-881aea0fa208: !Template
+ answer_choices: yes ||| no
+ id: 705fa099-0650-4de5-b72f-881aea0fa208
+ jinja: "Given that {{premise}} Does it follow that {{hypothesis}} {{ answer_choices\
+ \ | join(' or ') }} |||\n{% if label == \"entails\" %} \n{{answer_choices[0]}}\n\
+ {% else %}\n{{answer_choices[1]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "given\u2026 does it follow that\u2026 "
+ reference: Another yes/no entailment framing
+ 9aa89dee-6cef-43bc-bdf4-e38cdf0796a6: !Template
+ answer_choices: yes ||| no
+ id: 9aa89dee-6cef-43bc-bdf4-e38cdf0796a6
+ jinja: "Sentence 1: {{premise}}\n\nSentence 2: {{hypothesis}}\n\nQuestion: Does\
+ \ Sentence 1 entail Sentence 2? {{ answer_choices | join(' or ') }} |||\n{%\
+ \ if label == \"entails\" %} \n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does S1 entail S2?
+ reference: Adapted from Victor's prompts for XNLI.
diff --git a/promptsource/templates/scitldr/Abstract/templates.yaml b/promptsource/templates/scitldr/Abstract/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6647a0d70ee1cbbb13b918a1775d7dac468c4fef
--- /dev/null
+++ b/promptsource/templates/scitldr/Abstract/templates.yaml
@@ -0,0 +1,99 @@
+dataset: scitldr
+subset: Abstract
+templates:
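+# Note: `source` is a list of abstract sentences and `target` is a list of
+# reference summaries, so `source | join(" ")` rebuilds the abstract and
+# `target | choice` samples one reference summary per render.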
+ 01fb91ab-2c95-436e-9363-3dfcdb6c5ba6: !Template
+ answer_choices: null
+ id: 01fb91ab-2c95-436e-9363-3dfcdb6c5ba6
+ jinja: "Generate a summary for the text: \n{{source | join(\" \")}}\n|||\n{{target|choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: basic_task_description_like
+  reference: Assumes there is only one choice.
+ 08b9e913-a305-46e2-aa43-f1126d76cf55: !Template
+ answer_choices: null
+ id: 08b9e913-a305-46e2-aa43-f1126d76cf55
+ jinja: "Elaborate on the given summary: \n{{target |choice}}\n\nStart with following\
+ \ sentence: {{source[0]}}\n|||\n{{source | join(\" \")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: reverse_generation
+ reference: This template asks the model to hallucinate the abstract.
+ 16faf5c0-a0c5-488a-89dd-2989622b01dc: !Template
+ answer_choices: null
+ id: 16faf5c0-a0c5-488a-89dd-2989622b01dc
+ jinja: "Compress the abstract to one or two sentences. Make sure it captures the\
+ \ main point of the abstract. \nAbstract: {{source | join(\" \")}}\nSummary:\
+ \ \n|||\n{{target[0]}}\n\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: instructions_for_summary
+ reference: Providing instructions on what a summary should look like
+ 68502ad6-cb36-4137-9359-e6826731854a: !Template
+ answer_choices: null
+ id: 68502ad6-cb36-4137-9359-e6826731854a
+ jinja: "Abstract: {{source | join(\" \")}}\nPlease summarize the abstract in one\
+ \ sentence: \n|||\n{{target|choice}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: summarize_in_sentence
+ reference: Template asks the model to summarize in one sentence
+ ab46a8f2-1e57-4ac9-b4ae-422c70689450: !Template
+ answer_choices: null
+ id: ab46a8f2-1e57-4ac9-b4ae-422c70689450
+ jinja: '{{source| join(" ")}}
+
+ TL;DR: ||| {{target[0]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: gpt_2_style
+  reference: GPT-2 style template
+ bac2ebcf-a54d-49a0-ac37-e7ad3f4878cb: !Template
+ answer_choices: null
+ id: bac2ebcf-a54d-49a0-ac37-e7ad3f4878cb
+ jinja: "{{source | join(\" \")}}\nPlease summarize the above paragraph. \n|||\n\
+ {{target|choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: basic_with_choice_output
+  reference: basic task-like description with choice filter
diff --git a/promptsource/templates/selqa/answer_selection_analysis/templates.yaml b/promptsource/templates/selqa/answer_selection_analysis/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..233e177ee79b4b5b1189420d6119476e9ffcc0f0
--- /dev/null
+++ b/promptsource/templates/selqa/answer_selection_analysis/templates.yaml
@@ -0,0 +1,101 @@
+dataset: selqa
+subset: answer_selection_analysis
+templates:
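+# Note: `{{ some_list.append(...) | default("", True) }}` is a Jinja idiom for
+# calling a mutating method without printing its return value: `append` yields
+# None, which `default` replaces with an empty string.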
+ 39f5f57c-50b9-40b3-bb4f-3f0e4fec7776: !Template
+ answer_choices: No ||| Yes
+ id: 39f5f57c-50b9-40b3-bb4f-3f0e4fec7776
+ jinja: '{% set rand_index = range(0,10)|choice %} He asked me "{{ question }}"
+ Is he talking about the topic "{{ ["MUSIC", "TV","TRAVEL","ART","SPORT","COUNTRY","MOVIES","HISTORICAL
+ EVENTS","SCIENCE","FOOD"][rand_index]|lower}}"? ||| {% if topic == rand_index
+ %}{{answer_choices[1]}}{% else %}{{answer_choices[0]}}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: is-he-talking-about
+ reference: ''
+ 5354e98d-8aa2-49d0-a50b-fc72a503d7d4: !Template
+ answer_choices: No ||| Yes
+ id: 5354e98d-8aa2-49d0-a50b-fc72a503d7d4
+ jinja: '{% set possible_indexes = [] %}{% for c in candidates %}{% if c|trim %}{{
+ possible_indexes.append(loop.index0) | default("", True) }}{% endif %}{% endfor
+ %}{% set rand_index = possible_indexes | choice %} Would it make sense to reply
+ "{{ candidates[rand_index]|trim|trim(''.'') }}" to the question "{{ question
+ }}"? ||| {% if rand_index in answers %}{{answer_choices[1]}}{%else %}{{answer_choices[0]}}{%endif%}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: would-make-sense-qu-rand
+ reference: ''
+ 721463cf-bae4-4a22-bd19-7bdbb0777856: !Template
+ answer_choices: null
+ id: 721463cf-bae4-4a22-bd19-7bdbb0777856
+ jinja: '{% set rand_index = range(0,10)|choice %}{% set rand_index = rand_index
+ - 1 if rand_index == topic else rand_index %}{% set topics = ["MUSIC", "TV","TRAVEL","ART","SPORT","COUNTRY","MOVIES","HISTORICAL
+ EVENTS","SCIENCE","FOOD"]%} What is the topic of the question "{{ question }}"?
+ Is it {{ topics[rand_index]|lower}} or {{ topics[topic]|lower}}? ||| {{ topics[topic]|lower
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: about-topic-vs-random
+ reference: ''
+ 9de0a553-63e7-4b67-a6c5-1a15ac0d5483: !Template
+ answer_choices: No ||| Yes
+ id: 9de0a553-63e7-4b67-a6c5-1a15ac0d5483
+ jinja: '{% set possible_indexes = [] %}{% for c in candidates %}{% if c|trim %}{{
+ possible_indexes.append(loop.index0) | default("", True) }}{% endif %}{% endfor
+ %}{% set rand_index = possible_indexes | choice %}Someone asked me "{{ question
+ }}" I replied "{{ candidates[rand_index] }}" Does my answer make sense? |||
+ {% if rand_index in answers %}{{answer_choices[1]}}{%else %}{{answer_choices[0]}}{%endif%}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: make-sense-rand
+ reference: ''
+ c2be1297-cfce-48bd-9ef0-9f46fc898e84: !Template
+ answer_choices: 1 ||| 2
+ id: c2be1297-cfce-48bd-9ef0-9f46fc898e84
+ jinja: "{% set rand_val = range(0,candidates|length)|choice %}{% set rand_index\
+ \ = namespace(value=rand_val)%}\n{% for answer in answers|sort(reverse=True)%}\n\
+ \ {% if rand_index.value == answer %}\n {% set rand_index.value = rand_index.value\
+ \ - 1 %}\n{% endif %}\n{% endfor %}\n{% set response=\"2\" %}{% set real_fake_answers\
+ \ = [candidates[rand_index.value], candidates[answers[0]]] %}\n{% if range(0,2)|choice\
+ \ %}{% set response=\"1\" %}{% set real_fake_answers = [candidates[answers[0]],\
+ \ candidates[rand_index.value]] %}{% endif %}\nThe next question was \"{{ question\
+ \ }}\" Which is the correct answer? 1: \"{{ real_fake_answers|join('\" or 2:\
+ \ \"') }} ||| {{ response }}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: which-answer-1st-vs-random
+ reference: ''
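+# Note on `which-answer-1st-vs-random` above: jinja2 discards a plain
+# {% set %} made inside a {% for %} loop once the iteration ends, so the
+# template routes its counter through namespace(), whose attributes do
+# survive loop scope. A minimal sketch of the idiom (`idx` is an
+# illustrative rename of the template's rand_index):
+#   {% set idx = namespace(value=rand_val) %}
+#   {% for a in answers|sort(reverse=True) %}
+#     {% if idx.value == a %}{% set idx.value = idx.value - 1 %}{% endif %}
+#   {% endfor %}
+# Visiting the correct-answer indices in descending order keeps each
+# decrement from landing on an index that was already checked, so idx.value
+# ends on a candidate that is not a correct answer.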
diff --git a/promptsource/templates/sem_eval_2010_task_8/templates.yaml b/promptsource/templates/sem_eval_2010_task_8/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..62da8b7dc725a1d11192c1d09313feed0bdcd5cc
--- /dev/null
+++ b/promptsource/templates/sem_eval_2010_task_8/templates.yaml
@@ -0,0 +1,123 @@
+dataset: sem_eval_2010_task_8
+templates:
+ 202246b0-3f82-42b9-bc8d-d36997b5f2cb: !Template
+ answer_choices: Cause Effect e1,e2 ||| Cause Effect e2,e1 ||| Component Whole
+ e1,e2 ||| Component Whole e2,e1 ||| Content Container e1,e2 ||| Content Container
+ e2,e1 ||| Entity Destination e1,e2 ||| Entity Destination e2,e1 ||| Entity Origin
+ e1,e2 ||| Entity Origin e2,e1 ||| Instrument Agency e1,e2 ||| Instrument Agency
+ e2,e1 ||| Member Collection e1,e2 ||| Member Collection e2,e1 ||| Message Topic
+ e1,e2 ||| Message Topic e2,e1 ||| Product Producer e1,e2 ||| Product Producer
+ e2,e1 ||| Other
+ id: 202246b0-3f82-42b9-bc8d-d36997b5f2cb
+ jinja: "Given the sentence, {{sentence}}\n\nOut of the options {{answer_choices\
+ \ | join(\", \")}}, \n\nWhat is the semantic relations between the two nominals\
+ \ (nouns or noun phrases) e1 and e2 in the sentence: ||| {{ answer_choices[relation]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: semantic relations with options
+ reference: out of options; macro-averaged F1-score official metric
+ 5d7123a8-4ed4-42ce-bcfb-4af415962efc: !Template
+ answer_choices: Cause Effect e1,e2 ||| Cause Effect e2,e1 ||| Component Whole
+ e1,e2 ||| Component Whole e2,e1 ||| Content Container e1,e2 ||| Content Container
+ e2,e1 ||| Entity Destination e1,e2 ||| Entity Destination e2,e1 ||| Entity Origin
+ e1,e2 ||| Entity Origin e2,e1 ||| Instrument Agency e1,e2 ||| Instrument Agency
+ e2,e1 ||| Member Collection e1,e2 ||| Member Collection e2,e1 ||| Message Topic
+ e1,e2 ||| Message Topic e2,e1 ||| Product Producer e1,e2 ||| Product Producer
+ e2,e1 ||| Other
+ id: 5d7123a8-4ed4-42ce-bcfb-4af415962efc
+ jinja: 'How semantically related are the two nominals in the sentence, {{sentence}}
+
+
+ Please answer {{answer_choices[:-1]|join(", ")}} or {{answer_choices[-1]}}:
+ ||| {{ answer_choices[relation] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+    name: semantically related nominals with options
+ reference: 'please answer; official metric: macro-averaged F1-score'
+ 87ca690e-87a7-44d5-b001-a4181482f5c9: !Template
+ answer_choices: Cause Effect e1,e2 ||| Cause Effect e2,e1 ||| Component Whole
+ e1,e2 ||| Component Whole e2,e1 ||| Content Container e1,e2 ||| Content Container
+ e2,e1 ||| Entity Destination e1,e2 ||| Entity Destination e2,e1 ||| Entity Origin
+ e1,e2 ||| Entity Origin e2,e1 ||| Instrument Agency e1,e2 ||| Instrument Agency
+ e2,e1 ||| Member Collection e1,e2 ||| Member Collection e2,e1 ||| Message Topic
+ e1,e2 ||| Message Topic e2,e1 ||| Product Producer e1,e2 ||| Product Producer
+ e2,e1 ||| Other
+ id: 87ca690e-87a7-44d5-b001-a4181482f5c9
+ jinja: 'Given the two nominals (nouns or noun phrases) e1 and e2 in {{sentence}}
+
+
+ ===
+
+
+    What is the semantic relation between e1 and e2: ||| {{ answer_choices[relation]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: semantic relations without options
+    reference: 'relationship between two nominals; official metric: macro-averaged
+ F1-score'
+ c97e7bbf-b7f0-4cee-ada5-431ce7d606cc: !Template
+ answer_choices: Cause Effect e1,e2 ||| Cause Effect e2,e1 ||| Component Whole
+ e1,e2 ||| Component Whole e2,e1 ||| Content Container e1,e2 ||| Content Container
+ e2,e1 ||| Entity Destination e1,e2 ||| Entity Destination e2,e1 ||| Entity Origin
+ e1,e2 ||| Entity Origin e2,e1 ||| Instrument Agency e1,e2 ||| Instrument Agency
+ e2,e1 ||| Member Collection e1,e2 ||| Member Collection e2,e1 ||| Message Topic
+ e1,e2 ||| Message Topic e2,e1 ||| Product Producer e1,e2 ||| Product Producer
+ e2,e1 ||| Other
+ id: c97e7bbf-b7f0-4cee-ada5-431ce7d606cc
+ jinja: 'Given the sentence, {{sentence}}
+
+
+ ===
+
+
+    What is the semantic relation between the two nominals (nouns or noun phrases)
+ e1 and e2 in the sentence: ||| {{ answer_choices[relation] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+    name: semantic relations nominals without options
+ reference: mention e1,e2 after; macro-averaged F1-score official metric
+ d7e88599-da89-4cfd-94e2-65e68c7ef141: !Template
+ answer_choices: Cause Effect e1,e2 ||| Cause Effect e2,e1 ||| Component Whole
+ e1,e2 ||| Component Whole e2,e1 ||| Content Container e1,e2 ||| Content Container
+ e2,e1 ||| Entity Destination e1,e2 ||| Entity Destination e2,e1 ||| Entity Origin
+ e1,e2 ||| Entity Origin e2,e1 ||| Instrument Agency e1,e2 ||| Instrument Agency
+ e2,e1 ||| Member Collection e1,e2 ||| Member Collection e2,e1 ||| Message Topic
+ e1,e2 ||| Message Topic e2,e1 ||| Product Producer e1,e2 ||| Product Producer
+ e2,e1 ||| Other
+ id: d7e88599-da89-4cfd-94e2-65e68c7ef141
+ jinja: 'Sentence: {{sentence}}
+
+
+ Are the two nominals (nouns or noun phrases) in the sentence semantically related
+ as {{answer_choices[:-1]|join(", ")}} or {{answer_choices[-1]}}? ||| {{answer_choices[relation]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: semantically nominals with options
+    reference: 'given nominals; official metric: macro-averaged F1-score'
diff --git a/promptsource/templates/sem_eval_2014_task_1/templates.yaml b/promptsource/templates/sem_eval_2014_task_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e3fca70d53317ceeaca9c13c1a1f3ed0c044e1e
--- /dev/null
+++ b/promptsource/templates/sem_eval_2014_task_1/templates.yaml
@@ -0,0 +1,112 @@
+dataset: sem_eval_2014_task_1
+templates:
+ 14b0f0c7-0026-466f-8d9e-9dc6c32bf111: !Template
+ answer_choices: No clear answer ||| yes ||| no
+ id: 14b0f0c7-0026-466f-8d9e-9dc6c32bf111
+    jinja: 'Does the premise: "{{premise}}" agree with the hypothesis: "{{hypothesis}}"?
+    ||| {{answer_choices[entailment_judgment]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: premise_agree_hypothesis
+ reference: ''
+ 2aa091cb-02ff-4c8c-964c-4c5e53df8c1b: !Template
+ answer_choices: null
+ id: 2aa091cb-02ff-4c8c-964c-4c5e53df8c1b
+    jinja: 'How related are the two sentences: "{{hypothesis}}" and "{{premise}}"?
+    Rate it from 1 to 5, where 1 is completely unrelated and 5 is very related.
+
+ ||| {{(((10*relatedness_score)|round)/10)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson correlation
+ - Spearman correlation
+ - Mean Squared Error
+ original_task: true
+ name: related_rate
+ reference: ''
+ 75203dd2-5ec3-4e91-b95f-228ad9bd2010: !Template
+ answer_choices: neither ||| entailing ||| contradicting
+ id: 75203dd2-5ec3-4e91-b95f-228ad9bd2010
+ jinja: "Sentence 1: \"{{hypothesis}}\" \nSentence 2: \"{{premise}}\"\nAre the\
+ \ two sentences {{answer_choices[1]}} or {{answer_choices[2]}} each other? If\
+ \ none of these options are valid, answer \"{{answer_choices[0]}}\".\n||| {{answer_choices[entailment_judgment]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: entailing_or_contradicting
+ reference: ''
+ 892c58fd-64f5-4059-8fb8-c74bc025ff40: !Template
+ answer_choices: Neutral ||| Entailment ||| Contradiction
+ id: 892c58fd-64f5-4059-8fb8-c74bc025ff40
+ jinja: "Given the following hypothesis: {{hypothesis}}.\nAs well as the premise:\
+ \ {{premise}}, \nPredict the Entailment relation between the premise and hypothesis\
+ \ from the labels {{answer_choices[0]}}, {{answer_choices[1]}}, {{answer_choices[2]}}\
+ \ |||\n {{answer_choices[entailment_judgment]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: entailment_relation
+ reference: ''
+ 91a6b1db-be59-41bd-9eea-73bb7a4e7350: !Template
+ answer_choices: Neutral ||| Entailment ||| Contradiction
+ id: 91a6b1db-be59-41bd-9eea-73bb7a4e7350
+ jinja: 'Given the hypothesis: {{hypothesis}} and the premise: {{premise}}. Out
+    of the options {{answer_choices[0]}}, {{answer_choices[1]}}, and {{answer_choices[2]}},
+ what is the entailment label? ||| {{answer_choices[entailment_judgment]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: entailment_label
+ reference: ''
+ a58fe8b4-f185-46a9-8fca-6dc66d0812be: !Template
+ answer_choices: null
+ id: a58fe8b4-f185-46a9-8fca-6dc66d0812be
+ jinja: "Given the following hypothesis: {{hypothesis}}.\nAs well as the premise:\
+ \ {{premise}}, \nGive a score on how related the hypothesis and premise were,\
+ \ from the scale 1 to 5, where 1 is completely unrelated and 5 is very related:\
+ \ ||| {{(((10*relatedness_score)|round)/10)}}\n\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson correlation
+ - Spearman correlation
+ - Mean Squared Error
+ original_task: true
+ name: related_score
+ reference: ''
+ d9380ec0-18b3-48b2-99eb-9f9cb47ab7c7: !Template
+ answer_choices: unclear ||| yes ||| no
+ id: d9380ec0-18b3-48b2-99eb-9f9cb47ab7c7
+ jinja: Does {{premise}} imply that {{hypothesis}}? Please answer yes, no, or
+ unclear. ||| {{answer_choices[entailment_judgment]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: premise_imply_hypothesis
+ reference: ''
diff --git a/promptsource/templates/sent_comp/templates.yaml b/promptsource/templates/sent_comp/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..19bd98b24bac7936fd1c7ba905ac7f25e18475d5
--- /dev/null
+++ b/promptsource/templates/sent_comp/templates.yaml
@@ -0,0 +1,124 @@
+dataset: sent_comp
+templates:
+ 185b5001-19e3-47d3-afd3-40f74346f4bb: !Template
+ answer_choices: null
+ id: 185b5001-19e3-47d3-afd3-40f74346f4bb
+ jinja: '{{graph.sentence}}
+
+
+ ===
+
+
+ Given the above sentence, generate a compressed sentence: ||| {{compression.text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: -given_the_above-generate_compressed-
+ reference: ''
+ 336ba469-f315-49ff-8c02-baf6d059972b: !Template
+ answer_choices: null
+ id: 336ba469-f315-49ff-8c02-baf6d059972b
+ jinja: '{{graph.sentence}}
+
+
+ ===
+
+
+ Given the above sentence, write a headline: ||| {{compression.text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: -given_the_above-write_headline-
+ reference: ''
+ 6493cbf3-bce9-4556-92ab-ec815f768eb6: !Template
+ answer_choices: null
+ id: 6493cbf3-bce9-4556-92ab-ec815f768eb6
+ jinja: 'Sentence: {{graph.sentence}}
+
+
+ Compressed sentence: ||| {{compression.text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: sentence--compressed-
+ reference: ''
+ 9391497d-4fd1-4977-aba8-dc20f9e9445a: !Template
+ answer_choices: null
+ id: 9391497d-4fd1-4977-aba8-dc20f9e9445a
+ jinja: '{{graph.sentence}}
+
+
+ Extreme TL;DR: ||| {{compression.text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: -extreme_TLDR-
+ reference: ''
+ b7b2934c-cf3e-42b9-b7be-d6f1af679bce: !Template
+ answer_choices: null
+ id: b7b2934c-cf3e-42b9-b7be-d6f1af679bce
+ jinja: 'Write a headline for the sentence below:
+
+ {{graph.sentence}}
+
+
+ headline:
+
+ |||
+
+ {{headline}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: write_headline--headline-
+ reference: ''
+ ca70c220-b9d8-46fa-8d83-3b9ba9e177c0: !Template
+ answer_choices: null
+ id: ca70c220-b9d8-46fa-8d83-3b9ba9e177c0
+ jinja: '{{graph.sentence}}
+
+ ===
+
+ Given the above sentence, write one compressed sentence to summarize: ||| {{compression.text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: -given_the_above-write_compressed_to_summarize-
+ reference: ''
+ f797c3f9-2a93-46a6-8c84-ba4871eba79b: !Template
+ answer_choices: null
+ id: f797c3f9-2a93-46a6-8c84-ba4871eba79b
+ jinja: 'Compress: {{graph.sentence}} ||| {{compression.text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: compress--
+ reference: ''
diff --git a/promptsource/templates/sick/templates.yaml b/promptsource/templates/sick/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..427bd03bce7d7803f627e08a7c925bd94c533c96
--- /dev/null
+++ b/promptsource/templates/sick/templates.yaml
@@ -0,0 +1,120 @@
+dataset: sick
+templates:
+ 2b5fcfdc-8dc4-4aed-9819-8a104230d0fa: !Template
+ answer_choices: null
+ id: 2b5fcfdc-8dc4-4aed-9819-8a104230d0fa
+ jinja: 'How related are the following sentences?
+
+ Give a score on a scale of 1 to 5.
+
+
+ {{sentence_A}}
+
+
+ {{sentence_B}} |||
+
+
+ {{(((5*relatedness_score)|round)/5)}}
+
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ - Spearman Correlation
+ original_task: true
+ name: sentences relation score
+ reference: ''
+ 566db154-818a-43c6-b66d-924a20fbbec2: !Template
+ answer_choices: entail ||| is neutral ||| contradict
+ id: 566db154-818a-43c6-b66d-924a20fbbec2
+    jinja: 'Does sentence B entail, contradict, or remain neutral with respect to sentence
+ A?
+
+
+ Sentence A: {{sentence_A}}
+
+
+ Sentence B: {{sentence_B}} |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+    name: B entails_neutral_contradict A?
+ reference: ''
+ 9a4d6bd4-bd67-46e4-ac70-6d46eff32b93: !Template
+ answer_choices: entail ||| is neutral ||| contradict
+ id: 9a4d6bd4-bd67-46e4-ac70-6d46eff32b93
+    jinja: 'Does sentence A entail, contradict, or remain neutral with respect to sentence
+ B?
+
+
+ Sentence A: {{sentence_A}}
+
+
+ Sentence B: {{sentence_B}}|||
+
+ {{
+
+ {"A_entails_B": answer_choices[0], "A_neutral_B": answer_choices[1], "A_contradicts_B":
+ answer_choices[2]}[entailment_AB]
+
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: A entails_neutral_contradict B?
+ reference: ''
+ a502cdc1-3bf0-4019-8b4c-b293d75a95ff: !Template
+ answer_choices: Yes ||| No
+ id: a502cdc1-3bf0-4019-8b4c-b293d75a95ff
+ jinja: 'Does the sentence, "{{sentence_B}}", entail the sentence, "{{sentence_A}}"
+ ? |||
+
+ {{
+
+ [answer_choices[0], answer_choices[1], answer_choices[1]][label]
+
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: B entails A?
+ reference: ''
+ d96bfba4-3bf3-41db-84be-3d67126faf07: !Template
+ answer_choices: Yes ||| No
+ id: d96bfba4-3bf3-41db-84be-3d67126faf07
+ jinja: 'Does the sentence, "{{sentence_A}}", entail the sentence, "{{sentence_B}}"
+ ? |||
+
+ {{
+
+ {"A_entails_B": answer_choices[0], "A_neutral_B": answer_choices[1], "A_contradicts_B":
+ answer_choices[1]}[entailment_AB]
+
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: A entails B?
+ reference: ''
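+# Note on `A entails_neutral_contradict B?` and `A entails B?` above: sick
+# exposes the A-to-B judgment as the string field entailment_AB rather than
+# an integer, so the output side indexes an inline jinja2 dict instead of
+# answer_choices[label]:
+#   {{ {"A_entails_B": answer_choices[0],
+#       "A_neutral_B": answer_choices[1],
+#       "A_contradicts_B": answer_choices[2]}[entailment_AB] }}
+# `A entails B?` maps both "A_neutral_B" and "A_contradicts_B" to the same
+# choice, collapsing the three-way label into a binary entailment answer.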
diff --git a/promptsource/templates/sms_spam/templates.yaml b/promptsource/templates/sms_spam/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3bf7eece78329402487b896f2254e6996a8ff8ec
--- /dev/null
+++ b/promptsource/templates/sms_spam/templates.yaml
@@ -0,0 +1,75 @@
+dataset: sms_spam
+templates:
+ 7bab221f-92fc-46b4-8c02-d5f401185f7e: !Template
+ answer_choices: not spam ||| spam
+ id: 7bab221f-92fc-46b4-8c02-d5f401185f7e
+ jinja: "What is the spam label for the following sms message? {{sms}} \n|||\n\
+ {{ answer_choices [label] }}"
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: spam_label
+ reference: ''
+ 84cdb14c-f129-461c-83cf-a0a48af3d2ce: !Template
+ answer_choices: True ||| False
+ id: 84cdb14c-f129-461c-83cf-a0a48af3d2ce
+ jinja: "Is this sms message considered {{\"ham\"}} (i.e. not spam)? \n{{sms}}\n\
+ |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: ham_True_False
+ reference: ''
+ 871415d2-552d-4798-a319-613c3c86d290: !Template
+ answer_choices: ham ||| spam
+ id: 871415d2-552d-4798-a319-613c3c86d290
+ jinja: 'Is the label for the following sms message {{"ham"}} (not spam) or {{"spam"}}?
+ {{sms}}
+
+ |||
+
+ {{ answer_choices [label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: is_the_label
+ reference: ''
+ a38996db-6f24-4412-ab78-fb9265bedd66: !Template
+    answer_choices: not spam ||| spam
+ id: a38996db-6f24-4412-ab78-fb9265bedd66
+ jinja: "The following sms message should be marked as \"spam\" or \"not spam\"\
+ ? {{sms}} \n|||\n{{ answer_choices [label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: marked as
+ reference: ''
+ ef8c84e0-d45d-4e5d-b5e2-6ee3a94ce330: !Template
+ answer_choices: False ||| True
+ id: ef8c84e0-d45d-4e5d-b5e2-6ee3a94ce330
+ jinja: "Is this sms message considered {{\"spam\"}}? \n{{sms}}\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: spam_True_False
+ reference: ''
diff --git a/promptsource/templates/snips_built_in_intents/templates.yaml b/promptsource/templates/snips_built_in_intents/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9489f8e54db1df614fb65206fd680f7518c3bb2f
--- /dev/null
+++ b/promptsource/templates/snips_built_in_intents/templates.yaml
@@ -0,0 +1,147 @@
+dataset: snips_built_in_intents
+templates:
+ 05b95953-c659-4b51-8abc-6a170db93658: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: 05b95953-c659-4b51-8abc-6a170db93658
+ jinja: "Here is a voice command by a user of a voice assistant: \n\n{{text}}.\n\
+ \nIt can be summarized by one of the following options: \n\n{{answer_choices\
+ \ | join(\", \")}}. \n\nWhich one would that be?\n\n|||\n\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: voice_intent
+ reference: ''
+ 069cc4e0-b76e-46be-a592-68a2323e41ea: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: 069cc4e0-b76e-46be-a592-68a2323e41ea
+ jinja: 'Humans can ask questions or make requests related to one of the following
+ categories: {{answer_choices | join(", ")}}
+
+
+ What is the best category for the following request? - "{{text}}"
+
+
+ |||
+
+
+ {{answer_choices[label]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: categorize_query
+ reference: ''
+ 0952c001-39fe-4d02-9c3f-54d44bac7694: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: 0952c001-39fe-4d02-9c3f-54d44bac7694
+ jinja: "{{answer_choices | join(\", \")}} are possible user intents.\n\nWhich\
+ \ of the above options best captures the intent of the following user message?\
+ \ \n\n{{text}}\n\n|||\n\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: intent_query
+ reference: ''
+ 2a1037cc-a807-493d-bffe-5493c73a425b: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: 2a1037cc-a807-493d-bffe-5493c73a425b
+ jinja: 'Map this query "{{text}}" to one of these categories - {{answer_choices
+ | join(", ")}}
+
+ |||
+
+
+ {{answer_choices[label]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: categorize_query_brief
+ reference: ''
+ 7dea1614-2d7f-4fee-a7a8-35f9ea12c411: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: 7dea1614-2d7f-4fee-a7a8-35f9ea12c411
+ jinja: "\"{{text}}\" is a message from a user.\n\nWhich of the following options\
+ \ best captures the intent of the user message written above? \n\n{{answer_choices\
+ \ | join(\", \")}}\n\n|||\n\n{{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: query_intent
+ reference: ''
+ 9e262c90-a8b7-40d3-896f-74dee67516a7: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: 9e262c90-a8b7-40d3-896f-74dee67516a7
+ jinja: "\"{{text}}\"\n\nThis message from a user can be summarized by one of these\
+ \ options - {{answer_choices | join(\", \")}}. \n\nWhat would be the best option?\n\
+ \n|||\n\n\n{{answer_choices[label]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: query_summarize
+ reference: ''
+ b987cb60-6b37-454e-a07e-dfe8b06a70c3: !Template
+ answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+ ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+ ||| Book Restaurant ||| Get Directions ||| Share ETA
+ id: b987cb60-6b37-454e-a07e-dfe8b06a70c3
+ jinja: '"{{text}}"
+
+
+    Is the best category for the query above {{answer_choices[0]}}? Or is it {{answer_choices[1]}}?
+ Other category options include {{answer_choices[2]}}, {{answer_choices[3]}},
+ {{answer_choices[4]}}, {{answer_choices[5]}}, {{answer_choices[6]}}, {{answer_choices[7]}},
+ {{answer_choices[8]}} and {{answer_choices[9]}}. What is the best answer?
+
+
+ |||
+
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: query_options_or
+ reference: ''
diff --git a/promptsource/templates/snli/templates.yaml b/promptsource/templates/snli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49d3987165b13ddd79f079fa753fe885a0eaaade
--- /dev/null
+++ b/promptsource/templates/snli/templates.yaml
@@ -0,0 +1,221 @@
+dataset: snli
+templates:
+ 11c67e6d-affb-4e8d-8a04-10186f8a789b: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 11c67e6d-affb-4e8d-8a04-10186f8a789b
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ 43dfca1a-7d49-41c6-85f6-73a3afbea05d: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 43dfca1a-7d49-41c6-85f6-73a3afbea05d
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 56492318-8e7c-4757-be57-df5d9dfd0849: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: 56492318-8e7c-4757-be57-df5d9dfd0849
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ 5ccd8e8e-bf3e-4aae-8bcf-6a8f7fa2356a: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 5ccd8e8e-bf3e-4aae-8bcf-6a8f7fa2356a
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ 6d84a416-188c-4bc8-b0dc-5368bb97e70e: !Template
+ answer_choices: True ||| Neither ||| False
+ id: 6d84a416-188c-4bc8-b0dc-5368bb97e70e
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+    are no task-identifying tokens like "anli R1: ".'
+ 7f67af98-beb1-4a15-9506-287de9286cde: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 7f67af98-beb1-4a15-9506-287de9286cde
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ 9cac32ea-6a16-4148-8cd1-27a4fe5d90be: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 9cac32ea-6a16-4148-8cd1-27a4fe5d90be
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ a5cc290e-24e0-430a-8b7c-f5ea471e4de2: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: a5cc290e-24e0-430a-8b7c-f5ea471e4de2
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ b180ea70-faae-41a1-abc5-e4aa6a2c46ec: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: b180ea70-faae-41a1-abc5-e4aa6a2c46ec
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ d58e7b53-8a2e-45bb-a9ab-52aa16a69444: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: d58e7b53-8a2e-45bb-a9ab-52aa16a69444
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ df7400e8-c58a-42a9-8852-06e228ad8de3: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: df7400e8-c58a-42a9-8852-06e228ad8de3
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ dfe43930-1a63-46c2-bd82-216b4d11cfe3: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: dfe43930-1a63-46c2-bd82-216b4d11cfe3
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ e6f6ef48-616c-455a-ac13-a1ce8901e188: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: e6f6ef48-616c-455a-ac13-a1ce8901e188
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ ebb3841f-d689-4f46-9650-814ebc4152f5: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: ebb3841f-d689-4f46-9650-814ebc4152f5
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ ec110b9b-e2a2-4a18-b88c-b731fc779dfa: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: ec110b9b-e2a2-4a18-b88c-b731fc779dfa
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
diff --git a/promptsource/templates/social_i_qa/templates.yaml b/promptsource/templates/social_i_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec8e5ca1ce39cee1675eb29d9803b15f44f84798
--- /dev/null
+++ b/promptsource/templates/social_i_qa/templates.yaml
@@ -0,0 +1,145 @@
+dataset: social_i_qa
+templates:
+ 605691e9-df59-415d-a622-530734c7df38: !Template
+ answer_choices: '{{answerA}} ||| {{answerB}} ||| {{answerC}}'
+ id: 605691e9-df59-415d-a622-530734c7df38
+ jinja: 'I heard that {{context}}
+
+
+ And I was wondering {{question}}
+
+
+ |||
+
+
+ {{answer_choices[label | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: I was wondering
+ reference: ''
+ 666f415b-e3ac-47bf-a79b-19024c4a4143: !Template
+ answer_choices: '{{answerA}} ||| {{answerB}} ||| {{answerC}}'
+ id: 666f415b-e3ac-47bf-a79b-19024c4a4143
+ jinja: '{{context}}
+
+
+ Given the context: {{question}}
+
+
+ Possible answers: {{answer_choices | join(", ")}}
+
+
+ |||
+
+
+ {{answer_choices[label | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Show choices and generate answer
+ reference: ''
+ 991f78cc-82d3-482f-b1de-f37a7179a316: !Template
+ answer_choices: Yes ||| No
+ id: 991f78cc-82d3-482f-b1de-f37a7179a316
+ jinja: "{% set random_answer_id = range(0,2) | choice%}\n{% set answers = [answerA,\
+ \ answerB, answerC] %}\n{{context}}\n\nGiven the question \"{{question}}\",\
+ \ is \"{{answers[random_answer_id]}}\" a valid answer?\n\n|||\n\n{% if (label\
+ \ | int) - 1 == random_answer_id %}\n Yes\n{% else %}\n No\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Check if a random answer is valid or not
+ reference: ''
+ b980667e-b4ca-44ce-aba9-5b47d3ccf406: !Template
+ answer_choices: null
+ id: b980667e-b4ca-44ce-aba9-5b47d3ccf406
+ jinja: '{{context}}
+
+
+ Given that the answer to a question is "{{{"1": answerA, "2": answerB, "3":
+ answerC}[label]}}", what is the question?
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate the question from the answer
+ reference: ''
+ cbad777f-5794-4d71-bf3d-54da6043e5f1: !Template
+ answer_choices: '{{answerA}} ||| {{answerB}} ||| {{answerC}}'
+ id: cbad777f-5794-4d71-bf3d-54da6043e5f1
+ jinja: '{{context}}
+
+
+ Given the context: {{question}}
+
+
+ |||
+
+
+ {{answer_choices[label | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Generate answer
+ reference: ''
+ e2316120-2461-4664-943d-962a85008e23: !Template
+ answer_choices: A ||| B ||| C
+ id: e2316120-2461-4664-943d-962a85008e23
+ jinja: 'Context: {{context}}
+
+
+ Question: {{question}}
+
+
+ Which one of these answers best answers the question according to the context?
+
+
+ A: {{answerA}}
+
+
+ B: {{answerB}}
+
+
+ C: {{answerC}}
+
+
+ |||
+
+
+ {{{"1": "A", "2": "B", "3": "C"}[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Show choices and generate index
+ reference: ''
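+# Note on label handling above: social_i_qa stores `label` as the string
+# "1"/"2"/"3", so these templates either cast it before indexing,
+#   {{ answer_choices[label | int - 1] }}
+# (the int filter binds to `label` before the subtraction), or use the
+# string directly as a dict key:
+#   {{ {"1": "A", "2": "B", "3": "C"}[label] }}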
diff --git a/promptsource/templates/species_800/templates.yaml b/promptsource/templates/species_800/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..60204e46bcdfdd59fd3d425e3db33fae47320a8f
--- /dev/null
+++ b/promptsource/templates/species_800/templates.yaml
@@ -0,0 +1,357 @@
+dataset: species_800
+templates:
+ 28b2d2b9-e844-423b-a9e8-f87cc0fc5b9e: !Template
+ answer_choices: null
+ id: 28b2d2b9-e844-423b-a9e8-f87cc0fc5b9e
+ jinja: "Given the sentence below with tokens separated with the character || ,\
+ \ identify the tokens that are species or organisms of the NCBI Taxonomy (acronyms,\
+ \ common names, abbreviations and scientific names of the species in the NCBI\
+ \ Taxonomy. E.g., Escherichia coli, E. coli). \nIndicate for each token in the\
+ \ sentence {{\"\\\"NCBI Taxonomy token\\\"\"}} if it is an NCBI Taxonomy token,\
+ \ else {{\"\\\"None\\\"\"}} if it is not an NCBI Taxonomy token. Please separate\
+ \ each token with the character || as in the original sentence.\n\nSentence:\
+ \ {{ tokens | join(\" || \")}}\n\nTokens:\n|||\n{% set new_list = [] %}\n{%\
+ \ for ner_tag in ner_tags %}\n{% if ner_tag > 0 %}\n{{ new_list.append(\"NCBI\
+ \ Taxonomy token\")|default(\"\", True) }}\n{% elif ner_tag <= 0%}\n{{ new_list.append(\"\
+ None\")|default(\"\", True) }}\n{% endif %}\n{% endfor %}\n{{ new_list | join(\"\
+ \ || \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: affirmative_bottom_list
+ reference: ''
+ 3eaaba97-b0b7-4c00-b3ff-a82e6462c21d: !Template
+ answer_choices: null
+ id: 3eaaba97-b0b7-4c00-b3ff-a82e6462c21d
+ jinja: 'Please write the species of the NCBI Taxonomy mentioned in the text below
+ (acronyms, common names, abbreviations, and scientific names of the species
+    in the NCBI Taxonomy. E.g., Escherichia coli, E. coli). If there is no species,
+    answer "None"; if there are multiple species, separate them with a comma.
+
+
+ Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+ )", ")")}}
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+ 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+ )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: affirmative_top_string
+ reference: ''
+ 456329f5-1fd5-4b02-ab35-3c3ec2b731f0: !Template
+ answer_choices: null
+ id: 456329f5-1fd5-4b02-ab35-3c3ec2b731f0
+ jinja: 'Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",")
+ | replace(" ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("(
+ ", "(") | replace(" )", ")")}}
+
+
+ Given the text above, please write the species of the NCBI Taxonomy mentioned
+ in it (acronyms, common names, abbreviations, and scientific names of the species
+    in the NCBI Taxonomy. E.g., Escherichia coli, E. coli). If there is no species,
+    answer "None"; if there are multiple species, separate them with a comma.
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+ 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+ )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: affirmative_bottom_string
+ reference: ''
+ 52218e73-f4fe-4181-8af3-d60ec0e0ffd1: !Template
+ answer_choices: null
+ id: 52218e73-f4fe-4181-8af3-d60ec0e0ffd1
+ jinja: 'Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",")
+ | replace(" ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("(
+ ", "(") | replace(" )", ")")}}
+
+
+ Given the text above, can you write all the species of the NCBI Taxonomy mentioned
+ in it?
+
+ An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+ name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+ If there is no species answer "None", if there are more species separate them
+ with a comma.
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+ 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+ )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: interrogative_bottom_string
+ reference: ''
+ b6de15a6-61d0-49cc-a31c-0529d54a3ae2: !Template
+ answer_choices: null
+ id: b6de15a6-61d0-49cc-a31c-0529d54a3ae2
+ jinja: 'Given the sentence below with tokens separated with the character || ,
+ can you identify the tokens that are species or organisms of the NCBI Taxonomy?
+
+ An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+ name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+ Please indicate for each token in the sentence {{"\"NCBI Taxonomy token\""}}
+ if it is an NCBI Taxonomy token, else {{"\"None\""}} if it is not an NCBI Taxonomy
+ token. Separate each token with the character || as in the original sentence.
+
+
+ Sentence: {{ tokens | join(" || ")}}
+
+ |||
+
+ {% set new_list = [] %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ new_list.append("NCBI Taxonomy token")|default("", True) }}
+
+ {% elif ner_tag <= 0%}
+
+ {{ new_list.append("None")|default("", True) }}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{ new_list | join(" || ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: interrogative_bottom_list
+ reference: ''
+ ce8619cb-ee0a-47b1-93d3-acd32d8ffa6f: !Template
+ answer_choices: null
+ id: ce8619cb-ee0a-47b1-93d3-acd32d8ffa6f
+ jinja: "Sentence: {{ tokens | join(\" || \")}}\n\nGiven the sentence above with\
+ \ tokens separated with the character || , identify the tokens that are species\
+ \ or organisms of the NCBI Taxonomy (acronyms, common names, abbreviations,\
+ \ and scientific names of the species in the NCBI Taxonomy. E.g., Escherichia\
+ \ coli, E. coli). \nIndicate for each token in the sentence {{\"\\\"NCBI Taxonomy\
+ \ token\\\"\"}} if it is an NCBI Taxonomy token, else {{\"\\\"None\\\"\"}} if\
+ \ it is not an NCBI Taxonomy token. Please separate each token with the character\
+ \ || as in the original sentence.\n\n|||\n{% set new_list = [] %}\n{% for ner_tag\
+ \ in ner_tags %}\n{% if ner_tag > 0 %}\n{{ new_list.append(\"NCBI Taxonomy token\"\
+ )|default(\"\", True) }}\n{% elif ner_tag <= 0%}\n{{ new_list.append(\"None\"\
+ )|default(\"\", True) }}\n{% endif %}\n{% endfor %}\n{{ new_list | join(\" ||\
+ \ \") }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: affirmative_top_list
+ reference: ''
+ cfc9b46c-7268-4449-85c4-cc08c3d5aaa9: !Template
+ answer_choices: null
+ id: cfc9b46c-7268-4449-85c4-cc08c3d5aaa9
+ jinja: 'Given the text below, can you write all the species of the NCBI Taxonomy
+ mentioned in it?
+
+
+ An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+ name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+ If there is no species answer "None", if there are more species separate them
+ with a comma.
+
+
+ Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+ )", ")")}}
+
+ |||
+
+ {% set diseases = {"list": [], "disease_started": False} %}
+
+ {% set disease_token = "" %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+ {% set disease_token = tokens[loop.index - 1] %}
+
+ {{ diseases.list.append(" ") |default("", True)}}
+
+ {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+ 1 else disease_token) |default("", True)}}
+
+ {% elif diseases.disease_started %}
+
+ {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+ {{ diseases.list.append(",") |default("", True)}}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{diseases.list | join | replace(" .", ".") | replace(" ,", ",") | replace("
+ ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+ )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: interrogative_top_string
+ reference: ''
+ dd0270f2-77e7-4d31-a806-85ba02934180: !Template
+ answer_choices: null
+ id: dd0270f2-77e7-4d31-a806-85ba02934180
+ jinja: 'Sentence: {{ tokens | join(" || ")}}
+
+
+ Given the sentence above with tokens separated with the character || , can you
+ identify the tokens that are species or organisms of the NCBI Taxonomy?
+
+ An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+ name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+
+ Please indicate for each token in the sentence {{"\"NCBI Taxonomy token\""}}
+ if it is an NCBI Taxonomy token, else {{"\"None\""}} if it is not an NCBI Taxonomy
+ token. Separate each token with the character || as in the original sentence.
+
+
+ |||
+
+ {% set new_list = [] %}
+
+ {% for ner_tag in ner_tags %}
+
+ {% if ner_tag > 0 %}
+
+ {{ new_list.append("NCBI Taxonomy token")|default("", True) }}
+
+ {% elif ner_tag <= 0%}
+
+ {{ new_list.append("None")|default("", True) }}
+
+ {% endif %}
+
+ {% endfor %}
+
+ {{ new_list | join(" || ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: null
+ languages:
+ - en
+ metrics: []
+ original_task: true
+ name: interrogative_top_list
+ reference: ''
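+# Note on the string-output templates above: jinja2's {% set %} cannot
+# mutate an outer variable from inside a loop, so the templates accumulate
+# entity tokens in a mutable dict and use |default("", True) to swallow the
+# None that append()/update() would otherwise render into the prompt. The
+# skeleton of the idiom (variable names here are illustrative):
+#   {% set acc = {"list": [], "started": False} %}
+#   {% for tag in ner_tags %}
+#     {% if tag > 0 %}
+#       {{ acc.update({"started": True}) | default("", True) }}
+#       {{ acc.list.append(tokens[loop.index - 1]) | default("", True) }}
+#     {% elif acc.started %}
+#       {{ acc.update({"started": False}) | default("", True) }}
+#       {{ acc.list.append(",") | default("", True) }}
+#     {% endif %}
+#   {% endfor %}
+#   {{ acc.list | join(" ") | trim(",") if acc.list else "None" }}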
diff --git a/promptsource/templates/squad/templates.yaml b/promptsource/templates/squad/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..53f03ea3589aec12f39f9acb1b46717f0902eea4
--- /dev/null
+++ b/promptsource/templates/squad/templates.yaml
@@ -0,0 +1,113 @@
+dataset: squad
+templates:
+ 3d85b5b0-51db-4d72-8ead-d0b3654025ee: !Template
+ answer_choices: null
+ id: 3d85b5b0-51db-4d72-8ead-d0b3654025ee
+ jinja: 'Refer to the passage below and answer the following question:
+
+
+ Passage: {{context}}
+
+
+ Question: {{question}}
+
+ |||
+
+ {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_question_given_context
+ reference: ''
+ 5a3c2d11-9469-46f6-88c2-f7e159a9742b: !Template
+ answer_choices: null
+ id: 5a3c2d11-9469-46f6-88c2-f7e159a9742b
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ A: ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: given_context_answer_question_variation
+ reference: ''
+ 64ed14d6-c835-424d-a55d-ded1b1bd2546: !Template
+ answer_choices: null
+ id: 64ed14d6-c835-424d-a55d-ded1b1bd2546
+ jinja: '{{context}}
+
+
+    Generate a question from the above passage: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: given_context_generate_question
+ reference: ''
+ 69041854-6e48-4902-92c2-adb46457bea3: !Template
+ answer_choices: null
+ id: 69041854-6e48-4902-92c2-adb46457bea3
+ jinja: '{{context}}
+
+
+ From the above passage, a reasonable question with "{{answers["text"][0]}}"
+ as the answer would be: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: jeopardy
+ reference: jeopardy style- wiki_qa
+ 7c13b5ba-abfc-4b68-9a36-5430a0b0e580: !Template
+ answer_choices: null
+ id: 7c13b5ba-abfc-4b68-9a36-5430a0b0e580
+ jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\
+ \n{{answers.text[0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_question
+ reference: ''
+ e153c4cd-6757-487a-8fe6-da7e88ec3e47: !Template
+ answer_choices: null
+ id: e153c4cd-6757-487a-8fe6-da7e88ec3e47
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ Referring to the passage above, the correct answer to the given question is
+ ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_given_context_and_question
+ reference: ''
diff --git a/promptsource/templates/squad_adversarial/AddSent/templates.yaml b/promptsource/templates/squad_adversarial/AddSent/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af2a74e32ed206d37181d4e0d0e646e417394dc3
--- /dev/null
+++ b/promptsource/templates/squad_adversarial/AddSent/templates.yaml
@@ -0,0 +1,114 @@
+dataset: squad_adversarial
+subset: AddSent
+templates:
+ 22a2f318-5302-479e-93be-215453060624: !Template
+ answer_choices: null
+ id: 22a2f318-5302-479e-93be-215453060624
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ Referring to the passage above, the correct answer to the given question is
+ ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_given_context_and_question
+ reference: ''
+ 402adce7-4857-4524-8ad3-6270b66a5e0f: !Template
+ answer_choices: null
+ id: 402adce7-4857-4524-8ad3-6270b66a5e0f
+ jinja: 'Refer to the passage below and answer the following question:
+
+
+ Passage: {{context}}
+
+
+ Question: {{question}}
+
+ |||
+
+ {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_question_given_context
+ reference: ''
+ b4994c82-bfb2-4e0c-a5d7-081053830097: !Template
+ answer_choices: null
+ id: b4994c82-bfb2-4e0c-a5d7-081053830097
+ jinja: '{{context}}
+
+
+ From the above passage, a reasonable question with "{{answers["text"][0]}}"
+ as the answer would be: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: jeopardy
+ reference: jeopardy style- wiki_qa
+ b60cd43d-7026-434b-abf8-f67cc965316a: !Template
+ answer_choices: null
+ id: b60cd43d-7026-434b-abf8-f67cc965316a
+ jinja: '{{context}}
+
+
+    Generate a question from the above passage: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: given_context_generate_question
+ reference: ''
+ dada0334-1dc2-4e39-a7e1-258ac622ab4f: !Template
+ answer_choices: null
+ id: dada0334-1dc2-4e39-a7e1-258ac622ab4f
+ jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\
+ \n{{answers.text[0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_question
+ reference: ''
+ e638bc9e-5059-4ace-a6f9-4871f548342f: !Template
+ answer_choices: null
+ id: e638bc9e-5059-4ace-a6f9-4871f548342f
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ A: ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: given_context_answer_question_variation
+ reference: ''
diff --git a/promptsource/templates/squad_v2/templates.yaml b/promptsource/templates/squad_v2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..814be0b6f2bac1507bb158606448368627edbfcf
--- /dev/null
+++ b/promptsource/templates/squad_v2/templates.yaml
@@ -0,0 +1,387 @@
+dataset: squad_v2
+templates:
+ 17b83a3f-f748-42e6-9cdf-b2951dd8299d: !Template
+ answer_choices: null
+ id: 17b83a3f-f748-42e6-9cdf-b2951dd8299d
+ jinja: '{% set seq = [
+
+ ''Answer the question depending on the context.'',
+
+ ''What is the answer?'',
+
+ ] %}
+
+
+ {{ seq | choice }}
+
+ Context: {{context}};
+
+ Question: {{question}};
+
+ Answer: |||
+
+ {% if answers.text == [] %}
+
+ Answer not in context
+
+ {% else %}
+
+ {{answers.text[0]}}
+
+ {% endif %}'
+ metadata: &id001 !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Questions with Context
+ reference: Given context and question, give answer
+ 189dcc58-fd13-4771-ad03-7879a61c7ab7: !Template
+ answer_choices: null
+ id: 189dcc58-fd13-4771-ad03-7879a61c7ab7
+ jinja: '{% if answers.text != [] %}
+
+ Determine the question that you might have asked to get back the following answer
+ for the given context
+
+ Context: {{context}};
+
+ Answer: {{answers.text[0]}};
+
+ Question: |||
+
+ {{question}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Jeopardy with Context
+ reference: Given context and an answer, guess the question.
+ 20064b80-e4d0-41b7-9135-92c0077d5044: !Template
+ answer_choices: null
+ id: 20064b80-e4d0-41b7-9135-92c0077d5044
+ jinja: '{% set seq = [
+
+ ''What is this about? '',
+
+ ''What is the paragraph about? '',
+
+ ''Get the topic from: '',
+
+ ''From the passage, get the topic'',
+
+ ''I want to know the topic. '',
+
+ ''Topic from the passage: '',
+
+ ''Topic from the paragraph: '',
+
+ ] %}
+
+ {{ seq | choice }}
+
+ {{context}} |||
+
+ {{title | replace("_", " ")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: Topic Prediction - Context with randomized prompt options
+  reference: Asks to predict the topic given the context, with the prompt phrased
+    as if one person is asking another.
+ 338cc143-361e-4796-b035-31fb2201c49f: !Template
+ answer_choices: null
+ id: 338cc143-361e-4796-b035-31fb2201c49f
+ jinja: '{% set seq = [
+
+ ''This is about '',
+
+ ''What is this about? '',
+
+ ''The paragraph is about '',
+
+ ''What is the paragraph about? '',
+
+ ''Get the topic: '',
+
+ ''From the passage, the topic is'',
+
+ ''I want to know the topic. '',
+
+ ''Topic from the passage: '',
+
+ ''Topic from the paragraph: '',
+
+ ] %}
+
+ {{context}}
+
+ {{ seq | choice }}|||
+
+ {{title | replace("_", " ")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+  name: Topic Prediction - Context with randomized prompt options placed at the
+    end
+  reference: The prompt is placed at the end of the context
+ 7a44cd99-7420-4456-aaaa-34e2c81d1679: !Template
+ answer_choices: null
+ id: 7a44cd99-7420-4456-aaaa-34e2c81d1679
+ jinja: '{% if answers.text != [] %}
+
+ What is a question that would give the following answer?
+
+ Answer: {{answers.text[0]}};
+
+ Question: |||
+
+ {{question}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Jeopardy without Context
+ reference: Given an answer, output a viable question. Context is omitted.
+ 8bea1123-fd8d-4bac-96bf-b8a289ee74b3: !Template
+ answer_choices: null
+ id: 8bea1123-fd8d-4bac-96bf-b8a289ee74b3
+ jinja: '{% set seq = [
+
+ ''Can you tell me '',
+
+ ''Please tell me '',
+
+ ''Tell me '',
+
+ ''From the passage, '',
+
+ ''I want to know '',
+
+ ''I want to ask '',
+
+ ''What is the answer to: '',
+
+ ''Find the answer to: '',
+
+ ''Answer: '',
+
+ '''',
+
+ ] %}
+
+ {{context}} {{ seq | choice }}{{question}}|||
+
+ {% if answers.text == [] %}
+
+ Answer not in context
+
+ {% else %}
+
+ {{answers.text[0]}}
+
+ {% endif %}'
+ metadata: &id002 !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Questions with Context - Without Prompt Keywords
+  reference: Given context and question, give answer. No keywords are given to
+    delineate context and question.
+ b14c9843-fd56-42ff-817d-39e41963c847: !Template
+ answer_choices: null
+ id: b14c9843-fd56-42ff-817d-39e41963c847
+ jinja: '{% set seq = [
+
+ ''Answer the question depending on the context.'',
+
+ ''What is the answer?'',
+
+ ] %}
+
+
+ {{ seq | choice }}
+
+ Context: {{context}};
+
+ Question: {{question}};
+
+ If you can''t find the answer, please respond "unanswerable".
+
+ Answer: |||
+
+ {% if answers.text == [] %}
+
+ unanswerable
+
+ {% else %}
+
+ {{answers.text[0]}}
+
+ {% endif %}'
+ metadata: *id001
+ name: Questions with Context +unanswerable
+ reference: Given context and question, give answer
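+  # Editor's note: `metadata: *id001` above is a standard YAML alias pointing
+  # back to the `&id001` anchor on the first template's metadata, so both
+  # templates share a single !TemplateMetadata node instead of repeating it.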
+ d768c181-1c9b-40c3-aa01-fc78c3b29875: !Template
+ answer_choices: null
+ id: d768c181-1c9b-40c3-aa01-fc78c3b29875
+ jinja: '{% if answers.text != [] %}
+
+ {{question}}|||
+
+ {{answers.text[0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Trivia
+  reference: Given the input, directly outputs the answer.
+ e1630107-8f5d-44ce-8ccd-6fa80da80328: !Template
+ answer_choices: null
+ id: e1630107-8f5d-44ce-8ccd-6fa80da80328
+ jinja: '{% set seq = [
+
+ ''Can you tell me '',
+
+ ''Please tell me '',
+
+ ''Tell me '',
+
+ ''From the passage, '',
+
+ ''I want to know '',
+
+ ''I want to ask '',
+
+ ''What is the answer to: '',
+
+ ''Find the answer to: '',
+
+ ''Answer: '',
+
+ '''',
+
+ ] %}
+
+ {{context}} {{ seq | choice }}{{question}} If you can''t find the answer, please
+ respond "unanswerable". |||
+
+ {% if answers.text == [] %}
+
+ unanswerable
+
+ {% else %}
+
+ {{answers.text[0]}}
+
+ {% endif %}'
+ metadata: *id002
+ name: Questions with Context - Without Prompt Keywords +unanswerable
+  reference: Given context and question, give answer. No keywords are given to
+    delineate context and question.
+ e2e41877-4794-4ff9-9f92-a2a85105e2a7: !Template
+ answer_choices: yes ||| no
+ id: e2e41877-4794-4ff9-9f92-a2a85105e2a7
+ jinja: "Context: {{context}}; \n\nQuestion: {{question}} \n\nIs this question\
+ \ answerable? ||| \n{% if answers.text != [] %}\n{{answer_choices[0]}}\n{% else\
+ \ %}\n{{answer_choices[1]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+  name: Unanswerable question
+ reference: The template checks if the question is answerable or not
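+  # Editor's note: when `answer_choices` is set (here "yes ||| no"), `|||`
+  # separates the candidate answers and the target indexes into them via
+  # `answer_choices[0]` / `answer_choices[1]`; presumably this is what lets the
+  # Accuracy metric score the prompt over the listed choices.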
+ e51c23b9-5b10-4db3-a0d1-ba546830173d: !Template
+ answer_choices: null
+ id: e51c23b9-5b10-4db3-a0d1-ba546830173d
+ jinja: '{% set seq = [
+
+ ''Determine the topic of the question-answer pair. '',
+
+ ''Find the topic. '',
+
+ ''What is the topic from this? '',
+
+ ] %}
+
+ {% if answers.text != [] %}
+
+ {{ seq | choice }}
+
+ Question: {{question}}; Answer: {{answers.text[0]}}; Topic: |||
+
+ {{title}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: Topic Prediction - Question and Answer Pair
+ reference: Given a Question-Answer pair, generate the topic.
+ fdcf132e-6c70-4188-999e-93601ee8e089: !Template
+ answer_choices: null
+ id: fdcf132e-6c70-4188-999e-93601ee8e089
+ jinja: 'What is the following passage about?
+
+ {{context}} |||
+
+ {{title | replace("_", " ")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: false
+ name: Topic Prediction - Context
+ reference: Predict the topic from the passage
diff --git a/promptsource/templates/squadshifts/amazon/templates.yaml b/promptsource/templates/squadshifts/amazon/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4010ce0d247a9175a4bf01864016e65233dfbeac
--- /dev/null
+++ b/promptsource/templates/squadshifts/amazon/templates.yaml
@@ -0,0 +1,179 @@
+dataset: squadshifts
+subset: amazon
+templates:
+ 24400f56-8d4e-4090-8b37-1861aad5bbb6: !Template
+ answer_choices: null
+ id: 24400f56-8d4e-4090-8b37-1861aad5bbb6
+ jinja: 'I''m creating a final exam for my reading class. Can you please come up
+ with a good question to quiz how well students have read the following text
+ snippet?
+
+
+ {{context}}
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: exam_creation_help
+ reference: 'Question generation '
+ 32575c5c-52b4-49f6-82a8-2dd7828091d4: !Template
+ answer_choices: null
+ id: 32575c5c-52b4-49f6-82a8-2dd7828091d4
+ jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: after
+ reference: First question, then answer
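+  # Editor's note: `{{answers['text'] | most_frequent | choice}}` chains two
+  # filters that appear to be custom promptsource additions: `most_frequent`
+  # keeps the most common answer string(s) among the annotations, and `choice`
+  # then picks one at random in case of a tie.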
+ 6cb1fa6d-3dfb-4eba-b962-444ed826bd23: !Template
+ answer_choices: null
+ id: 6cb1fa6d-3dfb-4eba-b962-444ed826bd23
+ jinja: 'I''m working on the final exam for my class and am trying to figure out
+    the answer to the question "{{question}}". I found the following info on Amazon
+ and I think it has the answer. Can you tell me the answer?
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam
+ reference: 'Exam style prompt '
+ 8fa4d051-8704-48ab-9b57-981a5e2b4cd4: !Template
+ answer_choices: null
+ id: 8fa4d051-8704-48ab-9b57-981a5e2b4cd4
+ jinja: 'Please come up with a good question to test reading comprehension about
+ the following paragraph:
+
+
+ {{context}}
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: question generation task
+ a158a9f3-4b8c-470b-8dd0-08f02554da6f: !Template
+ answer_choices: null
+ id: a158a9f3-4b8c-470b-8dd0-08f02554da6f
+ jinja: '{{["Question", "Problem"] | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+ Hint: {{context}}
+
+
+ |||
+
+ {{answers["text"] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_num_hint_answer
+  reference: 'The prompt has a prefix of question or problem and chooses a random
+    number, followed by the actual question. '
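+  # Editor's note: `{{["Question", "Problem"] | choice}} {{range(1, 12) | choice}}`
+  # draws a random label and a random number from 1 to 11 at render time; `range`
+  # is standard Jinja (exclusive upper bound), while `choice` is assumed to be
+  # promptsource's random-pick filter.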
+ ae6bf01b-a5a3-4c78-a922-f0bbf0b72653: !Template
+ answer_choices: null
+ id: ae6bf01b-a5a3-4c78-a922-f0bbf0b72653
+ jinja: 'I''ve always wondered: {{question}}
+
+
+ I searched Amazon and this is what I found. What''s the answer?
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: wondered
+ reference: 'Original task prompt '
+ b7f7b2b5-e099-40c9-bc7c-93e957b32c8d: !Template
+ answer_choices: null
+ id: b7f7b2b5-e099-40c9-bc7c-93e957b32c8d
+ jinja: 'Use the following answers to generate a possible short passage-question
+ pair:
+
+ {{answers["text"]|join('', '')}} |||
+
+ {{context}}
+
+ {{question}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answers_question
+ reference: Generate a question-passage pair from the answer
+ d0d3b9ba-a138-46d3-968b-679ec4fb570b: !Template
+ answer_choices: null
+ id: d0d3b9ba-a138-46d3-968b-679ec4fb570b
+ jinja: "{{context}}\n\nWith the help of the passage, please answer the following\
+ \ question: \n{{question}} |||\n{{answers[\"text\"]|choice}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: pick_one_answer
+ reference: The prompt randomly picks one correct answer
diff --git a/promptsource/templates/squadshifts/new_wiki/templates.yaml b/promptsource/templates/squadshifts/new_wiki/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5a5be11708442d522269a3987774a2013391f55f
--- /dev/null
+++ b/promptsource/templates/squadshifts/new_wiki/templates.yaml
@@ -0,0 +1,200 @@
+dataset: squadshifts
+subset: new_wiki
+templates:
+ 60995116-53af-456f-ac20-858b83fa9ba6: !Template
+ answer_choices: null
+ id: 60995116-53af-456f-ac20-858b83fa9ba6
+ jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: after
+ reference: First question, then answer
+ a5691e58-f2cc-41eb-8308-c7046856f72f: !Template
+ answer_choices: null
+ id: a5691e58-f2cc-41eb-8308-c7046856f72f
+ jinja: 'Use the following answers to generate a possible short passage-question
+ pair:
+
+ {{answers["text"]|join('', '')}} |||
+
+ {{context}}
+
+ {{question}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answers_question
+ reference: Generate a question-passage pair from the answer
+ bc874f68-ce23-43cd-9683-5574c9ef01cb: !Template
+ answer_choices: null
+ id: bc874f68-ce23-43cd-9683-5574c9ef01cb
+ jinja: 'I''ve always wondered: {{question}}
+
+
+ I searched Wikipedia and this is what I found. What''s the answer?
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: wondered
+ reference: 'Original task prompt '
+ bfa571de-6076-44c2-b23b-8b2a404b180d: !Template
+ answer_choices: null
+ id: bfa571de-6076-44c2-b23b-8b2a404b180d
+ jinja: '{{["Question", "Problem"] | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+ Hint: {{context}}
+
+
+ |||
+
+ {{answers["text"] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_num_hint_answer
+  reference: 'The prompt has a prefix of question or problem and chooses a random
+    number, followed by the actual question. '
+ caec82a4-f845-4e10-aad4-19111c9884c1: !Template
+ answer_choices: null
+ id: caec82a4-f845-4e10-aad4-19111c9884c1
+ jinja: 'I''m creating a final exam for my reading class. Can you please come up
+ with a good question to quiz how well students have read the following text
+ snippet?
+
+
+ {{context}}
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: exam_creation_help
+ reference: 'Question generation '
+ d760501f-7726-48ec-9d86-46c10ce408d3: !Template
+ answer_choices: null
+ id: d760501f-7726-48ec-9d86-46c10ce408d3
+ jinja: 'Generate a title for the following short passage:
+
+
+ {{context}} |||
+
+ {{title|replace("_"," ")}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title
+  reference: Prompt generates the title of the short passage
+ e5b77630-c87c-47b5-9544-5a68cd6b5a93: !Template
+ answer_choices: null
+ id: e5b77630-c87c-47b5-9544-5a68cd6b5a93
+ jinja: "{{context}}\n\nWith the help of the passage, please answer the following\
+ \ question: \n{{question}} |||\n{{answers[\"text\"]|choice}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: pick_one_answer
+ reference: The prompt randomly picks one correct answer
+ f372bda7-8ac7-4b7f-a777-37c6cdc18f34: !Template
+ answer_choices: null
+ id: f372bda7-8ac7-4b7f-a777-37c6cdc18f34
+ jinja: 'Please come up with a good question to test reading comprehension about
+ the following paragraph:
+
+
+ {{context}}
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: question generation task
+ f602988d-c3ea-4894-9fb6-7fadbb9d87c8: !Template
+ answer_choices: null
+ id: f602988d-c3ea-4894-9fb6-7fadbb9d87c8
+ jinja: 'I''m working on the final exam for my class and am trying to figure out
+    the answer to the question "{{question}}". I found the following info on Wikipedia
+ and I think it has the answer. Can you tell me the answer?
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam
+ reference: 'Exam style prompt '
diff --git a/promptsource/templates/squadshifts/nyt/templates.yaml b/promptsource/templates/squadshifts/nyt/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2118b25c3f91a81069dea6d51b30b65f2faa601
--- /dev/null
+++ b/promptsource/templates/squadshifts/nyt/templates.yaml
@@ -0,0 +1,179 @@
+dataset: squadshifts
+subset: nyt
+templates:
+ 00201c79-e310-4fb0-b4c1-d0ec0f126bf1: !Template
+ answer_choices: null
+ id: 00201c79-e310-4fb0-b4c1-d0ec0f126bf1
+ jinja: 'I''ve always wondered: {{question}}
+
+
+    I searched the New York Times and this is what I found. What''s the answer?
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: wondered
+ reference: 'Original task prompt '
+ 2e618671-1bb3-4943-b16c-cbef60b0c3e2: !Template
+ answer_choices: null
+ id: 2e618671-1bb3-4943-b16c-cbef60b0c3e2
+ jinja: 'Use the following answers to generate a possible short passage-question
+ pair:
+
+ {{answers["text"]|join('', '')}} |||
+
+ {{context}}
+
+ {{question}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answers_question
+ reference: Generate a question-passage pair from the answer
+ 6336eed0-3ecd-4007-8ad3-f6e615570fdf: !Template
+ answer_choices: null
+ id: 6336eed0-3ecd-4007-8ad3-f6e615570fdf
+ jinja: 'I''m working on the final exam for my class and am trying to figure out
+    the answer to the question "{{question}}". I found the following info in the
+    New York Times and I think it has the answer. Can you tell me the answer?
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam
+ reference: 'Exam style prompt '
+ 68ef7980-7c81-4768-a9d1-7d4fbbdb7a39: !Template
+ answer_choices: null
+ id: 68ef7980-7c81-4768-a9d1-7d4fbbdb7a39
+ jinja: "{{context}}\n\nWith the help of the passage, please answer the following\
+ \ question: \n{{question}} |||\n{{answers[\"text\"]|choice}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: pick_one_answer
+ reference: The prompt randomly picks one correct answer
+ 69e4dc24-544f-410f-a821-6270d57b9da7: !Template
+ answer_choices: null
+ id: 69e4dc24-544f-410f-a821-6270d57b9da7
+ jinja: 'I''m creating a final exam for my reading class. Can you please come up
+ with a good question to quiz how well students have read the following text
+ snippet?
+
+
+ {{context}}
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: exam_creation_help
+ reference: 'Question generation '
+ 6ccc816f-a773-430a-af26-c0f99a9366e0: !Template
+ answer_choices: null
+ id: 6ccc816f-a773-430a-af26-c0f99a9366e0
+ jinja: '{{["Question", "Problem"] | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+ Hint: {{context}}
+
+
+ |||
+
+ {{answers["text"] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question_num_hint_answer
+  reference: 'The prompt has a prefix of question or problem and chooses a random
+    number, followed by the actual question. '
+ 711539f5-f199-4201-89a2-e4ea569726e1: !Template
+ answer_choices: null
+ id: 711539f5-f199-4201-89a2-e4ea569726e1
+ jinja: 'Please come up with a good question to test reading comprehension about
+ the following paragraph:
+
+
+ {{context}}
+
+
+ |||
+
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question
+ reference: question generation task
+ ffc98700-eb7a-4099-b77f-9677e2ceb7d3: !Template
+ answer_choices: null
+ id: ffc98700-eb7a-4099-b77f-9677e2ceb7d3
+ jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+ {{context}}
+
+
+ |||
+
+ {{answers[''text''] | most_frequent | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: after
+ reference: First question, then answer
diff --git a/promptsource/templates/sst/default/templates.yaml b/promptsource/templates/sst/default/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7539bb6bc0bb6c1b86829fc11e755f0876d1b41
--- /dev/null
+++ b/promptsource/templates/sst/default/templates.yaml
@@ -0,0 +1,105 @@
+dataset: sst
+subset: default
+templates:
+ 23c231c1-672d-4420-a8ab-41ab930de317: !Template
+ answer_choices: no ||| yes
+ id: 23c231c1-672d-4420-a8ab-41ab930de317
+ jinja: 'Does the movie review below make someone want to watch it?
+
+
+ {{sentence}} |||
+
+    {{answer_choices[0 if label < 0.5 else 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_watch_movie
+ reference: ''
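+  # Editor's note: sst's `label` is a continuous sentiment score in [0, 1], so
+  # the classification templates in this file binarize it at 0.5 via
+  # `answer_choices[0 if label < 0.5 else 1]`, while the scoring templates below
+  # emit it directly with `'%0.1f' | format(label|float)`, i.e. rounded to one
+  # decimal place.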
+ 5119a0b5-5d82-4401-900a-7fafc1d48ff6: !Template
+ answer_choices: null
+ id: 5119a0b5-5d82-4401-900a-7fafc1d48ff6
+ jinja: 'How positive is the movie review below?
+
+ Give a score on a scale from 0 to 1.
+
+
+ {{sentence}} |||
+
+ {{''%0.1f''| format(label|float)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: sentiment scoring scale
+ reference: ''
+ 647585d3-dac6-40c3-b6d0-f02d835ae4c4: !Template
+ answer_choices: null
+ id: 647585d3-dac6-40c3-b6d0-f02d835ae4c4
+ jinja: 'How much does the movie review below make you want to watch it?
+
+ Give a score on a scale from 0 to 1.
+
+
+ {{sentence}} |||
+
+ {{''%0.1f''| format(label|float)}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: sentiment_watch_scale
+ reference: ''
+ 9453d08b-6144-4f36-a53d-232ed1dfcff4: !Template
+ answer_choices: no ||| yes
+ id: 9453d08b-6144-4f36-a53d-232ed1dfcff4
+ jinja: 'Does it seem like the reviewer who wrote this review liked the movie?
+
+
+ {{sentence}} |||
+
+ {{answer_choices[0 if label < 0.5 else 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: did_reviewer_like
+ reference: ''
+ b15994be-ca57-4924-9af7-fbaa6ee0124b: !Template
+ answer_choices: no ||| yes
+ id: b15994be-ca57-4924-9af7-fbaa6ee0124b
+ jinja: 'Is the movie review below positive?
+
+
+ {{sentence}} |||
+
+    {{answer_choices[0 if label < 0.5 else 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_classification
+ reference: ''
diff --git a/promptsource/templates/story_cloze/2016/templates.yaml b/promptsource/templates/story_cloze/2016/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c121f8340f0acbcbd8021eea0397f2a211aa171e
--- /dev/null
+++ b/promptsource/templates/story_cloze/2016/templates.yaml
@@ -0,0 +1,99 @@
+dataset: story_cloze
+subset: '2016'
+templates:
+ 1a4946f9-a0e2-4fbb-aee8-b26ead2cf6b8: !Template
+ answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+ id: 1a4946f9-a0e2-4fbb-aee8-b26ead2cf6b8
+ jinja: '{{input_sentence_1}} {{input_sentence_2}} {{input_sentence_3}} {{input_sentence_4}}
+    What is a possible continuation for the story given the following options?
+ - {{answer_choices | join("\n- ")}} ||| {{answer_choices[answer_right_ending
+ -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Answer Given options
+ reference: ''
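+  # Editor's note: `answer_choices` is itself templated here
+  # ('{{sentence_quiz1}} ||| {{sentence_quiz2}}'), so the two candidate endings
+  # come from the example's own fields; `answer_choices[answer_right_ending - 1]`
+  # then selects the gold ending, `answer_right_ending` apparently being
+  # 1-indexed (1 or 2).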
+ 1a9d53bc-eb77-4e7c-af6e-3d15b79d6cf1: !Template
+ answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+ id: 1a9d53bc-eb77-4e7c-af6e-3d15b79d6cf1
+ jinja: "Read the following story :\n\n{{input_sentence_1}}\n{{input_sentence_2}}\n\
+ {{input_sentence_3}}\n{{input_sentence_4}}\n\nChoose a possible ending for the\
+ \ previous story from the following options: \n- {{answer_choices | join(\"\\\
+ n- \")}}\n|||\n\n{{answer_choices[answer_right_ending -1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Choose Story Ending
+ reference: ''
+ 9dab69d1-cad0-4d2f-a7cc-120df233571c: !Template
+ answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+ id: 9dab69d1-cad0-4d2f-a7cc-120df233571c
+ jinja: 'Yesterday, I watched a movie. Here''s what happened: {{input_sentence_1}}
+ {{input_sentence_2}} {{input_sentence_3}} {{input_sentence_4}} What happens
+ next? - {{answer_choices | join("\n- ")}} ||| {{answer_choices[answer_right_ending
+ -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Movie What Happens Next
+ reference: ''
+ b5c8445f-2d3a-4691-bdd5-58956816702f: !Template
+ answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+ id: b5c8445f-2d3a-4691-bdd5-58956816702f
+  jinja: "What is a possible continuation for the following story? \n\n{{input_sentence_1}}\n\
+ {{input_sentence_2}}\n{{input_sentence_3}}\n{{input_sentence_4}}\n\nChoose from\
+ \ the following options:\n- {{answer_choices | join(\"\\n- \")}}\n|||\n\n{{answer_choices[answer_right_ending\
+ \ -1]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Story Continuation and Options
+ reference: ''
+ baffa716-43cf-4954-a35c-655d775321e6: !Template
+ answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+ id: baffa716-43cf-4954-a35c-655d775321e6
+ jinja: 'Generate a possible ending for the following story: {{input_sentence_1}}
+ {{input_sentence_2}} {{input_sentence_3}} {{input_sentence_4}} ||| {{answer_choices[answer_right_ending
+ -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Ending
+ reference: ''
+ c6f3d802-4f97-449f-a911-03470d418f7d: !Template
+ answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+ id: c6f3d802-4f97-449f-a911-03470d418f7d
+ jinja: 'I read the following novel: {{input_sentence_1}} {{input_sentence_2}}
+ {{input_sentence_3}} {{input_sentence_4}} What do you think is the most probable
+ ending? You can choose from the following options: - {{answer_choices | join("\n-
+ ")}} ||| {{answer_choices[answer_right_ending -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Novel Correct Ending
+ reference: ''
diff --git a/promptsource/templates/stsb_multi_mt/en/templates.yaml b/promptsource/templates/stsb_multi_mt/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e731e94ca9524771b899c3e278ffe24a2071030
--- /dev/null
+++ b/promptsource/templates/stsb_multi_mt/en/templates.yaml
@@ -0,0 +1,74 @@
+dataset: stsb_multi_mt
+subset: en
+templates:
+ 6c0bdf61-9baa-415a-bf03-fdb8789d1740: !Template
+ answer_choices: null
+ id: 6c0bdf61-9baa-415a-bf03-fdb8789d1740
+ jinja: How similar are "{{sentence1}}" and "{{sentence2}}"? Give a score between
+ {{"0.0"}} and {{"5.0"}}. ||| {{(((5*similarity_score)|round)/5)}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: Similarity_how
+ reference: ''
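+  # Editor's note: `{{(((5*similarity_score)|round)/5)}}` snaps the continuous
+  # 0-5 similarity score to the nearest multiple of 0.2 (e.g. 3.47 -> 17.35 ->
+  # round -> 17 -> 3.4), presumably to keep the target in a small discrete set
+  # of values.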
+ 6df357b5-f8ea-49d2-b304-3541acb5271a: !Template
+ answer_choices: no ||| yes
+ id: 6df357b5-f8ea-49d2-b304-3541acb5271a
+ jinja: Do you think "{{sentence1}}" and "{{sentence2}}" express the same thing?
+ ||| {{answer_choices[0 if similarity_score < 2.5 else 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Similarity_express_binary
+ reference: sst2
+ 775af665-d8a5-46b2-bfcf-2a21abc7e99c: !Template
+ answer_choices: no ||| yes
+ id: 775af665-d8a5-46b2-bfcf-2a21abc7e99c
+  jinja: Do "{{sentence1}}" and "{{sentence2}}" seem similar to you? ||| {{answer_choices[0
+ if similarity_score < 2.5 else 1]}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Similarity_seem_binary
+ reference: ''
+ 9cab340c-32ce-465d-be89-049e4a63af11: !Template
+ answer_choices: null
+ id: 9cab340c-32ce-465d-be89-049e4a63af11
+ jinja: On a scale from {{"0.0"}} to {{"5.0"}}, how similar are "{{sentence1}}"
+ and "{{sentence2}}"? ||| {{(((5*similarity_score)|round)/5)}}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: Similarity_scale
+ reference: ''
+ e0551bee-61f0-4c1e-9c3f-18c8b54439f8: !Template
+ answer_choices: null
+ id: e0551bee-61f0-4c1e-9c3f-18c8b54439f8
+ jinja: "Rate the similarity of these two sentences: ({{\"0.0\"}} being the lowest\
+ \ and {{\"5.0\"}} the highest)\n\"{{sentence1}}\" and \"{{sentence2}}\" \n|||\n\
+ {{(((5*similarity_score)|round)/5)}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Pearson Correlation
+ original_task: true
+ name: Similarity_rate
+ reference: ''
diff --git a/promptsource/templates/subjqa/books/templates.yaml b/promptsource/templates/subjqa/books/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4fa67ca3e35ae7f6c396b23af1d9fd204f2c960d
--- /dev/null
+++ b/promptsource/templates/subjqa/books/templates.yaml
@@ -0,0 +1,257 @@
+dataset: subjqa
+subset: books
+templates:
+ 071f2b19-7392-4258-8a60-5a96f3e44b0d: !Template
+ answer_choices: null
+ id: 071f2b19-7392-4258-8a60-5a96f3e44b0d
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Answer the following question with extracts from the context: {{question}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_first
+  reference: Original prompt with the context at the beginning.
+ 12812357-1dce-4e33-a6a4-a6ccea8cafcf: !Template
+ answer_choices: null
+ id: 12812357-1dce-4e33-a6a4-a6ccea8cafcf
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ Hint: The context domain is {{domain}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: domain_hint_og_task
+ reference: Original task template with the domain hint
+ 69e2f180-3b0f-4a00-b7e3-75f2a572ff06: !Template
+ answer_choices: null
+ id: 69e2f180-3b0f-4a00-b7e3-75f2a572ff06
+ jinja: "In today's exam on {{domain}}, answer the following question with the\
+ \ help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"][0]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_prompt
+ reference: 'Exam style original task prompt '
+ 82092877-688a-4e56-8617-31b113cc6653: !Template
+ answer_choices: null
+ id: 82092877-688a-4e56-8617-31b113cc6653
+ jinja: "To get full credit in today's test, answer the following question with\
+ \ the help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"] | join(\" \\n \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_without_hint
+ reference: Exam style prompt without hint
+ a217525b-caf2-4ae3-8a6e-06bd48bf4728: !Template
+ answer_choices: null
+ id: a217525b-caf2-4ae3-8a6e-06bd48bf4728
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_after
+  reference: Prompt has instructions to answer the question along with the context
+ afe5086e-d9fe-4981-bcac-67d580950110: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: afe5086e-d9fe-4981-bcac-67d580950110
+ jinja: 'Question:
+
+ {{question}}
+
+
+ On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score
+  reference: Prompt asks to rate the subjectivity of the question
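+  # Editor's note: `question_subj_level` is an integer rating from 1 to 5, so
+  # `answer_choices[question_subj_level - 1]` shifts it to a 0-based index into
+  # the "1 ||| 2 ||| 3 ||| 4 ||| 5" choices.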
+ b4a015eb-9346-4739-9ebd-5f91d2f230be: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: b4a015eb-9346-4739-9ebd-5f91d2f230be
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Question:
+
+ {{question}}
+
+
+ How would you rate the subjectivity of the question (on a 1 to 5 scale with
+ 1 being the most subjective)?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score_with_context
+ reference: The prompt asks to rate the subjectivity of the question
+ f074e3ce-966c-4d63-8d03-f0b6e5093b38: !Template
+ answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor
+ id: f074e3ce-966c-4d63-8d03-f0b6e5093b38
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Possible categories:
+
+ - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"] |
+ join("\n- ") }}
+
+
+ Context:
+
+ {{context}}
+
+
+    Which of the categories corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q
+ reference: The prompt asks to pick the category for the context
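+  # Editor's note: the inline `{% set mapping = {...} %}` dict converts the
+  # string-valued `domain` field into an index into `answer_choices`; the same
+  # six domains are also listed verbatim in the prompt, hence
+  # choices_in_prompt: true.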
+ f53cc9f9-1d34-47ff-b440-a6ad896bdc4a: !Template
+ answer_choices: books ||| electronics ||| grocery ||| movies ||| restaurants |||
+ tripadvisor
+ id: f53cc9f9-1d34-47ff-b440-a6ad896bdc4a
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Context:
+
+ {{context}}
+
+
+ Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+ corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q_after_context
+ reference: Another prompt asking to pick the correct category
diff --git a/promptsource/templates/subjqa/electronics/templates.yaml b/promptsource/templates/subjqa/electronics/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..049c073973dbf8703e95803a317fb325988bd4fc
--- /dev/null
+++ b/promptsource/templates/subjqa/electronics/templates.yaml
@@ -0,0 +1,257 @@
+dataset: subjqa
+subset: electronics
+templates:
+ 2077a669-1574-4117-84fe-e683bead8d46: !Template
+ answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor
+ id: 2077a669-1574-4117-84fe-e683bead8d46
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Possible categories:
+
+ - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"] |
+ join("\n- ") }}
+
+
+ Context:
+
+ {{context}}
+
+
+    Which of the categories corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q
+ reference: The prompt asks to pick the category for the context
+ 27a56ea6-f951-4a95-a8a4-517d1ec191ef: !Template
+ answer_choices: null
+ id: 27a56ea6-f951-4a95-a8a4-517d1ec191ef
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ Hint: The context domain is {{domain}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: domain_hint_og_task
+ reference: Original task template with the domain hint
+ 4a99dd61-9044-45e0-b8e1-c554d0f63e7b: !Template
+ answer_choices: null
+ id: 4a99dd61-9044-45e0-b8e1-c554d0f63e7b
+ jinja: "In today's exam on {{domain}}, answer the following question with the\
+ \ help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"][0]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_prompt
+ reference: 'Exam style original task prompt '
+ 68baa479-1ad6-41f4-ad48-75d06683f1d2: !Template
+ answer_choices: null
+ id: 68baa479-1ad6-41f4-ad48-75d06683f1d2
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Answer the following question with extracts from the context: {{question}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_first
+  reference: Original prompt with the context at the beginning.
+ 6caf413a-d696-4fdc-9f85-41aa0197f180: !Template
+ answer_choices: null
+ id: 6caf413a-d696-4fdc-9f85-41aa0197f180
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_after
+  reference: Prompt has instructions to answer the question along with the context
+ 6d03b0a7-0b1f-4165-af90-071baab36e49: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 6d03b0a7-0b1f-4165-af90-071baab36e49
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Question:
+
+ {{question}}
+
+
+ How would you rate the subjectivity of the question (on a 1 to 5 scale with
+ 1 being the most subjective)?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score_with_context
+ reference: The prompt asks to rate the subjectivity of the question
+ 772db86f-9435-4ac1-bc9a-8bef389e5a80: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 772db86f-9435-4ac1-bc9a-8bef389e5a80
+ jinja: 'Question:
+
+ {{question}}
+
+
+ On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score
+  reference: Prompt asks to rate the subjectivity of the question
+ b21776bb-c6b3-45c8-8898-4c502d91a4d4: !Template
+ answer_choices: null
+ id: b21776bb-c6b3-45c8-8898-4c502d91a4d4
+ jinja: "To get full credit in today's test, answer the following question with\
+ \ the help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"] | join(\" \\n \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_without_hint
+ reference: Exam style prompt without hint
+ e35fe9f1-84db-4436-9264-e30f090052f0: !Template
+ answer_choices: books ||| electronics ||| grocery ||| movies ||| restaurants |||
+ tripadvisor
+ id: e35fe9f1-84db-4436-9264-e30f090052f0
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Context:
+
+ {{context}}
+
+
+ Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+ corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q_after_context
+ reference: Another prompt asking to pick the correct category
diff --git a/promptsource/templates/subjqa/grocery/templates.yaml b/promptsource/templates/subjqa/grocery/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea3f6eaaf41ad6c85bb5f4697acad15ba3fd1efc
--- /dev/null
+++ b/promptsource/templates/subjqa/grocery/templates.yaml
@@ -0,0 +1,257 @@
+dataset: subjqa
+subset: grocery
+templates:
+ 0f728f5b-6488-439d-8a92-6e15a1d87c62: !Template
+ answer_choices: null
+ id: 0f728f5b-6488-439d-8a92-6e15a1d87c62
+ jinja: "In today's exam on {{domain}}, answer the following question with the\
+ \ help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"][0]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_prompt
+ reference: 'Exam style original task prompt '
+ 255dd1c5-3129-4f69-ae4f-3f2b47be926d: !Template
+ answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor
+ id: 255dd1c5-3129-4f69-ae4f-3f2b47be926d
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Possible categories:
+
+ - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"] |
+ join("\n- ") }}
+
+
+ Context:
+
+ {{context}}
+
+
+    Which of the categories corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q
+ reference: The prompt asks to pick the category for the context
+ 266dd0e6-b645-4590-b521-f79416605233: !Template
+ answer_choices: null
+ id: 266dd0e6-b645-4590-b521-f79416605233
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ Hint: The context domain is {{domain}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: domain_hint_og_task
+ reference: Original task template with the domain hint
+ 4857a5ed-9df9-417b-ac6e-504604ab7e37: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 4857a5ed-9df9-417b-ac6e-504604ab7e37
+ jinja: 'Question:
+
+ {{question}}
+
+
+ On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score
+  reference: Prompt asks to rate the subjectivity of the question
+ 5173e70e-7396-4932-95b6-3b740058a6bc: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 5173e70e-7396-4932-95b6-3b740058a6bc
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Question:
+
+ {{question}}
+
+
+ How would you rate the subjectivity of the question (on a 1 to 5 scale with
+ 1 being the most subjective)?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score_with_context
+ reference: The prompt asks to rate the subjectivity of the question
+ 90b561d0-307f-49aa-a642-bbbad543f498: !Template
+ answer_choices: null
+ id: 90b561d0-307f-49aa-a642-bbbad543f498
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_after
+  reference: Prompt has instructions to answer the question along with the context
+ c6ef2acd-f32f-49f5-9803-5017412f739d: !Template
+ answer_choices: null
+ id: c6ef2acd-f32f-49f5-9803-5017412f739d
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Answer the following question with extracts from the context: {{question}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_first
+  reference: Original prompt with the context at the beginning.
+ ecceacf7-6075-453c-a6fb-d2869b371cdd: !Template
+ answer_choices: null
+ id: ecceacf7-6075-453c-a6fb-d2869b371cdd
+ jinja: "To get full credit in today's test, answer the following question with\
+ \ the help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"] | join(\" \\n \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_without_hint
+ reference: Exam style prompt without hint
+ f19ac17f-ed79-4f64-9f7b-511d9f4e4c6b: !Template
+ answer_choices: books ||| electronics ||| grocery ||| movies ||| restaurants |||
+ tripadvisor
+ id: f19ac17f-ed79-4f64-9f7b-511d9f4e4c6b
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Context:
+
+ {{context}}
+
+
+ Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+ corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q_after_context
+ reference: Another prompt asking to pick the correct category
diff --git a/promptsource/templates/subjqa/movies/templates.yaml b/promptsource/templates/subjqa/movies/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..25a1e118f2479bc3cd3d4c4f1acab80f6e4c4592
--- /dev/null
+++ b/promptsource/templates/subjqa/movies/templates.yaml
@@ -0,0 +1,257 @@
+dataset: subjqa
+subset: movies
+templates:
+ 1b63e0fb-e9c3-4e6c-b5f1-3a922fcef327: !Template
+ answer_choices: null
+ id: 1b63e0fb-e9c3-4e6c-b5f1-3a922fcef327
+ jinja: "To get full credit in today's test, answer the following question with\
+ \ the help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"] | join(\" \\n \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_without_hint
+ reference: Exam style prompt without hint
+ 36c91233-23e5-4a3d-a5d9-b58a9a5db16b: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 36c91233-23e5-4a3d-a5d9-b58a9a5db16b
+ jinja: 'Question:
+
+ {{question}}
+
+
+ On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score
+  reference: Prompt asks to rate the subjectivity of the question
+ 45a2b402-b71c-4d2e-ab9d-f8ef7f941f7b: !Template
+ answer_choices: null
+ id: 45a2b402-b71c-4d2e-ab9d-f8ef7f941f7b
+ jinja: "In today's exam on {{domain}}, answer the following question with the\
+ \ help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"][0]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_prompt
+ reference: 'Exam style original task prompt '
+ 4aab6eb5-12e3-433e-90f2-6fd42d608e54: !Template
+ answer_choices: null
+ id: 4aab6eb5-12e3-433e-90f2-6fd42d608e54
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Answer the following question with extracts from the context: {{question}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_first
+  reference: Original prompt with the context at the beginning.
+ a66b2864-1bb4-4b39-8387-6ec2dc6c533f: !Template
+ answer_choices: books ||| electronics ||| grocery ||| movies ||| restaurants |||
+ tripadvisor
+ id: a66b2864-1bb4-4b39-8387-6ec2dc6c533f
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Context:
+
+ {{context}}
+
+
+ Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+ corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q_after_context
+ reference: Another prompt asking to pick the correct category
+ adde27c2-719a-4c32-b2f5-7d4d3425ad09: !Template
+ answer_choices: null
+ id: adde27c2-719a-4c32-b2f5-7d4d3425ad09
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ Hint: The context domain is {{domain}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: domain_hint_og_task
+ reference: Original task template with the domain hint
+ b55d80d5-788d-406e-be37-e911a7aa7236: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: b55d80d5-788d-406e-be37-e911a7aa7236
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Question:
+
+ {{question}}
+
+
+ How would you rate the subjectivity of the question (on a 1 to 5 scale with
+ 1 being the most subjective)?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score_with_context
+ reference: The prompt asks to rate the subjectivity of the question
+ cac989ae-ff00-4be6-b909-65cabdfb6017: !Template
+ answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor
+ id: cac989ae-ff00-4be6-b909-65cabdfb6017
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Possible categories:
+
+ - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"] |
+ join("\n- ") }}
+
+
+ Context:
+
+ {{context}}
+
+
+    Which of the categories corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q
+ reference: The prompt asks to pick the category for the context
+ fedc0f61-4cac-4baa-9f3b-283ac21fe2a4: !Template
+ answer_choices: null
+ id: fedc0f61-4cac-4baa-9f3b-283ac21fe2a4
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_after
+  reference: Prompt has instructions to answer the question along with the context
diff --git a/promptsource/templates/subjqa/restaurants/templates.yaml b/promptsource/templates/subjqa/restaurants/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fd39fb8db94dbe9a41a2ceb377399a70ac93fd25
--- /dev/null
+++ b/promptsource/templates/subjqa/restaurants/templates.yaml
@@ -0,0 +1,257 @@
+dataset: subjqa
+subset: restaurants
+templates:
+ 5177d00a-255d-4a80-bb77-2d94f40e276c: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 5177d00a-255d-4a80-bb77-2d94f40e276c
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Question:
+
+ {{question}}
+
+
+ How would you rate the subjectivity of the question (on a 1 to 5 scale with
+ 1 being the most subjective)?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level -1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score_with_context
+ reference: The prompt asks to rate the subjectivity of the question
+ 54d7fcfb-3875-44c1-90db-fcc56fb76730: !Template
+ answer_choices: null
+ id: 54d7fcfb-3875-44c1-90db-fcc56fb76730
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ Hint: The context domain is {{domain}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: domain_hint_og_task
+ reference: Original task template with the domain hint
+ 62e3ad29-aa70-4cff-9974-b199096ff002: !Template
+ answer_choices: null
+ id: 62e3ad29-aa70-4cff-9974-b199096ff002
+ jinja: "In today's exam on {{domain}}, answer the following question with the\
+ \ help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"][0]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_prompt
+ reference: 'Exam style original task prompt '
+ 7a2ecf8e-8646-42f8-a7b6-3422ceab6e85: !Template
+ answer_choices: books ||| electronics ||| grocery ||| movies ||| restaurants |||
+ tripadvisor
+ id: 7a2ecf8e-8646-42f8-a7b6-3422ceab6e85
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Context:
+
+ {{context}}
+
+
+ Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+ corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q_after_context
+ reference: Another prompt asking to pick the correct category
+ 7d900ca3-d6d6-41a8-bd64-d3c1547004d0: !Template
+ answer_choices: null
+ id: 7d900ca3-d6d6-41a8-bd64-d3c1547004d0
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Answer the following question with extracts from the context: {{question}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_first
+ reference: Original prompt with the context at the beginning.
+ 8984babd-1a5d-456e-b439-2736627f0883: !Template
+ answer_choices: null
+ id: 8984babd-1a5d-456e-b439-2736627f0883
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_after
+ reference: Prompt has instructions to answer the question along with the context
+ 8ed11c13-6160-4b19-b643-77fb6e4aff33: !Template
+ answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor
+ id: 8ed11c13-6160-4b19-b643-77fb6e4aff33
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Possible categories:
+
+ - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"] |
+ join("\n- ") }}
+
+
+ Context:
+
+ {{context}}
+
+
+ Which of the categories corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q
+ reference: The prompt asks to pick the category for the context
+ 93026d40-9586-4a24-aa77-b15b78f18ef5: !Template
+ answer_choices: null
+ id: 93026d40-9586-4a24-aa77-b15b78f18ef5
+ jinja: "To get full credit in today's test, answer the following question with\
+ \ the help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"] | join(\" \\n \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_without_hint
+ reference: Exam style prompt without hint
+ afd9a593-21db-4bf8-842c-9259a7e73e99: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: afd9a593-21db-4bf8-842c-9259a7e73e99
+ jinja: 'Question:
+
+ {{question}}
+
+
+ On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score
+ reference: Prompt asks to rate the subjectivity of the question
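
The `q_subj_score` templates above index `answer_choices` with `question_subj_level - 1`, so a subjectivity level of 1 selects the first choice. A minimal sketch of how such a template could be rendered with plain Jinja2 — the toy example record is invented for illustration, and this bypasses promptsource's own `Template` wrapper:

```python
from jinja2 import Template  # pip install jinja2

# answer_choices is stored as a single "|||"-separated string in the YAML.
answer_choices = [c.strip() for c in "1 ||| 2 ||| 3 ||| 4 ||| 5".split("|||")]

jinja_src = (
    "Question:\n{{question}}\n\n"
    "On a scale of 1 to 5 (1 being the most subjective), "
    "how subjective is the question?\n|||\n"
    "{{answer_choices[question_subj_level - 1]}}"
)

# Invented subjqa-style record; the field names follow the templates above.
example = {"question": "How was the pasta?", "question_subj_level": 4}

rendered = Template(jinja_src).render(answer_choices=answer_choices, **example)
prompt, target = (part.strip() for part in rendered.split("|||"))
print(target)  # -> "4": the 1-indexed level 4 picks answer_choices[3]
```
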
diff --git a/promptsource/templates/subjqa/tripadvisor/templates.yaml b/promptsource/templates/subjqa/tripadvisor/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c34d16b32da1d1157b97329726f6ba8e63cd674
--- /dev/null
+++ b/promptsource/templates/subjqa/tripadvisor/templates.yaml
@@ -0,0 +1,257 @@
+dataset: subjqa
+subset: tripadvisor
+templates:
+ 0cb4bf0f-6f89-4f17-bf81-9740fac3d374: !Template
+ answer_choices: books|||electronics|||grocery|||movies|||restaurants|||tripadvisor
+ id: 0cb4bf0f-6f89-4f17-bf81-9740fac3d374
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Possible categories:
+
+ - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"] |
+ join("\n- ") }}
+
+
+ Context:
+
+ {{context}}
+
+
+ Which of the categories corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q
+ reference: The prompt asks to pick the category for the context
+ 50df9d40-2fef-4b8e-a254-735b1ecbdc4f: !Template
+ answer_choices: null
+ id: 50df9d40-2fef-4b8e-a254-735b1ecbdc4f
+ jinja: "To get full credit in today's test, answer the following question with\
+ \ the help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"] | join(\" \\n \")}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_without_hint
+ reference: Exam style prompt without hint
+ 61d21137-d2b6-42a4-b682-50e92be1ec2f: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 61d21137-d2b6-42a4-b682-50e92be1ec2f
+ jinja: 'Question:
+
+ {{question}}
+
+
+ On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score
+ reference: Prompt asks to rate the subjectivity of the question
+ 892f6eeb-170e-42b7-8291-8317fa937fe7: !Template
+ answer_choices: null
+ id: 892f6eeb-170e-42b7-8291-8317fa937fe7
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_after
+ reference: Prompt has instructions to answer the question along with the context
+ 8de6ddd1-17d9-4eac-bb91-78a2f0d57f92: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 8de6ddd1-17d9-4eac-bb91-78a2f0d57f92
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Question:
+
+ {{question}}
+
+
+ How would you rate the subjectivity of the question (on a 1 to 5 scale with
+ 1 being the most subjective)?
+
+
+ |||
+
+
+ {{answer_choices[question_subj_level - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: q_subj_score_with_context
+ reference: The prompt asks to rate the subjectivity of the question
+ 97413933-0b3f-4dfd-bfb6-771a9152131a: !Template
+ answer_choices: null
+ id: 97413933-0b3f-4dfd-bfb6-771a9152131a
+ jinja: "In today's exam on {{domain}}, answer the following question with the\
+ \ help of the context. If the question cannot be answered, say Unanswerable.\n\
+ \nQuestion: \n{{question}}\n\nContext:\n{{context}}\n\n|||\n{% if (answers[\"\
+ text\"] | length) == 0 %}\n{{ \"Unanswerable\" }}\n{% else %}\n{{answers[\"\
+ text\"][0]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exam_style_prompt
+ reference: Exam style original task prompt
+ c67ab028-02fe-4a15-86a8-6c04a8b315f1: !Template
+ answer_choices: books ||| electronics ||| grocery ||| movies ||| restaurants |||
+ tripadvisor
+ id: c67ab028-02fe-4a15-86a8-6c04a8b315f1
+ jinja: '{% set mapping = {"books": 0, "electronics": 1, "grocery": 2, "movies":
+ 3, "restaurants":4 , "tripadvisor": 5} %}
+
+ Context:
+
+ {{context}}
+
+
+ Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+ corresponds to the context?
+
+
+ |||
+
+
+ {{answer_choices[mapping[domain]]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: domain_q_after_context
+ reference: Another prompt asking to pick the correct category
+ cb18c33c-44ae-43f6-856d-37644e425795: !Template
+ answer_choices: null
+ id: cb18c33c-44ae-43f6-856d-37644e425795
+ jinja: 'Context:
+
+ {{context}}
+
+
+ Answer the following question with extracts from the context: {{question}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_q_with_context_first
+ reference: Original prompt with the context at the beginning.
+ fa8d9f32-af81-44c7-99f6-a092f500b172: !Template
+ answer_choices: null
+ id: fa8d9f32-af81-44c7-99f6-a092f500b172
+ jinja: '{{question}}
+
+
+ Answer using extracts from the following context. If you can''t find an answer,
+ return {{"Unanswerable"}}
+
+
+ Context:
+
+ {{context}}
+
+
+ Hint: The context domain is {{domain}}
+
+
+ |||
+
+ {% if (answers["text"] | length) == 0 %}
+
+ {{ "Unanswerable" }}
+
+ {% else %}
+
+ {{answers["text"][0]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: domain_hint_og_task
+ reference: Original task template with the domain hint
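
All of the extractive-QA templates in the subjqa files share the same target logic: an empty `answers["text"]` list yields the literal "Unanswerable", otherwise the first gold span. A minimal sketch under the same assumptions as above (plain Jinja2, hand-built example records):

```python
from jinja2 import Template

jinja_src = (
    "Context:\n{{context}}\n\n"
    "Answer the following question with extracts from the context: {{question}}\n"
    "|||\n"
    '{% if (answers["text"] | length) == 0 %}{{ "Unanswerable" }}'
    '{% else %}{{answers["text"][0]}}{% endif %}'
)
template = Template(jinja_src)

# Hand-built records mimicking subjqa's SQuAD-style `answers` field.
answerable = {
    "context": "The rooms were clean and the staff was friendly.",
    "question": "How was the staff?",
    "answers": {"text": ["the staff was friendly"], "answer_start": [26]},
}
unanswerable = {**answerable, "answers": {"text": [], "answer_start": []}}

for ex in (answerable, unanswerable):
    print(template.render(**ex).split("|||")[1].strip())
# -> "the staff was friendly", then "Unanswerable"
```
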
diff --git a/promptsource/templates/super_glue/axb/templates.yaml b/promptsource/templates/super_glue/axb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bb25f4569ac527efeeeb2d995bffece2b9f4cba6
--- /dev/null
+++ b/promptsource/templates/super_glue/axb/templates.yaml
@@ -0,0 +1,145 @@
+dataset: super_glue
+subset: axb
+templates:
+ 1ae41916-7b4d-4ef3-b414-bfadd95d67e2: !Template
+ answer_choices: Yes ||| No
+ id: 1ae41916-7b4d-4ef3-b414-bfadd95d67e2
+ jinja: 'Given {{sentence1}} Should we assume that "{{sentence2}}" is true? Yes
+ or no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ 1b2d6e85-a5a9-4d1b-9e3b-630b490c6a34: !Template
+ answer_choices: Yes ||| No
+ id: 1b2d6e85-a5a9-4d1b-9e3b-630b490c6a34
+ jinja: '{{sentence1}} Are we justified in saying that "{{sentence2}}"? Yes or
+ no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ 23651f68-93cc-441f-b826-30dd2c6d6a93: !Template
+ answer_choices: Yes ||| No
+ id: 23651f68-93cc-441f-b826-30dd2c6d6a93
+ jinja: Given that {{sentence1}} Does it follow that {{sentence2}} Yes or no? |||
+ {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 552d6c20-ab5b-462f-b5fb-3c7b80c78dcc: !Template
+ answer_choices: Yes ||| No
+ id: 552d6c20-ab5b-462f-b5fb-3c7b80c78dcc
+ jinja: '{{sentence1}} Using only the above description and what you know about
+ the world, is "{{sentence2}}" definitely correct? Yes or no? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ 908be561-caf4-4416-9fe9-9919c3998681: !Template
+ answer_choices: Yes ||| No
+ id: 908be561-caf4-4416-9fe9-9919c3998681
+ jinja: 'Given {{sentence1}} Is it guaranteed true that "{{sentence2}}"? Yes or
+ no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ bae54ef5-c3be-4862-bdd4-a559ed04eb31: !Template
+ answer_choices: Yes ||| No
+ id: bae54ef5-c3be-4862-bdd4-a559ed04eb31
+ jinja: 'Suppose {{sentence1}} Can we infer that "{{sentence2}}"? Yes or no? |||
+ {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ c92d765f-83b1-4684-a0a3-580929b5e46b: !Template
+ answer_choices: Yes ||| No
+ id: c92d765f-83b1-4684-a0a3-580929b5e46b
+ jinja: "{{sentence1}} \n\nQuestion: Does this imply that \"{{sentence2}}\"? Yes\
+ \ or no? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ cb68ee27-c0a3-440b-b595-e90fe89539c3: !Template
+ answer_choices: Yes ||| No
+ id: cb68ee27-c0a3-440b-b595-e90fe89539c3
+ jinja: 'Given that {{sentence1}} Therefore, it must be true that "{{sentence2}}"?
+ Yes or no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ d57550ef-2f67-46eb-98cb-432dd135be16: !Template
+ answer_choices: Yes ||| No
+ id: d57550ef-2f67-46eb-98cb-432dd135be16
+ jinja: '{{sentence1}} Based on the previous passage, is it true that "{{sentence2}}"?
+ Yes or no? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ d965164b-fa96-41b5-8852-e0f6dfe5524e: !Template
+ answer_choices: True ||| False
+ id: d965164b-fa96-41b5-8852-e0f6dfe5524e
+ jinja: '{{sentence1}}
+
+ Question: {{sentence2}} True or False? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Same as reported in Figure G31 of the GPT-3 paper.
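
Every axb template ends with `||| {{ answer_choices[label] }}`: `|||` is the separator between the rendered input and the rendered target, and `label` 0/1 indexes the Yes/No choices. A small helper sketching that convention — `apply_prompt` is a hypothetical name, and real promptsource resolves templates through its own classes rather than raw Jinja2:

```python
from jinja2 import Template

def apply_prompt(jinja_src, answer_choices, example):
    """Render a promptsource-style template and split it into (input, target)."""
    choices = [c.strip() for c in answer_choices.split("|||")]
    rendered = Template(jinja_src).render(answer_choices=choices, **example)
    prompt, target = rendered.split("|||")
    return prompt.strip(), target.strip()

# Invented axb-style record: label 1 means "not entailed", i.e. "No".
example = {"sentence1": "The cat sat on the mat.",
           "sentence2": "The mat was empty.",
           "label": 1}
src = ('{{sentence1}} Are we justified in saying that "{{sentence2}}"? '
       'Yes or no? ||| {{ answer_choices[label] }}')
print(apply_prompt(src, "Yes ||| No", example))
# -> ('The cat sat on the mat. Are we justified ... Yes or no?', 'No')
```
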
diff --git a/promptsource/templates/super_glue/axg/templates.yaml b/promptsource/templates/super_glue/axg/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d247434f86b5bba3b9c83bbef047bb4f8b17641
--- /dev/null
+++ b/promptsource/templates/super_glue/axg/templates.yaml
@@ -0,0 +1,145 @@
+dataset: super_glue
+subset: axg
+templates:
+ 0f530aa8-b254-4687-8032-bab1a65610c0: !Template
+ answer_choices: Yes ||| No
+ id: 0f530aa8-b254-4687-8032-bab1a65610c0
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes
+ or no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ 0f8afaef-19a0-472f-9e9f-c803426f8f22: !Template
+ answer_choices: Yes ||| No
+ id: 0f8afaef-19a0-472f-9e9f-c803426f8f22
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes\
+ \ or no? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 3b7a57e0-7733-4b21-9bed-a381fdc2415f: !Template
+ answer_choices: Yes ||| No
+ id: 3b7a57e0-7733-4b21-9bed-a381fdc2415f
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes or no? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 4361cf07-1b58-478f-b97c-3b140832fb77: !Template
+ answer_choices: Yes ||| No
+ id: 4361cf07-1b58-478f-b97c-3b140832fb77
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes or no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ 626823f5-ff12-46d5-9e68-b2dc4bfe7cd4: !Template
+ answer_choices: True ||| False
+ id: 626823f5-ff12-46d5-9e68-b2dc4bfe7cd4
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True or False? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Same as reported in Figure G31 of the GPT-3 paper.
+ 7e1439f6-d54d-43e6-bdc7-306ad5fd9203: !Template
+ answer_choices: Yes ||| No
+ id: 7e1439f6-d54d-43e6-bdc7-306ad5fd9203
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes or
+ no? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ c008c778-7621-496e-baa3-7b5817400659: !Template
+ answer_choices: Yes ||| No
+ id: c008c778-7621-496e-baa3-7b5817400659
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes or no? |||
+ {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ d4a1dd92-e184-4843-bc1f-1f625c833249: !Template
+ answer_choices: Yes ||| No
+ id: d4a1dd92-e184-4843-bc1f-1f625c833249
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes or no?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ db13469f-7161-4670-8a59-8c1137d1fa8b: !Template
+ answer_choices: Yes ||| No
+ id: db13469f-7161-4670-8a59-8c1137d1fa8b
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes or no? |||
+ {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ e21f5367-0cc8-412d-b8d9-78548438a384: !Template
+ answer_choices: Yes ||| No
+ id: e21f5367-0cc8-412d-b8d9-78548438a384
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, is "{{hypothesis}}" definitely correct? Yes or no? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
diff --git a/promptsource/templates/super_glue/boolq/templates.yaml b/promptsource/templates/super_glue/boolq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0fa8b0ac9795ee50090e1a2a56011c2ceed0a053
--- /dev/null
+++ b/promptsource/templates/super_glue/boolq/templates.yaml
@@ -0,0 +1,190 @@
+dataset: super_glue
+subset: boolq
+templates:
+ 3e386463-1715-4578-9cba-07d11a0d3b61: !Template
+ answer_choices: False ||| True
+ id: 3e386463-1715-4578-9cba-07d11a0d3b61
+ jinja: 'Passage: {{passage}}
+
+
+ After reading this passage, I have a question: {{question}}? True or False?
+ |||
+
+ {% if label != -1 %}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: after_reading
+ reference: ''
+ 492f0f88-4370-46cd-839b-1de37a55aeda: !Template
+ answer_choices: No ||| Yes
+ id: 492f0f88-4370-46cd-839b-1de37a55aeda
+ jinja: "{{ passage }} \nQuestion: {{ question }}\nAnswer: ||| \n{% if label !=\
+ \ -1 %}\n{{ answer_choices[label] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Same as Figure G29, p. 58 of the GPT-3 paper
+ 6cb6a026-c070-470a-b75d-bb8fdf424e35: !Template
+ answer_choices: No ||| Yes
+ id: 6cb6a026-c070-470a-b75d-bb8fdf424e35
+ jinja: "{{ passage }} \n\nHaving read that, I wonder {{ question }}? |||\n{% if\
+ \ label != -1 %}\n{{ answer_choices[label] }} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "I wonder\u2026"
+ reference: ''
+ 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5: !Template
+ answer_choices: No ||| Yes
+ id: 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5
+ jinja: 'Text: {{passage}}
+
+
+ Answer the following yes/no question: {{question}}? Yes or no? |||
+
+ {% if label != -1 %}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: yes_no_question
+ reference: ''
+ 7d21d974-0624-4d4f-9e8c-644e2d009cb5: !Template
+ answer_choices: No ||| Yes
+ id: 7d21d974-0624-4d4f-9e8c-644e2d009cb5
+ jinja: "{{ passage }} \n\nHaving read that, could you tell me {{ question }}?\
+ \ ||| {% if label != -1 %}{{ answer_choices[label] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "could you tell me\u2026"
+ reference: ''
+ 922d3e87-ac58-4731-84d1-f0a40e47afb5: !Template
+ answer_choices: No ||| Yes
+ id: 922d3e87-ac58-4731-84d1-f0a40e47afb5
+ jinja: "EXAM\n1. Answer by yes or no.\n\nDocument: {{passage}}\nQuestion: {{question}}?\
+ \ ||| \n{% if label != -1 %}\n{{answer_choices[label]}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: exam
+ reference: ''
+ 9a1bf459-8047-437c-9def-f21e960429cc: !Template
+ answer_choices: No ||| Yes
+ id: 9a1bf459-8047-437c-9def-f21e960429cc
+ jinja: 'Based on the following passage, {{ question }}? {{ passage }}
+
+
+ |||
+
+ {% if label != -1 %}
+
+ {{ answer_choices[label] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the following passage
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ 9f4c6b0a-437b-40c0-b467-db4b7218d38d: !Template
+ answer_choices: False ||| True
+ id: 9f4c6b0a-437b-40c0-b467-db4b7218d38d
+ jinja: 'Exercise: read the text and answer the question by True or False.
+
+
+ Text: {{passage}}
+
+ Question: {{question}}? |||
+
+ {% if label != -1 %}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: exercise
+ reference: ''
+ b2b3cb60-d6e3-491c-a09a-8201e13e417e: !Template
+ answer_choices: No ||| Yes
+ id: b2b3cb60-d6e3-491c-a09a-8201e13e417e
+ jinja: '{{ passage }}
+
+ Based on the previous passage, {{ question }}? ||| {% if label != -1 %}{{ answer_choices[label]
+ }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ eb78772c-e81e-4b8a-a77b-b75efd1c212a: !Template
+ answer_choices: False ||| True
+ id: eb78772c-e81e-4b8a-a77b-b75efd1c212a
+ jinja: '{{passage}}
+
+
+ Q: {{question}}? True or False? |||
+
+ {% if label != -1 %}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: valid_binary
+ reference: ''
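
Unlike the axb/axg diagnostic sets, boolq has a hidden test split, so these templates guard the target with `{% if label != -1 %}`; without the guard, the conventional `-1` placeholder label would silently index the last answer choice. A sketch of the two cases (invented example records):

```python
from jinja2 import Template

src = ("{{passage}}\nQ: {{question}}? True or False? |||\n"
       "{% if label != -1 %}{{answer_choices[label]}}{% endif %}")
choices = ["False", "True"]

labeled = {"passage": "Water boils at 100 C at sea level.",
           "question": "does water boil at 100 c at sea level",
           "label": 1}
hidden = {**labeled, "label": -1}  # SuperGLUE ships test examples with label -1

for ex in (labeled, hidden):
    target = Template(src).render(answer_choices=choices, **ex).split("|||")[1]
    print(repr(target.strip()))
# -> 'True', then '' (no target leaks for the unlabeled test split)
```
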
diff --git a/promptsource/templates/super_glue/cb/templates.yaml b/promptsource/templates/super_glue/cb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..895aee368ec0f298af4b25da6f1352a3a5bd94a1
--- /dev/null
+++ b/promptsource/templates/super_glue/cb/templates.yaml
@@ -0,0 +1,226 @@
+dataset: super_glue
+subset: cb
+templates:
+ 2e76cd0f-68ca-4f03-83ed-11cf15b25a84: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 2e76cd0f-68ca-4f03-83ed-11cf15b25a84
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ 358860fd-61ad-45fd-92a6-a72ca9107ebc: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 358860fd-61ad-45fd-92a6-a72ca9107ebc
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 3f43a599-ffdb-490e-8952-c0ce41dd4621: !Template
+ answer_choices: True ||| False ||| Inconclusive
+ id: 3f43a599-ffdb-490e-8952-c0ce41dd4621
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {% if label !=-1 %}{{ answer_choices[label]
+ }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ 404eed25-558a-4d39-9515-7de46d60d4e0: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 404eed25-558a-4d39-9515-7de46d60d4e0
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 5c9b1fa9-93f0-4f82-b9e3-e0967e4d7260: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 5c9b1fa9-93f0-4f82-b9e3-e0967e4d7260
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ 6b0c6191-183d-4731-8050-ab17c909335c: !Template
+ answer_choices: Always ||| Never ||| Sometimes
+ id: 6b0c6191-183d-4731-8050-ab17c909335c
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {% if label !=-1 %}{{ answer_choices[label]
+ }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ 75db2bc2-3caa-4956-9653-13c7dd6255df: !Template
+ answer_choices: True ||| False ||| Neither
+ id: 75db2bc2-3caa-4956-9653-13c7dd6255df
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {% if label !=-1 %}{{
+ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ are no task-identifying tokens like "anli R1: ".'
+ 87237a07-7cce-470a-80ac-3e5e3a5283ba: !Template
+ answer_choices: Always ||| Never ||| Sometimes
+ id: 87237a07-7cce-470a-80ac-3e5e3a5283ba
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {%\
+ \ if label !=-1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ 8798b8a4-1f59-4c72-9c1b-3e3044a7462a: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 8798b8a4-1f59-4c72-9c1b-3e3044a7462a
+ jinja: Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ 8e3b8d3d-1362-47dc-922a-82c03f965989: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: 8e3b8d3d-1362-47dc-922a-82c03f965989
+ jinja: Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif
+ %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ 90ab1002-093c-4e54-b48f-626655e36b65: !Template
+ answer_choices: Guaranteed ||| Impossible ||| Possible
+ id: 90ab1002-093c-4e54-b48f-626655e36b65
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {% if label\
+ \ !=-1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ a485d120-6eef-4ff6-8684-42df1639b101: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: a485d120-6eef-4ff6-8684-42df1639b101
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {% if label !=-1 %}{{answer_choices[label]}}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ bee62bfa-5307-4e1c-97b2-2ad2f7bcb179: !Template
+ answer_choices: Correct ||| Incorrect ||| Inconclusive
+ id: bee62bfa-5307-4e1c-97b2-2ad2f7bcb179
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ e503b148-8e6c-43b5-9ed6-312794c54d9b: !Template
+ answer_choices: Yes ||| No ||| Maybe
+ id: e503b148-8e6c-43b5-9ed6-312794c54d9b
+ jinja: Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ ea56b7f3-6e07-45bc-b619-c527eac4a41b: !Template
+ answer_choices: True ||| False ||| Inconclusive
+ id: ea56b7f3-6e07-45bc-b619-c527eac4a41b
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
diff --git a/promptsource/templates/super_glue/copa/templates.yaml b/promptsource/templates/super_glue/copa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9e9c0a30ec813c2af4242074acafcd4669ece94b
--- /dev/null
+++ b/promptsource/templates/super_glue/copa/templates.yaml
@@ -0,0 +1,211 @@
+dataset: super_glue
+subset: copa
+templates:
+ 0edd8660-f299-4819-a5ac-633c11177228: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 0edd8660-f299-4819-a5ac-633c11177228
+ jinja: 'Exercise: choose the most plausible alternative.
+
+
+ {{ premise }} {% if question == "cause" %} because... {% else %} so... {% endif
+ %}
+
+ - {{choice1}}
+
+ - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: exercise
+ reference: ''
+ 150789fe-e309-47a1-82c9-0a4dc2c6b12b: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 150789fe-e309-47a1-82c9-0a4dc2c6b12b
+ jinja: "{% if question == \"effect\" %} \n{{ premise }} What could happen next,\
+ \ \"{{ answer_choices[0] }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label\
+ \ != -1 %}{{ answer_choices[label] }}{%endif%}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "\u2026What could happen next, C1 or C2?"
+ reference: ''
+ 4d879cbe-2fd7-424a-9d78-3f5200313fba: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 4d879cbe-2fd7-424a-9d78-3f5200313fba
+ jinja: "{{ premise }} \n\nI am hesitating between two options. Help me choose\
+ \ the more likely {% if question == \"cause\" %} cause: {% else %} effect: {%\
+ \ endif %}\n- {{choice1}}\n- {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label]\
+ \ }}{%endif%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: i_am_hesitating
+ reference: ''
+ 66ea075e-4d03-4a78-b1fa-9a5228cf0c9d: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 66ea075e-4d03-4a78-b1fa-9a5228cf0c9d
+ jinja: '{{ premise }} {% if question == "cause" %} This happened because... {%
+ else %} As a consequence... {% endif %}
+
+ Help me pick the more plausible option:
+
+ - {{choice1}}
+
+ - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: plausible_alternatives
+ reference: ''
+ 744047dc-1298-45a2-8d68-d67e3f834ded: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 744047dc-1298-45a2-8d68-d67e3f834ded
+ jinja: '"{{ answer_choices[0] }}" or "{{ answer_choices[1] }}"? {{ premise }}
+ {% if question == "cause" %} because {% else %} so {% endif %} ||| {% if label
+ != -1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "C1 or C2? premise, so/because\u2026"
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ 84da62c2-9440-4cfc-bdd4-d70c65e33a82: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 84da62c2-9440-4cfc-bdd4-d70c65e33a82
+ jinja: "{% if question == \"effect\" %} \n{{ premise }} As a result, \"{{ answer_choices[0]\
+ \ }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label != -1 %}{{ answer_choices[label]\
+ \ }}{%endif%}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "\u2026As a result, C1 or C2?"
+ reference: ''
+ 8ce80f8a-239e-4393-892c-f63dbb0d9929: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 8ce80f8a-239e-4393-892c-f63dbb0d9929
+ jinja: "{{ premise }} \n\nWhat's the best option?\n- {{choice1}}\n- {{choice2}}\n\
+ \nWe are looking for {% if question == \"cause\" %} a cause {% else %} an effect\
+ \ {% endif %}\n||| {% if label != -1 %}{{answer_choices[label]}}{%endif%}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: best_option
+ reference: ''
+ 8cf2ba73-aee5-4651-b5d4-b1b88afe4abb: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: 8cf2ba73-aee5-4651-b5d4-b1b88afe4abb
+ jinja: "{% if question == \"cause\" %} \n{{ premise }} Which may be caused by\
+ \ \"{{ answer_choices[0] }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label\
+ \ != -1 %}{{ answer_choices[label] }}{%endif%}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "\u2026which may be caused by"
+ reference: ''
+ a1f9951e-2b6b-4530-9636-9cdf4c1658c5: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: a1f9951e-2b6b-4530-9636-9cdf4c1658c5
+ jinja: 'Pick the more likely continuation of the following sentence:
+
+ {{ premise }} {% if question == "cause" %} as a result of: {% else %} as a consequence:
+ {% endif %}
+
+ - {{choice1}}
+
+ - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: more likely
+ reference: ''
+ a61d8c21-da25-47bf-b5fe-14a8edd650af: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: a61d8c21-da25-47bf-b5fe-14a8edd650af
+ jinja: '{{ premise }}
+
+
+ Select the most plausible {% if question == "cause" %} cause: {% else %} effect:
+ {% endif %}
+
+ - {{choice1}}
+
+ - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: cause_effect
+ reference: ''
+ a8bf11c3-bea2-45ba-a533-957d8bee5e2e: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: a8bf11c3-bea2-45ba-a533-957d8bee5e2e
+ jinja: "{% if question == \"cause\" %} \n{{ premise }} Why? \"{{ answer_choices[0]\
+ \ }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label != -1 %}{{ answer_choices[label]\
+ \ }}{%endif%}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "\u2026why? C1 or C2"
+ reference: ''
+ f32348cd-d3cb-4619-87b9-e24f99c78567: !Template
+ answer_choices: '{{choice1}} ||| {{choice2}}'
+ id: f32348cd-d3cb-4619-87b9-e24f99c78567
+ jinja: '{{ premise }} {% if question == "cause" %} because... {% else %} so...
+ {% endif %}
+
+ Choose between:
+
+ - {{choice1}}
+
+ - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose
+ reference: ''
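
Several copa templates are wrapped entirely in `{% if question == "cause" %}` or `{% if question == "effect" %}`, so they render to an empty string for examples of the other type and effectively apply to only half the dataset; the remaining templates branch on `question` inside a single prompt instead. A sketch of the wrapping pattern (hand-built example):

```python
from jinja2 import Template

# "...why? C1 or C2"-style template: only defined for question == "cause".
src = ('{% if question == "cause" %}\n'
       '{{ premise }} Why? "{{ answer_choices[0] }}" or "{{ answer_choices[1] }}"? '
       '||| {{ answer_choices[label] }}\n'
       '{% endif %}')

example = {"premise": "The pavement was wet.",
           "choice1": "It had rained.",
           "choice2": "The sun was out.",
           "label": 0}
choices = [example["choice1"], example["choice2"]]

for qtype in ("cause", "effect"):
    out = Template(src).render(answer_choices=choices, question=qtype, **example)
    print(qtype, "->", repr(out.strip()))
# "cause" yields the full prompt+target; "effect" yields '' (does not apply)
```
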
diff --git a/promptsource/templates/super_glue/multirc/templates.yaml b/promptsource/templates/super_glue/multirc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2107173d0c9398fe46233cdadbc93d4c9e3b214f
--- /dev/null
+++ b/promptsource/templates/super_glue/multirc/templates.yaml
@@ -0,0 +1,185 @@
+dataset: super_glue
+subset: multirc
+templates:
+ 2d95962b-a545-41ae-8d76-07ee6704ef65: !Template
+ answer_choices: No ||| Yes
+ id: 2d95962b-a545-41ae-8d76-07ee6704ef65
+ jinja: '{{paragraph}}
+
+
+ Question: {{question}}
+
+ I found this answer "{{answer}}". Is that correct? Yes or no?
+
+ |||
+
+ {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: found_this_answer
+ reference: ''
+ 42d47df9-09de-4691-8e49-7cfadd636cdd: !Template
+ answer_choices: No ||| Yes
+ id: 42d47df9-09de-4691-8e49-7cfadd636cdd
+ jinja: "{{ paragraph }}\nBased on the previous passage, {{ question }} \nIs \"\
+ {{ answer }}\" a correct answer? ||| {% if label != -1 %}{{ answer_choices[label]\
+ \ }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "is\u2026 a correct answer?"
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ 431a5c97-af33-4053-83c8-afb0dfc04448: !Template
+ answer_choices: No ||| Yes
+ id: 431a5c97-af33-4053-83c8-afb0dfc04448
+ jinja: '{{paragraph}}
+
+ Question: {{question}}
+
+
+ I am grading my students'' exercises. Is the answer "{{answer}}" correct?
+
+ |||
+
+ {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: grading
+ reference: ''
+ 4fc9e1ea-7451-4dba-a2cb-ce870e35ef8b: !Template
+ answer_choices: No ||| Yes
+ id: 4fc9e1ea-7451-4dba-a2cb-ce870e35ef8b
+ jinja: "{{ paragraph }}\n{{ question }} \nWould it be good to answer \"{{ answer\
+ \ }}\"? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "Would it be good to answer\u2026"
+ reference: ''
+ 59a2d847-27f3-4002-a125-cf9a291b3098: !Template
+ answer_choices: No ||| Yes
+ id: 59a2d847-27f3-4002-a125-cf9a291b3098
+ jinja: "{{ paragraph }}\nQuestion: {{ question }} \nIs it {{ answer }}? ||| {%\
+ \ if label != -1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "paragraph\u2026 question\u2026 is it\u2026 ?"
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ 7bf537ea-ff8d-44c7-8fc9-305b35e3be66: !Template
+ answer_choices: No ||| Yes
+ id: 7bf537ea-ff8d-44c7-8fc9-305b35e3be66
+ jinja: '{{paragraph}}
+
+
+ Decide whether "{{answer}}" is a valid answer to the following question: {{question}}
+
+ Answer yes or no.
+
+ |||
+
+ {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: decide_valid
+ reference: ''
+ 7d878b89-2774-429a-82fb-ac801379e3ae: !Template
+ answer_choices: No ||| Yes
+ id: 7d878b89-2774-429a-82fb-ac801379e3ae
+ jinja: "{{ paragraph }}\nQuestion: {{ question }} \nIs the correct answer {{ answer\
+ \ }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "is the correct answer\u2026"
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ ae9b2b0b-1731-4370-adcc-36c4a959490d: !Template
+ answer_choices: No ||| Yes
+ id: ae9b2b0b-1731-4370-adcc-36c4a959490d
+ jinja: 'Is "{{answer}}" a correct answer to the following question?
+
+ Question: {{question}}
+
+
+ Rely on the following text: {{paragraph}}
+
+ |||
+
+ {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: correct
+ reference: ''
+ b63fd1c3-b4a6-43c3-8429-6a389235b2a4: !Template
+ answer_choices: No ||| Yes
+ id: b63fd1c3-b4a6-43c3-8429-6a389235b2a4
+ jinja: '{{paragraph}}
+
+
+ Question: {{question}}
+
+ I think "{{answer}}" is a valid answer. Could you confirm? Yes or no?
+
+ |||
+
+ {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: confirm
+ reference: ''
+ d2d78b88-8845-45b5-935a-6451da00b285: !Template
+ answer_choices: No ||| Yes
+ id: d2d78b88-8845-45b5-935a-6451da00b285
+ jinja: "{{ paragraph }}\n{{ question }} \nI was going to say \"{{ answer }}\"\
+ . Does that sound right? ||| {% if label != -1 %}{{ answer_choices[label] }}{%\
+ \ endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: "I was going to say\u2026"
+ reference: ''
diff --git a/promptsource/templates/super_glue/record/templates.yaml b/promptsource/templates/super_glue/record/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6949a30046c270e886264952298a26db284027e0
--- /dev/null
+++ b/promptsource/templates/super_glue/record/templates.yaml
@@ -0,0 +1,369 @@
+dataset: super_glue
+subset: record
+templates:
+ 014b669e-2e3b-40ce-bdde-418966c7d666: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: 014b669e-2e3b-40ce-bdde-418966c7d666
+ jinja: "{{ passage }} \n{{ query }} \nWhich one is the \"{{\"@placeholder\"}}\"\
+ ? {{ entities | join(\", \") }}? ||| {% if ( answers | length ) > 0 %} {{ answers\
+ \ | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: Which one is the placeholder?
+ reference: ''
+ 11e27d59-b1f5-43a1-9ccc-17f1c3249173: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: 11e27d59-b1f5-43a1-9ccc-17f1c3249173
+ jinja: "The following document has been corrupted. Tell me what \"{{\"@placeholder\"\
+ }}\" is referring to.\n\nDocument: {{ passage }} \n{{ query }} \n||| {% if (\
+ \ answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: corrupted
+ reference: ''
+ 147656b2-2dad-4028-96c4-f19d57cd1344: !Template
+ answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: 147656b2-2dad-4028-96c4-f19d57cd1344
+ jinja: "Summary:\n\n- {{ passage.split(\"@highlight\")[1:] | join(\"\\n- \") }}\
+ \ \n\nArticle:\n\n{{ passage.split(\"@highlight\")[0] }}\n ||| {% if ( answers\
+ \ | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice) }}\
+ \ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Summary first (continuation choices)
+ reference: ''
+ 24c267d4-359e-40a9-83d2-bff904d63b09: !Template
+ answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: 24c267d4-359e-40a9-83d2-bff904d63b09
+ jinja: "Summary:\n\n- {{ passage.split(\"@highlight\")[1:] | join(\"\\n- \") }}\
+ \ \n\nArticle:\n\n{{ passage.split(\"@highlight\")[0] }}\n\nNow that you've\
+ \ read the article, please write a new sentence to add to it.\n\n||| {% if (\
+ \ answers | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice)\
+ \ }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Add sentence after summary (continuation choices)
+ reference: ''
+ 441c70e3-095a-44a1-8163-bc3b666b7ea1: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: 441c70e3-095a-44a1-8163-bc3b666b7ea1
+ jinja: "{{ passage }} \n{{ query }} \n\nYou should decide what \"{{\"@placeholder\"\
+ }}\" is referring to. Choose between:\n- {{answer_choices | join(\"\\n- \")}}\n\
+ ||| {% if ( answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: choose_between
+ reference: ''
+ 64013fb3-1afd-4e5a-8777-b164ca3b8e18: !Template
+ answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: 64013fb3-1afd-4e5a-8777-b164ca3b8e18
+ jinja: "{{ passage.split(\"@highlight\")[0] }}\n\nSummary:\n\n- {{ passage.split(\"\
+ @highlight\")[1:] | join(\"\\n- \") }} \n\n ||| {% if ( answers | length ) >\
+ \ 0 %}- {{ query | replace(\"@placeholder\", answers | choice) }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style summary only (continuation choices)
+ reference: Brown et al. 2020
+ 90fc9ecb-c706-4c03-bb7e-4fe9fcd777f6: !Template
+ answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: 90fc9ecb-c706-4c03-bb7e-4fe9fcd777f6
+ jinja: "Article:\n\n{{ passage.split(\"@highlight\")[0] }}\n\nHighlights:\n\n\
+ {{ passage.split(\"@highlight\")[1:] | join(\"\\n\") }} \n\n ||| {% if ( answers\
+ \ | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice) }}\
+ \ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: GPT-3 style with labels without hyphens (continuation choices)
+ reference: ''
+ 91555c1c-c1e4-469b-a2a4-fc952ce1a145: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: 91555c1c-c1e4-469b-a2a4-fc952ce1a145
+ jinja: "{{ passage }} \n{{ query }} \nIn the question above, the \"{{\"@placeholder\"\
+ }}\" stands for ||| {% if ( answers | length ) > 0 %}{{ answers | choice }}{%\
+ \ endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: In the question above, the placeholder stands for
+ reference: ''
+ 94577b75-2eac-4eae-b367-3b413c4188c6: !Template
+ answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: 94577b75-2eac-4eae-b367-3b413c4188c6
+ jinja: 'After reading the article, write another sentence to add to it.
+
+ {{ passage | replace("@highlight", "\n- ") }}
+
+
+ ||| {% if ( answers | length ) > 0 %}{{ query | replace("@placeholder", answers
+ | choice) }}{% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Add sentence after (continuation choices)
+ reference: ''
+ 9579b54e-4f0f-4e43-8907-af57112cc857: !Template
+ answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: 9579b54e-4f0f-4e43-8907-af57112cc857
+ jinja: "Please read the following news article and write another sentence to add\
+ \ to it.\n\n{{ passage | replace(\"@highlight\", \"\\n- \") }} \n ||| {% if\
+ \ ( answers | length ) > 0 %}{{ query | replace(\"@placeholder\", answers |\
+ \ choice) }} {% endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: News article (continuation choices)
+ reference: ''
+ 99dd38ce-32f3-4d58-93c5-59821002b9cc: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: 99dd38ce-32f3-4d58-93c5-59821002b9cc
+ jinja: "{{ passage }} \n{{ query }} \nWhat could the \"{{\"@placeholder\"}}\"\
+ \ be? {{ entities | join(\", \") }}? ||| {% if ( answers | length ) > 0 %}{{\
+ \ answers | choice }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: What could the placeholder be?
+ reference: ''
+ 9b688cf3-28bf-4f33-94cf-e73e4fa8c608: !Template
+ answer_choices: '{{entities | join("|||")}}'
+ id: 9b688cf3-28bf-4f33-94cf-e73e4fa8c608
+ jinja: '{{ passage }}
+
+ {{ query }}
+
+
+ I am trying to decide what "{{"@placeholder"}}" means in the previous text.
+
+ Help by choosing an option between:
+
+ - {{ entities | join("\n- ") }}
+
+ ||| {% if ( answers | length ) > 0 %}
+
+ {{ answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: trying_to_decide
+ reference: ''
+ a5ed27ed-162b-4ac1-9c7a-85059d5214be: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: a5ed27ed-162b-4ac1-9c7a-85059d5214be
+ jinja: "{{ passage }} \n{{ query }} \nHere, the placeholder refers to ||| {% if\
+ \ ( answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: "the placeholder refers to\u2026"
+ reference: ''
+ a99a92e0-e1ee-4ec3-a38a-3be4303ba017: !Template
+ answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: a99a92e0-e1ee-4ec3-a38a-3be4303ba017
+ jinja: "{{ passage.split(\"@highlight\")[0] }}\n\nHighlights:\n\n- {{ passage.split(\"\
+ @highlight\")[1:] | join(\"\\n- \") }} \n\nPlease write an additional highlight.\n\
+ \ ||| {% if ( answers | length ) > 0 %}- {{ query | replace(\"@placeholder\"\
+ , answers | choice) }} {% endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: New highlight (continuation choices)
+ reference: ''
+ d3fce74e-0d9d-404a-a009-9ebbf5794c2c: !Template
+ answer_choices: '{{entities | join("|||")}}'
+ id: d3fce74e-0d9d-404a-a009-9ebbf5794c2c
+ jinja: 'Exercise: Extract from the text the correct entity that "{{"@placeholder"}}"
+ is referring to.
+
+
+ {{ passage }}
+
+ {{ query }}
+
+ ||| {% if ( answers | length ) > 0 %}
+
+ {{ answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: exercise
+ reference: ''
+ de5b635e-c2f4-40bb-81ac-650f1b45564b: !Template
+ answer_choices: '{{entities | join("|||")}}'
+ id: de5b635e-c2f4-40bb-81ac-650f1b45564b
+ jinja: '{{ passage }}
+
+ {{ query }}
+
+
+ Pick one option, "{{"@placeholder"}}" refers to:
+
+ - {{answer_choices | join("\n- ")}}
+
+ ||| {% if ( answers | length ) > 0 %}
+
+ {{ answers | choice }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: pick_one_option
+ reference: ''
+ df8d0822-2cad-42de-8191-687ae47f6098: !Template
+ answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: df8d0822-2cad-42de-8191-687ae47f6098
+ jinja: "{{ passage | replace(\"@highlight\", \"\\n- \") }} \n\n ||| {% if ( answers\
+ \ | length ) > 0 %}- {{ query | replace(\"@placeholder\", answers | choice)\
+ \ }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style (continuation choices)
+ reference: Brown et al. 2020
+ dfa3052f-ede8-42c2-b99a-bc5762c4fdc6: !Template
+ answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: dfa3052f-ede8-42c2-b99a-bc5762c4fdc6
+ jinja: "Article:\n\n{{ passage.split(\"@highlight\")[0] }}\n\nHighlights:\n\n\
+ - {{ passage.split(\"@highlight\")[1:] | join(\"\\n- \") }} \n\n ||| {% if (\
+ \ answers | length ) > 0 %}- {{ query | replace(\"@placeholder\", answers |\
+ \ choice) }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: GPT-3 style with labels (continuation choices)
+ reference: Brown et al. 2020
+ e68d13c5-df75-4de0-b59e-f2eaf4af6ce7: !Template
+ answer_choices: '{{ entities | join("|||") }}'
+ id: e68d13c5-df75-4de0-b59e-f2eaf4af6ce7
+ jinja: "{{ passage }} \n{{ query }} \nCan you figure out what does the \"{{\"\
+ @placeholder\"}}\" mean? It means ||| {% if ( answers | length ) > 0 %}{{ answers\
+ \ | choice }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: "Can you figure out\u2026"
+ reference: ''
+ f7a92707-c531-42cb-81b4-063976e013cb: !Template
+ answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+ entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+ }}'
+ id: f7a92707-c531-42cb-81b4-063976e013cb
+ jinja: "{{ passage | replace(\"@highlight\", \"\\n\") }} \n ||| {% if ( answers\
+ \ | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice) }}\
+ \ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style without hyphens (continuation choices)
+ reference: Brown et al. 2020
diff --git a/promptsource/templates/super_glue/rte/templates.yaml b/promptsource/templates/super_glue/rte/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..11dd1c2c5c7e8619ea8dcc7bd25d113e32696cc4
--- /dev/null
+++ b/promptsource/templates/super_glue/rte/templates.yaml
@@ -0,0 +1,146 @@
+dataset: super_glue
+subset: rte
+templates:
+ 2b52a83c-0021-41fe-b44c-5aaa076d71a2: !Template
+ answer_choices: Yes ||| No
+ id: 2b52a83c-0021-41fe-b44c-5aaa076d71a2
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, is "{{hypothesis}}" definitely correct? Yes or no? ||| {% if label !=
+ -1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+ 2d0d63da-ffcf-4f6e-941a-b8da922be43e: !Template
+ answer_choices: Yes ||| No
+ id: 2d0d63da-ffcf-4f6e-941a-b8da922be43e
+ jinja: Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes or no?
+ ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ 4163e6f1-1a83-4c73-b867-02eb7ac80316: !Template
+ answer_choices: Yes ||| No
+ id: 4163e6f1-1a83-4c73-b867-02eb7ac80316
+ jinja: Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes or no? |||
+ {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ 8fb1c6aa-20e9-438c-bece-c6af1c746449: !Template
+ answer_choices: True ||| False
+ id: 8fb1c6aa-20e9-438c-bece-c6af1c746449
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True or False? ||| {% if label != -1 %}{{ answer_choices[label]
+ }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: Same as reported in Figure G31 of the GPT-3 paper.
+ 9e078fb4-505b-413c-bb5e-3cd16ddcf5d7: !Template
+ answer_choices: Yes ||| No
+ id: 9e078fb4-505b-413c-bb5e-3cd16ddcf5d7
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes\
+ \ or no? ||| {% if label != -1 %}{{answer_choices[label]}}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ b8dc85c6-28b6-4340-979a-8e77c2a0dde8: !Template
+ answer_choices: Yes ||| No
+ id: b8dc85c6-28b6-4340-979a-8e77c2a0dde8
+ jinja: Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes or
+ no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ e2fb58f2-b1f2-4aef-b74b-c4ee1c571fff: !Template
+ answer_choices: Yes ||| No
+ id: e2fb58f2-b1f2-4aef-b74b-c4ee1c571fff
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes or no? |||
+ {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ ed1f4b75-8826-4852-9bd6-aedf368678f5: !Template
+ answer_choices: Yes ||| No
+ id: ed1f4b75-8826-4852-9bd6-aedf368678f5
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ ee0ce095-122a-4509-bf0b-33d1495295f7: !Template
+ answer_choices: Yes ||| No
+ id: ee0ce095-122a-4509-bf0b-33d1495295f7
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes or no?
+ ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ fb4f8144-37f5-4977-88da-37a5d0bfd0e8: !Template
+ answer_choices: Yes ||| No
+ id: fb4f8144-37f5-4977-88da-37a5d0bfd0e8
+ jinja: Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
diff --git a/promptsource/templates/super_glue/wic/templates.yaml b/promptsource/templates/super_glue/wic/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1ca77970d09443b61c0bb7126e19f5419098f018
--- /dev/null
+++ b/promptsource/templates/super_glue/wic/templates.yaml
@@ -0,0 +1,238 @@
+dataset: super_glue
+subset: wic
+templates:
+ 14e73f39-a0d1-44c2-b9a4-4e48f9f1608e: !Template
+ answer_choices: No ||| Yes
+ id: 14e73f39-a0d1-44c2-b9a4-4e48f9f1608e
+ jinja: 'Does the word "{{word}}" have the same meaning in these two sentences?
+ Yes, No?
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question-context-meaning-with-label
+ reference: Generalized question-context format with label
+ 3503ead5-4fa5-4f77-95dc-f0c2ed3eecdc: !Template
+ answer_choices: No ||| Yes
+ id: 3503ead5-4fa5-4f77-95dc-f0c2ed3eecdc
+ jinja: 'Does the word "{{word}}" have the same meaning in these two sentences?
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question-context-meaning
+ reference: Generalized question-context format
+ 611d13dc-d414-4b9b-9204-e4f325e859e7: !Template
+ answer_choices: No ||| Yes
+ id: 611d13dc-d414-4b9b-9204-e4f325e859e7
+ jinja: 'Homework
+
+
+ Decide whether the word "{{word}}" is used with the same meaning in the two
+ following sentences. Answer by yes or no.
+
+ {{sentence1}}
+
+ {{sentence2}}
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: grammar_homework
+ reference: ''
+ 725b5ed0-7728-4890-95a4-a74cb7ae1bb4: !Template
+ answer_choices: False ||| True
+ id: 725b5ed0-7728-4890-95a4-a74cb7ae1bb4
+ jinja: 'Sentence A: {{sentence1}}
+
+ Sentence B: {{sentence2}}
+
+
+ "{{word}}" has a similar meaning in sentences A and B. True or False?
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: affirmation_true_or_false
+ reference: ''
+ c3a0a5d8-cfe9-4a7f-8a3c-3c526e0ad0c6: !Template
+ answer_choices: No ||| Yes
+ id: c3a0a5d8-cfe9-4a7f-8a3c-3c526e0ad0c6
+ jinja: '{{sentence1}}
+
+ {{sentence2}}
+
+ Question: Is the word ''{{word}}'' used in the same sense in the two sentences
+ above?
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3-prompt
+ reference: Following table G32. https://arxiv.org/pdf/2005.14165.pdf
+ ce8b5a93-1841-4897-84db-b100f1c84f4b: !Template
+ answer_choices: No ||| Yes
+ id: ce8b5a93-1841-4897-84db-b100f1c84f4b
+ jinja: 'Sentence 1: {{sentence1}}
+
+ Sentence 2: {{sentence2}}
+
+
+ Determine whether the word "{{word}}" is used in the same sense in both sentences.
+ Yes or no?
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: same_sense
+ reference: ''
+ cfbc1637-10b8-4f20-a31c-55292f3cebd0: !Template
+ answer_choices: No ||| Yes
+ id: cfbc1637-10b8-4f20-a31c-55292f3cebd0
+ jinja: "Determine if the word '{{word}}' is used in the same way in the two sentences\
+ \ below. \n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question-context
+ reference: Generalized question-context format
+ d9e1db2a-ab0b-4621-bb41-01d5788d3873: !Template
+ answer_choices: No ||| Yes
+ id: d9e1db2a-ab0b-4621-bb41-01d5788d3873
+ jinja: '{{sentence1}}
+
+ {{sentence2}}
+
+ Question: Is the word ''{{word}}'' used in the same sense in the two sentences
+ above? Yes, No?
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3-prompt-with-label
+ reference: Following table G32 in https://arxiv.org/pdf/2005.14165.pdf, with the answer options added to the prompt.
+ dd2080cf-3117-49ba-9aff-c988a21fdb69: !Template
+ answer_choices: No ||| Yes
+ id: dd2080cf-3117-49ba-9aff-c988a21fdb69
+ jinja: 'The word "{{word}}" has multiple meanings. Does it have the same meaning
+ in sentences 1 and 2? Yes or no?
+
+
+ Sentence 1: {{sentence1}}
+
+ Sentence 2: {{sentence2}}
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: polysemous
+ reference: ''
+ f934a96d-fe4d-4075-aa47-5595b9a604c7: !Template
+ answer_choices: No ||| Yes
+ id: f934a96d-fe4d-4075-aa47-5595b9a604c7
+ jinja: '{{sentence1}}
+
+ {{sentence2}}
+
+ Similar sense of {{word}}?
+
+ ||| {% if label != -1%}
+
+ {{answer_choices[label]}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: similar-sense
+ reference: Following https://arxiv.org/abs/2105.11447, https://github.com/ethanjperez/true_few_shot/tree/main/templates.super_glue
diff --git a/promptsource/templates/super_glue/wsc.fixed/templates.yaml b/promptsource/templates/super_glue/wsc.fixed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7c94ac112ceacfadb110e0163e89a05584f81256
--- /dev/null
+++ b/promptsource/templates/super_glue/wsc.fixed/templates.yaml
@@ -0,0 +1,155 @@
+dataset: super_glue
+subset: wsc.fixed
+templates:
+ 212fb8b1-8436-4f64-8f37-a9094fe029f4: !Template
+ answer_choices: No ||| Yes
+ id: 212fb8b1-8436-4f64-8f37-a9094fe029f4
+ jinja: '{{ text }} In the previous sentence, does the pronoun "{{ span2_text.lower()
+ }}" refer to {{ span1_text }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label]
+ }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does the pronoun refer to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ 23361c5d-b67f-4c2a-9da7-16301c55d0e1: !Template
+ answer_choices: No ||| Yes
+ id: 23361c5d-b67f-4c2a-9da7-16301c55d0e1
+ jinja: '{{ text }} Here, by "{{ span2_text }}" they mean "{{ span1_text }}". Yes
+ or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: by p they mean
+ reference: ''
+ 2f17f18b-6daa-44ef-a2dd-dddaf04aec0e: !Template
+ answer_choices: False ||| True
+ id: 2f17f18b-6daa-44ef-a2dd-dddaf04aec0e
+ jinja: "{{ text }} \n\nIn other words, {{ text.split(\" \")[span2_index:] | join(\"\
+ \ \") | replace(span2_text, span1_text) }} True or false? ||| {% if label !=\
+ \ -1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: in other words
+ reference: ''
+ 4b3e29cc-ccb8-4e4c-a845-4935ca29cf34: !Template
+ answer_choices: No ||| Yes
+ id: 4b3e29cc-ccb8-4e4c-a845-4935ca29cf34
+ jinja: '{{ text }} I think they mean "{{ text.split(" ")[span2_index:] | join("
+ ") | replace(span2_text, span1_text) }}" Yes or no? ||| {% if label != -1 %}{{
+ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: I think they mean
+ reference: ''
+ 7482d24f-cf45-4013-b82d-369489fc958b: !Template
+ answer_choices: No ||| Yes
+ id: 7482d24f-cf45-4013-b82d-369489fc958b
+ jinja: '{{ text }} Here, does "{{ span2_text.lower() }}" stand for {{ span1_text
+ }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does p stand for
+ reference: ''
+ 7d377293-d043-4b6c-8ec1-d61eaf14ec67: !Template
+ answer_choices: No ||| Yes
+ id: 7d377293-d043-4b6c-8ec1-d61eaf14ec67
+ jinja: "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun\
+ \ \"{{ span2_text }}\" refer to {{ span1_text }}?\n\nAnswer: ||| {% if label\
+ \ != -1 %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 Style
+ reference: Adapted from Figure G33, p. 59, Brown et al. 2020
+ 809eacd0-2f6c-4e3a-b52a-57c783879d36: !Template
+ answer_choices: No ||| Yes
+ id: 809eacd0-2f6c-4e3a-b52a-57c783879d36
+ jinja: '{{ text }} In the previous sentence, can the pronoun "{{ span2_text }}"
+ be replaced with "{{ span1_text }}"? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label]
+ }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: replaced with
+ reference: ''
+ 87f97aa0-1fa9-4f0b-b8e6-89d3c1f19bd6: !Template
+ answer_choices: False ||| True
+ id: 87f97aa0-1fa9-4f0b-b8e6-89d3c1f19bd6
+ jinja: "Context: {{ text }} \n\n{% if span2_text.lower() == \"they\" or span2_text.lower()\
+ \ == \"them\" %}\nQuestion: \"{{ span2_text }}\" are {{ span1_text }}. True\
+ \ or false?\n{% else %}\nQuestion: \"{{ span2_text }}\" is {{ span1_text }}.\
+ \ True or false?\n{% endif %}\n\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label]\
+ \ }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: p is/are r
+ reference: ''
+ aae24b54-c3a7-4f69-8b77-f6dc115988f8: !Template
+ answer_choices: False ||| True
+ id: aae24b54-c3a7-4f69-8b77-f6dc115988f8
+ jinja: "{{ text }} \nIn the passage above, the pronoun \"{{ span2_text }}\" refers\
+ \ to {{ span1_text }}. True or false? ||| {% if label != -1 %}{{ answer_choices[label]\
+ \ }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ d88f3e21-42dc-49a5-924d-69b764a14816: !Template
+ answer_choices: No ||| Yes
+ id: d88f3e21-42dc-49a5-924d-69b764a14816
+ jinja: "{{ text }} \n{% if span2_text.lower() == \"they\" or span2_text.lower()\
+ \ == \"them\" %}\nQuestion: Who or what are \"{{ span2_text.lower() }}\"? {{\
+ \ span1_text }}?\n{% else %}\nQuestion: Who or what is \"{{ span2_text.lower()\
+ \ }}\"? Is it {{ span1_text }}?\n{% endif %}\nAnswer: ||| {% if label != -1\
+ \ %}{{ answer_choices[label] }}{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Who or what is/are
+ reference: I double-checked that the only plural pronouns in WSC are "they" and "them".
diff --git a/promptsource/templates/swag/regular/templates.yaml b/promptsource/templates/swag/regular/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a737235a33e451efb620281a7f827ac170f3537
--- /dev/null
+++ b/promptsource/templates/swag/regular/templates.yaml
@@ -0,0 +1,119 @@
+dataset: swag
+subset: regular
+templates:
+ 111e2684-cd6c-4808-97c1-e452941d7550: !Template
+ answer_choices: Yes ||| No
+ id: 111e2684-cd6c-4808-97c1-e452941d7550
+ jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the sentence: {{\
+ \ sent2 }} {{ [ending0, ending1, ending2, ending3][instance] }} \nIs it an appropriate\
+ \ continuation of the following situation:\n{{ sent1 }} ?\nYes or No?\n||| \n\
+ {% if label == instance %}\n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: reversed_appropriate_continuation
+ reference: The template randomly selects a continuation and checks if the continuation
+ is appropriate for the given premise.
+ 124f20bc-fea8-415f-8f09-5c2f8d077232: !Template
+ answer_choices: Yes ||| No
+ id: 124f20bc-fea8-415f-8f09-5c2f8d077232
+ jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the sentence: {{\
+ \ sent1 }}\nIs the following an appropriate continuation?\n{{ sent2 }} {{ [ending0,\
+ \ ending1, ending2, ending3][instance] }}\nYes or No?\n||| \n{% if label ==\
+ \ instance %}\n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n{%\
+ \ endif %}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: appropriate_continuation
+ reference: The template randomly selects an ending and checks whether it is an appropriate continuation of the given sentence.
+ 25b7abe7-e357-4e93-8c31-5f7be319b705: !Template
+ answer_choices: (a) ||| (b) ||| (c) ||| (d)
+ id: 25b7abe7-e357-4e93-8c31-5f7be319b705
+ jinja: "{{ startphrase }}...\nHow does the description likely end? \n(a): {{ ending0\
+ \ }}\n(b): {{ ending1 }}\n(c): {{ ending2 }}\n(d): {{ ending3 }}\n||| \n{{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: how_ends
+ reference: 'Predict the ending key based on the startphrase. Not original task
+ because sent1 is not included.'
+ 66366555-f989-4e82-beca-2aaa92960a50: !Template
+ answer_choices: (a) ||| (b) ||| (c) ||| (d)
+ id: 66366555-f989-4e82-beca-2aaa92960a50
+ jinja: "First, {{ sent1.lower() }} Then, {{ sent2.lower() }}... \nComplete with\
+ \ an appropriate ending:\n(a) {{ ending0 }}\n(b) {{ ending1 }}\n(c) {{ ending2\
+ \ }}\n(d) {{ ending3 }}\n||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: first_then
+ reference: Generate the ending
+ d61be86a-242e-48ad-871c-f8be5133c4df: !Template
+ answer_choices: (a) ||| (b) ||| (c) ||| (d)
+ id: d61be86a-242e-48ad-871c-f8be5133c4df
+ jinja: "First, {{ sent1.lower() }} Then, {{ sent2.lower() }}... \nChoose the key\
+ \ with an appropriate ending:\n(a) {{ ending0 }}\n(b) {{ ending1 }}\n(c) {{\
+ \ ending2 }}\n(d) {{ ending3 }}\n||| \n{{answer_choices[label]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: first_then_key
+ reference: Predict the key of the ending
+ dae2999a-843e-445f-819e-9a3255cca049: !Template
+ answer_choices: null
+ id: dae2999a-843e-445f-819e-9a3255cca049
+ jinja: "{% set endings = [ending0, ending1, ending2, ending3] %}\nGenerate the\
+ \ starting sentence with the ending: {{endings[label]}}\n||| \n{{sent1}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_start
+ reference: Template generates the start of the sentence
+ ecb7eddb-5836-4b31-89d6-e2d4ebfcc779: !Template
+ answer_choices: null
+ id: ecb7eddb-5836-4b31-89d6-e2d4ebfcc779
+ jinja: 'Complete the sentence: {{ sent1 }} {{sent2}}
+
+ |||
+
+ {% set endings = [ending0, ending1, ending2, ending3] %}
+
+ {{ endings[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: open_completion
+ reference: Template for open-ended common sense completion
diff --git a/promptsource/templates/tab_fact/tab_fact/templates.yaml b/promptsource/templates/tab_fact/tab_fact/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6c375db384bb6116d4a1538bc20696ffbc6eccdf
--- /dev/null
+++ b/promptsource/templates/tab_fact/tab_fact/templates.yaml
@@ -0,0 +1,109 @@
+dataset: tab_fact
+subset: tab_fact
+templates:
+ 137a6f5d-fdcd-4849-ba3c-7ae572285ef9: !Template
+ answer_choices: null
+ id: 137a6f5d-fdcd-4849-ba3c-7ae572285ef9
+ jinja: '{% if label %}
+
+ Passage: "{{statement}}"
+
+
+ Table: "{{table_text}}"
+
+
+ Note: {{"#"}} is the delimiter between columns; {{"\\n"}} is the delimiter between
+ rows.
+
+
+ Give a suitable caption for the table.|||
+
+ {{table_caption}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Edit Distance
+ original_task: false
+ name: tab_fact_table_caption
+ reference: Generate Table Caption
+ 33e3dbc2-3b1b-4891-8c78-2b575dd3ec35: !Template
+ answer_choices: refuted ||| entailed
+ id: 33e3dbc2-3b1b-4891-8c78-2b575dd3ec35
+ jinja: "Parse the following table:\n\nTable Caption: \"{{table_caption}}\"\n\n\
+ Table:\n\n{{table_text}}\n\nNote: {{\"#\"}} is the delimiter between columns;\
+ \ {{\"\\n\"}} is the delimiter between rows.\n\nFrom the above table, the statement\
+ \ \"{{statement}}\" can either be {{\"entailed\"}} or {{\"refuted\"}}. Which\
+ \ one is it? ||| \n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: parse_table
+ reference: 'Input: Table Caption, Table; Label: Refuted/Entailed -- Affirmative
+ Form'
+ 5bf642b0-4d75-40b7-9c0a-80b38a170d0f: !Template
+ answer_choices: null
+ id: 5bf642b0-4d75-40b7-9c0a-80b38a170d0f
+ jinja: "{% if label %}\nDescribe a piece of information or conclusion obtained\
+ \ from the following table in plain English:\n\n\"{{table_caption}}\"\n\n\"\
+ {{table_text}}\" \n\nNote: {{\"#\"}} is the delimiter between columns; {{\"\\\
+ n\"}} is the delimiter between rows.\n|||\n{{statement}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Edit Distance
+ original_task: false
+ name: tab_fact_express
+ reference: Generate Natural Text from the table
+ 6e4d3fe8-1d31-4685-8ef6-419ab8554741: !Template
+ answer_choices: No ||| Yes
+ id: 6e4d3fe8-1d31-4685-8ef6-419ab8554741
+ jinja: "Is \"{{statement}}\" corroborated by \"{{table_caption}}\"\nwith the following\
+ \ table:\n{{table_text}}? \n\nNote: {{\"#\"}} is the delimiter between columns;
+ \ {{\"\\n\"}} is the delimiter between rows.\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: tab_fact_corroborated
+ reference: 'Input: Table Caption, Table; Label: Yes/No -- Interrogative Form (corroboration)'
+ becf68bd-726d-40c1-afb1-80afd461126c: !Template
+ answer_choices: No ||| Yes
+ id: becf68bd-726d-40c1-afb1-80afd461126c
+ jinja: 'I have the following paragraph: "{{statement}}". Is there any evidence
+ of this passage in the data below?
+
+
+ Topic: "{{table_caption}}"
+
+
+ {{table_text}}
+
+
+ Note: {{"#"}} is the delimiter between columns; {{"\\n"}} is the delimiter between
+ rows.
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: tab_fact_evidence
+ reference: 'Input: Table Caption, Table; Label: Yes/No -- Interrogative Form'
diff --git a/promptsource/templates/tmu_gfm_dataset/templates.yaml b/promptsource/templates/tmu_gfm_dataset/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fa60ac3bc28e4d0c75340d8944e567ea049f2f0
--- /dev/null
+++ b/promptsource/templates/tmu_gfm_dataset/templates.yaml
@@ -0,0 +1,164 @@
+dataset: tmu_gfm_dataset
+templates:
+ 2b6a9c53-7cbc-4574-b5bd-448cf7960693: !Template
+ answer_choices: null
+ id: 2b6a9c53-7cbc-4574-b5bd-448cf7960693
+ jinja: 'Supposedly Sentence B is more natural than Sentence A. How much better
+ is it on a scale from 1 to 4?
+
+
+ Sentence A: {{source}}
+
+
+ Sentence B: {{output}}
+
+ |||
+
+ {{ (((10*ave_f) | round )/10) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: fluency
+ reference: The metric is a regression task metric.
+ 2b712291-0629-4499-86cc-566ee7376271: !Template
+ answer_choices: null
+ id: 2b712291-0629-4499-86cc-566ee7376271
+ jinja: 'Sentence B is grammatically better than Sentence A. How much better is
+ it on a scale from 0 to 4?
+
+
+ Sentence A: {{source}}
+
+
+ Sentence B: {{output}}
+
+ |||
+
+ {{ (((10*ave_g) | round )/10) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: grammar
+ reference: The metric is a regression task metric.
+ 30a17c4d-2bee-450c-b921-9b748ae87c93: !Template
+ answer_choices: null
+ id: 30a17c4d-2bee-450c-b921-9b748ae87c93
+ jinja: 'Grammatically improve the below text. Note that the original meaning has
+ to be preserved and also it should sound natural.
+
+
+ Text: {{source}}
+
+ |||
+
+ {{output}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: correct-sentence
+ reference: ''
+ 9854074f-422e-47e4-bb49-e472dff76311: !Template
+ answer_choices: null
+ id: 9854074f-422e-47e4-bb49-e472dff76311
+ jinja: 'Sentence A was rewritten into Sentence B. Would you say that the original
+ meaning is well preserved? Please rate it on a scale from 0 to 4.
+
+
+ Sentence A: {{source}}
+
+
+ Sentence B: {{output}}
+
+ |||
+
+ {{ (((10*ave_m) | round )/10) }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: meaning
+ reference: The metric is a regression task metric.
+ c8347303-bfcd-4fe5-b085-dee46045850c: !Template
+ answer_choices: null
+ id: c8347303-bfcd-4fe5-b085-dee46045850c
+ jinja: 'Read the below two sentences and answer the question.
+
+
+ Sentence A: {{source}}
+
+
+ Sentence B: {{output}}
+
+
+ Question: Sentence B is an improved version of Sentence A. How would you rate
+ the improvement on a scale from 0 to 4, with respect to grammaticality, fluency,
+ and meaning preservation, respectively? Please give an answer with three numbers
+ separated by commas.
+
+ |||
+
+ {{ (((10*ave_g) | round )/10) }}, {{ (((10*ave_f) | round )/10) }}, and {{ (((10*ave_m)
+ | round )/10) }}.'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: grammar-fluency-meaning
+ reference: The metric is a regression task metric.
+ ebb2956b-25eb-4a66-ba23-569ccf9b8675: !Template
+ answer_choices: Sentence A ||| Sentence B
+ id: ebb2956b-25eb-4a66-ba23-569ccf9b8675
+ jinja: 'Which one of the following two sentences is written better? Your answer
+ should be either "Sentence A" or "Sentence B".
+
+ {% if range(0,2) | choice %}
+
+ Sentence A: {{source}}
+
+
+ Sentence B: {{output}}
+
+ |||
+
+ Sentence B
+
+ {% else %}
+
+ Sentence A: {{output}}
+
+
+ Sentence B: {{source}}
+
+ |||
+
+ Sentence A
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: choose-better
+ reference: ''
diff --git a/promptsource/templates/trec/templates.yaml b/promptsource/templates/trec/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3ebfe4993edec1de91a4b85209925460cd87a86
--- /dev/null
+++ b/promptsource/templates/trec/templates.yaml
@@ -0,0 +1,421 @@
+dataset: trec
+templates:
+ 21d04668-c5b3-4418-bbb6-663f1ffdb97c: !Template
+ answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+ ||| Location
+ id: 21d04668-c5b3-4418-bbb6-663f1ffdb97c
+ jinja: "Categories: {{', '.join(answer_choices)}}\n\nWhat category best describes:\
+ \ {{text}} \nAnswer: ||| {{ answer_choices [label_coarse] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: what_category_best_describe
+ reference: ''
+ 2d4e0add-cfca-4f54-80a2-ddd8e91f9fd6: !Template
+ answer_choices: city ||| country ||| mountain ||| state ||| other location
+ id: 2d4e0add-cfca-4f54-80a2-ddd8e91f9fd6
+ jinja: '{% set label_mapping = {21:0, 18:1, 24:2, 11:3, 14:4} %}
+
+ {% if label_coarse == 5 %}
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_LOC
+ reference: Fine grained labels with coarse-label `LOC`, context after question
+ 309bf243-2185-4090-ac66-a24f44d89966: !Template
+ answer_choices: code ||| count ||| date ||| distance ||| price ||| order ||| period
+ of time ||| percentage ||| speed ||| temperature ||| size ||| weight ||| other
+ number
+ id: 309bf243-2185-4090-ac66-a24f44d89966
+ jinja: '{% set label_mapping = {39:0, 13:1, 8:2, 40:3, 25:4, 43:5, 27:6, 38:7,
+ 35:8, 41:9, 32:10, 45:11, 14:12} %}
+
+ {% if label_coarse == 4 %}
+
+ {{text}}
+
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_NUM_context_first
+ reference: Fine grained labels with coarse-label `NUM`, context provided first
+ 3aff84f3-e478-4598-abe8-40aa24cec1fa: !Template
+ answer_choices: an animal ||| an organ of the body ||| a color ||| creative piece
+ ||| currency ||| disease or medicine ||| event ||| food ||| musical instrument
+ ||| language ||| letter ||| plant ||| product ||| religion ||| sport ||| substance
+ ||| symbol ||| technique ||| term ||| vehicle ||| word ||| other entity
+ id: 3aff84f3-e478-4598-abe8-40aa24cec1fa
+ jinja: '{% set label_mapping = {2:0, 22:1, 19:2, 1:3, 46:3, 23:4, 10:5, 17:6,
+ 33:7, 37:8, 15:9, 30:10, 26:11, 16:12, 28:13, 42:14, 31:15, 20:16, 44:17, 36:18,
+ 14:19} %}
+
+ {% if label_coarse == 1 %}
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_ENTY
+ reference: Fine grained labels with coarse-label `ENTY`, context after question
+ 43a188a2-b6dd-46a7-af2e-81a64b90b92a: !Template
+ answer_choices: code ||| count ||| date ||| distance ||| price ||| order ||| period
+ of time ||| percentage ||| speed ||| temperature ||| size ||| weight ||| other
+ number
+ id: 43a188a2-b6dd-46a7-af2e-81a64b90b92a
+ jinja: '{% set label_mapping = {39:0, 13:1, 8:2, 40:3, 25:4, 43:5, 27:6, 38:7,
+ 35:8, 41:9, 32:10, 45:11, 14:12} %}
+
+ {% if label_coarse == 4 %}
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_NUM
+ reference: Fine grained labels with coarse-label `NUM`
+ 6c391f4f-027b-4425-88de-1dbb6aa706ee: !Template
+ answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+ ||| Location
+ id: 6c391f4f-027b-4425-88de-1dbb6aa706ee
+ jinja: 'Question: {{text}}
+
+
+ Descriptors: {{'', ''.join(answer_choices)}}
+
+
+ Best Descriptor?
+
+ |||
+
+ {{answer_choices[label_coarse]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: pick_the_best_descriptor
+ reference: ''
+ 71090d59-dd02-4cbd-8032-ad86179b9bd4: !Template
+ answer_choices: Manner ||| Creative Piece ||| Animal ||| Expression abbreviated
+ ||| Individual ||| Group ||| Title ||| Definition ||| Date ||| Reason ||| Event
+ ||| State ||| Description ||| Count ||| Other ||| Letter ||| Religion ||| Food
+ ||| Country ||| Color ||| Term ||| City ||| Organ of the body ||| Disease or
+ medicine ||| Mountain ||| Price ||| Product ||| Period ||| Substance ||| Sport
+ ||| Plant ||| Technique ||| Size ||| Instrument ||| Abbreviation ||| Speed |||
+ Word ||| Language ||| Percentage ||| Code ||| Distance ||| Temperature ||| Symbol
+ ||| Order ||| Vehicle ||| Weight ||| Currency
+ id: 71090d59-dd02-4cbd-8032-ad86179b9bd4
+ jinja: '{{text}}
+
+
+ What is this question asking for?
+
+ |||
+
+ {{answer_choices[label_fine] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fine_grained_open_context_first
+ reference: Fine grained classes without providing choices, context first.
+ 736b2629-ed57-48ce-a458-4cbc435c499b: !Template
+ answer_choices: city ||| country ||| mountain ||| state ||| other location
+ id: 736b2629-ed57-48ce-a458-4cbc435c499b
+ jinja: '{% set label_mapping = {21:0, 18:1, 24:2, 11:3, 14:4} %}
+
+ {% if label_coarse == 5 %}
+
+ {{text}}
+
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_LOC_context_first
+ reference: Fine grained labels with coarse-label `LOC`, context provided first
+ 7a3ed4dd-af89-493c-8efb-c67622f63034: !Template
+ answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+ ||| Location
+ id: 7a3ed4dd-af89-493c-8efb-c67622f63034
+ jinja: "Which category best describes the following question: {{text}} \n\nChoose\
+ \ from the following list: \n{{', '.join(answer_choices)}}\n ||| {{ answer_choices\
+ \ [label_coarse] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_category_best_describes
+ reference: ''
+ 7a9e6f3c-1dee-45b0-a315-1badaf59a7b8: !Template
+ answer_choices: definition ||| description ||| manner of action ||| reason
+ id: 7a9e6f3c-1dee-45b0-a315-1badaf59a7b8
+ jinja: '{% set label_mapping={0:2, 7:1, 12:0, 9:3} %}
+
+ {% if label_coarse == 0 %}
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices[label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_DESC
+ reference: Fine grained labels with coarse-label `DESC`, context after question
+ 861d1a48-1113-4f35-b777-2b2f12ab9d5d: !Template
+ answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+ ||| Location
+ id: 861d1a48-1113-4f35-b777-2b2f12ab9d5d
+ jinja: '{{text}}
+
+
+ Is this asking about {{('', '').join(answer_choices)}}?
+
+ |||
+
+ {{ answer_choices [label_coarse] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: trec1
+ reference: Context then prompt
+ 93a06e72-2c15-4f8a-a46c-6a10919c4ea4: !Template
+ answer_choices: abbreviation ||| expression abbreviated
+ id: 93a06e72-2c15-4f8a-a46c-6a10919c4ea4
+ jinja: "{% set label_mapping={34:0, 3:1} %} \n{% if label_coarse == 2 %}\nIs this\
+ \ question asking for an {{', '.join(answer_choices)}}?\n{{text}}\n|||\n{{answer_choices[label_mapping[label_fine]]\
+ \ }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_ABBR
+ reference: Fine grained labels with coarse-label `ABBR`, context after question
+ a0096044-3b4c-4c80-b139-25eac8fe692a: !Template
+ answer_choices: abbreviation ||| expression abbreviated
+ id: a0096044-3b4c-4c80-b139-25eac8fe692a
+ jinja: "{% set label_mapping = {34:0, 3:1} %} \n{% if label_coarse == 2 %}\n{{text}}\n\
+ \nIs this question asking for an {{', '.join(answer_choices)}}?\n|||\n{{ answer_choices\
+ \ [label_mapping[label_fine]] }}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_ABBR_context_first
+ reference: Fine grained labels with coarse-label `ABBR`, context provided first
+ aad2def1-b694-40ee-9c26-3d1cf5c577da: !Template
+ answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+ ||| Location
+ id: aad2def1-b694-40ee-9c26-3d1cf5c577da
+ jinja: 'Is the following question asking about {{'', ''.join(answer_choices)}}?
+
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices [label_coarse] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: trec2
+ reference: Prompt then context
+ bc58ba18-24a5-4553-be0a-2dba60efdad6: !Template
+ answer_choices: group ||| individual ||| title ||| description
+ id: bc58ba18-24a5-4553-be0a-2dba60efdad6
+ jinja: '{% set label_mapping = {5:0, 4:1, 6:2, 12:3} %}
+
+ {% if label_coarse == 3 %}
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices[label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_HUM
+ reference: Fine grained labels with coarse-label `HUM`, context after question
+ cfa8fde0-8320-4050-8d6e-7619ab14adea: !Template
+ answer_choices: Manner ||| Creative Piece ||| Animal ||| Expression abbreviated
+ ||| Individual ||| Group ||| Title ||| Definition ||| Date ||| Reason ||| Event
+ ||| State ||| Description ||| Count ||| Other ||| Letter ||| Religion ||| Food
+ ||| Country ||| Color ||| Term ||| City ||| Organ of the body ||| Disease or
+ medicine ||| Mountain ||| Price ||| Product ||| Period ||| Substance ||| Sport
+ ||| Plant ||| Technique ||| Size ||| Instrument ||| Abbreviation ||| Speed |||
+ Word ||| Language ||| Percentage ||| Code ||| Distance ||| Temperature ||| Symbol
+ ||| Order ||| Vehicle ||| Weight ||| Currency
+ id: cfa8fde0-8320-4050-8d6e-7619ab14adea
+ jinja: 'What is this question asking for?
+
+
+ {{text}}
+
+ |||
+
+ {{ answer_choices[label_fine] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fine_grained_open
+ reference: Fine grained classes without providing choices.
+ e98b9294-76b4-4172-a78c-9c6e5fdfe73b: !Template
+ answer_choices: group ||| individual ||| title ||| description
+ id: e98b9294-76b4-4172-a78c-9c6e5fdfe73b
+ jinja: '{% set label_mapping = {5:0, 4:1, 6:2, 12:3} %}
+
+ {% if label_coarse == 3 %}
+
+ {{text}}
+
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}{% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_HUM_context_first
+ reference: Fine grained labels with coarse-label `HUM`, context provided first
+ fa588c55-5c69-4fd0-a0b1-edbfa092f710: !Template
+ answer_choices: definition ||| description ||| manner of action ||| reason
+ id: fa588c55-5c69-4fd0-a0b1-edbfa092f710
+ jinja: '{% set label_mapping={0:2, 7:1, 12:0, 9:3} %}
+
+ {% if label_coarse == 0 %}
+
+ {{text}}
+
+
+ Is this question asking for {{'', ''.join(answer_choices)}}?
+
+ |||
+
+ {{ answer_choices [label_mapping[label_fine]] }}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: fine_grained_DESC_context_first
+ reference: Fine grained labels with coarse-label `DESC`, context provided first
diff --git a/promptsource/templates/trivia_qa/unfiltered/templates.yaml b/promptsource/templates/trivia_qa/unfiltered/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c25041f4738ff3ac28517856068a172a606302ee
--- /dev/null
+++ b/promptsource/templates/trivia_qa/unfiltered/templates.yaml
@@ -0,0 +1,79 @@
+dataset: trivia_qa
+subset: unfiltered
+templates:
+ 5946db1a-a068-4a31-a06f-74a7d976cb6d: !Template
+ answer_choices: null
+ id: 5946db1a-a068-4a31-a06f-74a7d976cb6d
+ jinja: "{% if answer.aliases %} \n Guess a question that has the answer \"\
+ {{answer.aliases|choice}}\" \n ||| \n {{question}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: guess_question
+ reference: Guess a question.
+ 7ada9605-6fd1-49a9-a56e-6d778d4a0eb6: !Template
+ answer_choices: null
+ id: 7ada9605-6fd1-49a9-a56e-6d778d4a0eb6
+ jinja: "The goal is to predict an English answer string for an input English question.\
+ \ \nQuestion : {{question}}\nAnswer : \n||| \n{% if answer.aliases %} \n{{answer.aliases|choice}}\
+ \ \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: formal_description
+ reference: ''
+ 91d9f950-a25a-4557-a16f-952d74629584: !Template
+ answer_choices: null
+ id: 91d9f950-a25a-4557-a16f-952d74629584
+ jinja: "Answer the following question.\n{{question}} \n|||\n{% if answer.aliases\
+ \ %} \n{{answer.aliases|choice}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question_with_instruction
+ reference: Instruction before question.
+ bfec3d73-c024-492f-8878-64fdb6639a29: !Template
+ answer_choices: null
+ id: bfec3d73-c024-492f-8878-64fdb6639a29
+ jinja: "I've always wondered: {{question}} \n||| \n{% if answer.aliases %} \n\
+ {{answer.aliases|choice}}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: first_person_context
+ reference: Ask a question in first person
+ c29c7072-0535-4e38-ba0c-b7ac0acdacf8: !Template
+ answer_choices: null
+ id: c29c7072-0535-4e38-ba0c-b7ac0acdacf8
+ jinja: "Question : {{question}}\nAnswer : \n||| \n{% if answer.aliases %} \n{{answer.aliases|choice}}\n\
+ {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ - Other
+ original_task: true
+ name: question_answer
+ reference: Plain Question
diff --git a/promptsource/templates/turk/templates.yaml b/promptsource/templates/turk/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c61b67552c9403cc95c1de5968ef913cbfffd06b
--- /dev/null
+++ b/promptsource/templates/turk/templates.yaml
@@ -0,0 +1,136 @@
+dataset: turk
+templates:
+ 58d1370e-3fc0-4b96-9e74-950b7c3edfd9: !Template
+ answer_choices: null
+ id: 58d1370e-3fc0-4b96-9e74-950b7c3edfd9
+ jinja: '{% set real_simplifications = [] %}{% for text in simplifications %}{%
+ if text|length < original|length %}{{real_simplifications.append(text) | default("",
+ True)}}{% endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ Simplify the below sentence.
+
+
+ {{original}}
+
+ |||
+
+ {{real_simplifications | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: simplify
+ reference: This is not the original task, since the original task is the analysis
+ of simplification, not the simplification itself.
+ da3fb228-7383-497f-936c-9bcbcd0d057b: !Template
+ answer_choices: Text A ||| Text B
+ id: da3fb228-7383-497f-936c-9bcbcd0d057b
+ jinja: '{% set real_simplifications = [] %}{% for text in simplifications %}{%
+ if text|length < original|length %}{{real_simplifications.append(text) | default("",
+ True)}}{% endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ Read the below two texts and answer the question.
+
+ {% if range(0,2) | choice %}
+
+ Text A: {{original}}
+
+
+ Text B: {{real_simplifications | choice }}
+
+
+ One of the texts above is more verbose than the other. Which one is the verbose
+ one? Your answer should be either "Text A" or "Text B".
+
+ |||
+
+ Text A
+
+ {% else %}
+
+ Text A: {{real_simplifications | choice }}
+
+
+ Text B: {{original}}
+
+
+ One of the texts above is more verbose than the other. Which one is the verbose
+ one? Your answer should be either "Text A" or "Text B".
+
+ |||
+
+ Text B
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: choose-verbose
+ reference: ''
+ dc853532-e948-443b-bae1-3ebb968bc7c5: !Template
+ answer_choices: null
+ id: dc853532-e948-443b-bae1-3ebb968bc7c5
+ jinja: '{% set real_simplifications = [] %}{% for text in simplifications %}{%
+ if text|length < original|length %}{{real_simplifications.append(text) | default("",
+ True)}}{% endif %}{% endfor %}
+
+ {% if real_simplifications %}
+
+ Make the following sentence more verbose.
+
+
+ {{real_simplifications | choice}}
+
+ |||
+
+ {{original}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: make-verbose
+ reference: ''
+ ec70cd03-8282-4e45-957f-927c60254ad4: !Template
+ answer_choices: Text A ||| Text B
+ id: ec70cd03-8282-4e45-957f-927c60254ad4
+ jinja: "{% set real_simplifications = [] %}{% for text in simplifications %}{%\
+ \ if text|length < original|length %}{{real_simplifications.append(text) | default(\"\
+ \", True)}}{% endif %}{% endfor %}\n{% if real_simplifications %}\n{% if range(0,2)\
+ \ | choice %}\nText A: {{original}}\n\nText B: {{real_simplifications | choice\
+ \ }}\n\nOne of the texts above is a simplification of the other. Which one is\
+ \ the simplified one? Your answer should be either \"Text A\" or \"Text B\"\
+ .\n|||\nText B\n{% else %}\nText A: {{real_simplifications | choice }}\n\nText\
+ \ B: {{original}}\n\nOne of the texts above is a simplification of the other.\
+ \ Which one is the simplified one? Your answer should be either \"Text A\" or\
+ \ \"Text B\".\n|||\nText A\n{% endif %} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose-simplification
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/emoji/templates.yaml b/promptsource/templates/tweet_eval/emoji/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..fb85973d3b8c636d310b9dba4984bdf0634a2924
--- /dev/null
+++ b/promptsource/templates/tweet_eval/emoji/templates.yaml
@@ -0,0 +1,68 @@
+dataset: tweet_eval
+subset: emoji
+templates:
+ 8c794abe-5364-430f-aa1e-eb3501443cec: !Template
+ answer_choices: "\u2764|||\U0001F60D|||\U0001F602|||\U0001F495|||\U0001F525|||\U0001F60A\
+ |||\U0001F60E|||\u2728|||\U0001F499|||\U0001F618|||\U0001F4F7|||\U0001F1FA\U0001F1F8\
+ |||\u2600|||\U0001F49C|||\U0001F609|||\U0001F4AF|||\U0001F601|||\U0001F384|||\U0001F4F8\
+ |||\U0001F61C"
+ id: 8c794abe-5364-430f-aa1e-eb3501443cec
+ jinja: 'Which emoji among {{answer_choices | join(", ")}} best describes the sentiment
+ of the following tweet?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: emoji_option
+ reference: 'official metric: macro-averaged F1'
+ c05f50e0-f708-44bc-98e7-ff7b3f9f5d93: !Template
+ answer_choices: "\u2764|||\U0001F60D|||\U0001F602|||\U0001F495|||\U0001F525|||\U0001F60A\
+ |||\U0001F60E|||\u2728|||\U0001F499|||\U0001F618|||\U0001F4F7|||\U0001F1FA\U0001F1F8\
+ |||\u2600|||\U0001F49C|||\U0001F609|||\U0001F4AF|||\U0001F601|||\U0001F384|||\U0001F4F8\
+ |||\U0001F61C"
+ id: c05f50e0-f708-44bc-98e7-ff7b3f9f5d93
+ jinja: 'Which emoji among {{answer_choices | join(", ")}} would be the best comment
+ to the following tweet?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: emoji_reply
+ reference: 'Metric: macro-averaged F1'
+ d5c771d3-28e7-420e-af47-c077cfe0e7e5: !Template
+ answer_choices: "\u2764|||\U0001F60D|||\U0001F602|||\U0001F495|||\U0001F525|||\U0001F60A\
+ |||\U0001F60E|||\u2728|||\U0001F499|||\U0001F618|||\U0001F4F7|||\U0001F1FA\U0001F1F8\
+ |||\u2600|||\U0001F49C|||\U0001F609|||\U0001F4AF|||\U0001F601|||\U0001F384|||\U0001F4F8\
+ |||\U0001F61C"
+ id: d5c771d3-28e7-420e-af47-c077cfe0e7e5
+ jinja: 'Which emoji best describes the sentiment of the following tweet?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: emoji
+ reference: 'official metric: macro-averaged F1'
diff --git a/promptsource/templates/tweet_eval/emotion/templates.yaml b/promptsource/templates/tweet_eval/emotion/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..86ba7aa0d1571bc0daaf1311d12a7976f2225aee
--- /dev/null
+++ b/promptsource/templates/tweet_eval/emotion/templates.yaml
@@ -0,0 +1,103 @@
+dataset: tweet_eval
+subset: emotion
+templates:
+ 7c09c33e-31f2-414b-89a1-6b1dda92ef6f: !Template
+ answer_choices: anger ||| joy ||| optimism ||| sadness
+ id: 7c09c33e-31f2-414b-89a1-6b1dda92ef6f
+ jinja: '{{text}}
+
+
+ To get full credit in this exam, choose the correct emotion from the following
+ choices: {{answer_choices | join(", ")}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: emotion_exam
+ reference: exam style prompt
+ 87db02f2-585e-4fd1-81c0-e94297607097: !Template
+ answer_choices: anger ||| joy ||| optimism ||| sadness
+ id: 87db02f2-585e-4fd1-81c0-e94297607097
+ jinja: 'Which emotion among {{answer_choices | join(", ")}} best describes the
+ feeling of the author of the following tweet?
+
+
+ {{text}}|||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: author_emotion
+ reference: ''
+ 8bc3ebc5-77f1-4d55-bd96-c62429ebf093: !Template
+ answer_choices: anger ||| joy ||| optimism ||| sadness
+ id: 8bc3ebc5-77f1-4d55-bd96-c62429ebf093
+ jinja: 'Which emotion is best represented by the following tweet?
+
+ {{text}}
+
+
+ Possible emotions: {{answer_choices | join(", ")}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: which_emotion
+ reference: ''
+ a5992077-2e31-467b-a6ee-b75dee933d0e: !Template
+ answer_choices: anger ||| joy ||| optimism ||| sadness
+ id: a5992077-2e31-467b-a6ee-b75dee933d0e
+ jinja: "{{text}}\n\nCategorize the tweet into one of the following options: \n\
+ (a) {{answer_choices[0]}}\n(b) {{answer_choices[1]}}\n(c) {{answer_choices[2]}}\n\
+ (d) {{answer_choices[3]}}\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: emotion_with_option
+ reference: ''
+ b8f4912e-e3be-4dd5-82ec-6f110c056a86: !Template
+ answer_choices: anger ||| joy ||| optimism ||| sadness
+ id: b8f4912e-e3be-4dd5-82ec-6f110c056a86
+ jinja: '{{text}}
+
+
+ What is the emotion of the text?
+
+
+ Hint: {{answer_choices | join(", ")}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: emotion_hint
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/hate/templates.yaml b/promptsource/templates/tweet_eval/hate/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5654fdb5451ce799d094198eb6b2b58cf8a5918
--- /dev/null
+++ b/promptsource/templates/tweet_eval/hate/templates.yaml
@@ -0,0 +1,84 @@
+dataset: tweet_eval
+subset: hate
+templates:
+ 3266f9d4-9c80-4e17-a8a6-1fe44ce8f3bf: !Template
+ answer_choices: no ||| yes
+ id: 3266f9d4-9c80-4e17-a8a6-1fe44ce8f3bf
+ jinja: 'Does this tweet convey the author''s hatred towards something or someone?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: author_hate
+ reference: ''
+ 34a5128b-6fc8-453b-94d4-4ebaa87172c1: !Template
+ answer_choices: no ||| yes
+ id: 34a5128b-6fc8-453b-94d4-4ebaa87172c1
+ jinja: 'Does this tweet convey hate: yes or no?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: hate_no_yes
+ reference: ''
+ 5b15dc68-05a2-42dd-b0bf-fa15d4f40320: !Template
+ answer_choices: no|||yes
+ id: 5b15dc68-05a2-42dd-b0bf-fa15d4f40320
+ jinja: "Is this a hateful tweet? \n{{text}}\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: hate_question
+ reference: ''
+ 971ad470-85bf-484e-aab7-b942f817bf2c: !Template
+ answer_choices: non-hate ||| hate
+ id: 971ad470-85bf-484e-aab7-b942f817bf2c
+ jinja: 'Does this tweet convey {{"hate"}} or {{"non-hate"}}?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: hate_options
+ reference: ''
+ b0cdecc2-78a2-47e7-a74e-38d509c01214: !Template
+ answer_choices: no ||| yes
+ id: b0cdecc2-78a2-47e7-a74e-38d509c01214
+ jinja: "In this test, you need to answer with either yes or no. \n\nQ: Is this\
+ \ a hateful tweet? \n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: hate_exam
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/irony/templates.yaml b/promptsource/templates/tweet_eval/irony/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3b851ae385a64058b4e1e0e74a07f6dc61219e6
--- /dev/null
+++ b/promptsource/templates/tweet_eval/irony/templates.yaml
@@ -0,0 +1,84 @@
+dataset: tweet_eval
+subset: irony
+templates:
+ c45095f1-9be1-4e83-8daa-68805b6ece39: !Template
+ answer_choices: no ||| yes
+ id: c45095f1-9be1-4e83-8daa-68805b6ece39
+ jinja: "Is this tweet is ironic? \n\n{{text}} |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: irony_question
+ reference: ''
+ cd2ed852-c6fa-431a-b0f1-06f0240d74a0: !Template
+ answer_choices: no ||| yes
+ id: cd2ed852-c6fa-431a-b0f1-06f0240d74a0
+ jinja: "Is this tweet ironic? Answer with either yes or no. \n\n{{text}} |||\n\
+ {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: irony_yes_no
+ reference: ''
+ cef39e97-09f2-430c-ad1f-5fd9d05c876b: !Template
+ answer_choices: no ||| yes
+ id: cef39e97-09f2-430c-ad1f-5fd9d05c876b
+ jinja: "In this test, you need to answer with either yes or no. \n\nQ: Is this\
+ \ an ironic tweet? \n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: irony_exam
+ reference: exam style prompt
+ e7cff075-9388-48de-af51-fe185b654217: !Template
+ answer_choices: non-irony ||| irony
+ id: e7cff075-9388-48de-af51-fe185b654217
+ jinja: 'Does this tweet contain {{"irony"}} or {{"non-irony"}}?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: irony_option
+ reference: ''
+ f8361684-dad1-4c2c-9f22-1128f3096c20: !Template
+    answer_choices: no ||| yes
+ id: f8361684-dad1-4c2c-9f22-1128f3096c20
+ jinja: 'Does this tweet convey the author''s irony towards something or someone?
+
+
+ {{text}}
+
+ |||
+
+ {{answer_choices[label]}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: irony_author
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/offensive/templates.yaml b/promptsource/templates/tweet_eval/offensive/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..276a9c7b489ae578b29ca9b81cf8cae670d28015
--- /dev/null
+++ b/promptsource/templates/tweet_eval/offensive/templates.yaml
@@ -0,0 +1,87 @@
+dataset: tweet_eval
+subset: offensive
+templates:
+ 10850707-80f3-4a75-b9f4-1e2d12be04c0: !Template
+ answer_choices: no ||| yes
+ id: 10850707-80f3-4a75-b9f4-1e2d12be04c0
+ jinja: "Is this tweet {{\"offensive\"}}? \n\n{{text}} |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: offensive_question
+ reference: ''
+ 22ecf073-2932-4b0a-b305-1f92b696d0b4: !Template
+ answer_choices: no ||| yes
+ id: 22ecf073-2932-4b0a-b305-1f92b696d0b4
+ jinja: "Can the tweet be removed for being offensive? Answer with a yes or a no.\
+ \ \n\n{{text}}\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: offensive_tweet_removed
+ reference: ''
+ 90a80d37-922d-40c0-b036-87eac64ce4c5: !Template
+ answer_choices: no ||| yes
+ id: 90a80d37-922d-40c0-b036-87eac64ce4c5
+ jinja: 'Is the author''s tweet offensive? Answer with either yes or no.
+
+
+ {{text}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: offensive_author
+ reference: ''
+ a12bd98b-facc-4b17-bb16-80c98a20aa64: !Template
+ answer_choices: no ||| yes
+ id: a12bd98b-facc-4b17-bb16-80c98a20aa64
+    jinja: 'Task: Identify whether the tweet is offensive.
+
+
+ Tweet: {{text}}
+
+
+ Possible answers: yes, no
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: offensive_task
+ reference: ''
+ bf2cea43-0666-4eb5-814d-00956afd1900: !Template
+ answer_choices: no ||| yes
+ id: bf2cea43-0666-4eb5-814d-00956afd1900
+ jinja: "In this test, you need to answer with either yes or no.\n\nQ: Is this\
+ \ an offensive tweet?\n\n{{text}}\n\nA: \n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: offensive_exam
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/sentiment/templates.yaml b/promptsource/templates/tweet_eval/sentiment/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ace74214cc07d882f64d4769728b5d501406291
--- /dev/null
+++ b/promptsource/templates/tweet_eval/sentiment/templates.yaml
@@ -0,0 +1,84 @@
+dataset: tweet_eval
+subset: sentiment
+templates:
+ 1fef2b36-3a19-4179-9b43-d67887cff299: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: 1fef2b36-3a19-4179-9b43-d67887cff299
+ jinja: 'In this exam, you need to pick the correct sentiment for the tweet:
+
+
+ {{text}}
+
+
+ Possible choices: {{answer_choices | join(", ")}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_exam
+ reference: ''
+ 6702e8cd-9764-4c88-86a9-046f84c98ef2: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: 6702e8cd-9764-4c88-86a9-046f84c98ef2
+ jinja: "What is the sentiment of the tweet?\n\n{{text}} \n\nPossible choices:\
+ \ {{answer_choices | join(\", \")}}\n|||\n{{answer_choices[label]}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_question
+ reference: ''
+ 6c6c797c-9912-4778-906b-16f465941d16: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: 6c6c797c-9912-4778-906b-16f465941d16
+ jinja: "Task: Identify the sentiment of the tweet.\n\nTweet: {{text}}\n\nOptions:\
+ \ {{answer_choices | join(\", \")}} \n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_task
+ reference: ''
+ b70647cf-22a0-49b2-b45e-23432c635cc2: !Template
+    answer_choices: negative ||| neutral ||| positive
+ id: b70647cf-22a0-49b2-b45e-23432c635cc2
+ jinja: "Suppose you are the moderator of Twitter, what would be the sentiment\
+ \ of the following tweet: \n\n{{text}}\n\nOptions: {{answer_choices | join(\"\
+ , \")}}\n|||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_moderator
+ reference: ''
+ ec68fd8e-92a3-4010-b0df-b14af95421a3: !Template
+ answer_choices: negative ||| neutral ||| positive
+ id: ec68fd8e-92a3-4010-b0df-b14af95421a3
+ jinja: "{{text}}\n\nCategorize the tweet into one of the following options: \n\
+ (a) {{answer_choices[0]}} \n(b) {{answer_choices[1]}} \n(c) {{answer_choices[2]}}\n\
+ |||\n{{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: sentiment_options
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_abortion/templates.yaml b/promptsource/templates/tweet_eval/stance_abortion/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..833ecde9b6129f4ea819f9bbe5f8436009b04207
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_abortion/templates.yaml
@@ -0,0 +1,108 @@
+dataset: tweet_eval
+subset: stance_abortion
+templates:
+ 0d1dc279-e50c-4706-bc3d-84ea01cb59a1: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 0d1dc279-e50c-4706-bc3d-84ea01cb59a1
+ jinja: 'Does the author express any stance about abortion in the following text?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: abortion_predict_stance
+ reference: ''
+ 22758062-db86-4009-81a4-1e2a2e1052f2: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 22758062-db86-4009-81a4-1e2a2e1052f2
+ jinja: '{{text}} Where does the author of the above sentence stand on abortion?
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: abortion_guess_passive_author
+ reference: ''
+ 615151f8-ac5b-4c0e-a234-9e9b6296a2f2: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 615151f8-ac5b-4c0e-a234-9e9b6296a2f2
+    jinja: 'Which option among {{"neutral"}}, {{"against"}}, and {{"in favor"}} best
+ describes the stance of this tweet regarding abortion?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: abortion_option
+ reference: ''
+ 687ffa1e-a772-48b1-9291-ba4e530a909e: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 687ffa1e-a772-48b1-9291-ba4e530a909e
+ jinja: 'Is this tweet neutral, in favor of, or against abortion?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: abortion
+ reference: ''
+ c5507588-1d20-42f9-935f-0c767294f5a9: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: c5507588-1d20-42f9-935f-0c767294f5a9
+ jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(",
+ ")}}
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: abortion_how_describe
+ reference: ''
+ ee12d37b-5667-4b0e-9831-f952d08152b5: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: ee12d37b-5667-4b0e-9831-f952d08152b5
+ jinja: '{{text}} Where does the above sentence stand on abortion? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: abortion_guess_passive
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_atheism/templates.yaml b/promptsource/templates/tweet_eval/stance_atheism/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0365325e1b49cb248f22e481ae99f05846f922b0
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_atheism/templates.yaml
@@ -0,0 +1,108 @@
+dataset: tweet_eval
+subset: stance_atheism
+templates:
+ 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8f: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8f
+ jinja: '{{text}} Where does the above sentence stand on atheism? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: atheism_guess_passive
+ reference: ''
+ 4309e10d-c9a9-4a17-8561-15270b998905: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 4309e10d-c9a9-4a17-8561-15270b998905
+ jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(",
+ ")}}
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: atheism_how_describe
+ reference: ''
+ 7e47c6b8-2923-4580-a275-a2b8867a3d96: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 7e47c6b8-2923-4580-a275-a2b8867a3d96
+    jinja: 'Which option among {{"neutral"}}, {{"against"}}, and {{"in favor"}} best
+ describes the stance of this tweet regarding atheism?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: atheism_option
+ reference: ''
+ 89aa258e-3c3b-4d1c-8ac4-fe2c838b76e4: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 89aa258e-3c3b-4d1c-8ac4-fe2c838b76e4
+ jinja: 'Does the author express any stance about atheism in the following text?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: atheism_predict_stance
+ reference: ''
+ 97ef9418-7c92-455d-a4c5-d7b91668278c: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 97ef9418-7c92-455d-a4c5-d7b91668278c
+ jinja: 'Is this tweet neutral, in favor of, or against atheism?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: atheism
+ reference: ''
+ f28307ab-563e-4189-99b5-e0d858e9ab4c: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: f28307ab-563e-4189-99b5-e0d858e9ab4c
+ jinja: '{{text}} Where does the author of the above sentence stand on atheism?
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: atheism_guess_passive_author
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_climate/templates.yaml b/promptsource/templates/tweet_eval/stance_climate/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f114b4b5fd8259426d219c66105022fda3520d7f
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_climate/templates.yaml
@@ -0,0 +1,109 @@
+dataset: tweet_eval
+subset: stance_climate
+templates:
+ 2ebf2eaa-ef9f-413d-b7bf-cb2037330d2a: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 2ebf2eaa-ef9f-413d-b7bf-cb2037330d2a
+    jinja: 'Which option among {{"neutral"}}, {{"against"}}, and {{"in favor"}} best
+ describes the stance of this tweet regarding climate change?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: climate_option
+ reference: ''
+ 6f4205ad-6321-42a9-bf8e-a45508e67c1a: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 6f4205ad-6321-42a9-bf8e-a45508e67c1a
+ jinja: '{{text}} Where does the above sentence stand on climate change? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: climate_guess_passive
+ reference: ''
+ 703f067e-5930-424e-9882-48063307ff8e: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 703f067e-5930-424e-9882-48063307ff8e
+ jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(",
+ ")}}
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: climate_how_describe
+ reference: ''
+ 8ecd5059-742a-4833-95a1-bf0e25e9abfc: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 8ecd5059-742a-4833-95a1-bf0e25e9abfc
+ jinja: '{{text}} Where does the author of the above sentence stand on climate
+ change? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: climate_guess_passive_author
+ reference: ''
+ cd82620b-6d1d-42f7-af89-56980cbb69a5: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: cd82620b-6d1d-42f7-af89-56980cbb69a5
+ jinja: 'Does the author express any stance about climate change in the following
+ text?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: climate_predict_stance
+ reference: ''
+ edcdde10-b2e4-4954-82e2-f84fd57fc122: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: edcdde10-b2e4-4954-82e2-f84fd57fc122
+ jinja: 'Is this tweet neutral, in favor of, or against climate change?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: climate_change
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_feminist/templates.yaml b/promptsource/templates/tweet_eval/stance_feminist/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ec2b5a7fe0e97b46130b9825643bf790dbe8438
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_feminist/templates.yaml
@@ -0,0 +1,108 @@
+dataset: tweet_eval
+subset: stance_feminist
+templates:
+ 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8b: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 2f6bfe8f-c45e-4f1d-a623-91a9cfd0ea8b
+ jinja: '{{text}} Where does the above sentence stand on feminism? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: feminism_guess_passive
+ reference: ''
+ 4309e10d-c9a9-4a17-8561-15270b99890b: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 4309e10d-c9a9-4a17-8561-15270b99890b
+ jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(",
+ ")}}
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: feminism_how_describe
+ reference: ''
+ 7e47c6b8-2923-4580-a275-a2b8867a3d9b: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 7e47c6b8-2923-4580-a275-a2b8867a3d9b
+    jinja: 'Which option among {{"neutral"}}, {{"against"}}, and {{"in favor"}} best
+ describes the stance of this tweet regarding feminism?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: feminism_option
+ reference: ''
+ 89aa258e-3c3b-4d1c-8ac4-fe2c838b76eb: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 89aa258e-3c3b-4d1c-8ac4-fe2c838b76eb
+ jinja: 'Does the author express any stance about feminism in the following text?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: feminism_predict_stance
+ reference: ''
+ 97ef9418-7c92-455d-a4c5-d7b91668278b: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 97ef9418-7c92-455d-a4c5-d7b91668278b
+ jinja: 'Is this tweet neutral, in favor of, or against feminism?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: feminism
+ reference: ''
+ f28307ab-563e-4189-99b5-e0d858e9ab4b: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: f28307ab-563e-4189-99b5-e0d858e9ab4b
+ jinja: '{{text}} Where does the author of the above sentence stand on feminism?
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: feminism_guess_passive_author
+ reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_hillary/templates.yaml b/promptsource/templates/tweet_eval/stance_hillary/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68cd2bc4a8d9dbe4734dd19fc03094146e15b22c
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_hillary/templates.yaml
@@ -0,0 +1,108 @@
+dataset: tweet_eval
+subset: stance_hillary
+templates:
+ 21ba1c40-b491-43ed-96d6-7423b55c3bcf: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 21ba1c40-b491-43ed-96d6-7423b55c3bcf
+ jinja: 'Does the author express any stance about Hillary in the following text?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Hillary_predict_stance
+ reference: ''
+ 41502ea8-73a4-48a4-a15e-ab2ac7700457: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 41502ea8-73a4-48a4-a15e-ab2ac7700457
+    jinja: 'Which option among {{"neutral"}}, {{"against"}}, and {{"in favor"}} best
+ describes the stance of this tweet regarding Hillary?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Hillary_option
+ reference: ''
+ 498f1dec-12dc-4082-a44e-82fcae004bb8: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 498f1dec-12dc-4082-a44e-82fcae004bb8
+ jinja: 'Is this tweet neutral, in favor of, or against Hillary?
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Hillary
+ reference: ''
+ 5c451846-349a-44ad-83ef-d0f8e5d2bd6b: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 5c451846-349a-44ad-83ef-d0f8e5d2bd6b
+ jinja: '{{text}} Where does the above sentence stand on Hillary? |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Hillary_guess_passive
+ reference: ''
+ 83f10728-2347-46e9-b365-724f47e65877: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: 83f10728-2347-46e9-b365-724f47e65877
+ jinja: '{{text}} Where does the author of the above sentence stand on Hillary?
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Hillary_guess_passive_author
+ reference: ''
+ b521857a-9d4f-4e21-848b-0baf7f4a636c: !Template
+ answer_choices: Neutral ||| Against ||| In favor
+ id: b521857a-9d4f-4e21-848b-0baf7f4a636c
+ jinja: 'How would you describe the stance used in this tweet? {{answer_choices|join(",
+ ")}}
+
+
+ {{text}} |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Hillary_how_describe
+ reference: ''
diff --git a/promptsource/templates/tydiqa/primary_task/templates.yaml b/promptsource/templates/tydiqa/primary_task/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..445d45e739e707ef64b4fe03be720cfd73b95703
--- /dev/null
+++ b/promptsource/templates/tydiqa/primary_task/templates.yaml
@@ -0,0 +1,173 @@
+dataset: tydiqa
+subset: primary_task
+templates:
+ 16f11e56-a78d-4e33-bba1-586f9947baf7: !Template
+ answer_choices: Yes ||| No ||| None
+ id: 16f11e56-a78d-4e33-bba1-586f9947baf7
+ jinja: '{% if language == "english" %}
+
+ I wonder {{question_text}}.
+
+ Help me answer this question with "{{answer_choices[0]}}" or "{{answer_choices[1]}}"
+ or "{{answer_choices[2]}}" if none of the first two answers apply.
+
+ Here''s what I found on the internet:
+
+ Topic: {{document_title}}
+
+ Article: {{document_plaintext}}
+
+ |||
+
+ {{annotations.yes_no_answer[0] | capitalize}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: heres_what_I_found
+ reference: ''
+ 297fc59f-bd92-493b-ae61-3c3adcb46eb3: !Template
+ answer_choices: Yes ||| No ||| None
+ id: 297fc59f-bd92-493b-ae61-3c3adcb46eb3
+ jinja: "{% if language == \"english\" %} \nQuestion: {{question_text}}\nAnswer\
+ \ the question with {{\"Yes\"}} or {{\"No\"}}. If it is not possible then answer\
+ \ {{\"None\"}}.\nHint: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0]\
+ \ | capitalize}}\n{% endif %}"
+ metadata: !TemplateMetadata
+      choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: yes_no_none
+ reference: ''
+ 4b21e3be-fba4-49b7-beb1-a61de26eb0ac: !Template
+ answer_choices: Yes ||| No
+ id: 4b21e3be-fba4-49b7-beb1-a61de26eb0ac
+ jinja: "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0]\
+ \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question\
+ \ about {{document_title}}. {{question_text}}\n||| \n{{annotations.yes_no_answer[0]\
+ \ | capitalize}} \n {% endif %} \n{% endif %} "
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: open_domain_qa_without_choices
+ reference: Answer Yes/No question
+ 6835dd64-96bd-4bf8-9ba5-645d6a7b8472: !Template
+ answer_choices: Yes ||| No
+ id: 6835dd64-96bd-4bf8-9ba5-645d6a7b8472
+ jinja: '{% if language == "english" %}
+
+ {{question_text}}
+
+ Is this a "Yes/No" question?
+
+ |||
+
+      {% if annotations.yes_no_answer[0] == "NONE" %}
+
+ No
+
+ {% else %}
+
+ Yes
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: yes_no_question
+ reference: Ask if this is a yes no question
+ 7b8b7707-dbad-40d2-a5c2-430e6ace10bb: !Template
+ answer_choices: Yes ||| No ||| None
+ id: 7b8b7707-dbad-40d2-a5c2-430e6ace10bb
+ jinja: '{% if language == "english" %}
+
+ Answer the following question with "{{answer_choices[0]}}" or "{{answer_choices[1]}}"
+ or "{{answer_choices[2]}}" if none of the first two answers apply.
+
+ Question: {{question_text}}
+
+ Topic: {{document_title}}
+
+ Article: {{document_plaintext}}
+
+ |||
+
+ {{annotations.yes_no_answer[0] | capitalize}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: read_and_answer
+ reference: ''
+ 9c42e3fd-d46e-4149-bb60-4b3118104d95: !Template
+ answer_choices: Yes ||| No
+ id: 9c42e3fd-d46e-4149-bb60-4b3118104d95
+ jinja: "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0]\
+ \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAfter reading the\
+ \ following text snippet from Wikipedia, please answer the question: {{question_text}}\
+ \ \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n\
+ \ {% endif %}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: after_reading_the_text
+ reference: Reading Comprehension
+ b4f7c441-41b1-4665-93f9-f2e875aed92a: !Template
+ answer_choices: Yes ||| No
+ id: b4f7c441-41b1-4665-93f9-f2e875aed92a
+ jinja: "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0]\
+ \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question\
+ \ about {{document_title}}.\nQuestion: {{question_text}}. Yes or No?\n||| \n\
+ {{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: open_domain_qa
+ reference: Ask Yes/No question
+ e593017f-9bcf-4442-944d-fcdf2edcb4f7: !Template
+ answer_choices: Yes ||| No
+ id: e593017f-9bcf-4442-944d-fcdf2edcb4f7
+ jinja: "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0]\
+ \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nBased on the following\
+ \ text snippet, {{question_text}} \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0]\
+ \ | capitalize}}\n {% endif %}\n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based_on_the_text
+ reference: Binary question without mentioning KB
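All of the tydiqa/primary_task templates above are wrapped in `{% if language == "english" %}`, and several also guard on `annotations.yes_no_answer[0]`, so a non-matching row renders to an empty string. Any consumer of these templates presumably has to drop such empty renders; a small self-contained sketch of that filtering under the same `|||` convention (the rows are hypothetical and this is not the library's loader):

# Sketch: guarded templates render to whitespace for non-matching rows,
# so empty renders are filtered out. Hypothetical rows; assumed semantics.
from jinja2 import Template

JINJA = (
    '{% if language == "english" %}\n'
    "Question: {{question_text}}\n"
    'Answer the question with {{"Yes"}} or {{"No"}}. If it is not possible then answer {{"None"}}.\n'
    "Hint: {{document_plaintext}}\n|||\n"
    "{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}"
)

def render(row):
    parts = Template(JINJA).render(**row).split("|||")
    if len(parts) != 2 or not parts[0].strip():
        return None  # the language guard failed; nothing to train or evaluate on
    return parts[0].strip(), parts[1].strip()

rows = [
    {"language": "english", "question_text": "Is the sky blue?",
     "document_plaintext": "The sky appears blue because ...",
     "annotations": {"yes_no_answer": ["YES"]}},
    {"language": "finnish", "question_text": "Onko taivas sininen?",
     "document_plaintext": "...", "annotations": {"yes_no_answer": ["NONE"]}},
]
pairs = [p for p in map(render, rows) if p]  # keeps only the English row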
diff --git a/promptsource/templates/tydiqa/secondary_task/templates.yaml b/promptsource/templates/tydiqa/secondary_task/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5023fc6e5d518c6e379d133d3b6dfc4bb5dc08e
--- /dev/null
+++ b/promptsource/templates/tydiqa/secondary_task/templates.yaml
@@ -0,0 +1,236 @@
+dataset: tydiqa
+subset: secondary_task
+templates:
+ 047ed162-f58b-42d5-81aa-0a17a9750230: !Template
+ answer_choices: null
+ id: 047ed162-f58b-42d5-81aa-0a17a9750230
+ jinja: "{% set lang = id.split('-')[0] %}\n{% if lang == \"english\" %}\nSnippet:\
+ \ {{context}}\nI know that the answer to \"{{question}}\" appears somewhere\
+ \ in the text snippet about {{title}}. Can you answer the question?\n|||\n{{answers.text\
+ \ | choice}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: can_you_answer_the_question
+ reference: Ask question with Context
+ 05c99237-0e03-4ec9-95f3-dfde8ae25605: !Template
+ answer_choices: null
+ id: 05c99237-0e03-4ec9-95f3-dfde8ae25605
+ jinja: '{% set _blank2 = ["title", "context", "topic" ] %}
+
+ {% set _blank1 = ["guess", "generate", "determine"] %}
+
+ {% set _blank=["passage", "text", "text snippet", "info"]|random %}
+
+ {% set _position = ["above", "following"] |random %}
+
+ {% set lang = id.split(''-'')[0] %}
+
+ {% if lang == "english" %}
+
+ {% if _position == "above" %}
+
+ {{context}}{{"\n"}}
+
+ {% endif %}
+
+    Can you {{_blank1|random}} the {{_blank2|random}} of the {{_position}} passage?
+
+ {% if _position == "following" %}
+
+ {{"\n"}}{{context}}
+
+ {% endif %}
+
+ |||
+
+ {{title}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: title_generation
+ reference: Generate title from a passage
+ 1f4728ba-b25e-450e-975f-6dc8c0cb4bb1: !Template
+ answer_choices: null
+ id: 1f4728ba-b25e-450e-975f-6dc8c0cb4bb1
+ jinja: '{% set lang = id.split(''-'')[0] %}
+
+ {% if lang == "english" %}
+
+ Could you generate a question whose answer is {{answers.text | choice}} based
+ on the following context: {{context}}
+
+ |||
+
+ {{question}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: end_to_end_question_generation
+ reference: generate end-to-end question from a passage.
+ 3d7ee9fe-ac53-4cf3-9913-431425225a5c: !Template
+ answer_choices: null
+ id: 3d7ee9fe-ac53-4cf3-9913-431425225a5c
+ jinja: '{% set lang = id.split(''-'')[0] %}
+
+ {% if lang == "english" %}
+
+ I am trying to figure out the answer to the question "{{question}}"
+
+ I found this text about {{title}} on Wikipedia and I think it contains the answer.
+ Can you tell me the answer?
+
+ Text: {{context}}
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: can_you_tell_me_the_answer
+ reference: Ask question with Context
+ 696c888c-3419-4e4c-b559-1d9772fa60ab: !Template
+ answer_choices: null
+ id: 696c888c-3419-4e4c-b559-1d9772fa60ab
+ jinja: "{% set lang = id.split('-')[0] %}\n{% if lang == \"english\" %}\nExtract\
+    \ from the passage the answer to the question: {{question}}\nPassage about {{title}}:\
+ \ {{context}}\n|||\n{{answers.text | choice}} \n{% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: extract_answer
+ reference: Ask question with Context
+ c2356ac6-7761-43b8-9fb9-38ed25c0db9b: !Template
+ answer_choices: null
+ id: c2356ac6-7761-43b8-9fb9-38ed25c0db9b
+ jinja: '{% set lang = id.split("-")[0] %}
+
+ {% if lang == "english" %}
+
+ I am testing my students'' knowledge about {{title}}.
+
+ Based on the context ({{context}}), here''s the question to answer: {{question}}.
+ The answer is in the context.
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: testing_students
+ reference: ''
+ d0966d12-6d15-4138-b273-5fe2e1619ff9: !Template
+ answer_choices: null
+ id: d0966d12-6d15-4138-b273-5fe2e1619ff9
+ jinja: '{% set lang = id.split(''-'')[0] %}
+
+ {% if lang == "english" %}
+
+    Could you generate a question about {{title}} whose answer is {{answers.text
+ | choice}} based on the following context: {{context}}
+
+ |||
+
+ {{question}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: end_to_end_question_generation_with_title
+ reference: generate end-to-end question from a passage with a title
+ f276815f-f7c0-4dab-b12e-08e76da6d760: !Template
+ answer_choices: null
+ id: f276815f-f7c0-4dab-b12e-08e76da6d760
+ jinja: '{% set lang = id.split(''-'')[0] %}
+
+ {% if lang == "english" %}
+
+ {{question}}
+
+ Answer the question above.
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: false
+ name: simple_question_odqa
+ reference: Ask question without Context
+ fcf0920f-5599-44a6-bf2a-9ef6bbbe1e64: !Template
+ answer_choices: null
+ id: fcf0920f-5599-44a6-bf2a-9ef6bbbe1e64
+ jinja: '{% set lang = id.split(''-'')[0] %}
+
+ {% if lang == "english" %}
+
+ I''ve always wondered: {{question}}
+
+ I searched Wikipedia and I found the following text snippet about {{title}}.
+
+ Snippet: {{context}}
+
+ What''s the answer?
+
+ |||
+
+ {{answers.text | choice}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: whats_the_answer
+ reference: Ask question with Context
diff --git a/promptsource/templates/web_questions/templates.yaml b/promptsource/templates/web_questions/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d98f106e323e0cf7c2a81e6b8abd49b4a07185f7
--- /dev/null
+++ b/promptsource/templates/web_questions/templates.yaml
@@ -0,0 +1,69 @@
+dataset: web_questions
+templates:
+ 427785bc-a8f3-4c86-bd43-e54447a58615: !Template
+ answer_choices: null
+ id: 427785bc-a8f3-4c86-bd43-e54447a58615
+ jinja: 'Give me the correct facts to answer this: {{question}} ||| {{answers |
+ choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: get_the_answer
+ reference: ''
+ 9f4cd4a4-79e5-40b2-bb0d-f9a86396511a: !Template
+ answer_choices: null
+ id: 9f4cd4a4-79e5-40b2-bb0d-f9a86396511a
+ jinja: Give me a possible correct answer to the question "{{ question }}" |||
+ {{ answers | choice }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: potential-correct-answer
+ reference: ''
+ bfed45a7-b36c-440b-8c94-f117cc6c9f34: !Template
+ answer_choices: null
+ id: bfed45a7-b36c-440b-8c94-f117cc6c9f34
+ jinja: 'What''s the answer to that question: {{question}} ||| {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: whats_the_answer
+ reference: ''
+ df08956c-035b-4216-af1c-61250617faa4: !Template
+ answer_choices: null
+ id: df08956c-035b-4216-af1c-61250617faa4
+ jinja: 'Short general knowledge question: {{question}} ||| {{answers | choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: short_general_knowledge_q
+ reference: ''
+ e5c72a6b-8ab4-4219-9f41-debf7224884c: !Template
+ answer_choices: null
+ id: e5c72a6b-8ab4-4219-9f41-debf7224884c
+ jinja: '{{ question|capitalize }} ||| {{ answers | choice }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: question-answer
+ reference: ''
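The `{{answers | choice}}` expression used as the target in every web_questions template is not a built-in Jinja filter; promptsource registers a custom `choice` filter that appears to pick one element from the list, so a single gold answer becomes the target string. A sketch of registering such a filter with `jinja2`, under that assumed semantics:

# Sketch: a "choice" filter like the one these templates rely on. The
# random-pick semantics is an assumption, not promptsource's exact code.
import random
from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice

template = env.from_string(
    "Short general knowledge question: {{question}} ||| {{answers | choice}}"
)
out = template.render(question="what is the capital of France?",
                      answers=["Paris", "Paris, France"])
prompt, target = (part.strip() for part in out.split("|||"))
print(target)  # one of the listed gold answers, e.g. "Paris"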
diff --git a/promptsource/templates/wiki_bio/templates.yaml b/promptsource/templates/wiki_bio/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e7e65906d6deefb9dca798e94625272bcf185a37
--- /dev/null
+++ b/promptsource/templates/wiki_bio/templates.yaml
@@ -0,0 +1,129 @@
+dataset: wiki_bio
+templates:
+ 0e240546-0d3c-4049-9cc7-32039a6e50ff: !Template
+ answer_choices: null
+ id: 0e240546-0d3c-4049-9cc7-32039a6e50ff
+ jinja: 'Facts:
+
+ {% for n in range (input_text["table"]["column_header"]|length) %}
+
+ {% if input_text["table"]["column_header"][n] != "article_title" %}
+
+ - {{input_text["table"]["column_header"][n].replace("_"," ") }}: {{input_text["table"]["content"][n]
+ }}
+
+ {% endif %}
+
+ {% endfor %}
+
+ Based on these bullet points, write a short biography describing the life of
+ {{input_text["context"]}}. |||
+
+ {{target_text}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ - Other
+ original_task: true
+ name: who
+ reference: ''
+ 53694802-680c-4584-8b07-686f56c45278: !Template
+ answer_choices: null
+ id: 53694802-680c-4584-8b07-686f56c45278
+ jinja: "Read the bio below and try to give details on {{input_text[\"context\"\
+ ]}}'s: \n{% for n in range (input_text[\"table\"][\"column_header\"]|length)\
+ \ %} {% if input_text[\"table\"][\"column_header\"][n] != \"article_title\"\
+ \ %}\n- {{ input_text[\"table\"][\"column_header\"][n].replace(\"_\",\" \")\
+ \ }} \n{% endif %} {% endfor %}\n\nBio: {{target_text}} |||\n{% for n in range\
+ \ (input_text[\"table\"][\"column_header\"]|length) %}\n{% if input_text[\"\
+ table\"][\"column_header\"][n] != \"article_title\" %}\n- {{ input_text[\"table\"\
+ ][\"column_header\"][n].replace(\"_\",\" \") }} is {{ input_text[\"table\"][\"\
+ content\"][n] }}\n{% endif %}\n{% endfor %}\n"
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: comprehension
+ reference: ''
+ 788c1adf-bc8e-40ed-a81f-ac6f0dfcb471: !Template
+ answer_choices: null
+ id: 788c1adf-bc8e-40ed-a81f-ac6f0dfcb471
+ jinja: "What type of details about {{input_text[\"context\"]}} can be gathered\
+ \ from the following bio?\n\nBio: {{target_text}} |||\n{% for n in range (input_text[\"\
+ table\"][\"column_header\"]|length) %}\n{% if input_text[\"table\"][\"column_header\"\
+ ][n] != \"article_title\" %}\n- {{ input_text[\"table\"][\"column_header\"][n].replace(\"\
+ _\",\" \") }} \n{% endif %}\n{% endfor %}"
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: what_content
+ reference: ''
+ a954e5bb-c763-4d8e-82a8-7e96cfce8b78: !Template
+ answer_choices: null
+ id: a954e5bb-c763-4d8e-82a8-7e96cfce8b78
+ jinja: '{% for n in range (input_text["table"]["column_header"]|length) %}
+
+ {% if input_text["table"]["column_header"][n] != "article_title" and input_text["table"]["column_header"][n]
+ !="name" %}
+
+ - {{ input_text["table"]["column_header"][n].replace("_"," ") }} is {{ input_text["table"]["content"][n]
+ }}
+
+ {% endif %}
+
+ {% endfor %}
+
+
+    Given the details above, guess who this information could be about. |||
+
+ {{input_text["context"]}}
+
+ '
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: guess_person
+ reference: ''
+ d14f4527-cd06-484b-af25-ba9082bcda38: !Template
+ answer_choices: null
+ id: d14f4527-cd06-484b-af25-ba9082bcda38
+ jinja: 'What key details about {{input_text["context"]}} can be extracted from
+ the following bio?
+
+
+ Bio: {{target_text}} |||
+
+ {% for n in range (input_text["table"]["column_header"]|length) %}
+
+ {% if input_text["table"]["column_header"][n] != "article_title" %}
+
+ - {{ input_text["table"]["column_header"][n].replace("_"," ") }} is {{ input_text["table"]["content"][n]
+ }}
+
+ {% endif %}
+
+ {% endfor %}'
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: key_content
+ reference: ''
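Every wiki_bio template above walks two parallel lists, `input_text["table"]["column_header"]` and `input_text["table"]["content"]`, skipping the `article_title` row and replacing underscores in headers. The same loop in plain Python, on a hypothetical row laid out in the wiki_bio schema:

# Sketch of the header/content iteration the templates above perform.
row = {
    "input_text": {
        "context": "walter extra",
        "table": {
            "column_header": ["name", "occupation", "article_title"],
            "content": ["walter extra", "aircraft designer", "walter extra"],
        },
    },
}

table = row["input_text"]["table"]
facts = [
    f"- {header.replace('_', ' ')}: {content}"
    for header, content in zip(table["column_header"], table["content"])
    if header != "article_title"  # the templates exclude the title row
]
print("\n".join(facts))
# - name: walter extra
# - occupation: aircraft designer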
diff --git a/promptsource/templates/wiki_hop/masked/templates.yaml b/promptsource/templates/wiki_hop/masked/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1af4f79332b89c5e053a8f72e533b74a7acfecc8
--- /dev/null
+++ b/promptsource/templates/wiki_hop/masked/templates.yaml
@@ -0,0 +1,182 @@
+dataset: wiki_hop
+subset: masked
+templates:
+ 08f2d1cf-c026-4b65-96d0-a28ff91affb5: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: 08f2d1cf-c026-4b65-96d0-a28ff91affb5
+ jinja: '{% set question_split = question.split('' '') %}
+
+ {% if question_split[0]=="place_of_birth" %}
+
+ Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ Where was {{ question_split[1:] | join(" ")}} born? Choose from the following:
+
+ - {{ candidates | join("\n- ") }}
+
+ {% elif question_split[0]=="country_of_citizenship" %}
+
+ Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ What country is {{ question_split[1:] | join(" ")}} a citizen of? Choose from
+ the following:
+
+ - {{ candidates | join("\n- ") }}
+
+ {% elif question_split[0]=="place_of_death" %}
+
+ Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ Where did {{ question_split[1:] | join(" ")}} die? Choose from the following:
+
+ - {{ candidates | join("\n- ") }}
+
+ {% endif %}
+
+ |||
+
+ {% if question_split[0] in ["place_of_birth", "country_of_citizenship", "place_of_death"]
+ %}
+
+ {{answer}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+      choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Indirect Question about Birthplace / Citizenship / Place of Death
+ reference: Ask about place of birth, citizenship, or place of death for the subject
+ entity.
+ 1fb4f0d9-9533-49ab-8ac9-53cd42849584: !Template
+ answer_choices: null
+ id: 1fb4f0d9-9533-49ab-8ac9-53cd42849584
+ jinja: 'Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ {% set question_split = question.split('' '') %}
+
+ What is the relationship between "{{ question_split[1:] | join(" ")}}" and "{{answer}}"?
+
+
+ |||
+
+ {{ question_split[0] | replace("_", " ") }}'
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Explain Relation
+ reference: Given information, explain the relation between the subject entity
+ and the object entity in a fact triple.
+ 3181f711-a376-4d6e-9fca-a34e1d048585: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: 3181f711-a376-4d6e-9fca-a34e1d048585
+ jinja: 'Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ {% set question_split = question.split('' '') %}
+
+ Question: ({{ question_split[1:] | join(" ")}}, {{ question_split[0] | replace("_",
+ " ") }}, ?)
+
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: Generate Object Answer
+ reference: Given information, generate the best object entity for the fact triple.
+ 639fa83f-14fd-457a-886e-a65334cb7e66: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: 639fa83f-14fd-457a-886e-a65334cb7e66
+ jinja: "Information:\n- {{ supports | join(\"\\n- \") }}\n\n{% set question_split\
+ \ = question.split(' ') %}\nQuestion: ({{ question_split[1:] | join(\" \")}},\
+ \ {{ question_split[0] | replace(\"_\", \" \") }}, ?)\n\nCandidate Answers:\
+ \ \n- {{ candidates | join(\"\\n- \") }}\n|||\n{{answer}}"
+ metadata: !TemplateMetadata
+      choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Choose Best Object Candidate
+ reference: Given information and possible object candidates, choose the best object
+ for the fact triple.
+ c543669d-d3aa-4eb2-9338-0fa7c37b6b90: !Template
+ answer_choices: null
+ id: c543669d-d3aa-4eb2-9338-0fa7c37b6b90
+ jinja: 'Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ {% set question_split = question.split('' '') %}
+
+ Generate a fact triple for the information above.
+
+
+ |||
+
+ ({{ question_split[1:] | join(" ") }}, {{ question_split[0] | replace("_", "
+ ") }}, {{answer}})'
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Generate Fact Triple
+ reference: Given information, generate a fact triple.
+ dc8f1874-f6d5-4dc4-a829-0899286021a0: !Template
+ answer_choices: null
+ id: dc8f1874-f6d5-4dc4-a829-0899286021a0
+ jinja: 'Information:
+
+ - {{ supports | join("\n- ") }}
+
+
+ {% set question_split = question.split('' '') %}
+
+ Question: (?, {{ question_split[0] | replace("_", " ") }}, {{answer}})
+
+
+ |||
+
+ {{ question_split[1:] | join(" ")}}'
+ metadata: !TemplateMetadata
+      choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: Generate Subject Answer
+ reference: Given information, generate the best subject entity for the fact triple.
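Unlike the fixed choice lists earlier in this diff, wiki_hop's `answer_choices` field is itself a Jinja expression (`{{candidates | join("|||")}}`), so the candidate set is rebuilt per example before the usual `|||` split. A short sketch of that two-stage evaluation on a hypothetical row (assumed behavior, not the library's exact pipeline):

# Sketch: answer_choices is itself rendered per example, then split on "|||".
from jinja2 import Template

ANSWER_CHOICES = '{{candidates | join("|||")}}'
row = {"candidates": ["london", "paris", "berlin"], "answer": "paris"}

choices = [c.strip() for c in Template(ANSWER_CHOICES).render(**row).split("|||")]
assert row["answer"] in choices  # a scorer can rank exactly these candidates
print(choices)  # ['london', 'paris', 'berlin']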
diff --git a/promptsource/templates/wiki_hop/original/templates.yaml b/promptsource/templates/wiki_hop/original/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f1f620a3a6dcb595b183fc42655af6685d3498b8
--- /dev/null
+++ b/promptsource/templates/wiki_hop/original/templates.yaml
@@ -0,0 +1,291 @@
+dataset: wiki_hop
+subset: original
+templates:
+ 0bb6b603-115e-4ae9-b17b-881fa72b2e81: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: 0bb6b603-115e-4ae9-b17b-881fa72b2e81
+ jinja: "Information:\n{% for support in supports %}\n- {{ support }}\n{% endfor\
+ \ %}\n\n{% set question_split = question.split(' ') %}\nWhat object entity has\
+ \ the relation of '{{ question_split[0] | replace(\"_\", \" \")}}' with the\
+ \ subject '{{ question_split[1:] | join(\" \")}}'? \n\nChoices:\n- {{answer_choices\
+ \ | join(\"\\n - \") }}\n\n|||\n{{answer}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_best_object_interrogative_1
+ reference: Given information and subject and relation, choose the best object
+ entity (interrogative instruction).
+ 23e0d05a-8777-45c4-8692-13f3dc5a40bb: !Template
+ answer_choices: null
+ id: 23e0d05a-8777-45c4-8692-13f3dc5a40bb
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+ What is the relationship between ''{{ question_split[1:] | join(" ")}}'' and
+ ''{{answer}}''?
+
+
+ |||
+
+ {{ question_split[0] | replace("_", " ") }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: explain_relation
+ reference: Given information, explain the relation between the subject entity
+ and the object entity.
+ 2fadafea-f814-4ff1-a3aa-cace9067f31f: !Template
+ answer_choices: null
+ id: 2fadafea-f814-4ff1-a3aa-cace9067f31f
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+    What entity does ''{{ question_split[1:] | join(" ")}}'' have the relation ''{{
+ question_split[0] | replace("_", " ") }}'' with?
+
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: generate_object
+ reference: Given information, generate the best object entity (without answer
+ choices in prompt).
+ 40bdb0e7-def9-4829-9a37-a05d354ef7cd: !Template
+ answer_choices: null
+ id: 40bdb0e7-def9-4829-9a37-a05d354ef7cd
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+ Given the paragraphs above, decide what entity has the relation ''{{ question_split[0]
+ | replace("_", " ") }}'' with ''{{answer}}''.
+
+
+ |||
+
+ {{ question_split[1:] | join(" ")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: generate_subject
+ reference: Given information, generate the best subject entity for the fact triple.
+ 4836e754-b2c9-4697-b386-6770494dc5f5: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: 4836e754-b2c9-4697-b386-6770494dc5f5
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+ Given the information above, choose from the list below the object entity that
+ exhibits the relation ''{{ question_split[0] | replace("_", " ")}}'' with the
+ subject ''{{ question_split[1:] | join(" ")}}''.
+
+
+ Choices:
+
+ - {{answer_choices | join("\n - ") }}
+
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_best_object_affirmative_1
+ reference: Given information and subject and relation, choose the best object
+ entity (affirmative instruction).
+ 4a1b61f6-c619-4d3d-aec2-f41a8986641c: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: 4a1b61f6-c619-4d3d-aec2-f41a8986641c
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+ After reading the paragraphs above, we are interested in knowing the entity
+ with which ''{{ question_split[1:] | join(" ")}}'' exhibits the relationship
+ of ''{{ question_split[0] | replace("_", " ")}}''. Find the answer from the
+ choices below.
+
+
+ Choices:
+
+ - {{answer_choices | join("\n - ") }}
+
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_best_object_affirmative_3
+ reference: Given information and subject and relation, choose the best object
+ entity (affirmative instruction).
+ c4675106-0ac5-4bf0-a400-f628daae81db: !Template
+ answer_choices: null
+ id: c4675106-0ac5-4bf0-a400-f628daae81db
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+ Given the information, choose the subject and object entities that have the
+ relation of ''{{ question_split[0] | replace("_", " ") }}''.
+
+
+ |||
+
+ {{ question_split[1:] | join(" ") }} , {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: generate_subject_and_object
+ reference: Given information and relation, generate the subject and object.
+ e4dc7abf-d56a-4217-ba7f-7470cd959e8e: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: e4dc7abf-d56a-4217-ba7f-7470cd959e8e
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+    After reading the paragraphs above, choose the best answer for the entity that is
+ related to ''{{ question_split[1:] | join(" ")}}'' with the relationship of
+ ''{{ question_split[0] | replace("_", " ")}}''.
+
+
+ Choices:
+
+ - {{answer_choices | join("\n - ") }}
+
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_best_object_affirmative_2
+ reference: Given information and subject and relation, choose the best object
+ entity (affirmative instruction).
+ f44936e1-cbde-4d41-b462-6150cce8c1c8: !Template
+ answer_choices: '{{candidates | join("|||")}}'
+ id: f44936e1-cbde-4d41-b462-6150cce8c1c8
+ jinja: 'Information:
+
+ {% for support in supports %}
+
+ - {{ support }}
+
+ {% endfor %}
+
+
+ {% set question_split = question.split('' '') %}
+
+ ''{{ question_split[1:] | join(" ")}}'' is related to which object entity through
+ the relation of ''{{ question_split[0] | replace("_", " ")}}''?
+
+
+ Choices:
+
+ - {{answer_choices | join("\n - ") }}
+
+
+ |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: choose_best_object_interrogative_2
+ reference: Given information and subject and relation, choose the best object
+ entity (interrogative instruction).
diff --git a/promptsource/templates/wiki_qa/templates.yaml b/promptsource/templates/wiki_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6cea619fc6688aeab530ed85c8873f18c01f4600
--- /dev/null
+++ b/promptsource/templates/wiki_qa/templates.yaml
@@ -0,0 +1,247 @@
+dataset: wiki_qa
+templates:
+ 148e8e91-4f38-4427-8806-8a407268cda9: !Template
+ answer_choices: No ||| Yes
+ id: 148e8e91-4f38-4427-8806-8a407268cda9
+ jinja: 'Question: {{question}}?
+
+ Would "{{answer}}" be a reasonable answer? |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Is This True?
+    reference: The model is given both the question and the answer, and outputs
+      whether the answer is correct.
+ 2395d5ce-5abd-4193-9cf1-863c7271a4f0: !Template
+ answer_choices: No ||| Yes
+ id: 2395d5ce-5abd-4193-9cf1-863c7271a4f0
+ jinja: 'I am verifying the answers generated by an automatic system to the following
+ question: {{question}}
+
+ Suggested answer: {{answer}}
+
+ Should I validate this answer?
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: automatic_system
+ reference: ''
+ 3480df1e-88bb-4b3d-90df-3f292463eb76: !Template
+ answer_choices: null
+ id: 3480df1e-88bb-4b3d-90df-3f292463eb76
+ jinja: '{% if label == 1 %}
+
+ What is the question to: "{{answer}}"? The topic is {{document_title}}.|||
+
+ "{{question}}?"
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Jeopardy style
+ reference: Given a passage (an answer), generate the question.
+ 8a9f2146-aa30-4e17-b1e2-aeb858b08b55: !Template
+ answer_choices: null
+ id: 8a9f2146-aa30-4e17-b1e2-aeb858b08b55
+ jinja: '{% if label == 1 %}
+
+ Determine the topic of the question-answer pair.
+
+ Question: "{{question}}?"; Answer: "{{answer}}"? Topic: |||
+
+ {{document_title}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Topic Prediction - Question and Answer Pair
+ reference: Given a correct Question-Answer pair, generate the topic.
+ a99a68fa-46ae-4331-8b97-fcf751db3f6f: !Template
+ answer_choices: null
+ id: a99a68fa-46ae-4331-8b97-fcf751db3f6f
+ jinja: '{% if label == 1 %}
+
+ Generate a question about the topic "{{document_title}}" whose answer would
+ be: {{answer}}.|||
+
+ {{question}}?
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Generate Question from Topic
+ reference: Given a topic, generate a question.
+ add469e1-b8d9-4926-8f38-3a60c85a7d2b: !Template
+ answer_choices: No ||| Yes
+ id: add469e1-b8d9-4926-8f38-3a60c85a7d2b
+ jinja: 'Question: {{question}}
+
+ I found the following answer on Google: {{answer}}
+
+ Is that a correct answer? Yes or no.
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: found_on_google
+ reference: ''
+ b0ad07f8-8799-4dd8-8f55-82f3f817f1fd: !Template
+ answer_choices: null
+ id: b0ad07f8-8799-4dd8-8f55-82f3f817f1fd
+ jinja: '{% if label == 1 %}
+
+ Determine the topic of the question.
+
+ Question: "{{question}}?"
+
+ Topic: |||
+
+ {{document_title}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Topic Prediction - Question Only
+ reference: Given a Question, generate the topic.
+ bfa3adac-d75b-4e09-aa92-dc38e334937f: !Template
+ answer_choices: False ||| True
+ id: bfa3adac-d75b-4e09-aa92-dc38e334937f
+ jinja: 'The exercise is to decide whether the question accepts the proposed suggestion
+ as a correct answer. If yes, write "{{answer_choices[1]}}", otherwise write
+ "{{answer_choices[0]}}".
+
+ Question: {{question}}
+
+ Suggestion: {{answer}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: exercise
+ reference: ''
+ c802cf19-59a7-4a3e-a6ab-5cbb1f169c70: !Template
+ answer_choices: No ||| Yes
+ id: c802cf19-59a7-4a3e-a6ab-5cbb1f169c70
+ jinja: 'This is a correct answer to the following question about {{document_title}}.
+ Yes or no?
+
+ Answer: {{answer}}
+
+ Question: {{question}}
+
+ |||
+
+ {{answer_choices[label]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Decide_good_answer
+ reference: ''
+ cdc54124-723e-4e1c-878c-aeaabf55c28c: !Template
+ answer_choices: null
+ id: cdc54124-723e-4e1c-878c-aeaabf55c28c
+ jinja: '{% if label == 1 %}
+
+ Determine the topic of the passage.
+
+ "{{answer}}"
+
+ Topic:|||
+
+ {{document_title}}
+
+ {% endif %}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: Topic Prediction - Answer Only
+ reference: Given a correct Answer (as a text passage), generate the topic.
+ d827a178-ff54-4bbf-bc6d-8756950ae5c5: !Template
+ answer_choices: null
+ id: d827a178-ff54-4bbf-bc6d-8756950ae5c5
+ jinja: '{% if label == 1 %}
+
+ Answer this question: {{question}}?|||
+
+ {{answer}}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: Direct Answer to Question
+ reference: Generates an answer given a question.
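The wiki_qa templates above show the two conventions used throughout these files: `|||` separates the rendered input from the target, and `answer_choices` is a `|||`-delimited list indexed by the example's `label`. Below is a minimal rendering sketch, assuming only `jinja2` is installed; the record is a hypothetical wiki_qa-style example. Note that the generation templates wrapped in `{% if label == 1 %}` render to an empty string for negative examples, presumably so those pairs can be filtered out downstream.

```python
from jinja2 import Environment

env = Environment()

# Template body copied from "automatic_system" above; "|||" splits
# the rendered string into (input, target).
template = (
    "I am verifying the answers generated by an automatic system to the "
    "following question: {{question}}\n"
    "Suggested answer: {{answer}}\n"
    "Should I validate this answer?\n"
    "|||\n"
    "{{answer_choices[label]}}"
)

answer_choices = ["No", "Yes"]  # parsed from "No ||| Yes"
example = {  # hypothetical wiki_qa-style record
    "question": "what is the capital of France",
    "answer": "Paris is the capital and largest city of France.",
    "label": 1,
}

rendered = env.from_string(template).render(**example, answer_choices=answer_choices)
prompt, target = (part.strip() for part in rendered.split("|||"))
print(prompt)
print("->", target)  # -> Yes
```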
diff --git a/promptsource/templates/wiki_split/templates.yaml b/promptsource/templates/wiki_split/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a4ede350de4bf2c1d5dd37bbdbb85daa18297ae
--- /dev/null
+++ b/promptsource/templates/wiki_split/templates.yaml
@@ -0,0 +1,125 @@
+dataset: wiki_split
+templates:
+ 50e32867-042a-480c-a55d-99f0d9d9bca6: !Template
+ answer_choices: null
+ id: 50e32867-042a-480c-a55d-99f0d9d9bca6
+ jinja: 'The following sentence is pretty complex to understand.
+
+
+ "{{complex_sentence|replace("'' '''' ","")}}"
+
+
+ Simplify the sentence above into two simpler sentences.|||
+
+ {{simple_sentence_1|replace("'' '''' ","")}} {{simple_sentence_2|replace("''
+ '''' ","")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: split_complex_to_simple_affirmative_simplify
+ reference: ''
+ 51bdeba1-d23c-427e-9def-9099e7b5f803: !Template
+ answer_choices: null
+ id: 51bdeba1-d23c-427e-9def-9099e7b5f803
+ jinja: "Given the sentence below, how would you use two simple sentences to represent\
+ \ its meaning?\n\nSentence: {{complex_sentence|replace(\"' '' \",\"\")}} \n\
+ |||\n{{simple_sentence_1|replace(\"' '' \",\"\")}} {{simple_sentence_2|replace(\"\
+ ' '' \",\"\")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: split_complex_to_simple_interrogative_represent
+ reference: ''
+ 96a38f85-90d4-438a-b4e9-ccb61310d5a8: !Template
+ answer_choices: null
+ id: 96a38f85-90d4-438a-b4e9-ccb61310d5a8
+ jinja: 'Break down "{{complex_sentence|replace("'' '''' ","")}}" into two simpler
+ sentences. |||
+
+ {{simple_sentence_1|replace("'' '''' ","")}} {{simple_sentence_2|replace("''
+ '''' ","")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: split_complex_to_simple_affirmative_break_down
+ reference: ''
+ 9e1192cc-6828-4e1c-949c-b86af4999145: !Template
+ answer_choices: null
+ id: 9e1192cc-6828-4e1c-949c-b86af4999145
+ jinja: "You are trying to teach sixth-grade students with complex sentences. You\
+ \ ask them to break down the following sentence into two simpler sentences.\
+ \ What are those sentences? \n\nSentence: {{complex_sentence|replace(\"' ''\
+ \ \",\"\")}} \n|||\n{{simple_sentence_1|replace(\"' '' \",\"\")}} {{simple_sentence_2|replace(\"\
+ ' '' \",\"\")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: split_complex_to_simple_interrogative_textbook
+ reference: ''
+ a8285013-e3cf-4f43-86dc-3754ead3f90c: !Template
+ answer_choices: null
+ id: a8285013-e3cf-4f43-86dc-3754ead3f90c
+ jinja: "You are talking to your friend on Zoom, but the internet is choppy. Your\
+ \ friend asks you to repeat the sentence \"{{complex_sentence|replace(\"' ''\
+ \ \",\"\")}}\". You want to repeat it with two shorter sentences. What would\
+ \ you say? \n|||\n{{simple_sentence_1|replace(\"' '' \",\"\")}} {{simple_sentence_2|replace(\"\
+ ' '' \",\"\")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: true
+ name: split_complex_to_simple_interrogative_repeat
+ reference: ''
+ c5da1785-e4e3-4788-ad41-bc9255e137fa: !Template
+ answer_choices: null
+ id: c5da1785-e4e3-4788-ad41-bc9255e137fa
+ jinja: 'The sentence "{{complex_sentence|replace("'' '''' ","")}}" can be broken
+ down into two simpler sentences. One of it is "{{simple_sentence_1|replace("''
+ '''' ","")}}". What is the other sentence?|||
+
+ {{simple_sentence_2|replace("'' '''' ","")}}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: false
+ name: find_A_given_B_and_C
+ reference: ''
+ da51aebf-e4c1-495b-b69e-0ed4eb646d37: !Template
+ answer_choices: null
+ id: da51aebf-e4c1-495b-b69e-0ed4eb646d37
+ jinja: "Combine these two sentences \"{{simple_sentence_1|replace(\"' '' \",\"\
+ \")}}\" and \"{{simple_sentence_2|replace(\"' '' \",\"\")}}\" into a complex\
+ \ sentence. \n|||\n{{complex_sentence|replace(\"' '' \",\"\")}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ original_task: false
+ name: combine
+ reference: ''
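All of the wiki_split templates apply the same `|replace("' '' ", "")` filter (the doubled quotes in the YAML above are just single-quote escaping), which appears to strip quote artifacts the corpus leaves in its sentences. A plain-Python sketch of the same cleanup, on a hypothetical sentence:

```python
# The templates' |replace filter removes literal "' '' " sequences.
# Hypothetical wiki_split-style sentence:
complex_sentence = "The film , ' '' Heat ' '' , was released in 1995 ."

cleaned = complex_sentence.replace("' '' ", "")
print(cleaned)  # -> The film , Heat , was released in 1995 .
```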
diff --git a/promptsource/templates/wino_bias/type1_anti/templates.yaml b/promptsource/templates/wino_bias/type1_anti/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74c01a7380ab1e689ae7f3cfe7d4cbbedd389614
--- /dev/null
+++ b/promptsource/templates/wino_bias/type1_anti/templates.yaml
@@ -0,0 +1,162 @@
+dataset: wino_bias
+subset: type1_anti
+templates:
+ 4faa9623-6d11-47d1-8d6e-bb41af088cff: !Template
+ answer_choices: null
+ id: 4faa9623-6d11-47d1-8d6e-bb41af088cff
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+ {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: replaced with
+ reference: ''
+ 5e5c9f7b-2c07-42d7-baf2-925e91a5fb9b: !Template
+ answer_choices: null
+ id: 5e5c9f7b-2c07-42d7-baf2-925e91a5fb9b
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ What does "{{ pronoun }}" refer to in the following sentence?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: refers_to
+ reference: ''
+ 5ea6715b-20b2-4f10-8122-54ed3af54763: !Template
+ answer_choices: null
+ id: 5ea6715b-20b2-4f10-8122-54ed3af54763
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ In the sentence below, what does "{{pronoun}}" represent?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: represent
+ reference: ''
+ 8d5eedf2-de08-41fb-a584-7f35df315fd3: !Template
+ answer_choices: null
+ id: 8d5eedf2-de08-41fb-a584-7f35df315fd3
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ d102cd81-e0d1-46bf-9e7d-a620328ad3bf: !Template
+ answer_choices: null
+ id: d102cd81-e0d1-46bf-9e7d-a620328ad3bf
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: What does p stand for
+ reference: ''
+ d355811f-eb29-4e6e-9d57-299eea1d96e1: !Template
+ answer_choices: null
+ id: d355811f-eb29-4e6e-9d57-299eea1d96e1
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: by p they mean
+ reference: ''
+ f4bdb35d-ccb0-4482-a47e-603f8566301e: !Template
+ answer_choices: null
+ id: f4bdb35d-ccb0-4482-a47e-603f8566301e
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ {% if pronoun.lower() == "they" or pronoun.lower() == "them" %}
+
+ Question: Who or what are "{{ pronoun }}"?
+
+ {% else %}
+
+ Question: Who or what is "{{ pronoun }}"?
+
+ {% endif %}
+
+ Answer: ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Who or what is/are
+ reference: ''
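Every wino_bias template starts from the same two `{% set %}` statements, which slice the pronoun and its referent out of `tokens` using `coreference_clusters`. The sketch below mirrors that logic in Python, assuming (as the templates imply) that the field holds `[referent_start, referent_end, pronoun_start, pronoun_end]` as string indices with inclusive ends, hence the `+ 1` and the `| int` casts; the record itself is hypothetical.

```python
# Hypothetical wino_bias-style record.
tokens = ["The", "developer", "argued", "with", "the", "designer",
          "because", "she", "did", "not", "like", "the", "design", "."]
coreference_clusters = ["0", "1", "7", "7"]  # string indices, inclusive ends

ref_start, ref_end, pro_start, pro_end = (int(i) for i in coreference_clusters)
referent = " ".join(tokens[ref_start : ref_end + 1])  # -> "The developer"
pronoun = " ".join(tokens[pro_start : pro_end + 1])   # -> "she"

sentence = " ".join(tokens)
print(f'In "{sentence}", "{pronoun}" refers to "{referent}".')
```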
diff --git a/promptsource/templates/wino_bias/type1_pro/templates.yaml b/promptsource/templates/wino_bias/type1_pro/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7cd5bae41af6b0b21391262ff63414e8f05d1dde
--- /dev/null
+++ b/promptsource/templates/wino_bias/type1_pro/templates.yaml
@@ -0,0 +1,162 @@
+dataset: wino_bias
+subset: type1_pro
+templates:
+ 13b2dbe4-abf3-4b09-b7cb-459224881800: !Template
+ answer_choices: null
+ id: 13b2dbe4-abf3-4b09-b7cb-459224881800
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ What does "{{ pronoun }}" refer to in the following sentence?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: refers_to
+ reference: ''
+ 13b2dbe4-abf3-4b09-b7cb-459224881801: !Template
+ answer_choices: null
+ id: 13b2dbe4-abf3-4b09-b7cb-459224881801
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ In the sentence below, what does "{{pronoun}}" represent?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: represent
+ reference: ''
+ 143449f6-350a-44ef-ab4d-857841eadaf8: !Template
+ answer_choices: null
+ id: 143449f6-350a-44ef-ab4d-857841eadaf8
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+ {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: replaced with
+ reference: ''
+ 18004871-0d0c-4f59-976c-53becd04c98f: !Template
+ answer_choices: null
+ id: 18004871-0d0c-4f59-976c-53becd04c98f
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ {% if pronoun.lower() == "they" or pronoun.lower() == "them" %}
+
+ Question: Who or what are "{{ pronoun }}"?
+
+ {% else %}
+
+ Question: Who or what is "{{ pronoun }}"?
+
+ {% endif %}
+
+ Answer: ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Who or what is/are
+ reference: ''
+ 1ab4e47e-bb58-47c4-8148-fcfaf4a75785: !Template
+ answer_choices: null
+ id: 1ab4e47e-bb58-47c4-8148-fcfaf4a75785
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: What does p stand for
+ reference: ''
+ 97fb69f9-34d6-4fb2-bb60-75679c4a25c1: !Template
+ answer_choices: null
+ id: 97fb69f9-34d6-4fb2-bb60-75679c4a25c1
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ e5ac51e8-beaf-4cf9-a7fe-20d8cc2b1d0a: !Template
+ answer_choices: null
+ id: e5ac51e8-beaf-4cf9-a7fe-20d8cc2b1d0a
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: by p they mean
+ reference: ''
diff --git a/promptsource/templates/wino_bias/type2_anti/templates.yaml b/promptsource/templates/wino_bias/type2_anti/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1309c445b2c80e73f0c5465dc6184df0f22e7926
--- /dev/null
+++ b/promptsource/templates/wino_bias/type2_anti/templates.yaml
@@ -0,0 +1,162 @@
+dataset: wino_bias
+subset: type2_anti
+templates:
+ 3cdaa371-affb-48da-ba8f-f3dcb574fdcc: !Template
+ answer_choices: null
+ id: 3cdaa371-affb-48da-ba8f-f3dcb574fdcc
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ What does "{{ pronoun }}" refer to in the following sentence?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: refers_to
+ reference: ''
+ 4ee240b3-482d-4f4c-8d87-7824b656d486: !Template
+ answer_choices: null
+ id: 4ee240b3-482d-4f4c-8d87-7824b656d486
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+ {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: replaced with
+ reference: ''
+ 4f3a74bc-da74-4ee0-a3d4-a4387313102d: !Template
+ answer_choices: null
+ id: 4f3a74bc-da74-4ee0-a3d4-a4387313102d
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: What does p stand for
+ reference: ''
+ 560ea974-4478-49c7-988e-f49853d45119: !Template
+ answer_choices: null
+ id: 560ea974-4478-49c7-988e-f49853d45119
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ In the sentence below, what does "{{pronoun}}" represent?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: represent
+ reference: ''
+ 72c3f2ad-41b4-4aba-901e-b08a756b5cd2: !Template
+ answer_choices: null
+ id: 72c3f2ad-41b4-4aba-901e-b08a756b5cd2
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ {% if pronoun.lower() == "they" or pronoun.lower() == "them" %}
+
+ Question: Who or what are "{{ pronoun }}"?
+
+ {% else %}
+
+ Question: Who or what is "{{ pronoun }}"?
+
+ {% endif %}
+
+ Answer: ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Who or what is/are
+ reference: ''
+ 73750099-941c-4929-adb7-aaad3a8f3ac7: !Template
+ answer_choices: null
+ id: 73750099-941c-4929-adb7-aaad3a8f3ac7
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: by p they mean
+ reference: ''
+ 7cb4282d-48ae-43fd-9075-e65e24980724: !Template
+ answer_choices: null
+ id: 7cb4282d-48ae-43fd-9075-e65e24980724
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
diff --git a/promptsource/templates/wino_bias/type2_pro/templates.yaml b/promptsource/templates/wino_bias/type2_pro/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39b23986566bdfcffe7a4d925a33b9bf14428271
--- /dev/null
+++ b/promptsource/templates/wino_bias/type2_pro/templates.yaml
@@ -0,0 +1,162 @@
+dataset: wino_bias
+subset: type2_pro
+templates:
+ 165a421e-6a90-4a7a-8ec5-06ae904ab46f: !Template
+ answer_choices: null
+ id: 165a421e-6a90-4a7a-8ec5-06ae904ab46f
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ {% if pronoun.lower() == "they" or pronoun.lower() == "them" %}
+
+ Question: Who or what are "{{ pronoun }}"?
+
+ {% else %}
+
+ Question: Who or what is "{{ pronoun }}"?
+
+ {% endif %}
+
+ Answer: ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: Who or what is/are
+ reference: ''
+ 25066e95-3782-44fc-949e-3620edd24a22: !Template
+ answer_choices: null
+ id: 25066e95-3782-44fc-949e-3620edd24a22
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ What does "{{ pronoun }}" refer to in the following sentence?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: refers_to
+ reference: ''
+ 793c09af-1ec7-492a-ab65-392b0b17d807: !Template
+ answer_choices: null
+ id: 793c09af-1ec7-492a-ab65-392b0b17d807
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ In the sentence below, what does "{{pronoun}}" represent?
+
+ {{tokens | join(" ")}} ||| {{referent}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: represent
+ reference: ''
+ 83446f7f-07ae-4b88-8aff-3eda1183dd7b: !Template
+ answer_choices: null
+ id: 83446f7f-07ae-4b88-8aff-3eda1183dd7b
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+ {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: replaced with
+ reference: ''
+ 85a90e9b-a6ef-4e25-9577-f26f14350099: !Template
+ answer_choices: null
+ id: 85a90e9b-a6ef-4e25-9577-f26f14350099
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: by p they mean
+ reference: ''
+ ace9b776-df88-4895-b1e1-6821c5fcef72: !Template
+ answer_choices: null
+ id: ace9b776-df88-4895-b1e1-6821c5fcef72
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: What does p stand for
+ reference: ''
+ af0b86f2-2fc6-4237-89da-d6d7dd2d9a40: !Template
+ answer_choices: null
+ id: af0b86f2-2fc6-4237-89da-d6d7dd2d9a40
+ jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+ | int + 1] | join(" ") %}
+
+ {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+ | int + 1] | join(" ") %}
+
+ {{tokens | join(" ")}}
+
+ In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
diff --git a/promptsource/templates/winograd_wsc/wsc273/templates.yaml b/promptsource/templates/winograd_wsc/wsc273/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bafc72483929b2ce37da7a4c499d4385fcfbf945
--- /dev/null
+++ b/promptsource/templates/winograd_wsc/wsc273/templates.yaml
@@ -0,0 +1,125 @@
+dataset: winograd_wsc
+subset: wsc273
+templates:
+ 18233597-fcd3-415d-a184-a971e98119d9: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 18233597-fcd3-415d-a184-a971e98119d9
+ jinja: '{{ text }} Here, does "{{ pronoun }}" stand for {{ answer_choices[0] }}
+ or {{ answer_choices[1] }}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does p stand for
+ reference: ''
+ 3f04226a-fb68-4d82-bda1-658f1a316365: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 3f04226a-fb68-4d82-bda1-658f1a316365
+ jinja: "{{ text }} \n{% if pronoun.lower() == \"they\" or pronoun.lower() ==\
+ \ \"them\" %}\nQuestion: Who or what are \"{{ pronoun }}\"? {{ answer_choices[0]\
+ \ }} or {{ answer_choices[1] }}?\n{% else %}\nQuestion: Who or what is \"{{\
+ \ pronoun }}\"? Is it {{ answer_choices[0] }} or {{ answer_choices[1] }}?\n\
+ {% endif %}\nAnswer: ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Who or what is/are
+ reference: ''
+ 53603685-806f-4332-ae9a-e393b6ad2d89: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: 53603685-806f-4332-ae9a-e393b6ad2d89
+ jinja: '{{ text }} In the previous sentence, can the pronoun "{{pronoun }}" be
+ replaced with "{{ answer_choices[0] }}" or "{{ answer_choices[1] }}"? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: replaced with
+ reference: ''
+ 62aa8a33-2f62-43ec-aa7e-20d2052e2a8c: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 62aa8a33-2f62-43ec-aa7e-20d2052e2a8c
+ jinja: "{{ text }} \nIn the passage above, the pronoun \"{{ pronoun }}\" refers\
+ \ to {{ answer_choices[0] }} or {{ answer_choices[1] }}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ 6e8a2985-ecc1-4184-86b9-929d2d25746e: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: 6e8a2985-ecc1-4184-86b9-929d2d25746e
+ jinja: "Context: {{ text }} \n\n{% if pronoun.lower() == \"they\" or pronoun.lower()\
+ \ == \"them\" %}\nQuestion: \"{{ pronoun }}\" are {{ answer_choices[0] }} or\
+ \ {{ answer_choices[1] }}?\n{% else %}\nQuestion: \"{{ pronoun }}\" is {{ answer_choices[0]\
+ \ }} or {{ answer_choices[1] }}?\n{% endif %}\n\nAnswer: ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: p is/are r
+ reference: ''
+ 7ee12960-5512-431a-b1eb-b3a975761f6c: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: 7ee12960-5512-431a-b1eb-b3a975761f6c
+ jinja: "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun\
+ \ \"{{ pronoun }}\" refer to {{ answer_choices[0] }} or {{answer_choices[1]\
+ \ }}?\n\nAnswer: ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 Style
+ reference: Adapted from Figure G33, p. 59, Brown et al. 2020
+ a0e4b805-e0bc-4b20-81bd-2b265ace8644: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: a0e4b805-e0bc-4b20-81bd-2b265ace8644
+ jinja: '{{ text }} In the previous sentence, does the pronoun "{{ pronoun }}"
+ refer to {{ answer_choices[0] }} or {{ answer_choices[1] }}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does the pronoun refer to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ e617cc59-9eca-4b17-ba2e-a87b79fe8c89: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: e617cc59-9eca-4b17-ba2e-a87b79fe8c89
+ jinja: '{{ text }} Here, by "{{ pronoun }}" do they mean "{{ answer_choices[0]
+ }}" or "{{ answer_choices[1]}}"? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: by p they mean
+ reference: ''
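Unlike the datasets above, winograd_wsc stores its two candidates in an `options` list, so `answer_choices` is itself a Jinja expression (`{{ options | join("|||") }}`) that must be rendered against the example before being split on `|||`; `label` is then an integer index into the resulting list. A two-stage sketch of that evaluation, assuming `jinja2` and a hypothetical wsc273-style record:

```python
from jinja2 import Environment

env = Environment()
example = {  # hypothetical wsc273-style record
    "text": "The trophy doesn't fit into the brown suitcase because it is too large.",
    "pronoun": "it",
    "options": ["the trophy", "the suitcase"],
    "label": 0,
}

# Stage 1: render the answer_choices expression, then split on "|||".
choices_template = '{{ options | join("|||") }}'
answer_choices = [
    c.strip()
    for c in env.from_string(choices_template).render(**example).split("|||")
]

# Stage 2: render the prompt template ("does p stand for" above) with the
# parsed choices in scope.
prompt_template = (
    '{{ text }} Here, does "{{ pronoun }}" stand for {{ answer_choices[0] }} '
    'or {{ answer_choices[1] }}? ||| {{ answer_choices[label] }}'
)
rendered = env.from_string(prompt_template).render(**example, answer_choices=answer_choices)
prompt, target = (part.strip() for part in rendered.split("|||"))
print(prompt)
print("->", target)  # -> the trophy
```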
diff --git a/promptsource/templates/winograd_wsc/wsc285/templates.yaml b/promptsource/templates/winograd_wsc/wsc285/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f0607a70ed5006524ade19077188b0c34b431252
--- /dev/null
+++ b/promptsource/templates/winograd_wsc/wsc285/templates.yaml
@@ -0,0 +1,125 @@
+dataset: winograd_wsc
+subset: wsc285
+templates:
+ 1ab89604-45c8-4c12-9f25-24d62e8bb6a6: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 1ab89604-45c8-4c12-9f25-24d62e8bb6a6
+ jinja: '{{ text }} Here, does "{{ pronoun }}" stand for {{ answer_choices[0] }}
+ or {{ answer_choices[1] }}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does p stand for
+ reference: ''
+ 442e7f58-5378-4ebb-853e-db1dc2e3fd78: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: 442e7f58-5378-4ebb-853e-db1dc2e3fd78
+ jinja: "{{ text }} \n{% if pronoun.lower() == \"they\" or pronoun.lower() ==\
+ \ \"them\" %}\nQuestion: Who or what are \"{{ pronoun }}\"? {{ answer_choices[0]\
+ \ }} or {{ answer_choices[1] }}?\n{% else %}\nQuestion: Who or what is \"{{\
+ \ pronoun }}\"? Is it {{ answer_choices[0] }} or {{ answer_choices[1] }}?\n\
+ {% endif %}\nAnswer: ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Who or what is/are
+ reference: ''
+ a934793c-7098-4bd6-8fe8-b4c8dbc36787: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: a934793c-7098-4bd6-8fe8-b4c8dbc36787
+ jinja: '{{ text }} In the previous sentence, can the pronoun "{{pronoun }}" be
+ replaced with "{{ answer_choices[0] }}" or "{{ answer_choices[1] }}"? ||| {{
+ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: replaced with
+ reference: ''
+ ac69d5f7-c9e2-4a80-93ff-2ba88f38c65a: !Template
+ answer_choices: '{{ options | join("|||") }}'
+ id: ac69d5f7-c9e2-4a80-93ff-2ba88f38c65a
+ jinja: "{{ text }} \nIn the passage above, the pronoun \"{{ pronoun }}\" refers\
+ \ to {{ answer_choices[0] }} or {{ answer_choices[1] }}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: the pronoun refers to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ aec280c8-0690-4cb9-bd10-59590a895466: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: aec280c8-0690-4cb9-bd10-59590a895466
+ jinja: "Context: {{ text }} \n\n{% if pronoun.lower() == \"they\" or pronoun.lower()\
+ \ == \"them\" %}\nQuestion: \"{{ pronoun }}\" are {{ answer_choices[0] }} or\
+ \ {{ answer_choices[1] }}?\n{% else %}\nQuestion: \"{{ pronoun }}\" is {{ answer_choices[0]\
+ \ }} or {{ answer_choices[1] }}?\n{% endif %}\n\nAnswer: ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: p is/are r
+ reference: ''
+ b2474c90-e701-4ecc-8322-581fce5863a7: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: b2474c90-e701-4ecc-8322-581fce5863a7
+ jinja: "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun\
+ \ \"{{ pronoun }}\" refer to {{ answer_choices[0] }} or {{answer_choices[1]\
+ \ }}?\n\nAnswer: ||| {{ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 Style
+ reference: Adapted from Figure G33, p. 59, Brown et al. 2020
+ b5c68418-3776-44b6-99d6-cbad437775ee: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: b5c68418-3776-44b6-99d6-cbad437775ee
+ jinja: '{{ text }} In the previous sentence, does the pronoun "{{ pronoun }}"
+ refer to {{ answer_choices[0] }} or {{ answer_choices[1] }}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does the pronoun refer to
+ reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+ c45a0ba3-a0a2-410a-8ef7-f11558135496: !Template
+ answer_choices: '{{options | join("|||")}}'
+ id: c45a0ba3-a0a2-410a-8ef7-f11558135496
+ jinja: '{{ text }} Here, by "{{ pronoun }}" do they mean "{{ answer_choices[0]
+ }}" or "{{ answer_choices[1]}}"? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: by p they mean
+ reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_debiased/templates.yaml b/promptsource/templates/winogrande/winogrande_debiased/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..633ffb623bac0dcc302762238ef68e84214b6a39
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_debiased/templates.yaml
@@ -0,0 +1,110 @@
+dataset: winogrande
+subset: winogrande_debiased
+templates:
+ 1ce2be12-1815-4a07-80a7-ac3c3505b005: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 1ce2be12-1815-4a07-80a7-ac3c3505b005
+ jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+ \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+ \ else %} {{ option2 }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Replace
+ reference: ''
+ 1ddbbca4-8917-4a1d-9d83-f42db77f24ba: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 1ddbbca4-8917-4a1d-9d83-f42db77f24ba
+ jinja: '{{sentence}}
+
+ What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+ }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: underscore refer to
+ reference: ''
+ 276eaba6-17e5-403a-98c7-0f8c53c35221: !Template
+ answer_choices: '{{ option1 }} ||| {{ option2 }}'
+ id: 276eaba6-17e5-403a-98c7-0f8c53c35221
+ jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+ or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+ }} {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does underscore refer to
+ reference: ''
+ 9702d456-4261-4a7e-94c5-6a9d2a1c4859: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 9702d456-4261-4a7e-94c5-6a9d2a1c4859
+ jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: stand for
+ reference: ''
+ bb9b91fc-760a-45cd-bacd-dcb05a1cb2f3: !Template
+ answer_choices: True ||| False
+ id: bb9b91fc-760a-45cd-bacd-dcb05a1cb2f3
+ jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer|int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: True or False
+ reference: ''
+ ebabc54d-cff4-46a7-9c22-2412b8ce00c6: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: ebabc54d-cff4-46a7-9c22-2412b8ce00c6
+ jinja: 'Fill in the _ in the below sentence:
+
+ {{sentence}}
+
+
+ Choices:
+
+ - {{ option1 }}
+
+ - {{ option2 }}
+
+
+ Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the blank
+ reference: ''
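The winogrande templates handle the fact that `answer` is the string `'1'` or `'2'` in two ways: branching on `answer == '1'`, or indexing with `answer_choices[answer | int - 1]`. The second form works because Jinja applies the `int` filter before the subtraction, so `'1'` becomes index 0. A minimal sketch with a hypothetical record:

```python
from jinja2 import Environment

env = Environment()
example = {  # hypothetical winogrande-style record
    "sentence": "The cup would not fit in the cabinet because _ was too tall.",
    "option1": "the cup",
    "option2": "the cabinet",
    "answer": "1",  # string, not int
}
answer_choices = [example["option1"], example["option2"]]

# "stand for" template from above; `answer | int - 1` parses as
# ((answer | int) - 1), mapping "1" -> index 0 and "2" -> index 1.
template = (
    "In the sentence below, does the _ stand for {{answer_choices[0]}} or "
    "{{answer_choices[1]}}?\n{{sentence}}|||\n{{answer_choices[answer | int - 1]}}"
)
rendered = env.from_string(template).render(**example, answer_choices=answer_choices)
prompt, target = rendered.split("|||")
print(prompt.strip())
print("->", target.strip())  # -> the cup
```

The same indexing drives the "True or False" variant, where `answer_choices` is `True ||| False`, so an `answer` of `'1'` (option1 is correct) maps to "True" and `'2'` to "False".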
diff --git a/promptsource/templates/winogrande/winogrande_l/templates.yaml b/promptsource/templates/winogrande/winogrande_l/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ef50fa49a8fd08b6bc470246fb714f2d547ad4
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_l/templates.yaml
@@ -0,0 +1,110 @@
+dataset: winogrande
+subset: winogrande_l
+templates:
+ 035038df-0b71-45c1-b18f-14451c580508: !Template
+ answer_choices: True ||| False
+ id: 035038df-0b71-45c1-b18f-14451c580508
+ jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer|int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: True or False
+ reference: ''
+ 1ceacaa7-ccd3-4e4e-ad0d-c75b241e0ddb: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 1ceacaa7-ccd3-4e4e-ad0d-c75b241e0ddb
+ jinja: 'Fill in the _ in the below sentence:
+
+ {{sentence}}
+
+
+ Choices:
+
+ - {{ option1 }}
+
+ - {{ option2 }}
+
+
+ Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the blank
+ reference: ''
+ 5627ba11-110c-4871-a0ed-86e7e66fec60: !Template
+ answer_choices: '{{ option1 }} ||| {{ option2 }}'
+ id: 5627ba11-110c-4871-a0ed-86e7e66fec60
+ jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+ or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+ }} {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does underscore refer to
+ reference: ''
+ 57030ec2-f026-491d-88bc-c2709455cc56: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 57030ec2-f026-491d-88bc-c2709455cc56
+ jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+ \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+ \ else %} {{ option2 }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Replace
+ reference: ''
+ c920f420-f80d-4e94-9024-b45fbf4d6367: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: c920f420-f80d-4e94-9024-b45fbf4d6367
+ jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: stand for
+ reference: ''
+ edb3168f-ce82-4b9e-9713-6a581f5aef96: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: edb3168f-ce82-4b9e-9713-6a581f5aef96
+ jinja: '{{sentence}}
+
+ What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+ }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: underscore refer to
+ reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_m/templates.yaml b/promptsource/templates/winogrande/winogrande_m/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..380e09b93beee58ad684a616015a107f531787c0
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_m/templates.yaml
@@ -0,0 +1,110 @@
+dataset: winogrande
+subset: winogrande_m
+templates:
+ 5170abad-7046-4538-8216-68cc508d3d23: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 5170abad-7046-4538-8216-68cc508d3d23
+ jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+ \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+ \ else %} {{ option2 }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Replace
+ reference: ''
+ 5c801298-c08f-4165-8c25-38592e341a1c: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 5c801298-c08f-4165-8c25-38592e341a1c
+ jinja: 'Fill in the _ in the below sentence:
+
+ {{sentence}}
+
+
+ Choices:
+
+ - {{ option1 }}
+
+ - {{ option2 }}
+
+
+ Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the blank
+ reference: ''
+ 894b5a5b-6685-462b-ad00-ea82f4fb80e2: !Template
+ answer_choices: True ||| False
+ id: 894b5a5b-6685-462b-ad00-ea82f4fb80e2
+ jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer|int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: True or False
+ reference: ''
+ a394fafe-260c-4473-8b67-a4ecfc7fe1fd: !Template
+ answer_choices: '{{ option1 }} ||| {{ option2 }}'
+ id: a394fafe-260c-4473-8b67-a4ecfc7fe1fd
+ jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+ or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+ }} {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does underscore refer to
+ reference: ''
+ ad345e36-5dc7-4a0b-a9f4-654f253e3c20: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: ad345e36-5dc7-4a0b-a9f4-654f253e3c20
+ jinja: '{{sentence}}
+
+ What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+ }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: underscore refer to
+ reference: ''
+ ead1b8bf-7c42-4320-86ec-3a7c7aef14cb: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: ead1b8bf-7c42-4320-86ec-3a7c7aef14cb
+ jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: stand for
+ reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_s/templates.yaml b/promptsource/templates/winogrande/winogrande_s/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..496cb1905bdff2bc002961997cbba46d358d6ae1
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_s/templates.yaml
@@ -0,0 +1,110 @@
+dataset: winogrande
+subset: winogrande_s
+templates:
+ 1a150f71-aba6-4e7d-9be2-dce2df84c5de: !Template
+ answer_choices: '{{ option1 }} ||| {{ option2 }}'
+ id: 1a150f71-aba6-4e7d-9be2-dce2df84c5de
+ jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+ or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+ }} {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does underscore refer to
+ reference: ''
+ 85d97c81-9d82-4df5-91db-56a5459b61cd: !Template
+ answer_choices: True ||| False
+ id: 85d97c81-9d82-4df5-91db-56a5459b61cd
+ jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer|int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: True or False
+ reference: ''
+ 968aa4aa-67d7-41ca-8ff2-462d482f4d89: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 968aa4aa-67d7-41ca-8ff2-462d482f4d89
+ jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: stand for
+ reference: ''
+ d304fb01-a60a-4846-9378-394f84f05d85: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: d304fb01-a60a-4846-9378-394f84f05d85
+ jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+ \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+ \ else %} {{ option2 }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Replace
+ reference: ''
+ d715126d-1cd3-4fc0-bd32-945d8c1af800: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: d715126d-1cd3-4fc0-bd32-945d8c1af800
+ jinja: 'Fill in the _ in the below sentence:
+
+ {{sentence}}
+
+
+ Choices:
+
+ - {{ option1 }}
+
+ - {{ option2 }}
+
+
+ Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the blank
+ reference: ''
+ d8e01dcd-ec07-40a3-a642-8446f81f700a: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: d8e01dcd-ec07-40a3-a642-8446f81f700a
+ jinja: '{{sentence}}
+
+ What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+ }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: underscore refer to
+ reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_xl/templates.yaml b/promptsource/templates/winogrande/winogrande_xl/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..11642583ac6db080ee1e335082f2484ffba6fcd2
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_xl/templates.yaml
@@ -0,0 +1,110 @@
+dataset: winogrande
+subset: winogrande_xl
+templates:
+ 0f23f058-5b4d-42a3-92d4-5d60688aa90c: !Template
+ answer_choices: '{{ option1 }} ||| {{ option2 }}'
+ id: 0f23f058-5b4d-42a3-92d4-5d60688aa90c
+ jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+ or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+ }} {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does underscore refer to
+ reference: ''
+ 5080f912-fac8-400f-983c-944baf9b10c0: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 5080f912-fac8-400f-983c-944baf9b10c0
+ jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: stand for
+ reference: ''
+ 5af00ba1-86e0-421b-bb97-26bf58df52d3: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 5af00ba1-86e0-421b-bb97-26bf58df52d3
+ jinja: '{{sentence}}
+
+ What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+ }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: underscore refer to
+ reference: ''
+ ac4e5cb4-f874-460a-8578-ddf1c6541bb4: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: ac4e5cb4-f874-460a-8578-ddf1c6541bb4
+ jinja: 'Fill in the _ in the below sentence:
+
+ {{sentence}}
+
+
+ Choices:
+
+ - {{ option1 }}
+
+ - {{ option2 }}
+
+
+ Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the blank
+ reference: ''
+ d11378d1-2f24-4509-bbbc-bfa2921300d5: !Template
+ answer_choices: True ||| False
+ id: d11378d1-2f24-4509-bbbc-bfa2921300d5
+ jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer|int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: True or False
+ reference: ''
+ e7e42e9e-bc57-46ed-ad8a-76a5b90a5bb9: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: e7e42e9e-bc57-46ed-ad8a-76a5b90a5bb9
+ jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+ \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+ \ else %} {{ option2 }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Replace
+ reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_xs/templates.yaml b/promptsource/templates/winogrande/winogrande_xs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..074a1476bf8b60339422bf642ecad81a812c24a9
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_xs/templates.yaml
@@ -0,0 +1,110 @@
+dataset: winogrande
+subset: winogrande_xs
+templates:
+ 52b40d2b-7547-44e2-8cc6-eb127ecbb2fe: !Template
+ answer_choices: '{{ option1 }} ||| {{ option2 }}'
+ id: 52b40d2b-7547-44e2-8cc6-eb127ecbb2fe
+ jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+ or {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+ }} {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does underscore refer to
+ reference: ''
+ 75072f4d-843b-4ba4-96a5-cd0ea3e7855d: !Template
+ answer_choices: True ||| False
+ id: 75072f4d-843b-4ba4-96a5-cd0ea3e7855d
+ jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer|int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: True or False
+ reference: ''
+ 8a976d84-efbc-47c0-8e3d-8e3cf89c3e2c: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 8a976d84-efbc-47c0-8e3d-8e3cf89c3e2c
+ jinja: '{{sentence}}
+
+ What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+ }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+ %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: underscore refer to
+ reference: ''
+ 9ee3a3b0-d84f-4d66-bb79-de82ac5040b2: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: 9ee3a3b0-d84f-4d66-bb79-de82ac5040b2
+ jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+ {{sentence}}|||
+
+ {{answer_choices[answer | int - 1]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: stand for
+ reference: ''
+ eb63f6df-99d9-4a00-a165-976e93c7271f: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: eb63f6df-99d9-4a00-a165-976e93c7271f
+ jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+ \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+ \ else %} {{ option2 }} {% endif %}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: Replace
+ reference: ''
+ f87367de-59ca-4859-abe0-26521a77fc67: !Template
+ answer_choices: '{{option1}} ||| {{option2}}'
+ id: f87367de-59ca-4859-abe0-26521a77fc67
+ jinja: 'Fill in the _ in the below sentence:
+
+ {{sentence}}
+
+
+ Choices:
+
+ - {{ option1 }}
+
+ - {{ option2 }}
+
+
+ Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+ endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: fill in the blank
+ reference: ''
diff --git a/promptsource/templates/wiqa/templates.yaml b/promptsource/templates/wiqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..66ec559a88107f02cc2dd37f278420f538970d7f
--- /dev/null
+++ b/promptsource/templates/wiqa/templates.yaml
@@ -0,0 +1,240 @@
+dataset: wiqa
+templates:
+ 1bc8d95b-0a50-49f4-a46b-bd752929926d: !Template
+ answer_choices: null
+ id: 1bc8d95b-0a50-49f4-a46b-bd752929926d
+ jinja: '- {{ question_para_step[1:] | join("\n- ") }}
+
+
+ What might be the first step of the process?
+
+
+ |||
+
+
+ {{ question_para_step | first }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: what_might_be_the_first_step_of_the_process
+ reference: ''
+ 360cd99a-2f83-469a-a505-d80808159dd2: !Template
+ answer_choices: null
+ id: 360cd99a-2f83-469a-a505-d80808159dd2
+ jinja: '
+
+ {% set process_list = question_para_step[:-1] if question_para_step[-1] == ""
+ else question_para_step %}
+
+ - {{ process_list[:-1] | join("\n- ") }}
+
+
+ What might be the last step of the process?
+
+
+ |||
+
+
+ {{ process_list | last }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: what_might_be_the_last_step_of_the_process
+ reference: ''
+ 4191b162-9220-46e5-a2f0-0a763eef55a0: !Template
+ answer_choices: null
+ id: 4191b162-9220-46e5-a2f0-0a763eef55a0
+ jinja: 'What is the missing first step of the following process:
+
+
+ - {{ question_para_step[1:] | join("\n- ") }}
+
+
+ |||
+
+
+ {{ question_para_step | first }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: what_is_the_missing_first_step
+ reference: ''
+ 52d69c02-5ff3-4fe7-bcaf-a6b995a15020: !Template
+ answer_choices: null
+ id: 52d69c02-5ff3-4fe7-bcaf-a6b995a15020
+ jinja: ' {% set process_list = question_para_step[:-1] if question_para_step[-1]
+ == "" else question_para_step %}
+
+ What is the final step of the following process:
+
+ - {{ process_list[:-1] | join("\n- ") }}
+
+
+ |||
+
+
+ {{ process_list | last }}
+
+ '
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: what_is_the_final_step_of_the_following_process
+ reference: ''
+ 5dfee2c2-9742-4003-8ab6-dfe0ce5a745b: !Template
+ answer_choices: '{{choices.text | join("|||")}}'
+ id: 5dfee2c2-9742-4003-8ab6-dfe0ce5a745b
+ jinja: 'Process:
+
+ - {{ question_para_step | join("\n- ")}}
+
+
+ Question:
+
+ {{question_stem}}
+
+
+ How does the supposed perturbation influence the second effect mentioned? Answer
+ with {{"more, less or no effect"}}.
+
+
+ |||
+
+
+ {{answer_label|replace("_", " ")}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: effect_with_string_answer
+ reference: ''
+ 667c291f-6a36-4334-aa49-804c9e72500b: !Template
+ answer_choices: indirectly impacting a step of the process ||| not impacting any
+ step of the process
+ id: 667c291f-6a36-4334-aa49-804c9e72500b
+ jinja: 'Process:
+
+
+ - {{ question_para_step | join("\n- ") }}
+
+
+ {{question_stem}}
+
+
+ Which of the following is the supposed perturbation?
+
+
+ - {{"directly impacting a step of the process"}}
+
+ - {{"indirectly impacting a step of the process"}}
+
+ - {{"not impacting any step of the process"}}
+
+
+
+ |||
+
+
+ {{{"EXOGENOUS_EFFECT": "indirectly impacting a step of the process", "OUTOFPARA_DISTRACTOR":
+ "not impacting any step of the process", "INPARA_EFFECT": "directly impacting
+ a step of the process"}[metadata_question_type]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: which_of_the_following_is_the_supposed_perturbation
+ reference: ''
+ 6cf2b300-6790-4120-9592-9db63bec221b: !Template
+ answer_choices: A ||| B ||| C
+ id: 6cf2b300-6790-4120-9592-9db63bec221b
+ jinja: 'Process:
+
+ - {{ question_para_step | join("\n- ")}}
+
+
+ Question:
+
+ {{question_stem}}
+
+
+ - {{"A: more"}}
+
+ - {{"B: less"}}
+
+ - {{"C: no effect"}}
+
+
+ |||
+
+
+ {{answer_label_as_choice}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: effect_with_label_answer
+ reference: ''
+ a17313bd-94bb-47ab-82bf-538df1b1ad5f: !Template
+ answer_choices: yes ||| no
+ id: a17313bd-94bb-47ab-82bf-538df1b1ad5f
+ jinja: 'Process:
+
+
+ - {{ question_para_step | join("\n- ") }}
+
+
+ Perturbation hypothesis:
+
+ {{question_stem}}
+
+
+ Does the supposed perturbation have an effect (direct or indirect) on the process?
+
+
+ |||
+
+
+ {{{"EXOGENOUS_EFFECT": "yes", "OUTOFPARA_DISTRACTOR": "no", "INPARA_EFFECT":
+ "yes"}[metadata_question_type]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: does_the_supposed_perturbation_have_an_effect
+ reference: ''
diff --git a/promptsource/templates/xnli/en/templates.yaml b/promptsource/templates/xnli/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..794f97416aeb5d9fac09334c6e2c052bad216c75
--- /dev/null
+++ b/promptsource/templates/xnli/en/templates.yaml
@@ -0,0 +1,222 @@
+dataset: xnli
+subset: en
+templates:
+ 161036e2-c397-4def-a813-4a2be119c5d6: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: 161036e2-c397-4def-a813-4a2be119c5d6
+ jinja: 'Take the following as truth: {{premise}}
+
+ Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+ {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: take the following as truth
+ reference: Sanh et al. 2021
+ 172b73dc-d045-491c-9dc2-76bf6566c8ee: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 172b73dc-d045-491c-9dc2-76bf6566c8ee
+ jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+ \ no, or maybe? ||| {{answer_choices[label]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does this imply
+ reference: Sanh et al. 2021
+ 37d2f061-06b0-4aa3-af53-871a2b06748f: !Template
+ answer_choices: True ||| Neither ||| False
+ id: 37d2f061-06b0-4aa3-af53-871a2b06748f
+ jinja: '{{premise}}
+
+ Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: GPT-3 style
+ reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+ are no task-identifying tokens like "anli R1: ".'
+ 5350f9f1-61bb-43a3-9471-17db720f12bc: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 5350f9f1-61bb-43a3-9471-17db720f12bc
+ jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+ ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: does it follow that
+ reference: Sanh et al. 2021
+ 58536115-fd5c-4f29-a85b-420fde6fc5b0: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 58536115-fd5c-4f29-a85b-420fde6fc5b0
+ jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based on the previous passage
+ reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+ 833c65a6-6068-4e00-9833-944cf7510544: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 833c65a6-6068-4e00-9833-944cf7510544
+ jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed true
+ reference: Webson & Pavlick 2021
+ 854211f0-14eb-4370-9998-95c331828d6f: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 854211f0-14eb-4370-9998-95c331828d6f
+ jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+ no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: should assume
+ reference: Webson & Pavlick 2021
+ 8eb1c093-293c-4fcc-9d8c-a1451494ef06: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 8eb1c093-293c-4fcc-9d8c-a1451494ef06
+ jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+ Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: must be true
+ reference: Sanh et al. 2021
+ 9b75ff67-bb66-413b-a33d-325707b035d7: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 9b75ff67-bb66-413b-a33d-325707b035d7
+ jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+ ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: can we infer
+ reference: Webson & Pavlick 2021
+ 9bda8e36-c881-4c9a-a3a9-eec68388a6f6: !Template
+ answer_choices: Yes ||| Maybe ||| No
+ id: 9bda8e36-c881-4c9a-a3a9-eec68388a6f6
+ jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+ or maybe? ||| {{ answer_choices[label] }} '
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: justified in saying
+ reference: Webson & Pavlick 2021
+ c201719f-28f6-44c7-bb09-f82c6b049893: !Template
+ answer_choices: True ||| Inconclusive ||| False
+ id: c201719f-28f6-44c7-bb09-f82c6b049893
+ jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+ {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: claim true/false/inconclusive
+ reference: Sanh et al. 2021
+ c96fd357-3736-489d-a409-4ba210d1be5d: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: c96fd357-3736-489d-a409-4ba210d1be5d
+ jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+ \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+ \ answer_choices[label] }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: consider always/sometimes/never
+ reference: Sanh et al. 2021
+ c9c79c98-2d33-45f8-ab44-e2203883f0b7: !Template
+ answer_choices: Always ||| Sometimes ||| Never
+ id: c9c79c98-2d33-45f8-ab44-e2203883f0b7
+ jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+ {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: always/sometimes/never
+ reference: Sanh et al. 2021
+ d44c2947-f8e0-49ea-9770-e59f0581a921: !Template
+ answer_choices: Guaranteed ||| Possible ||| Impossible
+ id: d44c2947-f8e0-49ea-9770-e59f0581a921
+ jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+ \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+ \ }}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: guaranteed/possible/impossible
+ reference: Sanh et al. 2021
+ da368462-3a66-4222-9de1-05d66037a708: !Template
+ answer_choices: Correct ||| Inconclusive ||| Incorrect
+ id: da368462-3a66-4222-9de1-05d66037a708
+ jinja: '{{premise}} Using only the above description and what you know about the
+ world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: MNLI crowdsource
+ reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
diff --git a/promptsource/templates/xquad/xquad.en/templates.yaml b/promptsource/templates/xquad/xquad.en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..56cea06bc148c3442cc44eb1eb06047a47f124fb
--- /dev/null
+++ b/promptsource/templates/xquad/xquad.en/templates.yaml
@@ -0,0 +1,114 @@
+dataset: xquad
+subset: xquad.en
+templates:
+ 10efb2e0-390c-4bab-9dc7-d90db707b6ae: !Template
+ answer_choices: null
+ id: 10efb2e0-390c-4bab-9dc7-d90db707b6ae
+ jinja: '{{context}}
+
+
+ Generate a question from the above passage: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: given_context_generate_question
+ reference: ''
+ 120fffe0-b752-43f8-bf50-ecf009703ef0: !Template
+ answer_choices: null
+ id: 120fffe0-b752-43f8-bf50-ecf009703ef0
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ Referring to the passage above, the correct answer to the given question is
+ ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_given_context_and_question
+ reference: ''
+ 32a9896f-34d5-4bde-8843-6d01d4621016: !Template
+ answer_choices: null
+ id: 32a9896f-34d5-4bde-8843-6d01d4621016
+ jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\
+ \n{{answers.text[0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_question
+ reference: ''
+ 4bae0661-a3e5-448a-bfa2-69b096b01283: !Template
+ answer_choices: null
+ id: 4bae0661-a3e5-448a-bfa2-69b096b01283
+ jinja: '{{context}}
+
+
+ From the above passage, a reasonable question with "{{answers["text"][0]}}"
+ as the answer would be: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: jeopardy
+ reference: Jeopardy style, adapted from wiki_qa
+ 90b53380-5c3b-4884-8cd1-9b4316da7993: !Template
+ answer_choices: null
+ id: 90b53380-5c3b-4884-8cd1-9b4316da7993
+ jinja: 'Refer to the passage below and answer the following question:
+
+
+ Passage: {{context}}
+
+
+ Question: {{question}}
+
+ |||
+
+ {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_question_given_context
+ reference: ''
+ 9cff064e-97e0-4026-94bc-3f7987856ec7: !Template
+ answer_choices: null
+ id: 9cff064e-97e0-4026-94bc-3f7987856ec7
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ A: ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: given_context_answer_question_variation
+ reference: ''
diff --git a/promptsource/templates/xquad_r/en/templates.yaml b/promptsource/templates/xquad_r/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a16091398f78fe13e1206eb8a0bdaac3a66b3d6f
--- /dev/null
+++ b/promptsource/templates/xquad_r/en/templates.yaml
@@ -0,0 +1,114 @@
+dataset: xquad_r
+subset: en
+templates:
+ 2b5ecd1b-4a91-420b-b91a-ad5d8b19e42a: !Template
+ answer_choices: null
+ id: 2b5ecd1b-4a91-420b-b91a-ad5d8b19e42a
+ jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\
+ \n{{answers.text[0]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_the_question
+ reference: ''
+ ad458b3c-7eb1-4011-9a9b-4e289f172160: !Template
+ answer_choices: null
+ id: ad458b3c-7eb1-4011-9a9b-4e289f172160
+ jinja: 'Refer to the passage below and answer the following question:
+
+
+ Passage: {{context}}
+
+
+ Question: {{question}}
+
+ |||
+
+ {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_question_given_context
+ reference: ''
+ b52c4840-a5c2-4bb1-ab39-0d533d56b336: !Template
+ answer_choices: null
+ id: b52c4840-a5c2-4bb1-ab39-0d533d56b336
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ Referring to the passage above, the correct answer to the given question is
+ ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: answer_given_context_and_question
+ reference: ''
+ c559afa7-10dc-47ed-b407-e1799c835027: !Template
+ answer_choices: null
+ id: c559afa7-10dc-47ed-b407-e1799c835027
+ jinja: '{{context}}
+
+
+ Generate a question from the above passage: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: given_context_generate_question
+ reference: ''
+ d853848e-df82-4575-9c40-98c44ddd9d3f: !Template
+ answer_choices: null
+ id: d853848e-df82-4575-9c40-98c44ddd9d3f
+ jinja: '{{context}}
+
+
+ From the above passage, a reasonable question with "{{answers["text"][0]}}"
+ as the answer would be: ||| {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: jeopardy
+ reference: Jeopardy style, adapted from wiki_qa
+ fd8c7cf6-3654-4622-bdff-b4f9fae52871: !Template
+ answer_choices: null
+ id: fd8c7cf6-3654-4622-bdff-b4f9fae52871
+ jinja: '{{context}}
+
+
+ Q: {{question}}
+
+
+ A: ||| {{answers["text"][0]}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Squad
+ original_task: true
+ name: given_context_answer_question_variation
+ reference: ''
diff --git a/promptsource/templates/xsum/templates.yaml b/promptsource/templates/xsum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..568304510c8662ebfa66a9bd9e82a11bf4ae2859
--- /dev/null
+++ b/promptsource/templates/xsum/templates.yaml
@@ -0,0 +1,180 @@
+dataset: xsum
+templates:
+ 13c02904-e4e2-4b4f-b115-44b437d22041: !Template
+ answer_choices: null
+ id: 13c02904-e4e2-4b4f-b115-44b437d22041
+ jinja: '{{document}}
+
+
+ ===
+
+
+ Write a summary of the text above: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: DOC_write_summary_of_above
+ reference: ''
+ 30292806-8e58-463c-8d92-ba525411c6fa: !Template
+ answer_choices: null
+ id: 30292806-8e58-463c-8d92-ba525411c6fa
+ jinja: 'Article: {{document}}
+
+
+ Summary: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: article_DOC_summary
+ reference: Prefix-Tuning
+ 3d388a1e-3361-407b-baa7-61397cc58382: !Template
+ answer_choices: null
+ id: 3d388a1e-3361-407b-baa7-61397cc58382
+ jinja: '{{document}}
+
+ How would you rephrase that in a few words? ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: DOC_how_would_you_rephrase_few_words
+ reference: http://gptprompts.wikidot.com/prompt:summarization
+ 4cfe4126-b9f5-44eb-8a98-973987c5f32e: !Template
+ answer_choices: null
+ id: 4cfe4126-b9f5-44eb-8a98-973987c5f32e
+ jinja: 'My college roommate asked me what this article means:
+
+
+ {{document}}
+
+
+ So I recapped it in layman''s terms: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: college_roommate_asked_DOC_so_I_recap
+ reference: http://gptprompts.wikidot.com/prompt:summarization
+ 57a7a3f1-91f8-4f4b-b72d-745d7cb7b1e3: !Template
+ answer_choices: null
+ id: 57a7a3f1-91f8-4f4b-b72d-745d7cb7b1e3
+ jinja: '{{document}}
+
+ This boils down to the simple idea that ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: DOC_boils_down_to_simple_idea_that
+ reference: http://gptprompts.wikidot.com/prompt:summarization
+ 65a3c419-57e9-48c2-b090-0c5d7adb23c6: !Template
+ answer_choices: null
+ id: 65a3c419-57e9-48c2-b090-0c5d7adb23c6
+ jinja: 'Summarize: {{document}}|||
+
+ {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: summarize_DOC
+ reference: ''
+ 752fda48-e64c-47a7-8342-17c2c113f600: !Template
+ answer_choices: null
+ id: 752fda48-e64c-47a7-8342-17c2c113f600
+ jinja: 'Summarize this document: {{document}}
+
+ Summary: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: summarize_this_DOC_summary
+ reference: ''
+ 826ffcd4-c0e6-4f4c-bd9a-fcf8ee169ede: !Template
+ answer_choices: null
+ id: 826ffcd4-c0e6-4f4c-bd9a-fcf8ee169ede
+ jinja: '{{document}}
+
+
+ ===
+
+
+ Given the above document, write one sentence to summarize: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: DOC_given_above_write_one_sentence
+ reference: ''
+ 9a3f617f-628f-4fa5-9b74-47d0b166a487: !Template
+ answer_choices: null
+ id: 9a3f617f-628f-4fa5-9b74-47d0b166a487
+ jinja: 'First, please read the article below.
+
+
+ {{document}}
+
+
+ Now, can you write me an extremely short abstract for it? ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: read_below_DOC_write_abstract
+ reference: ''
+ d878b768-9da2-4d9d-9517-1edcca3b1b26: !Template
+ answer_choices: null
+ id: d878b768-9da2-4d9d-9517-1edcca3b1b26
+ jinja: '{{document}}
+
+
+ TL;DR: ||| {{summary}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - ROUGE
+ - BLEU
+ original_task: true
+ name: DOC_tldr
+ reference: GPT-2 TLDR
diff --git a/promptsource/templates/yahoo_answers_qa/templates.yaml b/promptsource/templates/yahoo_answers_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d68d9da5e39a2d72d9d76f5ec712b613bfd7401b
--- /dev/null
+++ b/promptsource/templates/yahoo_answers_qa/templates.yaml
@@ -0,0 +1,116 @@
+dataset: yahoo_answers_qa
+templates:
+ 1a6eda86-debc-4681-b643-f1f16fedd713: !Template
+ answer_choices: null
+ id: 1a6eda86-debc-4681-b643-f1f16fedd713
+ jinja: "Answer the following question: \n\n{{question}} |||\n{{nbestanswers|choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: n_best_answer
+ reference: Template chooses a random answer for the given question
+ 2c214261-f32b-42ab-ac90-b22f4f2f465f: !Template
+ answer_choices: Society & Culture ||| Science & Mathematics ||| Health ||| Education
+ & Reference ||| Computers & Internet ||| Sports ||| Business & Finance ||| Entertainment
+ & Music ||| Family & Relationships ||| Politics & Government
+ id: 2c214261-f32b-42ab-ac90-b22f4f2f465f
+ jinja: "What is the category of the following question? \nPossible topics: {{answer_choices\
+ \ |join(', ')}}.\n\n{{question}} |||\n{{main_category}}\n"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: false
+ name: category
+ reference: Template asks the model to pick the correct category for the question
+ (topics from the yahoo_answers_topics dataset)
+ 7fc9307b-99ad-457a-8b60-c44bd6b2d86c: !Template
+ answer_choices: null
+ id: 7fc9307b-99ad-457a-8b60-c44bd6b2d86c
+ jinja: 'Given the best answer, "{{answer}}", generate the question: |||
+
+ {{question}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: answer_to_question
+ reference: Template generates the question from the best answer
+ 914f3d07-cc51-487e-8e2a-91dc92c49dda: !Template
+ answer_choices: null
+ id: 914f3d07-cc51-487e-8e2a-91dc92c49dda
+ jinja: "Using the internet, answer the following question: \n\n{{question}} |||\n\
+ {{nbestanswers|choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: using_internet_answer
+ reference: Template asks the model to answer the question with a prefix "using
+ the internet"
+ 922f01b3-98e8-419c-bcb0-a854abc7f4f3: !Template
+ answer_choices: null
+ id: 922f01b3-98e8-419c-bcb0-a854abc7f4f3
+ jinja: "Answer the following question:\n{{question}} \n\nHint: The question is\
+ \ related to {{main_category}}. \n\n|||\n{{nbestanswers|choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: hint_question_answer
+ reference: 'Template asks the model to generate the answer for the question. The
+ template provides a hint. '
+ f0eeb5cb-1364-44c2-b284-de67452aef6d: !Template
+ answer_choices: null
+ id: f0eeb5cb-1364-44c2-b284-de67452aef6d
+ jinja: 'What is the best answer for the following question?
+
+
+ {{question}} |||
+
+ {{answer}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: best_answer
+ reference: The template asks to generate the correct answer
+ fab0eea0-158a-484b-9cfe-87dfb7b0d627: !Template
+ answer_choices: null
+ id: fab0eea0-158a-484b-9cfe-87dfb7b0d627
+ jinja: "{{question}} \n\nFor full credit on the open-book test, you need to answer\
+ \ the question. You are allowed to use any resource including the internet.\
+ \ \n\n|||\n{{nbestanswers|choice}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: true
+ name: exam_style_prompt
+ reference: Template asks the model to answer an exam-style prompt
diff --git a/promptsource/templates/yahoo_answers_topics/templates.yaml b/promptsource/templates/yahoo_answers_topics/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2dc36eb92be1186ebe6f151a809cebac4c46dcf9
--- /dev/null
+++ b/promptsource/templates/yahoo_answers_topics/templates.yaml
@@ -0,0 +1,131 @@
+dataset: yahoo_answers_topics
+templates:
+ 21e4d34e-0f80-4056-8870-bb303ba12dca: !Template
+ answer_choices: null
+ id: 21e4d34e-0f80-4056-8870-bb303ba12dca
+ jinja: 'Given the answer below, suggest a possible question title:
+
+
+ Answer: {{ best_answer}} |||
+
+ {{ question_title}}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: generate_question_from_answer
+ reference: Prompt template generates the question from the answer
+ 2b5a86e3-51a8-4b74-bf2c-15dca37ac98a: !Template
+ answer_choices: Society & Culture ||| Science & Mathematics ||| Health ||| Education
+ & Reference ||| Computers & Internet ||| Sports ||| Business & Finance ||| Entertainment
+ & Music ||| Family & Relationships ||| Politics & Government
+ id: 2b5a86e3-51a8-4b74-bf2c-15dca37ac98a
+ jinja: "Question: {{ question_title }}; {{ question_content }} \n\nAnswer: {{best_answer}}\n\
+ \nFrom the list of topics, choose the correct topic that relates to the question\
+ \ and passage: \n- {{answer_choices|join('\\n- ')}}\n|||\n{{ answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: options_for_topic
+ reference: The template asks the model to choose from a list of topics
+ 32ca4b76-fbb1-4846-94c9-9e968c627ed9: !Template
+ answer_choices: null
+ id: 32ca4b76-fbb1-4846-94c9-9e968c627ed9
+ jinja: 'Given the question and additional details, answer the question:
+
+
+ Question: {{ question_title }}
+
+
+ Additional details: {{ question_content }} |||
+
+ {{ best_answer }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - BLEU
+ - ROUGE
+ original_task: false
+ name: question_answer
+ reference: The prompt asks to answer the question
+ 568a5d5e-ec5c-4ceb-9620-df5e86280143: !Template
+ answer_choices: Society & Culture ||| Science & Mathematics ||| Health ||| Education
+ & Reference ||| Computers & Internet ||| Sports ||| Business & Finance ||| Entertainment
+ & Music ||| Family & Relationships ||| Politics & Government
+ id: 568a5d5e-ec5c-4ceb-9620-df5e86280143
+ jinja: "Given the question title, question content, and the answer, classify the\
+ \ question into one of these topics, {{answer_choices|join(', ')}}:\n\nQuestion\
+ \ Title: {{ question_title }}\n\nQuestion content: {{ question_content }} \n\
+ \nAnswer: {{best_answer}}\n|||\n{{ answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: question_content_answer_classification
+ reference: Prompt classifies question, content, and answer into a topic
+ 846d9711-0389-4a2a-8545-0d021f858cd1: !Template
+ answer_choices: Society & Culture ||| Science & Mathematics ||| Health ||| Education
+ & Reference ||| Computers & Internet ||| Sports ||| Business & Finance ||| Entertainment
+ & Music ||| Family & Relationships ||| Politics & Government
+ id: 846d9711-0389-4a2a-8545-0d021f858cd1
+ jinja: "Question: {{ question_title }}\n\nAdditional details: {{question_content}}\
+ \ \n\nBest answer: {{best_answer}} \n\nIn this test, you need to classify the\
+ \ document into one of the following categories: {{answer_choices |join(', ')}}.\
+ \ \n||| \n{{ answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_document_exam_style
+ reference: Prompt asks to classify documents with an exam-style prompt
+ b3ca202f-11ba-4c96-92eb-2e766749e7a2: !Template
+ answer_choices: Society & Culture ||| Science & Mathematics ||| Health ||| Education
+ & Reference ||| Computers & Internet ||| Sports ||| Business & Finance ||| Entertainment
+ & Music ||| Family & Relationships ||| Politics & Government
+ id: b3ca202f-11ba-4c96-92eb-2e766749e7a2
+ jinja: "Question: {{ question_title }}; {{ question_content }} \n\nAnswer: {{best_answer}}\n\
+ \nTo win the prize in the game, pick the right topic for the question-answer\
+ \ pair from the list of topics: {{answer_choices|join(', ')}}.\n|||\n{{ answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: gameshow_topic_classification
+ reference: This template asks the model to choose the correct topic in a gameshow
+ style format
+ b56d1289-d3df-4e66-88b5-737dce09b467: !Template
+ answer_choices: Society & Culture ||| Science & Mathematics ||| Health ||| Education
+ & Reference ||| Computers & Internet ||| Sports ||| Business & Finance ||| Entertainment
+ & Music ||| Family & Relationships ||| Politics & Government
+ id: b56d1289-d3df-4e66-88b5-737dce09b467
+ jinja: "Given the question title below, classify the document into one of these\
+ \ topics: {{answer_choices |join(', ')}}.\n\nQuestion: {{ question_title }}\
+ \ \n\n|||\n{{ answer_choices[topic]}}"
+ metadata: !TemplateMetadata
+ choices_in_prompt: true
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: classify_document
+ reference: Classify the topic of the document
diff --git a/promptsource/templates/yelp_polarity/templates.yaml b/promptsource/templates/yelp_polarity/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45331a64b9632a4c436190192ed2c3f0b832832d
--- /dev/null
+++ b/promptsource/templates/yelp_polarity/templates.yaml
@@ -0,0 +1,149 @@
+dataset: yelp_polarity
+templates:
+ 01dc166f-0774-4be0-b606-2beb2252d9b5: !Template
+ answer_choices: bad ||| good
+ id: 01dc166f-0774-4be0-b606-2beb2252d9b5
+ jinja: '{{ text }}
+
+ Overall, the experience is ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: experience_good_bad
+ reference: ''
+ 13f829c1-ca58-4efd-b1f0-14446b176871: !Template
+ answer_choices: low ||| high
+ id: 13f829c1-ca58-4efd-b1f0-14446b176871
+ jinja: '{{ text }}
+
+ Based on that, my rating for this place is ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: rating_high_low
+ reference: ''
+ 19e426b1-26e6-462e-a556-0ec3b9402e13: !Template
+ answer_choices: Yes ||| No
+ id: 19e426b1-26e6-462e-a556-0ec3b9402e13
+ jinja: '{{ text }}
+
+ Did I regret it? ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: regret_yes_or_no
+ reference: ''
+ 363d31a1-6706-47fd-ad7e-d648cf23bbaa: !Template
+ answer_choices: no ||| yes
+ id: 363d31a1-6706-47fd-ad7e-d648cf23bbaa
+ jinja: '{{ text }}
+
+ If you ask me whether I will come again, my answer is ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: come_again
+ reference: ''
+ a5ec86fb-046d-482b-a552-8499b8b59b8f: !Template
+ answer_choices: No ||| Yes
+ id: a5ec86fb-046d-482b-a552-8499b8b59b8f
+ jinja: 'Review:
+
+ {{ text }}
+
+
+ Will you come here again? |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: format_come_again
+ reference: Simulating website
+ e0cc8573-5eda-413a-a20f-1da3e7077736: !Template
+ answer_choices: dislike this place. ||| like this place.
+ id: e0cc8573-5eda-413a-a20f-1da3e7077736
+ jinja: '{{ text }}
+
+ That being said, I ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: like_dislike
+ reference: ''
+ e14da8aa-2995-4aed-a90b-adf2bec2b90e: !Template
+ answer_choices: Bad ||| Good
+ id: e14da8aa-2995-4aed-a90b-adf2bec2b90e
+ jinja: 'Review:
+
+ {{ text }}
+
+
+ Overall rating: |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: format_good_bad
+ reference: Simulating the website.
+ e7bb6ec7-921a-4889-93e9-84d957c6035b: !Template
+ answer_choices: bad ||| good
+ id: e7bb6ec7-921a-4889-93e9-84d957c6035b
+ jinja: '{{ text }}
+
+ In a nutshell, this place is ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: place_good_bad
+ reference: ''
+ fb412829-d8a3-4faa-8443-a2ebe6545f6c: !Template
+ answer_choices: no ||| yes
+ id: fb412829-d8a3-4faa-8443-a2ebe6545f6c
+ jinja: '{{ text }}
+
+ If you ask me whether I like this place, the answer is ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: like_dislike_2
+ reference: ''
diff --git a/promptsource/templates/yelp_review_full/templates.yaml b/promptsource/templates/yelp_review_full/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..84c027c177d66516aa77762ea865138d3191ed49
--- /dev/null
+++ b/promptsource/templates/yelp_review_full/templates.yaml
@@ -0,0 +1,123 @@
+dataset: yelp_review_full
+templates:
+ 135fcd11-9fcc-4b55-bf1b-9b76290d0f6b: !Template
+ answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+ id: 135fcd11-9fcc-4b55-bf1b-9b76290d0f6b
+ jinja: '{{ text }}
+
+ So I would like to give it ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: so_i_would
+ reference: ''
+ 27b6bc81-bb1c-467b-91c0-22a4d6a19f44: !Template
+ answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+ id: 27b6bc81-bb1c-467b-91c0-22a4d6a19f44
+ jinja: '{{ text }}
+
+ ===
+
+ Based on that, my rating is ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: based_on_that
+ reference: ''
+ 29fc6386-90b3-4976-b249-26e49fe7c924: !Template
+ answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+ id: 29fc6386-90b3-4976-b249-26e49fe7c924
+ jinja: 'Review text:
+
+ {{ text }}
+
+
+ Stars: |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: format_star
+ reference: simulating webpage
+ 2a57af86-e25a-4572-ba9e-aa921842c04b: !Template
+ answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+ id: 2a57af86-e25a-4572-ba9e-aa921842c04b
+ jinja: '{{ text }} My rating for this place is ||| {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: this_place
+ reference: ''
+ 4dd990b3-7201-4cba-bb9a-baa462d68b1a: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 4dd990b3-7201-4cba-bb9a-baa462d68b1a
+ jinja: 'Review text:
+
+ {{ text }}
+
+
+ Review score (between 1 and 5): |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: format_score
+ reference: Simulating webpage
+ 6d4bfb59-4260-40a5-9da5-e061720bd430: !Template
+ answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+ id: 6d4bfb59-4260-40a5-9da5-e061720bd430
+ jinja: 'Review: {{text}}
+
+ On a scale of 1 to 5, I would give this product ||| {{ answer_choices[label]
+ }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: on_a_scale
+ reference: ''
+ e8091beb-c0fa-490d-9e0c-32eb6907dbc0: !Template
+ answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+ id: e8091beb-c0fa-490d-9e0c-32eb6907dbc0
+ jinja: 'Review text:
+
+ {{ text }}
+
+
+ Review rating: |||
+
+ {{ answer_choices[label] }}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Accuracy
+ original_task: true
+ name: format_rating
+ reference: It's simulating the format of a webpage.
diff --git a/promptsource/templates/zest/templates.yaml b/promptsource/templates/zest/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d2d2906fe8836908acccd7c8f73eddc36c6b6e3
--- /dev/null
+++ b/promptsource/templates/zest/templates.yaml
@@ -0,0 +1,202 @@
+dataset: zest
+templates:
+ 223c6226-5f2d-4dd8-9710-4657ffb54f13: !Template
+ answer_choices: null
+ id: 223c6226-5f2d-4dd8-9710-4657ffb54f13
+ jinja: '{{context}}
+
+ {{question}}|||
+
+ {% if answer != []%}
+
+ {% if answer[0] == "n/a" %}
+
+ I don''t know
+
+ {% else %}
+
+ {{answer | choice |replace("|", " or ")}}
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: concat_quest_context
+ reference: 'Concatenate question and context. Metric: F1 score with modified
+ precision and recall; see the paper https://arxiv.org/abs/2011.08115'
+ 2283cebf-988e-4bff-96bf-982a09963e49: !Template
+ answer_choices: No ||| Yes
+ id: 2283cebf-988e-4bff-96bf-982a09963e49
+ jinja: 'Decide whether the question "{{question}}" is answerable solely based
+ on this passage:
+
+ {{context}}
+
+ Answer: |||
+
+ {% if answer != []%}
+
+ {% if answer[0] == "n/a" %}
+
+ {{ answer_choices[0] }}
+
+ {% else %}
+
+ {{ answer_choices[1] }}
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: false
+ name: answerable_or_not
+ reference: 'Decide whether this question is answerable. Metric: F1 score with
+ modified precision and recall; see the paper https://arxiv.org/abs/2011.08115'
+ 6f694e45-1d17-4067-a1f6-7dae89c148db: !Template
+ answer_choices: null
+ id: 6f694e45-1d17-4067-a1f6-7dae89c148db
+ jinja: 'My daughter is asking me a question about {{domain | replace("_", " ")}}:
+ "{{question}}"
+
+ Here''s what I found on the internet: {{context}}
+
+ What''s the answer?
+
+ Answer: |||
+
+ {% if answer != []%}
+
+ {% if answer[0] == "n/a" %}
+
+ Can''t answer
+
+ {% else %}
+
+ {{answer | choice |replace("|", " or ")}}
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: ask_question_as_kid
+ reference: 'Answer the questions of a curious kid. Metric: F1 score with modified
+ precision and recall; see the paper https://arxiv.org/abs/2011.08115'
+ 7425232a-9880-428c-9ddc-4070e50e22cc: !Template
+ answer_choices: null
+ id: 7425232a-9880-428c-9ddc-4070e50e22cc
+ jinja: 'Answer the question based on the context. If the question is not answerable
+ with the context alone, say "Can''t answer".
+
+ {{context}}
+
+ {{question}}|||
+
+ {% if answer != []%}
+
+ {% if answer[0] == "n/a" %}
+
+ Can''t answer
+
+ {% else %}
+
+ {{answer | choice |replace("|", " or ")}}
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: gpt3_instruct_format
+ reference: 'Template format from GPT-3 Instruct. Metric: F1 score with modified
+ precision and recall; see the paper https://arxiv.org/abs/2011.08115'
+ bdaf4f8a-2344-4e46-a52b-2045a080a4b2: !Template
+ answer_choices: null
+ id: bdaf4f8a-2344-4e46-a52b-2045a080a4b2
+ jinja: 'Answer the question about {{domain | replace("_", " ")}} based on the
+ context below. If the question is not answerable with the context alone, say
+ "Can''t answer".
+
+ Context:
+
+ {{context}}
+
+ Question:
+
+ {{question}}|||
+
+ {% if answer != []%}
+
+ {% if answer[0] == "n/a" %}
+
+ Can''t answer
+
+ {% else %}
+
+ {{answer | choice |replace("|", " or ")}}
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: gpt3_instruct_format_with_domain
+ reference: 'Template format from GPT-3 Instruct with the question''s domain. Metric:
+ F1 score with modified precision and recall; see the paper https://arxiv.org/abs/2011.08115'
+ cd563834-49ee-495d-ac46-99f0264e58d5: !Template
+ answer_choices: null
+ id: cd563834-49ee-495d-ac46-99f0264e58d5
+ jinja: 'I am giving my students the following question about {{domain | replace("_",
+ " ")}}: "{{question}}".
+
+ What should be their answer based on this context: {{context}}|||
+
+ {% if answer != []%}
+
+ {% if answer[0] == "n/a" %}
+
+ I don''t know
+
+ {% else %}
+
+ {{answer | choice |replace("|", " or ")}}
+
+ {% endif %}
+
+ {% endif %}'
+ metadata: !TemplateMetadata
+ choices_in_prompt: false
+ languages:
+ - en
+ metrics:
+ - Other
+ original_task: true
+ name: ask_question_as_teacher
+ reference: 'Uses an ''I don''t know'' answer for unanswerable questions. Metric:
+ F1 score with modified precision and recall; see the paper https://arxiv.org/abs/2011.08115'
diff --git a/promptsource/utils.py b/promptsource/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce57a2ac159508bdfb39a61188bd1031e28e6f6f
--- /dev/null
+++ b/promptsource/utils.py
@@ -0,0 +1,149 @@
+# coding=utf-8
+import os
+
+import datasets
+import requests
+
+from promptsource import DEFAULT_PROMPTSOURCE_CACHE_HOME
+from promptsource.templates import INCLUDED_USERS
+
+
+def removeHyphen(example):
+ example_clean = {}
+ for key in example.keys():
+ if "-" in key:
+ new_key = key.replace("-", "_")
+ example_clean[new_key] = example[key]
+ else:
+ example_clean[key] = example[key]
+ example = example_clean
+ return example
+
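+# A small usage sketch for removeHyphen (the field names are hypothetical):
+#   removeHyphen({"best-answer": "42", "question": "?"})
+#   returns {"best_answer": "42", "question": "?"}, leaving keys without
+#   hyphens untouched, so column names become valid Jinja identifiers.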
+
+def renameDatasetColumn(dataset):
+ col_names = dataset.column_names
+ for cols in col_names:
+ if "-" in cols:
+ dataset = dataset.rename_column(cols, cols.replace("-", "_"))
+ return dataset
+
+
+#
+# Helper functions for datasets library
+#
+
+
+def get_dataset_builder(path, conf=None):
+ "Get a dataset builder from name and conf."
+ module_path = datasets.load.dataset_module_factory(path)
+ builder_cls = datasets.load.import_main_class(module_path.module_path, dataset=True)
+ if conf:
+ builder_instance = builder_cls(name=conf, cache_dir=None, hash=module_path.hash)
+ else:
+ builder_instance = builder_cls(cache_dir=None, hash=module_path.hash)
+ return builder_instance
+
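+# For illustration (assumes these datasets exist on the Hugging Face Hub):
+#   get_dataset_builder("ag_news")            # dataset without configurations
+#   get_dataset_builder("glue", conf="mrpc")  # dataset with a named config
+# The returned builder exposes metadata such as builder.info.features without
+# downloading the data itself.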
+
+def get_dataset(path, conf=None):
+ "Get a dataset from name and conf."
+ builder_instance = get_dataset_builder(path, conf)
+ if builder_instance.manual_download_instructions is None and builder_instance.info.size_in_bytes is not None:
+ builder_instance.download_and_prepare()
+ return builder_instance.as_dataset()
+ else:
+ return load_dataset(path, conf)
+
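+# get_dataset prefers preparing the data through the builder when it can be
+# downloaded automatically; otherwise it falls back to load_dataset below,
+# which knows how to locate manually downloaded data.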
+
+def load_dataset(dataset_name, subset_name):
+ try:
+ return datasets.load_dataset(dataset_name, subset_name)
+ except datasets.builder.ManualDownloadError:
+ cache_root_dir = (
+ os.environ["PROMPTSOURCE_MANUAL_DATASET_DIR"]
+ if "PROMPTSOURCE_MANUAL_DATASET_DIR" in os.environ
+ else DEFAULT_PROMPTSOURCE_CACHE_HOME
+ )
+ data_dir = (
+ f"{cache_root_dir}/{dataset_name}"
+ if subset_name is None
+ else f"{cache_root_dir}/{dataset_name}/{subset_name}"
+ )
+ return datasets.load_dataset(
+ dataset_name,
+ subset_name,
+ data_dir=data_dir,
+ )
+
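+# A sketch of the manual-download fallback (paths are illustrative): with
+# PROMPTSOURCE_MANUAL_DATASET_DIR=/data/manual, a manual-download dataset such
+# as ("story_cloze", "2016") would be loaded with
+# data_dir=/data/manual/story_cloze/2016.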
+
+def get_dataset_confs(path):
+ "Get the list of confs for a dataset."
+ module_path = datasets.load.dataset_module_factory(path).module_path
+ # Get dataset builder class from the processing script
+ builder_cls = datasets.load.import_main_class(module_path, dataset=True)
+ # Instantiate the dataset builder
+ confs = builder_cls.BUILDER_CONFIGS
+ if confs and len(confs) > 1:
+ return confs
+ return []
+
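+# Note that datasets with zero or one configuration both return [], so callers
+# can treat "no configs" and "single default config" uniformly.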
+
+def render_features(features):
+ """Recursively render the dataset schema (i.e. the fields)."""
+ if isinstance(features, dict):
+ return {k: render_features(v) for k, v in features.items()}
+ if isinstance(features, datasets.features.ClassLabel):
+ return features.names
+
+ if isinstance(features, datasets.features.Value):
+ return features.dtype
+
+ if isinstance(features, datasets.features.Sequence):
+ return {"[]": render_features(features.feature)}
+ return features
+
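+# Example of the rendered schema (a sketch, not tied to a real dataset): a
+# features dict {"label": ClassLabel(names=["neg", "pos"]),
+# "tokens": Sequence(Value("string"))} renders as
+# {"label": ["neg", "pos"], "tokens": {"[]": "string"}}.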
+
+#
+# Loads dataset information
+#
+
+
+def filter_english_datasets():
+ """
+ Filter English datasets based on language tags in metadata.
+
+ Also includes the datasets of any users listed in INCLUDED_USERS
+ """
+ english_datasets = []
+
+ response = requests.get("https://huggingface.co/api/datasets?full=true")
+ tags = response.json()
+
+ for dataset in tags:
+ dataset_name = dataset["id"]
+
+ is_community_dataset = "/" in dataset_name
+ if is_community_dataset:
+ user = dataset_name.split("/")[0]
+ if user in INCLUDED_USERS:
+ english_datasets.append(dataset_name)
+ continue
+
+ if "cardData" not in dataset:
+ continue
+ metadata = dataset["cardData"]
+
+ if "language" not in metadata:
+ continue
+ languages = metadata["language"]
+
+ if "en" in languages or "en-US" in languages:
+ english_datasets.append(dataset_name)
+
+ return sorted(english_datasets)
+
+
+def list_datasets():
+ """Get all the datasets to work with."""
+ dataset_list = filter_english_datasets()
+ dataset_list.sort(key=lambda x: x.lower())
+ return dataset_list
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9704fdea88d9681983a6996042a9d77d69364dfe
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+black<=21.12b0
+datasets>=1.7.0
+flake8
+isort==5.8.0
+pytest
+pyyaml>=5
+streamlit==0.82
+jinja2
+plotly
+requests
+pandas
+py7zr
+pygments
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..7d20505c0dafb3a7496affc0dd0584c8c20a793d
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,13 @@
+[isort]
+ensure_newline_before_comments = True
+force_grid_wrap = 0
+include_trailing_comma = True
+line_length = 119
+lines_after_imports = 2
+multi_line_output = 3
+use_parentheses = True
+
+
+[flake8]
+ignore = E203, E501, W503
+max-line-length = 119
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c4210721188119a749d3f3212380e69af5fea6a
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,54 @@
+from setuptools import setup, find_packages
+
+with open("README.md", "r", encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+requirements = [
+ "black<=21.12b0",
+ "datasets>=1.7.0",
+ "flake8",
+ "isort==5.8.0",
+ "pytest",
+ "pyyaml>=5",
+ "streamlit==0.82",
+ "jinja2",
+ "plotly",
+ "requests",
+ "pandas",
+ ##############################################################
+ # Dependencies in this section are added for specific datasets
+ ##############################################################
+ "py7zr",
+ ##############################################################
+ # End of dataset-specific dependencies
+ ##############################################################
+]
+
+setup(
+ name='promptsource',
+ version='0.2.3',
+ url='https://github.com/bigscience-workshop/promptsource.git',
+ author='BigScience - Prompt Engineering Working Group',
+ author_email='sbach@cs.brown.edu,victor@huggingface.co',
+ python_requires='>=3.7,<3.10',
+ install_requires=requirements,
+ classifiers=[
+ 'Development Status :: 2 - Pre-Alpha',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Natural Language :: English',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ ],
+ description='An Integrated Development Environment and Repository for Natural Language Prompts.',
+ packages=find_packages(),
+ license="Apache Software License 2.0",
+ long_description=readme,
+ long_description_content_type="text/markdown",
+ package_data={"": [
+ "templates/*/*.yaml",
+ "templates/*/*/*.yaml",
+ ]}
+)
diff --git a/test/show_templates.py b/test/show_templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbaa2cf462af8b8755747fcab8989770a272b10a
--- /dev/null
+++ b/test/show_templates.py
@@ -0,0 +1,81 @@
+import argparse
+import textwrap
+
+from promptsource.templates import TemplateCollection, INCLUDED_USERS
+from promptsource.utils import get_dataset
+
+
+parser = argparse.ArgumentParser(description="Show and render the templates for a dataset.")
+parser.add_argument("dataset_path", type=str, help="path to dataset name")
+
+args = parser.parse_args()
+if "templates.yaml" not in args.dataset_path:
+ exit()
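+# Anything that is not a templates.yaml path (for example, a Python source
+# file) is skipped above, so the script can be pointed at arbitrary files.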
+
+path = args.dataset_path.split("/")
+
+if path[2] in INCLUDED_USERS:
+ print("Skipping showing templates for community dataset.")
+else:
+ dataset_name = path[2]
+ subset_name = path[3] if len(path) == 5 else ""
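+ # The path is expected to look like
+ # promptsource/templates/<dataset_name>[/<subset_name>]/templates.yaml,
+ # so path[2] is the dataset and path[3] (when present) is the subset.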
+
+ template_collection = TemplateCollection()
+
+ dataset = get_dataset(dataset_name, subset_name)
+ splits = list(dataset.keys())
+
+ dataset_templates = template_collection.get_dataset(dataset_name, subset_name)
+ template_list = dataset_templates.all_template_names
+
+ width = 80
+ print("DATASET ", args.dataset_path)
+
+ # First show all the templates.
+ for template_name in template_list:
+ template = dataset_templates[template_name]
+ print("TEMPLATE")
+ print("NAME:", template_name)
+ print("Is Original Task: ", template.metadata.original_task)
+ print(template.jinja)
+ print()
+
+ # Show examples of the templates.
+ for template_name in template_list:
+ template = dataset_templates[template_name]
+ print()
+ print("TEMPLATE")
+ print("NAME:", template_name)
+ print("REFERENCE:", template.reference)
+ print("--------")
+ print()
+ print(template.jinja)
+ print()
+
+ for split_name in splits:
+ dataset_split = dataset[split_name]
+
+ print_counter = 0
+ for example in dataset_split:
+ print("\t--------")
+ print("\tSplit ", split_name)
+ print("\tExample ", example)
+ print("\t--------")
+ output = template.apply(example)
+ if output[0].strip() == "" or (len(output) > 1 and output[1].strip() == ""):
+ print("\t Blank result")
+ continue
+
+ xp, yp = output
+ print()
+ print("\tPrompt | X")
+ for line in textwrap.wrap(xp, width=width, replace_whitespace=False):
+ print("\t", line.replace("\n", "\n\t"))
+ print()
+ print("\tY")
+ for line in textwrap.wrap(yp, width=width, replace_whitespace=False):
+ print("\t", line.replace("\n", "\n\t"))
+
+ print_counter += 1
+ if print_counter >= 10:
+ break
diff --git a/test/test_templates.py b/test/test_templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8a7bc91924c42050167220d93371540885a25d1
--- /dev/null
+++ b/test/test_templates.py
@@ -0,0 +1,128 @@
+import time
+from jinja2 import meta, TemplateError
+import pytest
+import promptsource.templates
+from promptsource.utils import get_dataset_builder
+from uuid import UUID
+
+# Sets up Jinja environment
+env = promptsource.templates.env
+
+# Loads templates and iterates over each data (sub)set
+template_collection = promptsource.templates.TemplateCollection()
+
+
+def test_uuids():
+ """
+ Checks that all UUIDs across promptsource are unique. (Although collisions
+ are unlikely, copying and pasting YAML files could lead to duplicates.)
+ """
+ all_uuids = {}
+
+ # Iterates over all datasets
+ for dataset_name, subset_name in template_collection.keys:
+
+ # Iterates over each template for current data (sub)set
+ dataset_templates = template_collection.get_dataset(dataset_name, subset_name)
+ for template_name in dataset_templates.all_template_names:
+ template = dataset_templates[template_name]
+
+ uuid = template.get_id()
+
+ if uuid in all_uuids:
+ raise ValueError(f"Template {template_name} for dataset {dataset_name}/{subset_name} "
+ f"has duplicate uuid {template.get_id()} as "
+ f"{all_uuids[uuid][0]}/{all_uuids[uuid][1]}.")
+
+ all_uuids[uuid] = (dataset_name, subset_name)
+
+
+@pytest.mark.parametrize("dataset", template_collection.keys)
+def test_dataset(dataset):
+ """
+ Validates all the templates in the repository with simple syntactic checks:
+ 0. Are all templates parsable YAML?
+ 1. Do all templates parse in Jinja and are all referenced variables in the dataset schema?
+ 2. Does the template contain a prompt/output separator "|||" ?
+ 3. Are all names and templates within a data (sub)set unique?
+ 4. Is the YAML dictionary properly formatted?
+ 5. Is the UUID valid?
+
+ :param dataset: (dataset_name, subset_name) pair to test
+
+ """
+ dataset_name, subset_name = dataset
+
+ # Loads dataset information
+ tries = 0
+ max_tries = 3
+ while True:
+ try:
+ builder_instance = get_dataset_builder(dataset_name, subset_name)
+ break
+ except ConnectionError as e:
+ if tries < max_tries:
+ time.sleep(2)
+ tries += 1
+ else:
+ raise e
+
+ has_features = builder_instance.info.features is not None
+ if has_features:
+ features = builder_instance.info.features.keys()
+ features = set([feature.replace("-", "_") for feature in features])
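+ # Hyphens are replaced because they are not valid in Jinja identifiers;
+ # this mirrors the key normalization in promptsource.utils.removeHyphen.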
+
+ # Initializes sets for checking uniqueness among templates
+ template_name_set = set()
+ template_jinja_set = set()
+
+ # Iterates over each template for current data (sub)set
+ dataset_templates = template_collection.get_dataset(dataset_name, subset_name)
+ any_original = False
+ for template_name in dataset_templates.all_template_names:
+ template = dataset_templates[template_name]
+ any_original = any_original or template.metadata.original_task
+ # Check 1: Jinja and all features valid?
+ try:
+ parse = env.parse(template.jinja)
+ except TemplateError as e:
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} failed to parse.") from e
+
+ variables = meta.find_undeclared_variables(parse)
+ for variable in variables:
+ if has_features and variable not in features and variable != "answer_choices":
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} has unrecognized variable {variable}.")
+
+ # Check 2: Prompt/output separator present?
+ if "|||" not in template.jinja:
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} has no prompt/output separator.")
+
+ # Check 3: Unique names and templates?
+ if template.get_name() in template_name_set:
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} has duplicate name.")
+
+ if template.jinja in template_jinja_set:
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} has duplicate definition.")
+
+ template_name_set.add(template.get_name())
+ template_jinja_set.add(template.jinja)
+
+ # Check 4: Is the YAML dictionary properly formatted?
+ try:
+ if dataset_templates.templates[template.get_id()] != template:
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} has wrong YAML key.")
+ except KeyError as e:
+ raise ValueError(f"Template for dataset {dataset_name}/{subset_name} "
+ f"with uuid {template.get_id()} has wrong YAML key.") from e
+
+ # Check 5: Is the UUID valid?
+ UUID(template.get_id())
+
+ # Turned off for now until we fix.
+ #assert any_original, "There must be at least one original task template for each dataset"