Languages: English
system (HF staff) committed
Commit 4979160 (0 parents)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/0.1.0/dummy_data.zip +3 -0
  4. wikisql.py +161 -0
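
This commit adds the `wikisql.py` loading script shown below; once it is in place, the dataset can be loaded directly. A minimal usage sketch, assuming datasets>=1.0.0 and that this repository is published under the Hub id "wikisql":

# Minimal sketch: load WikiSQL via the datasets library
# (assumes datasets>=1.0.0 and the Hub id "wikisql").
from datasets import load_dataset

ds = load_dataset("wikisql")  # downloads data.tar.bz2 and builds all three splits
print(ds)                     # DatasetDict with train / validation / test
example = ds["train"][0]
print(example["question"])               # natural-language question
print(example["sql"]["human_readable"])  # reconstructed SQL string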
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "A large crowd-sourced dataset for developing natural language interfaces for relational databases\n", "citation": "@article{zhongSeq2SQL2017,\n author = {Victor Zhong and\n Caiming Xiong and\n Richard Socher},\n title = {Seq2SQL: Generating Structured Queries from Natural Language using\n Reinforcement Learning},\n journal = {CoRR},\n volume = {abs/1709.00103},\n year = {2017}\n}\n", "homepage": "https://github.com/salesforce/WikiSQL", "license": "", "features": {"phase": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "table": {"header": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "page_title": {"dtype": "string", "id": null, "_type": "Value"}, "page_id": {"dtype": "string", "id": null, "_type": "Value"}, "types": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "section_title": {"dtype": "string", "id": null, "_type": "Value"}, "caption": {"dtype": "string", "id": null, "_type": "Value"}, "rows": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "name": {"dtype": "string", "id": null, "_type": "Value"}}, "sql": {"human_readable": {"dtype": "string", "id": null, "_type": "Value"}, "sel": {"dtype": "int32", "id": null, "_type": "Value"}, "agg": {"dtype": "int32", "id": null, "_type": "Value"}, "conds": {"feature": {"column_index": {"dtype": "int32", "id": null, "_type": "Value"}, "operator_index": {"dtype": "int32", "id": null, "_type": "Value"}, "condition": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}}, "supervised_keys": null, "builder_name": "wiki_sql", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 32234761, "num_examples": 15878, "dataset_name": "wiki_sql"}, "validation": {"name": "validation", "num_bytes": 15159314, "num_examples": 8421, "dataset_name": "wiki_sql"}, "train": {"name": "train", "num_bytes": 107345917, "num_examples": 56355, "dataset_name": "wiki_sql"}}, "download_checksums": {"https://github.com/salesforce/WikiSQL/raw/master/data.tar.bz2": {"num_bytes": 26164664, "checksum": "755c728ab188e364575705c8641f3fafd86fb089cb8b08e8c03f01832aae0881"}}, "download_size": 26164664, "dataset_size": 154739992, "size_in_bytes": 180904656}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d01b228943cc6f1a5e76b73ee8d493c69692377f946fbf30fb984610a1610644
+ size 2426
wikisql.py ADDED
@@ -0,0 +1,161 @@
+ """A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{zhongSeq2SQL2017,
+   author    = {Victor Zhong and
+                Caiming Xiong and
+                Richard Socher},
+   title     = {Seq2SQL: Generating Structured Queries from Natural Language using
+                Reinforcement Learning},
+   journal   = {CoRR},
+   volume    = {abs/1709.00103},
+   year      = {2017}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A large crowd-sourced dataset for developing natural language interfaces for relational databases
+ """
+
+ _DATA_URL = "https://github.com/salesforce/WikiSQL/raw/master/data.tar.bz2"
+
+ _AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
+ _COND_OPS = ["=", ">", "<", "OP"]
+
+
+ class WikiSQL(datasets.GeneratorBasedBuilder):
+     """WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
+
+     VERSION = datasets.Version("0.1.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "phase": datasets.Value("int32"),
+                     "question": datasets.Value("string"),
+                     "table": {
+                         "header": datasets.features.Sequence(datasets.Value("string")),
+                         "page_title": datasets.Value("string"),
+                         "page_id": datasets.Value("string"),
+                         "types": datasets.features.Sequence(datasets.Value("string")),
+                         "id": datasets.Value("string"),
+                         "section_title": datasets.Value("string"),
+                         "caption": datasets.Value("string"),
+                         "rows": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
+                         "name": datasets.Value("string"),
+                     },
+                     "sql": {
+                         "human_readable": datasets.Value("string"),
+                         "sel": datasets.Value("int32"),
+                         "agg": datasets.Value("int32"),
+                         "conds": datasets.features.Sequence(
+                             {
+                                 "column_index": datasets.Value("int32"),
+                                 "operator_index": datasets.Value("int32"),
+                                 "condition": datasets.Value("string"),
+                             }
+                         ),
+                     },
+                 }
+             ),
+             # If there's a common (input, target) tuple among the features,
+             # specify it here. It will be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset, for documentation
+             homepage="https://github.com/salesforce/WikiSQL",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_dir = dl_manager.download_and_extract(_DATA_URL)
+         dl_dir = os.path.join(dl_dir, "data")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "main_filepath": os.path.join(dl_dir, "test.jsonl"),
+                     "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "main_filepath": os.path.join(dl_dir, "dev.jsonl"),
+                     "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "main_filepath": os.path.join(dl_dir, "train.jsonl"),
+                     "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
+                 },
+             ),
+         ]
+
+     def _convert_to_human_readable(self, sel, agg, columns, conditions):
+         """Make SQL query string. Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10"""
+
+         rep = "SELECT {agg} {sel} FROM table".format(
+             agg=_AGG_OPS[agg], sel=columns[sel] if columns is not None else "col{}".format(sel)
+         )
+
+         if conditions:
+             rep += " WHERE " + " AND ".join(["{} {} {}".format(columns[i], _COND_OPS[o], v) for i, o, v in conditions])
+         return " ".join(rep.split())
+
+     def _generate_examples(self, main_filepath, tables_filepath):
+         """Yields examples."""
+
+         # Build a dictionary mapping table ids to tables
+         with open(tables_filepath, encoding="utf-8") as f:
+             tables = [json.loads(line) for line in f]
+             id_to_tables = {x["id"]: x for x in tables}
+
+         with open(main_filepath, encoding="utf-8") as f:
+             for idx, line in enumerate(f):
+                 row = json.loads(line)
+                 row["table"] = id_to_tables[row["table_id"]]
+                 del row["table_id"]
+
+                 # Handle missing data: default optional table fields to empty strings
+                 row["table"]["page_title"] = row["table"].get("page_title", "")
+                 row["table"]["section_title"] = row["table"].get("section_title", "")
+                 row["table"]["caption"] = row["table"].get("caption", "")
+                 row["table"]["name"] = row["table"].get("name", "")
+                 row["table"]["page_id"] = str(row["table"].get("page_id", ""))
+
+                 # Fix row types: cast every cell to a string
+                 row["table"]["rows"] = [[str(e) for e in r] for r in row["table"]["rows"]]
+
+                 # Get the human-readable version of the query
+                 row["sql"]["human_readable"] = self._convert_to_human_readable(
+                     row["sql"]["sel"],
+                     row["sql"]["agg"],
+                     row["table"]["header"],
+                     row["sql"]["conds"],
+                 )
+
+                 # Restructure sql->conds:
+                 # WikiSQL provides each condition as a tuple [column_index, operator_index, condition];
+                 # since 'condition' can have two types (float or str), we convert it to a dict of typed fields.
+                 for i in range(len(row["sql"]["conds"])):
+                     row["sql"]["conds"][i] = {
+                         "column_index": row["sql"]["conds"][i][0],
+                         "operator_index": row["sql"]["conds"][i][1],
+                         "condition": str(row["sql"]["conds"][i][2]),
+                     }
+                 yield idx, row
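
To make `_convert_to_human_readable` concrete, here is a self-contained sketch that applies the same logic to an invented record; the column names and condition values are hypothetical:

# Standalone sketch of the human-readable conversion above,
# applied to a made-up example (columns and values are hypothetical).
_AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
_COND_OPS = ["=", ">", "<", "OP"]

def convert_to_human_readable(sel, agg, columns, conditions):
    rep = "SELECT {agg} {sel} FROM table".format(
        agg=_AGG_OPS[agg], sel=columns[sel] if columns is not None else "col{}".format(sel)
    )
    if conditions:
        rep += " WHERE " + " AND ".join(
            "{} {} {}".format(columns[i], _COND_OPS[o], v) for i, o, v in conditions
        )
    return " ".join(rep.split())

columns = ["Player", "Team", "Points"]
# conds are [column_index, operator_index, condition] triples, as in the raw data
print(convert_to_human_readable(sel=0, agg=0, columns=columns, conditions=[[2, 1, 30]]))
# -> SELECT Player FROM table WHERE Points > 30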