Chaeeun-Kim committed
Commit c53c815 · verified · 1 Parent(s): 7ca30a9

Update LEGAR_BENCH.py
Files changed (1): LEGAR_BENCH.py (+52 -132)
LEGAR_BENCH.py CHANGED
@@ -1,176 +1,96 @@
 import json
-import os
-import glob
-from pathlib import Path
 import datasets

-
 _DESCRIPTION = """\
 LEGAR_BENCH is the first large-scale Korean LCR benchmark, covering 411 diverse crime types in queries over 1.2M legal cases.
 """

-
 _HOMEPAGE = "https://huggingface.co/datasets/Chaeeun-Kim/LEGAR_BENCH"
 _LICENSE = "Apache 2.0"

-class LegarBenchConfig(datasets.BuilderConfig):
-    def __init__(self, **kwargs):
-        super(LegarBenchConfig, self).__init__(**kwargs)
-
+_URLS = {
+    "standard": "data/standard_train.jsonl",
+    "stricter": "data/stricter_train.jsonl",
+    "stricter_by_difficulty": "data/stricter_by_difficulty_train.jsonl",
+}

 class LegarBench(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+
     BUILDER_CONFIGS = [
-        LegarBenchConfig(
+        datasets.BuilderConfig(
             name="standard",
+            version=VERSION,
             description="Standard version of LEGAR BENCH",
         ),
-        LegarBenchConfig(
+        datasets.BuilderConfig(
             name="stricter",
+            version=VERSION,
             description="Stricter version of LEGAR BENCH",
         ),
-        LegarBenchConfig(
+        datasets.BuilderConfig(
             name="stricter_by_difficulty",
+            version=VERSION,
             description="Stricter version organized by difficulty",
         ),
     ]
-
+
     DEFAULT_CONFIG_NAME = "standard"

     def _info(self):
+        features = datasets.Features({
+            "id": datasets.Value("int64"),
+            "target_category": datasets.Value("string"),
+            "category": datasets.Value("string"),
+            "question": datasets.Value("string"),
+            "question_id": datasets.Value("string"),
+            "answer": datasets.Sequence(datasets.Value("string")),
+            "evidence_id": datasets.Sequence(datasets.Value("string")),
+            "difficulty": datasets.Value("string"),
+        })
+
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features=datasets.Features({
-                "id": datasets.Value("int64"),
-                "target_category": datasets.Value("string"),
-                "category": datasets.Value("string"),
-                "question": datasets.Value("string"),
-                "question_id": datasets.Value("string"),
-                "answer": datasets.Sequence(datasets.Value("string")),
-                "evidence_id": datasets.Sequence(datasets.Value("string")),
-                "difficulty": datasets.Value("string"),
-            }),
+            features=features,
             homepage=_HOMEPAGE,
             license=_LICENSE,
         )

     def _split_generators(self, dl_manager):
-        if self.config.name == "standard":
-            file_pattern = "Standard_version/*.json"
-        elif self.config.name == "stricter":
-            file_pattern = "Stricter_version/*.json"
-        elif self.config.name == "stricter_by_difficulty":
-            file_pattern = "Stricter_version_by_difficulty/**/*.json"
-        else:
-            file_pattern = "Standard_version/*.json"
-
-        # Download the files
-        try:
-            data_files = dl_manager.download_and_extract("")
-            if isinstance(data_files, str):
-                data_dir = data_files
-            else:
-                data_dir = data_files
-
-        except Exception as e:
-            if hasattr(dl_manager, 'manual_dir') and dl_manager.manual_dir:
-                data_dir = dl_manager.manual_dir
-            else:
-                raise e
+        url = _URLS[self.config.name]
+        data_file = dl_manager.download(url)

         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "data_dir": data_dir,
-                    "config_name": self.config.name,
+                    "filepath": data_file,
                 },
             ),
         ]

-    def _generate_examples(self, data_dir, config_name):
-        example_id = 0
-
-        if config_name == "standard":
-            search_path = os.path.join(data_dir, "Standard_version")
-        elif config_name == "stricter":
-            search_path = os.path.join(data_dir, "Stricter_version")
-        elif config_name == "stricter_by_difficulty":
-            search_path = os.path.join(data_dir, "Stricter_version_by_difficulty")
-        else:
-            search_path = os.path.join(data_dir, "Standard_version")
-
-        if not os.path.exists(search_path):
-            possible_paths = [
-                data_dir,
-                os.path.join(data_dir, "LEGAR_BENCH"),
-                os.path.join(data_dir, "LEGAR-BENCH")
-            ]
-
-            found = False
-            for possible_path in possible_paths:
-                test_path = os.path.join(possible_path, search_path.split('/')[-1])
-                if os.path.exists(test_path):
-                    search_path = test_path
-                    found = True
-                    break
-
-            if not found:
-                raise FileNotFoundError(f"Could not find data directory. Tried: {search_path}, data_dir: {data_dir}")
-
-        if config_name == "stricter_by_difficulty":
-            if os.path.exists(search_path):
-                for difficulty_folder in sorted(os.listdir(search_path)):
-                    folder_path = os.path.join(search_path, difficulty_folder)
-                    if os.path.isdir(folder_path):
-                        for filename in sorted(os.listdir(folder_path)):
-                            if filename.endswith('.json'):
-                                filepath = os.path.join(folder_path, filename)
-                                for item in self._load_json_file(filepath, difficulty=difficulty_folder):
-                                    yield example_id, item
-                                    example_id += 1
-        else:
-            if os.path.exists(search_path):
-                for filename in sorted(os.listdir(search_path)):
-                    if filename.endswith('.json'):
-                        filepath = os.path.join(search_path, filename)
-                        for item in self._load_json_file(filepath, difficulty=""):
-                            yield example_id, item
-                            example_id += 1
-
-    def _load_json_file(self, filepath, difficulty=""):
-        with open(filepath, 'r', encoding='utf-8') as f:
-            data = json.load(f)
-
-        if isinstance(data, list):
-            for item in data:
-                yield self._process_item(item, difficulty)
-        else:
-            yield self._process_item(data, difficulty)
+    def _generate_examples(self, filepath):
+        with open(filepath, encoding="utf-8") as f:
+            for key, line in enumerate(f):
+                data = json.loads(line)
+
+                yield key, {
+                    "id": int(data.get("id", 0)),
+                    "target_category": str(data.get("target_category", "")),
+                    "category": self._process_category(data.get("category", {})),
+                    "question": str(data.get("question", "")),
+                    "question_id": str(data.get("question_id", "")),
+                    "answer": self._process_list_field(data.get("answer", [])),
+                    "evidence_id": self._process_list_field(data.get("evidence_id", [])),
+                    "difficulty": str(data.get("difficulty", "")),
+                }

-    def _process_item(self, item, difficulty=""):
-        category = item.get("category", {})
+    def _process_category(self, category):
         if isinstance(category, dict):
-            category_str = json.dumps(category, ensure_ascii=False)
-        else:
-            category_str = str(category)
-
-        answer = item.get("answer", [])
-        if answer is None:
-            answer = []
-        answer_clean = [str(a) if a is not None else "" for a in answer]
-
-        evidence_id = item.get("evidence_id", [])
-        if evidence_id is None:
-            evidence_id = []
-        evidence_id_clean = [str(e) if e is not None else "" for e in evidence_id]
-
-        return {
-            "id": int(item.get("id", 0)),
-            "target_category": str(item.get("target_category", "")),
-            "category": category_str,
-            "question": str(item.get("question", "")),
-            "question_id": str(item.get("question_id", "")),
-            "answer": answer_clean,
-            "evidence_id": evidence_id_clean,
-            "difficulty": str(difficulty if difficulty else ""),
-        }
+            return json.dumps(category, ensure_ascii=False)
+        return str(category)
+
+    def _process_list_field(self, field):
+        if field is None:
+            return []
+        return [str(item) if item is not None else "" for item in field]
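
For reference, a minimal loading sketch against the updated script might look like the following. The repo id and config names are taken from the file above; whether trust_remote_code is needed (or script-based loading is supported at all) depends on the installed datasets version, so treat this as an illustration rather than a guaranteed recipe.

# Illustrative usage only (not part of the commit). Assumes the repo id and
# config names defined in LEGAR_BENCH.py above.
from datasets import load_dataset

ds = load_dataset(
    "Chaeeun-Kim/LEGAR_BENCH",
    "standard",              # or "stricter" / "stricter_by_difficulty"
    split="train",
    trust_remote_code=True,  # may be required by recent `datasets` releases for script-based datasets
)

example = ds[0]
print(example["question_id"], example["question"])
print(example["answer"])       # sequence of strings
print(example["evidence_id"])  # sequence of strings
print(example["difficulty"])   # may be "" when a record carries no difficulty field

Since the new _generate_examples parses each line with json.loads, every data/*.jsonl file referenced in _URLS is expected to hold one JSON object per line with the fields declared in _info.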