Modalities: Text
Formats: parquet
Languages: English
Tags: code
Libraries: Datasets, pandas
jpodivin committed (verified) · commit f9ba8e0 · 1 parent: 8fa9fe0

processor (#1)


- Script used for processing raw JSON annotations (10774be0623a30d27a1eb968d8b6e588b8d53c34)

Files changed (1)
  1. processor.py +277 -0
processor.py ADDED
@@ -0,0 +1,277 @@
#!/usr/bin/env python

import json
import argparse
import random
import os
import pandas as pd
import datasets

# Annotation file structure
# {
#     "username": <str>,
#     "fail_reason": {
#         "text": <str>,
#         "vote": <int>
#     },
#     "how_to_fix": {
#         "text": <str>,
#         "vote": <int>
#     },
#     "snippets": [
#         {
#             "text": <str>,
#             "comment": <str>,
#             "file": <str>,
#             "color": <hex>,
#             "vote": 0,
#             "start_index": <int>,
#             "end_index": <int>
#         },
#     ],
#     "id": <str>
# }
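# Illustration only: a hypothetical annotation matching the structure above.
# Every value below is made up and is not taken from the real dataset.
# {
#     "username": "example-user",
#     "fail_reason": {"text": "Build failed due to a missing build dependency.", "vote": 0},
#     "how_to_fix": {"text": "Add the missing package to BuildRequires.", "vote": 0},
#     "snippets": [
#         {
#             "text": "error: Failed build dependencies:",
#             "comment": "The build stops while resolving build dependencies.",
#             "file": "build.log",
#             "color": "#ff0000",
#             "vote": 0,
#             "start_index": 120,
#             "end_index": 154
#         }
#     ],
#     "id": "0000"
# }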

SNIPPET_USER_MSG = """Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
Your analysis must be as concise as possible, while keeping relevant information intact.
Snippet:
{}"""

FULL_CONVERSATION_MSG = """Given following log snippets, their explanation, and nothing else, explain what failure, if any, occurred during build of this package.

Snippets are in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.

Snippets are delimited with '================'.

Drawing on information from all snippets, provide a concise explanation of the issue and recommend a solution.

Explanation of the issue, and recommended solution, should take a handful of sentences.

Snippets:

{}
"""

ANALYSIS_MSG = "Issue: {issue}\nResolution: {resolution}"

def n_snippets(logs: dict):
    """Count number of snippets in a log."""
    cnt = 0
    for log in logs:
        if logs[log]:
            cnt += len(logs[log]["snippets"])
    return cnt


def none_snippet(logs: dict):
    """Presence of snippet with text set to `None`"""
    for log in logs:
        if logs[log]:
            for snippet in logs[log]["snippets"]:
                if not snippet["text"]:
                    return True
    return False


def total_snippet_annotation_len(logs: dict):
    """Total length of snippet annotations"""
    total = 0
    for log in logs:
        if logs[log]:
            for snippet in logs[log]["snippets"]:
                if snippet["user_comment"]:
                    total += len(snippet["user_comment"])
    return total


def fix_snippets(logs: dict):
    """Set snippet text to equal log content delimited by indices"""
    for log in logs:
        if logs[log]:
            for snippet in logs[log]["snippets"]:
                snippet_text = logs[log]["content"][snippet["start_index"]:snippet["end_index"]]
                if "text" not in snippet or not snippet["text"]:
                    snippet["text"] = snippet_text
    return logs


def cleanup(dataset: pd.DataFrame):
    """Clean the dataset.

    For the sake of simplicity we assume the following:

    1. Descriptions of issues and fixes shorter than `10` chars are not useful.
    2. Submissions without annotated snippets are not useful.
    3. Submissions with total length of snippet annotations shorter than `10` chars are not useful.
    4. Submissions with snippets set to `None` are not useful."""

    # Fixing snippets with messed up text
    filled_snippets = dataset.logs.apply(fix_snippets)
    dataset['filled_snippets'] = filled_snippets
    dataset.logs = dataset.filled_snippets

    # Setting conditional columns
    dataset['how_to_fix_len'] = dataset.how_to_fix.apply(len)
    dataset['fail_reason_len'] = dataset.fail_reason.apply(len)
    dataset["tot_snippet_annot_len"] = dataset['logs'].apply(total_snippet_annotation_len)

    # Conditions
    almost_empty_fix = dataset['how_to_fix_len'] < 10
    almost_empty_reason = dataset['fail_reason_len'] < 10
    almost_empty_snippet_annotations = dataset["tot_snippet_annot_len"] < 10
    none_snippets = dataset['logs'].apply(none_snippet)

    sparse_annotation_criteria = (
        almost_empty_snippet_annotations |
        almost_empty_reason |
        almost_empty_fix |
        none_snippets)

    sparse_count = dataset[sparse_annotation_criteria].shape[0]
    print(
        f"Total sparse annotations: {sparse_count} "
        f"Relative to original dataset: {sparse_count / dataset.shape[0]}")

    # Applying filters and resetting the index of the filtered frame
    final_dataset = dataset[~sparse_annotation_criteria]
    final_dataset = final_dataset.reset_index(drop=True)

    return final_dataset


def save_dataset(dataset: pd.DataFrame, path: str):
    """Turn dataframe into parquet"""
    dataset.to_parquet(path)


def load_data(data_path: str):
    """Load json files and return them as list of dicts"""
    data = []
    for root, _, files in os.walk(data_path):
        for file in files:
            if file.endswith(".json"):
                with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
                    parsed_data = json.load(f)
                    parsed_data["file_name"] = file
                    data.append(parsed_data)

    return data


def make_snippet_qa(snippet: dict, user_message_template: str):
    """Create a QnA pair from a snippet"""
    return {
        "question": user_message_template.format(snippet["text"]),
        "answer": snippet["user_comment"]
    }


def make_analysis_qa(snippets, issue, resolution, user_message_template,
                     analysis_template):
    """Create QnA pair from entire annotation."""
    formatted_snippets = "\n=============\n".join(
        [
            f'[{e["text"]}]: [{e["user_comment"]}]'
            for e in snippets
        ]
    )

    return {
        "question": user_message_template.format(formatted_snippets),
        "answer": analysis_template.format(issue=issue, resolution=resolution)
    }


def to_qa_pairs(dataframe: pd.DataFrame):
    """Turn dataframe into list of QnA pairs for training."""
    qa_pairs = []

    # iterrows() yields (index, row) pairs; only the row data is needed
    for _, data in dataframe.iterrows():
        sample_snippet_messages = []

        logfiles = data.logs
        # All annotated snippets will be used for final analysis
        annotated_snippets = []
        for file in logfiles:
            if logfiles[file] and "snippets" in logfiles[file]:
                for snippet in logfiles[file]["snippets"]:
                    sample_snippet_messages.append(
                        make_snippet_qa(
                            snippet,
                            SNIPPET_USER_MSG)
                    )
                    annotated_snippets.append(snippet)
        qa_pairs.append(
            make_analysis_qa(
                annotated_snippets,
                data.fail_reason,
                data.how_to_fix,
                FULL_CONVERSATION_MSG,
                ANALYSIS_MSG))
        # Adding individual snippet messages
        qa_pairs.extend(sample_snippet_messages)
    return qa_pairs


def split_dataset(dataset: pd.DataFrame, seed: int):
    """Split dataset into training and evaluation subsets."""
    split = dataset.shape[0] // 5
    dataset = dataset.sample(frac=1.0, random_state=seed)
    train_dataset, test_dataset = dataset[split:], dataset[:split]

    return train_dataset, test_dataset


def main(hf_token, data_path="results/results", sanitize=True,
         seed=42, repo_id="fedora-copr/log_detective_qna",
         ):
    """Process the annotation directory, push QnA splits to the Hub, and save the full dataset as parquet."""

    # For reproducibility
    random.seed(seed)

    data = load_data(data_path)
    full_dataset = pd.DataFrame.from_records(data)

    if sanitize:
        full_dataset = cleanup(full_dataset)

    # Split dataset
    train_dataset, test_dataset = split_dataset(full_dataset, seed)

    # Format as QnA pairs
    train_dataset, test_dataset = to_qa_pairs(train_dataset), to_qa_pairs(test_dataset)

    dataset = {
        "train": datasets.Dataset.from_list(train_dataset),
        "test": datasets.Dataset.from_list(test_dataset)}

    dataset = datasets.DatasetDict(dataset)

    dataset.push_to_hub(repo_id=repo_id, private=True, token=hf_token)

    print("Saving full dataset as parquet...")
    save_dataset(full_dataset, "full_dataset.parquet")


if __name__ == "__main__":
    parser = argparse.ArgumentParser("Dataset Processor")
    # nargs="?" lets the positional argument fall back to its default when omitted
    parser.add_argument(
        "data_path", type=str, nargs="?", default="results/results",
        help="Path to annotations.")
    # Explicit boolean flag; argparse's type=bool would treat any non-empty string as True
    parser.add_argument(
        "--sanitize", action=argparse.BooleanOptionalAction, default=True,
        help="Run basic data cleanup procedure")
    parser.add_argument(
        "--seed", type=int, default=42,
        help="Seed for random generator to be used when generating split.")
    parser.add_argument(
        "--repo_id", type=str, default="fedora-copr/log_detective_qna",
        help="ID of Hugging Face repo")
    parser.add_argument("--token", type=str, required=True, help="Token for Hugging Face")
    args = parser.parse_args()

    main(
        data_path=args.data_path, sanitize=args.sanitize,
        seed=args.seed, repo_id=args.repo_id, hf_token=args.token)
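
For reference, a minimal usage sketch follows. It is not part of the commit: the token value is a placeholder, and the sketch simply drives the script's own main() with the defaults it declares.

# Hypothetical usage sketch; the token below is a placeholder, not a real credential.
# Equivalent CLI call: python processor.py results/results --token <HF_TOKEN>
from processor import main

if __name__ == "__main__":
    main(
        hf_token="hf_xxx",                        # placeholder Hugging Face token
        data_path="results/results",              # directory of raw JSON annotations
        sanitize=True,                            # drop sparsely annotated submissions
        seed=42,                                  # controls the train/test shuffle
        repo_id="fedora-copr/log_detective_qna",  # target dataset repo on the Hub
    )
    # The pushed splits can later be reloaded with the datasets library, e.g.:
    # datasets.load_dataset("fedora-copr/log_detective_qna", token="hf_xxx")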