Modalities: Text
Formats: parquet
Languages: English
Tags: code
Libraries: Datasets, pandas
File size: 8,890 bytes
Revision: f9ba8e0

#!/usr/bin/env python

import json
import argparse
import random
import os
import pandas as pd
import datasets

# Annotation file structure
# {
#     "username": <str>,
#     "fail_reason": {
#         "text": <str>,
#         "vote": <int>
#     },
#     "how_to_fix": {
#         "text": <str>,
#         "vote": <int>
#     },
#     "snippets": [
#         {
#             "text": <str>,
#             "user_comment": <str>,
#             "file": <str>,
#             "color": <hex>,
#             "vote": <int>,
#             "start_index": <int>,
#             "end_index": <int>
#         },
#     ],
#     "id": <str>
# }
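
# For illustration, a single annotation file following the schema above might look
# like this (all values are hypothetical):
# {
#     "username": "packager42",
#     "fail_reason": {"text": "Compiler is missing from the buildroot", "vote": 2},
#     "how_to_fix": {"text": "Add 'BuildRequires: gcc' to the spec file", "vote": 1},
#     "snippets": [
#         {
#             "text": "error: command 'gcc' failed: No such file or directory",
#             "user_comment": "gcc is not installed in the buildroot",
#             "file": "build.log",
#             "color": "#ff0000",
#             "vote": 0,
#             "start_index": 1024,
#             "end_index": 1081
#         }
#     ],
#     "id": "a1b2c3"
# }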

SNIPPET_USER_MSG = """Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.            
Your analysis must be as concise as possible, while keeping relevant information intact.
Snippet: 
{}"""

FULL_CONVERSATION_MSG = """Given following log snippets, their explanation, and nothing else, explain what failure, if any, occurred during build of this package.

  Snippets are in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.

  Snippets are delimited with '================'.

  Drawing on information from all snippets, provide a concise explanation of the issue and recommend a solution.

  Explanation of the issue, and recommended solution, should take a handful of sentences.

  Snippets:

  {}
"""

ANALYSIS_MSG = "Issue: {issue}\nResolution: {resolution}"
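
# For illustration (hypothetical values), the templates render roughly as:
#   SNIPPET_USER_MSG.format("error: gcc: command not found")
#     -> "Analyse the following RPM build log snippet. ...\nSnippet:\nerror: gcc: command not found"
#   ANALYSIS_MSG.format(issue="gcc missing", resolution="add 'BuildRequires: gcc'")
#     -> "Issue: gcc missing\nResolution: add 'BuildRequires: gcc'"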

def n_snippets(logs: dict):
    """Count number of snippets in a log."""
    cnt = 0
    for log in logs:
        if logs[log]:
            cnt += len(logs[log]["snippets"])
    return cnt


def none_snippet(logs: dict):
    """Presence of snippet with text set to `None`"""
    for log in logs:
        if logs[log]:
            for snippet in logs[log]["snippets"]:
                if not snippet["text"]:
                    return True
    return False


def total_snippet_annotation_len(logs: dict):
    """Total length of snippet annotations"""
    total = 0
    for log in logs:
        if logs[log]:
            for snippet in logs[log]["snippets"]:
                if snippet["user_comment"]:
                    total += len(snippet["user_comment"])
    return total


def fix_snippets(logs: dict):
    """Set snippet text to equal log content delimited by indices"""
    for log in logs:
        if logs[log]:
            for snippet in logs[log]["snippets"]:
                snippet_text = logs[log]["content"][snippet["start_index"]:snippet["end_index"]]
                if "text" not in snippet or not snippet["text"]:
                    snippet["text"] = snippet_text
    return logs


def cleanup(dataset: pd.DataFrame):
    """Clean the dataset.

    For the sake of simplicity we assume the following:

    1. Descriptions of issues and fixes shorter than 10 chars are not useful.
    2. Submissions without annotated snippets are not useful.
    3. Submissions whose total snippet-annotation length is shorter than 10 chars are not useful.
    4. Submissions with snippets set to `None` are not useful."""

    # Fill in snippets whose text is missing, using the stored indices
    dataset['logs'] = dataset['logs'].apply(fix_snippets)

    # Setting conditional columns
    dataset['how_to_fix_len'] = dataset.how_to_fix.apply(len)
    dataset['fail_reason_len'] = dataset.fail_reason.apply(len)
    dataset["tot_snippet_annot_len"] = dataset['logs'].apply(total_snippet_annotation_len)

    # Conditions
    almost_empty_fix = dataset['how_to_fix_len'] < 10
    almost_empty_reason = dataset['fail_reason_len'] < 10
    almost_empty_snippet_annotations = dataset["tot_snippet_annot_len"] < 10
    none_snippets = dataset['logs'].apply(none_snippet)

    sparse_annotation_criteria = (
        almost_empty_snippet_annotations |
        almost_empty_reason |
        almost_empty_fix |
        none_snippets)

    sparse_count = dataset[sparse_annotation_criteria].shape[0]
    print(
        f"Total sparse annotations: {sparse_count} "
        f"(relative to original dataset: {sparse_count / dataset.shape[0]})")

    # Applying filters
    final_dataset = dataset[~sparse_annotation_criteria]
    final_dataset = final_dataset.reset_index(drop=True)

    return final_dataset


def save_dataset(dataset: pd.DataFrame, path: str):
    """Turn dataframe into parquet"""
    dataset.to_parquet(path)


def load_data(data_path: str):
    """Load json files and return them as list of dicts"""
    data = []
    for root, _, files in os.walk(data_path):
        for file in files:
            if file.endswith(".json"):
                with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
                    parsed_data = json.load(f)
                    parsed_data["file_name"] = file
                    data.append(parsed_data)

    return data
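
# A minimal standalone usage sketch (assuming the default annotations directory exists):
#   raw = load_data("results/results")
#   df = pd.DataFrame.from_records(raw)
#   df = cleanup(df)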


def make_snippet_qa(snippet: dict, user_message_template: str):
    """Create a QnA pair from a snippet"""
    return {
        "question": user_message_template.format(snippet["text"]),
        "answer": snippet["user_comment"]
    }


def make_analysis_qa(snippets, issue, resolution, user_message_template,
                     analysis_template):
    """Create QnA pair from entire annotation."""
    formatted_snippets = "\n=============\n".join(
        [
            f'[{e["text"]}]: [{e["user_comment"]}]'
            for e in snippets
        ]
    )

    return {
        "question": user_message_template.format(formatted_snippets),
        "answer": analysis_template.format(issue=issue, resolution=resolution)
    }


def to_qa_pairs(dataframe: pd.DataFrame):
    """Turn dataframe into list of QnA pairs for training."""
    qa_pairs = []

    for _, data in dataframe.iterrows():
        sample_snippet_messages = []

        logfiles = data.logs
        # All annotated snippets will be used for the final analysis
        annotated_snippets = []
        for file in logfiles:
            if logfiles[file] and "snippets" in logfiles[file]:
                for snippet in logfiles[file]["snippets"]:
                    sample_snippet_messages.append(
                        make_snippet_qa(
                            snippet,
                            SNIPPET_USER_MSG)
                    )
                    annotated_snippets.append(snippet)
        qa_pairs.append(
            make_analysis_qa(
                annotated_snippets,
                data.fail_reason,
                data.how_to_fix,
                FULL_CONVERSATION_MSG,
                ANALYSIS_MSG))
        # Adding individual snippet messages
        qa_pairs.extend(sample_snippet_messages)
    return qa_pairs
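
# For a submission with two annotated snippets, to_qa_pairs() emits, in order
# (illustrative; names like snippet_1_text are placeholders): one analysis QnA built
# from all snippets, followed by one QnA per individual snippet:
#   [{"question": FULL_CONVERSATION_MSG.format(...), "answer": "Issue: ...\nResolution: ..."},
#    {"question": SNIPPET_USER_MSG.format(snippet_1_text), "answer": snippet_1_comment},
#    {"question": SNIPPET_USER_MSG.format(snippet_2_text), "answer": snippet_2_comment}]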


def split_dataset(dataset: pd.DataFrame, seed: int):
    """Split the dataset into training (~80%) and evaluation (~20%) subsets."""
    split = dataset.shape[0] // 5
    dataset = dataset.sample(frac=1.0, random_state=seed)
    train_dataset, test_dataset = dataset.iloc[split:], dataset.iloc[:split]

    return train_dataset, test_dataset


def main(hf_token, data_path="results/results", sanitize=True,
         seed=42, repo_id="fedora-copr/log_detective_qna"):
    """Process the annotations directory, push the QnA dataset to the Hub,
    and save the full dataset as a parquet file."""

    # For reproducibility
    random.seed(seed)

    data = load_data(data_path)
    full_dataset = pd.DataFrame.from_records(data)

    if sanitize:
        full_dataset = cleanup(full_dataset)

    # Split dataset
    train_dataset, test_dataset = split_dataset(full_dataset, seed)

    # Format as QnA pairs
    train_dataset, test_dataset = to_qa_pairs(train_dataset), to_qa_pairs(test_dataset)

    dataset = {
        "train": datasets.Dataset.from_list(train_dataset),
        "test": datasets.Dataset.from_list(test_dataset)}

    dataset = datasets.DatasetDict(dataset)

    dataset.push_to_hub(repo_id=repo_id, private=True, token=hf_token)

    print("Saving full dataset as parquet...")
    save_dataset(full_dataset, "full_dataset.parquet")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Dataset Processor")
    parser.add_argument(
        "data_path", type=str, nargs="?", default="results/results",
        help="Path to annotations.")
    parser.add_argument(
        "--sanitize", action=argparse.BooleanOptionalAction, default=True,
        help="Run basic data cleanup procedure")
    parser.add_argument(
        "--seed", type=int, default=42,
        help="Seed for the random generator used when generating the split.")
    parser.add_argument(
        "--repo_id", type=str, default="fedora-copr/log_detective_qna",
        help="ID of the Hugging Face repo")
    parser.add_argument("--token", type=str, required=True, help="Hugging Face API token")
    args = parser.parse_args()

    main(
        data_path=args.data_path, sanitize=args.sanitize,
        seed=args.seed, repo_id=args.repo_id, hf_token=args.token)
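
# Example invocation (script name and token are placeholders; the other values are the
# defaults defined above):
#   python make_qna_dataset.py results/results --seed 42 \
#       --repo_id fedora-copr/log_detective_qna --token hf_XXXX
#
# The pushed dataset can then be loaded back for training, e.g. (recent versions of the
# `datasets` library accept `token=`; older ones use `use_auth_token=`):
#   from datasets import load_dataset
#   ds = load_dataset("fedora-copr/log_detective_qna", token="hf_XXXX")
#   print(ds["train"][0]["question"])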