log_detective_qna/processor.py
#!/usr/bin/env python
import json
import argparse
import random
import os
import pandas as pd
import datasets
# Annotation file structure
# {
#     "username": <str>,
#     "fail_reason": {
#         "text": <str>,
#         "vote": <int>
#     },
#     "how_to_fix": {
#         "text": <str>,
#         "vote": <int>
#     },
#     "snippets": [
#         {
#             "text": <str>,
#             "user_comment": <str>,
#             "file": <str>,
#             "color": <hex>,
#             "vote": <int>,
#             "start_index": <int>,
#             "end_index": <int>
#         },
#     ],
#     "id": <str>
# }
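#
# The code below additionally reads a "logs" mapping on each record (see
# `fix_snippets` and `to_qa_pairs`), and `cleanup` / `make_analysis_qa`
# treat `fail_reason` and `how_to_fix` as plain strings. A hypothetical
# record, with invented values and field names inferred purely from the
# accesses in this script, might look like:
#
# {
#     "username": "example-user",
#     "fail_reason": "Build failed because a header file was missing.",
#     "how_to_fix": "Add the missing -devel package to BuildRequires.",
#     "logs": {
#         "build.log": {
#             "content": "...fatal error: foo.h: No such file or directory...",
#             "snippets": [
#                 {
#                     "text": "fatal error: foo.h: No such file or directory",
#                     "user_comment": "The compiler cannot find the foo.h header.",
#                     "start_index": 1024,
#                     "end_index": 1071
#                 }
#             ]
#         }
#     },
#     "id": "12345"
# }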
SNIPPET_USER_MSG = """Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
Your analysis must be as concise as possible, while keeping relevant information intact.
Snippet:
{}"""
FULL_CONVERSATION_MSG = """Given following log snippets, their explanation, and nothing else, explain what failure, if any, occurred during build of this package.
Snippets are in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
Snippets are delimited with '================'.
Drawing on information from all snippets, provide a concise explanation of the issue and recommend a solution.
Explanation of the issue, and recommended solution, should take a handful of sentences.
Snippets:
{}
"""
ANALYSIS_MSG = "Issue: {issue}\nResolution: {resolution}"
def n_snippets(logs: dict):
    """Count the number of snippets across all logs."""
    cnt = 0
    for log in logs.values():
        if log:
            cnt += len(log["snippets"])
    return cnt
def none_snippet(logs: dict):
    """Return True if any snippet has missing or empty text."""
    for log in logs.values():
        if log:
            for snippet in log["snippets"]:
                if not snippet["text"]:
                    return True
    return False
def total_snippet_annotation_len(logs: dict):
    """Total length of snippet annotations."""
    total = 0
    for log in logs.values():
        if log:
            for snippet in log["snippets"]:
                if snippet["user_comment"]:
                    total += len(snippet["user_comment"])
    return total
def fix_snippets(logs: dict):
    """Set snippet text to the log content delimited by its indices."""
    for log in logs.values():
        if log:
            for snippet in log["snippets"]:
                if "text" not in snippet or not snippet["text"]:
                    snippet["text"] = log["content"][
                        snippet["start_index"]:snippet["end_index"]]
    return logs
def cleanup(dataset: pd.DataFrame):
"""Cleaning the dataset
For sake of simplicity we are going to assume following.
1. Descriptions of issues and fixes shorter than `10` chars are not useful.
2. Submissions without annotated snippets are not useful.
3. Submissions with total length of snippet annotations shorter than `10` chars are not useful.
4. Submissions with snippets set to `None` are not useful."""
    # Fill in snippets whose text is missing or empty
    dataset['logs'] = dataset['logs'].apply(fix_snippets)
# Setting conditional columns
dataset['how_to_fix_len'] = dataset.how_to_fix.apply(len)
dataset['fail_reason_len'] = dataset.fail_reason.apply(len)
dataset["tot_snippet_annot_len"] = dataset['logs'].apply(total_snippet_annotation_len)
# Conditions
almost_empty_fix = dataset['how_to_fix_len'] < 10
almost_empty_reason = dataset['fail_reason_len'] < 10
almost_empty_snippet_annotations = dataset["tot_snippet_annot_len"] < 10
none_snippets = dataset['logs'].apply(none_snippet)
sparse_annotation_criteria = (
almost_empty_snippet_annotations |
almost_empty_reason |
almost_empty_fix |
none_snippets)
sparse_count = dataset[sparse_annotation_criteria].shape[0]
    print(
        f"Total sparse annotations: {sparse_count}, "
        f"relative to original dataset: {sparse_count / dataset.shape[0]}")
# Applying filters
final_dataset = dataset[~sparse_annotation_criteria]
    final_dataset = final_dataset.reset_index(drop=True)
return final_dataset
def save_dataset(dataset: pd.DataFrame, path: str):
"""Turn dataframe into parquet"""
dataset.to_parquet(path)
def load_data(data_path: str):
"""Load json files and return them as list of dicts"""
data = []
for root, _, files in os.walk(data_path):
for file in files:
if file.endswith(".json"):
with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
parsed_data = json.load(f)
parsed_data["file_name"] = file
data.append(parsed_data)
return data
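# load_data walks the whole tree, so any nesting works; a layout like the
# following (paths illustrative only) would yield one record per file,
# with the file name kept under the "file_name" key:
#
#   results/results/
#       1234.json
#       some_subdir/5678.json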
def make_snippet_qa(snippet: dict, user_message_template: str):
"""Create a QnA pair from a snippet"""
return {
"question": user_message_template.format(snippet["text"]),
"answer": snippet["user_comment"]
}
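# For a hypothetical snippet (values invented), make_snippet_qa produces a
# pair whose question is SNIPPET_USER_MSG with the snippet text substituted
# in, and whose answer is the annotator's comment:
#
#   make_snippet_qa(
#       {"text": "fatal error: foo.h: No such file or directory",
#        "user_comment": "The compiler cannot find the foo.h header."},
#       SNIPPET_USER_MSG)
#   # -> {"question": "Analyse following RPM build log snippet. ...\nSnippet:\n"
#   #                 "fatal error: foo.h: No such file or directory",
#   #     "answer": "The compiler cannot find the foo.h header."}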
def make_analysis_qa(snippets, issue, resolution, user_message_template,
analysis_template):
"""Create QnA pair from entire annotation."""
    formatted_snippets = "\n================\n".join(
        [
            f'[{e["text"]}] : [{e["user_comment"]}]'
            for e in snippets
        ]
    )
return {
"question": user_message_template.format(formatted_snippets),
"answer": analysis_template.format(issue=issue, resolution=resolution)
}
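# With the same hypothetical snippet as above, the question would contain one
#   [fatal error: foo.h: No such file or directory] : [The compiler cannot find the foo.h header.]
# block per snippet, joined by the '================' delimiter promised in
# FULL_CONVERSATION_MSG, and the answer would render via ANALYSIS_MSG as:
#   Issue: <fail_reason>
#   Resolution: <how_to_fix>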
def to_qa_pairs(dataframe: pd.DataFrame):
"""Turn dataframe into list of QnA pairs for training."""
qa_pairs = []
    for _, data in dataframe.iterrows():
        sample_snippet_messages = []
        logfiles = data.logs
        # All annotated snippets will be used for the final analysis
        annotated_snippets = []
        for logfile in logfiles.values():
            if logfile and "snippets" in logfile:
                for snippet in logfile["snippets"]:
sample_snippet_messages.append(
make_snippet_qa(
snippet,
SNIPPET_USER_MSG)
)
annotated_snippets.append(snippet)
qa_pairs.append(
make_analysis_qa(
annotated_snippets,
data.fail_reason,
data.how_to_fix,
FULL_CONVERSATION_MSG,
ANALYSIS_MSG))
# Adding individual snippet messages
qa_pairs.extend(sample_snippet_messages)
return qa_pairs
def split_dataset(dataset: pd.DataFrame, seed: int):
"""Splits dataset into training and evaluation subset"""
split = dataset.shape[0] // 5
dataset = dataset.sample(frac=1.0, random_state=seed)
train_dataset, test_dataset = dataset[split:], dataset[:split]
return train_dataset, test_dataset
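# For example, a 100-row dataset is shuffled and split into 80 training
# rows and 20 evaluation rows (100 // 5 == 20 rows go to the test set).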
def main(hf_token, data_path="results/results", sanitize=True,
seed=42, repo_id="fedora-copr/log_detective_qna",
):
"""Process entire directory and turn contents into parquet files."""
# For reproducibility
    random.seed(seed)
data = load_data(data_path)
full_dataset = pd.DataFrame.from_records(data)
if sanitize:
full_dataset = cleanup(full_dataset)
# Split dataset
train_dataset, test_dataset = split_dataset(full_dataset, seed)
# Format as QnA pairs
train_dataset, test_dataset = to_qa_pairs(train_dataset), to_qa_pairs(test_dataset)
dataset = {
"train": datasets.Dataset.from_list(train_dataset),
"test": datasets.Dataset.from_list(test_dataset)}
dataset = datasets.DatasetDict(dataset)
dataset.push_to_hub(repo_id=repo_id, private=True, token=hf_token)
print("Saving full dataset as parquet...")
save_dataset(full_dataset, "full_dataset.parquet")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Dataset Processor")
    parser.add_argument(
        "data_path", type=str, nargs="?", default="results/results",
        help="Path to annotations.")
    parser.add_argument(
        "--sanitize", action=argparse.BooleanOptionalAction, default=True,
        help="Run basic data cleanup procedure.")
parser.add_argument(
"--seed", type=int, default=42,
help="Seed for random generator to be used when generating split.")
parser.add_argument(
"--repo_id", type=str, default="fedora-copr/log_detective_qna",
help="ID of Hugging Face repo")
parser.add_argument("--token", type=str, required=True, help="Token for Hugging face")
args = parser.parse_args()
main(
data_path=args.data_path, sanitize=args.sanitize,
seed=args.seed, repo_id=args.repo_id, hf_token=args.token)
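# Example invocation (paths and token are placeholders):
#   python processor.py results/results --seed 42 \
#       --repo_id fedora-copr/log_detective_qna --token hf_xxx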