File size: 2,786 Bytes
553c3d9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
from datasets import load_dataset
import pandas as pd
import re
import json
# Load the SQuAD v1.1 dataset from the Hugging Face hub.
# Provides 'train' and 'validation' splits; downloads on first run (network required).
dataset = load_dataset('squad')
# Define a function to find the previous sentence-ending punctuation before a given index
def find_previous_punctuation(text, start_index):
    """Return the index just past the last sentence-ending punctuation
    mark ('.', '?', '!' or ';') that occurs before *start_index*.

    Returns 0 (start of string) when no such mark exists.
    """
    last_end = 0
    # Scan everything before start_index; each hit overwrites the previous,
    # so the final value is the position just after the last boundary.
    for hit in re.finditer(r"[.?!;]", text[:start_index]):
        last_end = hit.end()
    return last_end
# Define a function to find the next sentence-ending punctuation after a given index
def find_next_punctuation(text, start_index):
    """Return the index just past the first sentence-ending punctuation
    mark ('.', '?', '!' or ';') at or after *start_index*.

    Returns len(text) (end of string) when none is found.
    """
    tail = text[start_index:]
    hit = re.search(r"[.?!;]", tail)
    if hit is None:
        return len(text)
    # Shift the match position back into full-string coordinates.
    return start_index + hit.end()
# Extract the specific row's context from the dataset
def get_row_context(row):
# Get the starting index of the answer
answer_idx = row.get('answers').get('answer_start')[0] # Assuming the first answer
# Find the previous sentence-ending punctuation before the answer
start_idx = find_previous_punctuation(row['context'], answer_idx)
# Find the next sentence-ending punctuation after the answer
end_idx = find_next_punctuation(row['context'], answer_idx + len(row.get('answers').get('text')[0]))
# Return the substring of context containing the answer and its surrounding context
return row['context'][start_idx:end_idx].strip()
# Function to join and process the dataset into a Pandas DataFrame
def join_squad_dataset(dataset):
# Convert the dataset into a Pandas DataFrame
df = pd.DataFrame(dataset) # Use 'train' split
# Apply `get_row_context` to generate the context sentence
df['context'] = df.apply(get_row_context, axis=1)
df['answer'] = df['answers'].apply(lambda x: x['text'][0])
# Return the processed DataFrame with required columns
return df[['context', 'question', 'answer']]
# Process both splits and serialize them as JSON record lists.
train_df = join_squad_dataset(dataset['train'])
test_df = join_squad_dataset(dataset['validation'])
for out_name, frame in (("train.json", train_df), ("validation.json", test_df)):
    with open(out_name, "w", encoding="utf-8") as fh:
        json.dump(frame.to_dict(orient='records'), fh, indent=4, ensure_ascii=False)
|