kardosdrur committed on
Commit 3312e73
1 Parent(s): a2f4bef

Create produce_dataset.py

Files changed (1)
  1. produce_dataset.py +57 -0
produce_dataset.py ADDED
@@ -0,0 +1,57 @@
+ import re
+ from pathlib import Path
+
+ import pandas as pd
+ from datasets import Dataset
+
+
+ def generate_chunks(text: str, chunk_size: int = 128) -> list[str]:
+     """Greedily pack whole sentences into chunks of roughly
+     chunk_size whitespace-separated tokens."""
+     sentences = re.split("[?.!]", text)
+     chunks = []
+     current_chunk_tokens = []
+     for sentence in sentences:
+         tokens = sentence.split()
+         if (len(current_chunk_tokens) + len(tokens)) <= chunk_size:
+             current_chunk_tokens.extend(tokens)
+         else:
+             chunks.append(" ".join(current_chunk_tokens))
+             current_chunk_tokens = [*tokens]
+     if current_chunk_tokens:
+         # Keep the final, shorter chunk instead of silently dropping it.
+         chunks.append(" ".join(current_chunk_tokens))
+     return chunks
+
+
+ # Filenames are shaped like <year>_<author>_<work>[_...].txt
+ textfiles = Path("Corpus-v1.1/texts").glob("*.txt")
+ entries = []
+ for file in textfiles:
+     year, author, work, *_ = file.stem.split("_")
+     with file.open() as in_file:
+         text = in_file.read()
+     entries.append(dict(year=year, author=author, work=work, text=text))
+
+ data = pd.DataFrame.from_records(entries)
+ data["full_title"] = data["author"] + " - " + data["work"]
+ # Chunk every text, then explode so that each row holds a single chunk.
+ data["text"] = data["text"].map(generate_chunks)
+ data = data.explode("text")
+
+ # Sample a fixed number of works, then a fixed number of chunks per work.
+ seed = 42
+ n_works = 64
+ n_chunks_per_work = 32
+ sampled_titles = pd.Series(data["full_title"].unique()).sample(
+     n_works, random_state=seed
+ )
+ sampled_data = data[data["full_title"].isin(sampled_titles)]
+ sampled_data = sampled_data.groupby(["full_title"]).sample(
+     n_chunks_per_work, random_state=seed
+ )
+
+ ds = Dataset.from_pandas(
+     sampled_data[["year", "author", "work", "text", "full_title"]].reset_index()
+ ).shuffle(seed=seed)
+ ds.push_to_hub("kardosdrur/historical-danish-clustering")
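
For reference, a minimal sketch of how the pushed dataset can be pulled back down, assuming the push above succeeded. load_dataset is the standard datasets loader, and pushing a single Dataset lands in the default "train" split; the printed column list assumes the reset_index() call above adds an "index" column.

from datasets import load_dataset

# Fetch the dataset produced by the script above from the Hub.
ds = load_dataset("kardosdrur/historical-danish-clustering", split="train")

# Each row holds one ~128-token chunk plus its work-level metadata.
print(ds.column_names)  # ['index', 'year', 'author', 'work', 'text', 'full_title']
print(ds[0]["full_title"])  # e.g. "<author> - <work>"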