zlatinb committed
Commit 3713e94 · verified · 1 Parent(s): 5496a0b

The dataset and the cleaning script.

Files changed (2):
  1. cleanup.py +107 -0
  2. jfk_2025_cleaned.parquet +3 -0
cleanup.py ADDED
@@ -0,0 +1,107 @@
+ #!/usr/bin/python3
+
+ from nltk.tokenize import word_tokenize
+ from spellchecker import SpellChecker
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ import pandas as pd
+ import sys
+
+ # Line acceptance threshold [0,1]
+ THRESHOLD = 0.4
+
+ # Levenshtein Distance weights
+ LD1_WEIGHT = 0.5
+ LD2_WEIGHT = 0.2
+
+ # Minimum word length to be accepted at specified Levenshtein Distance
+ LD1_MIN_LENGTH = 5
+ LD2_MIN_LENGTH = 10
+
+ # Tokens not to be penalized
+ COMMON = set([',','.','|','\'','-','_','"','“','(',')','[',']'])
+ def accept_token(token) :
+     """
+     If a word has already passed the spell checker, accept it under
+     the conditions below.
+     """
+     if token.isdigit() :
+         return True
+     if token.isupper() :
+         return True
+     if token.istitle() :
+         return True
+     if len(token) == 1 and token in COMMON :
+         return True
+     return False
+
+ spell = SpellChecker()
+ def line_score(line) :
+     """
+     Assign each line a score based on how many words are spelled correctly
+     and how many have small mistakes. Some tokens are accepted automatically.
+     """
+     tokens = word_tokenize(line)
+     if len(tokens) == 0 :
+         return 0
+     known = spell.known(tokens)
+     spelled = 0
+     accepted = 0
+     ldist1, ldist2 = 0, 0
+     for token in tokens :
+         l_token = token.lower()
+         if l_token in known :
+             spelled += 1
+         elif len(l_token) > LD1_MIN_LENGTH and len(spell.known(spell.edit_distance_1(l_token))) > 0 :
+             ldist1 += 1
+         elif len(l_token) > LD2_MIN_LENGTH and len(spell.known(spell.edit_distance_2(l_token))) > 0 :
+             ldist2 += 1
+         elif accept_token(token) :
+             accepted += 1
+     return (spelled + accepted + (ldist1 * LD1_WEIGHT) + (ldist2 * LD2_WEIGHT)) / len(tokens)
+
+ def filter_content(content) :
+     """
+     Evaluate each line of the content, assign a line number to each line
+     and split the content into 'Accepted' and 'Rejected' concatenations.
+     """
+     content = content.split("\n")
+     line_no = 0
+     accepted, rejected = [], []
+     for line in content :
+         line_no += 1
+         score = line_score(line)
+         line = f"[{line_no}]{line}"
+         if score >= THRESHOLD :
+             accepted.append(line)
+         else :
+             rejected.append(line)
+     accepted = "\n".join(accepted)
+     rejected = "\n".join(rejected)
+     return accepted, rejected
+
+ source = pq.ParquetFile(sys.argv[1])
+ dest = None
+
+ for rgi in range(source.num_row_groups) :
+     rg = source.read_row_group(rgi).to_pylist()
+     d = {}
+     d['File'] = []
+     d['Accepted'] = []
+     d['Rejected'] = []
+     for row in rg :
+         file_name = row['File']
+         content = row['Content']
+         accepted, rejected = filter_content(content)
+         d['File'].append(file_name)
+         d['Accepted'].append(accepted)
+         d['Rejected'].append(rejected)
+     df = pd.DataFrame(d, dtype=str)
+     table = pa.Table.from_pandas(df)
+     if dest is None :
+         dest = pq.ParquetWriter(sys.argv[2], table.schema)
+     dest.write_table(table)
+
+ dest.close()
+
+
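The per-line scoring idea can be tried in isolation. Below is a minimal sketch (not part of the commit) that tokenizes two sample lines with NLTK and scores them purely by the fraction of dictionary-known tokens; the sample strings are invented for illustration, and this simplified score omits the edit-distance weighting and the accept_token exceptions that cleanup.py applies.

```python
# Simplified sketch of the line_score idea in cleanup.py:
# score = fraction of tokens the spell checker recognizes.
# May require: python -m nltk.downloader punkt
from nltk.tokenize import word_tokenize
from spellchecker import SpellChecker

spell = SpellChecker()

# Invented sample lines: one clean, one OCR-garbled.
for line in ["The committee reviewed the report on Friday.",
             "Tne c0mmittee revlewed th3 rep0rt 0n Frlday."]:
    tokens = word_tokenize(line)
    known = spell.known(tokens)        # tokens found in the dictionary
    score = len(known) / len(tokens)   # fraction of recognized tokens
    print(f"{score:.2f}  {line}")
```

With a THRESHOLD of 0.4, lines like the first would be kept and lines like the second rejected.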
jfk_2025_cleaned.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edd7b2bae46d4d84ea8bc69cd69aa56f70bc243041e328d80ec18ee68ee67927
+ size 81216939
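Once the cleaned parquet is available locally, it can be inspected the same way cleanup.py reads its input. A minimal sketch, assuming the committed file name and the File/Accepted/Rejected string columns written by the script; each Accepted/Rejected value is a newline-joined block of lines prefixed with their original line numbers.

```python
# Minimal sketch for inspecting jfk_2025_cleaned.parquet (assumes the file
# has been downloaded locally, e.g. via git lfs pull).
import pyarrow.parquet as pq

source = pq.ParquetFile("jfk_2025_cleaned.parquet")
for rgi in range(source.num_row_groups):
    for row in source.read_row_group(rgi).to_pylist():
        # 'Accepted' holds the lines that scored >= THRESHOLD, each prefixed
        # with its original line number, e.g. "[12]SOME OCR TEXT".
        preview = row["Accepted"].split("\n")[:3]
        print(row["File"], preview)
    break  # the first row group is enough for a quick look
```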