Unknees committed
Commit cd61af8 · verified · 1 Parent(s): 456187f

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. Linker_Eng.py +229 -0
  3. Linker_Fr.py +138 -0
  4. french_time_series_MERGED.csv +3 -0
.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
  Multi_modal_dataset/Multi_Modal_FR/Crisis_TS_FR.csv filter=lfs diff=lfs merge=lfs -text
+ french_time_series_MERGED.csv filter=lfs diff=lfs merge=lfs -text
Linker_Eng.py ADDED
@@ -0,0 +1,229 @@
+ import pandas as pd
+ import os
+ import json
+ from sklearn.preprocessing import StandardScaler
+ import argparse
+
+
+ # this function converts a tweet's created_at date from the NLP dataset
+ # into the 'YYYY-MM-DD' format used by the time series data
+ def to_date(my_date):
+     set_month = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06',
+                  'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
+     month = set_month[my_date[4:7]]
+     day = my_date[8:10]
+     year = my_date[26:30]
+     final_date = year + '-' + month + '-' + day
+     return final_date
+
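+ # A minimal sanity check (illustrative; the slice offsets assume Twitter's
+ # standard created_at layout):
+ #   to_date('Wed Oct 10 20:19:24 +0000 2018')  ->  '2018-10-10'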
+
+ # open the keyword knowledge base of the corresponding state for the
+ # corresponding category ('city', 'county' or 'state'); returns a lowercase set
+ # (dataset_path is accepted but the keyword files live under ./utils/Keywords/)
+ def open_state_file(dataset_path: str, STATE: str, CATEGORY: str) -> set:
+     tags = set()
+     with open('./utils/Keywords/' + STATE + '_keywords_' + CATEGORY + '_no_dupe.txt', 'r') as fp:
+         for line in fp:
+             # remove the linebreak, the last character of each line
+             x = line.rstrip('\n')
+             # add the current name to the set
+             tags.add(x.lower())
+     return tags
+
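+ # Illustrative call (the state name and returned values are assumptions; the
+ # keyword files hold one place name per line):
+ #   open_state_file('.', 'Texas', 'city')
+ #   reads ./utils/Keywords/Texas_keywords_city_no_dupe.txt -> {'houston', ...}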
+
+ # the function takes a time series dataframe (df) and returns another dataframe
+ # with all the features packed into one column (Window), except the date,
+ # which stays in its own column
+ def window_creation(df: object, window_size: int, timestamp_colum: str) -> object:
+     """
+     Create a TS window for each date. The sliding window moves with stride 1.
+
+     df: Dataframe of a whole TS record in one state. Each row represents the record on one date.
+     window_size: Size of the TS window.
+     timestamp_colum: Name of the timestamp column.
+     """
+     df = df.reset_index(drop=True)
+     good_columns = list(df.columns)
+     good_columns.remove(timestamp_colum)
+     window_df = df[good_columns]
+     # standardize every feature before windowing
+     scaler = StandardScaler()
+     window_df = pd.DataFrame(scaler.fit_transform(window_df), columns=window_df.columns)
+     date_df = df[timestamp_colum]
+     window_data = []
+     date_data = []
+     for i in range(len(df) - window_size):
+         my_window = window_df.loc[i:i + window_size - 1]
+         window_data.append(my_window.to_numpy())
+         # each window is dated by its last day
+         date_data.append(date_df.loc[i + window_size - 1])
+     return_value = pd.DataFrame({'Date': date_data, 'Window': window_data})
+     return return_value
+
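+ # Shape check (illustrative): with window_size=5 and 100 daily rows, the
+ # result has 95 rows; each 'Window' entry is a (5, n_features) array and
+ # 'Date' holds the last day covered by that window.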
+
+ # the function that links the NLP data and the time series data for the English dataset
+ # crisis_to_link is one row from the crisis knowledge
+ # corpus_path is the path of the NLP data
+ # ts_dataset_path is the path of the time series data
+ # window_size is the size of the window for the time series data
+ # fillNA is the value used to fill missing values
+ def linker(
+         crisis_to_link: object,
+         corpus_path: str,        # path of the corpus directory
+         ts_dataset_path: str,    # path of the time series directory
+         window_size: int,
+         date_column: str,
+         fillNA: int) -> object:
+
+     type_of_crisis = crisis_to_link['Crisis']
+
+     # list of states associated with the crisis
+     states = crisis_to_link['Places'].replace('[', '').replace(']', '').split(',')
+
+     # features to keep
+     features = ['date', 'mean_speed_wind', 'precipitation', 'snow_fall', 'snow_depth', 'temp_max', 'temp_min', 'temp_mean',
+                 'wind_dir_angle_2min', 'wind_dir_angle_5sec', 'wind_max_speed_2min', 'wind_max_speed_5sec']
+
+     df_return = pd.DataFrame({'Date': [], 'Text': [], 'Crisis_Type': [], 'Window': [], 'label_humanitarian': [], 'label_useful': [], 'label_urgent': [], 'label_sudden': []})
+
+     for state in states:
+         # open the time series file corresponding to the state
+         ts_df = pd.read_csv(ts_dataset_path + '/' + state + '_Time_Series_Data.csv')
+         ts_df = ts_df[features].sort_values(by='date')  # frequency of the TS is one record per day
+         ts_df = ts_df.fillna(fillNA)
+         # window creation
+         window_df = window_creation(ts_df, window_size, date_column)
+         # since New Zealand has no keyword files, we skip this part
+         if state != 'New_Zealand':
+             # keywords used to link a location mention to the state
+             tags_city = open_state_file(ts_dataset_path, state, 'city')
+             tags_county = open_state_file(ts_dataset_path, state, 'county')
+             tags_state = open_state_file(ts_dataset_path, state, 'state')
+         date = []
+         text = []
+         label_humanitarian = []
+         label_useful = []
+         label_urgent = []
+         label_sudden = []
+         window_data = []
+         crisis_label = []
+
+         # process the NLP data and link it to the time series data
+         for root, dirs, files in os.walk(corpus_path + '/' + crisis_to_link['Path_name']):
+             for fil in files:
+                 if fil.endswith('.jsonl'):
+                     with open(os.path.join(root, fil), 'r') as json_file:
+                         json_list = list(json_file)  # one JSON object per line, each line a tweet and its features
+                         for json_str in json_list:  # for each loaded tweet
+                             result = json.loads(json_str)
+                             place = result['location_mentions']
+
+                             # If the location mention is empty, the tweet does not refer to a
+                             # particular place. Since crisis_to_link tells us which crisis the
+                             # tweet is about, we assume it refers to the current state, the
+                             # location of the crisis. New Zealand always takes this branch
+                             # because it has no keyword files.
+                             if place == [] or state == 'New_Zealand':
+                                 # put the NLP date in the same format as the time series date
+                                 date_NLP = to_date(result['created_at'])
+                                 # check that a time series window exists for the tweet's date
+                                 if list(window_df['Window'][window_df['Date'] == date_NLP]) != []:
+                                     date.append(date_NLP)
+                                     text.append(result['text'])
+                                     linked_data = window_df[window_df['Date'] == date[-1]]
+                                     my_window = list(linked_data['Window'])[0]
+                                     window_data.append(my_window)
+                                     # the labels are derived from the tweet's humAID class
+                                     label_humanitarian.append(result['humAID_class'])
+                                     crisis_label.append(type_of_crisis)
+                                     if result['humAID_class'] == 'not_humanitarian':
+                                         label_useful.append('not_humanitarian')
+                                         label_urgent.append('not_humanitarian')
+                                     else:
+                                         label_useful.append('useful')
+                                         if result['humAID_class'] == 'rescue_volunteering_or_donation_effort':
+                                             label_urgent.append('not_urgent')
+                                         else:
+                                             label_urgent.append('urgent')
+
+                                     if result['humAID_class'] == 'not_humanitarian':
+                                         label_sudden.append('Not_Crisis_period')
+                                     else:
+                                         if type_of_crisis == 'Earthquake' or type_of_crisis == 'WildFire':
+                                             label_sudden.append('Sudden_Crisis')
+                                         else:
+                                             label_sudden.append('Non_Sudden_Crisis')
+
+                             else:
+                                 for zone in place:
+                                     # the location mention refers to a state in our knowledge base
+                                     bool1 = zone['type'] == 'State' and zone['text'].lower() in tags_state
+                                     # the location mention refers to a county in our knowledge base
+                                     bool2 = zone['type'] == 'County' and zone['text'].lower() in tags_county
+                                     # the location mention refers to a city in our knowledge base
+                                     bool3 = zone['type'] == 'City/town' and zone['text'].lower() in tags_city
+                                     if bool1 or bool2 or bool3:
+                                         date_NLP = to_date(result['created_at'])
+                                         # guard against dates with no matching window (mirrors the branch above)
+                                         if list(window_df['Window'][window_df['Date'] == date_NLP]) != []:
+                                             date.append(date_NLP)
+                                             text.append(result['text'])
+                                             linked_data = window_df[window_df['Date'] == date[-1]]
+                                             my_window = list(linked_data['Window'])[0]
+                                             window_data.append(my_window)
+                                             label_humanitarian.append(result['humAID_class'])
+                                             crisis_label.append(type_of_crisis)
+                                             if result['humAID_class'] == 'not_humanitarian':
+                                                 label_useful.append('not_humanitarian')
+                                                 label_urgent.append('not_humanitarian')
+                                             else:
+                                                 label_useful.append('useful')
+                                                 if result['humAID_class'] == 'rescue_volunteering_or_donation_effort':
+                                                     label_urgent.append('not_urgent')
+                                                 else:
+                                                     label_urgent.append('urgent')
+
+                                             if result['humAID_class'] == 'not_humanitarian':
+                                                 label_sudden.append('Not_Crisis_period')
+                                             else:
+                                                 if type_of_crisis == 'Earthquake' or type_of_crisis == 'WildFire':
+                                                     label_sudden.append('Sudden_Crisis')
+                                                 else:
+                                                     label_sudden.append('Non_Sudden_Crisis')
+
+         df = pd.DataFrame({'Date': date, 'Text': text, 'Crisis_Type': crisis_label, 'Window': window_data, 'label_humanitarian': label_humanitarian, 'label_useful': label_useful, 'label_urgent': label_urgent, 'label_sudden': label_sudden})
+         df_return = pd.concat([df_return, df])
+     return df_return
+
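+ # Label derivation used above (read directly from the branches):
+ #   humAID_class 'not_humanitarian'           -> useful: 'not_humanitarian', urgent: 'not_humanitarian', sudden: 'Not_Crisis_period'
+ #   'rescue_volunteering_or_donation_effort'  -> useful: 'useful', urgent: 'not_urgent'
+ #   any other humanitarian class              -> useful: 'useful', urgent: 'urgent'
+ #   humanitarian tweet in Earthquake/WildFire -> sudden: 'Sudden_Crisis'; in other crises 'Non_Sudden_Crisis'
+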
+ # command-line arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-w", "--window_size", type=int, default=5, help="size (in days) of the daily time series window")
+ parser.add_argument("-o", "--output_file", type=str, default='./Multi_modal_dataset/CrisisTS_Eng_Personnalized.csv', help="name of your output file")
+ args = parser.parse_args()
+
+ directory_nlp = './Textual_Data/English_Corpus'
+ directory_time_series = './Time_Series/EnglishTS'
+ path_knowledge = './utils/crisis_knowledge_eng.csv'
+ knowledge = pd.read_csv(path_knowledge, sep='\t')  # shape (9, 3); columns are Crisis, Places, Path_name (tweets)
+
+ # all possible test crises in English
+ All_possible_crisis = ['Hurricane', 'WildFire', 'Flood', 'Earthquake']
+ Test_crisis = 'Hurricane'
+
+ Train_knowledge = knowledge[knowledge['Crisis'] != Test_crisis]
+ Test_knowledge = knowledge[knowledge['Crisis'] == Test_crisis]
+
+ # link every crisis
+ multi_modal_test_data = pd.DataFrame({'Date': [], 'Text': [], 'Crisis_Type': [], 'Window': [], 'label_humanitarian': [], 'label_useful': [], 'label_urgent': [], 'label_sudden': []})
+ for index, crisis in knowledge.iterrows():
+     print('Linking textual data and meteorological data of ' + crisis['Path_name'] + ' ...')
+     multi_modal_test_data = pd.concat([multi_modal_test_data,
+                                        linker(crisis, directory_nlp,
+                                               directory_time_series,
+                                               args.window_size,
+                                               'date',
+                                               0)])
+
+ print('Done !')
+ multi_modal_test_data.to_csv(args.output_file, sep='\t')
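With the defaults above, running python Linker_Eng.py links every crisis in crisis_knowledge_eng.csv with 5-day weather windows and writes the merged corpus to ./Multi_modal_dataset/CrisisTS_Eng_Personnalized.csv; the -w and -o flags override the window size and output path.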
Linker_Fr.py ADDED
@@ -0,0 +1,138 @@
+ import pandas as pd
+ import datetime
+ import numpy as np
+ import pytz
+ import warnings
+ warnings.filterwarnings('ignore')
+ import argparse
+
+ # this function converts a date from the time series dataset into a
+ # timezone-aware UTC datetime
+ def to_datetime(my_date, set_hour=False):
+     my_date = str(my_date)
+     month = int(my_date[4:6])
+     day = int(my_date[6:8])
+     year = int(my_date[0:4])
+     if set_hour:
+         hour = 0
+     else:
+         hour = int(my_date[8:10])
+     unaware = datetime.datetime(year, month, day, hour)
+     final_date = pytz.utc.localize(unaware)
+     return final_date
+
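+ # Illustrative (the slice offsets assume 'YYYYMMDDHH...' stamps, as in
+ # SYNOP-style weather records):
+ #   to_datetime('2018011015') -> 2018-01-10 15:00:00+00:00
+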
+ # same conversion for the NLP dataset; tweet hours are floored to the nearest
+ # 3-hour step so they align with the time series records
+ def to_datetimeNLP(my_date, set_hour=False):
+     my_date = str(my_date)
+     month = int(my_date[5:7])
+     day = int(my_date[8:10])
+     year = int(my_date[0:4])
+     if set_hour:
+         hour = 0
+     else:
+         hour = int(my_date[11:13])
+         hour = hour - (hour % 3)
+     unaware = datetime.datetime(year, month, day, hour)
+     final_date = pytz.utc.localize(unaware)
+     return final_date
+
+
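+ # Illustrative (assuming ISO-style 'YYYY-MM-DD HH:MM:SS' tweet stamps):
+ #   to_datetimeNLP('2018-01-10 14:32:00') -> 2018-01-10 12:00:00+00:00
+ # (14h is floored to the 12h record of the 3-hourly series)
+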
+ # the function takes a time series dataframe (df) and returns its last
+ # size_window rows as one array (Window); the date column is dropped
+ def window_creation(df, size_window, timestamp_colum):
+     good_row = list(df.columns)
+     good_row.remove(timestamp_colum)
+     window_df = df[good_row]
+     window_df = window_df.reset_index(drop=True)
+     window_df = window_df.tail(size_window)
+     window_data = window_df.to_numpy()
+     return window_data
+
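+ # Note: unlike the English script's sliding windows, this returns a single
+ # trailing window, e.g. a (16, 7) array for size_window=16 (the station id
+ # plus the six cleaned features selected below).
+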
+ # the function that links the NLP data and the time series data for the French dataset
+ # text_data is the French NLP dataset as a dataframe
+ # crisis_knowledge is a dataframe read from the crisis knowledge CSV
+ # my_features is the time series dataframe
+ # window_size is the size of the window
+ # label_column_1 and label_column_2 are accepted but not used below
+ # fillNA is the value used to fill missing values
+ def linker(text_data: object, crisis_knowledge: object, my_features: object, window_size: int, label_column_1: str, label_column_2: str, date_column: str, fillNA: int) -> object:
+
+     # join the crisis knowledge and the NLP data on the name of the crisis
+     text_data = text_data.join(crisis_knowledge.set_index('Crisis Name'), on='event', validate='m:1')
+     text_data['date'] = list(map(to_datetimeNLP, list(text_data['created_at'])))
+     # features to keep for the time series
+     features = ['numer_sta', 'date', 'pmer', 'tend', 'ff', 't', 'u', 'n']
+     features_clean = ['pmer', 'tend', 'ff', 't', 'u', 'n']
+     df_return = pd.DataFrame({'Date': [], 'Text': [], 'Window': [], 'label': []})
+     list_of_date = []
+     list_of_text = []
+     list_of_window = []
+     list_of_crisis_type = []
+     list_of_label_uti = []
+     list_of_label_urg = []
+     list_of_label_int = []
+     list_of_label = []
+     # replace missing values and replace labels by numbers
+     my_features = my_features[features].fillna(fillNA).replace({'mq': fillNA, 'No_Crisis': 0, 'Crisis': 1, 'Ecological': 1, 'Sudden': 2})
+     # min-max normalize each clean feature
+     for clean_f in features_clean:
+         my_features[clean_f] = pd.to_numeric(my_features[clean_f])
+         my_features[clean_f] = (my_features[clean_f] - my_features[clean_f].min()) / (my_features[clean_f].max() - my_features[clean_f].min())
+     my_features['date'] = list(map(to_datetime, list(my_features['date'])))
+     set_of_date = set(list(my_features['date']))
+     for index, text_line in text_data.iterrows():
+         # if the NLP date can be found among the time series dates
+         if text_line['date'] in set_of_date:
+             # stations related to the crisis
+             list_of_station = eval(text_line['Related station'])
+             for i in range(len(list_of_station)):
+                 list_of_station[i] = int(list_of_station[i])
+             # keep the records of the first related station, before the tweet date
+             current_station = my_features[my_features['numer_sta'] == list_of_station[0]]
+             current_station = current_station.sort_values(by=['date'])
+             good_date = current_station[current_station['date'] < text_line['date']]
+             list_of_date.append(text_line['date'])
+             list_of_text.append(text_line['text'])
+             list_of_crisis_type.append(text_line['type_crisis'])
+             window = window_creation(good_date, window_size, 'date')
+             list_of_window.append(window)
+             list_of_label_uti.append(text_line['utility'])
+             list_of_label_int.append(text_line['humanitarian'])
+             list_of_label_urg.append(text_line['urgency'])
+             if text_line['utility'] == 'Message-NonUtilisable':
+                 list_of_label.append('Not_Crisis_period')
+             else:
+                 if text_line['type_crisis'] == 'Flood' or text_line['type_crisis'] == 'Hurricane' or text_line['type_crisis'] == 'Storms':
+                     list_of_label.append('Ecological_crisis')
+                 else:
+                     list_of_label.append('Sudden_Crisis')
+
+     df_return = pd.DataFrame({'Date': list_of_date, 'Text': list_of_text, 'Crisis_Type': list_of_crisis_type, 'Window': list_of_window, 'intention': list_of_label_int, 'urgency': list_of_label_urg, 'utility': list_of_label_uti, 'label': list_of_label})
+     return df_return
+
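+ # Label derivation used above: unusable messages ('Message-NonUtilisable')
+ # become 'Not_Crisis_period'; otherwise Flood/Hurricane/Storms tweets are
+ # 'Ecological_crisis' and all other crisis types 'Sudden_Crisis'.
+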
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-w", "--window_size", type=int, default=16, help="size (in number of records) of the time series window")
+ parser.add_argument("-o", "--output_file", type=str, default='./Multi_modal_dataset/CrisisTS_FR_Personnalized.csv', help="name of your output file")
+ args = parser.parse_args()
+
+ Knowledge = pd.read_csv('./utils/crisis_knowledge_fr.csv', sep="\t")
+
+ nlp_csv = pd.read_csv('./Textual_Data/French_Corpus/Kozlwoski.csv', sep='\t')
+
+ time_series = pd.read_csv('./Time_Series/FrenchTS/french_time_series_MERGED.csv', sep='\t')
+
+ # drop tweets without a date or a humanitarian label
+ nlp_csv = nlp_csv.dropna(subset=['date'])
+ nlp_csv = nlp_csv.dropna(subset=['humanitarian'])
+
+ multi_modal_dataset = linker(nlp_csv, Knowledge, time_series, args.window_size, 'Crisis_Predictability', 'label', 'date', 0)
+
+ print('Done !')
+ multi_modal_dataset.to_csv(args.output_file, sep='\t')
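As above, running python Linker_Fr.py with no arguments builds 16-record windows per tweet and writes ./Multi_modal_dataset/CrisisTS_FR_Personnalized.csv; -w and -o override the defaults.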
french_time_series_MERGED.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb65719c8bb7f9bbf8bc9749b2b0ae278bab5d286a01ad93cc362e83c472befe
+ size 377691790