import argparse
import datetime
import warnings

import pandas as pd
import pytz

# Suppress library warnings (e.g. the slice assignments in the normalization below)
warnings.filterwarnings('ignore')

# Parse a raw time series timestamp (YYYYMMDDHH) from the dataset into a
# timezone-aware UTC datetime. If set_hour is True, the hour is forced to 0.
def to_datetime(my_date, set_hour=False):
	my_date = str(my_date)
	year = int(my_date[0:4])
	month = int(my_date[4:6])
	day = int(my_date[6:8])
	if set_hour:
		hour = 0
	else:
		hour = int(my_date[8:10])
	unaware = datetime.datetime(year, month, day, hour)
	return pytz.utc.localize(unaware)
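# A minimal usage sketch (the timestamp value is hypothetical, not taken from
# the dataset):
# >>> to_datetime(2018061512)
# datetime.datetime(2018, 6, 15, 12, 0, tzinfo=<UTC>)
# >>> to_datetime(2018061512, set_hour=True)
# datetime.datetime(2018, 6, 15, 0, 0, tzinfo=<UTC>)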

# Parse an NLP timestamp (YYYY-MM-DD HH:MM:SS) into a timezone-aware UTC
# datetime. The hour is rounded down to the nearest multiple of 3 so that
# tweets align with the 3-hourly weather observations. If set_hour is True,
# the hour is forced to 0.
def to_datetimeNLP(my_date, set_hour=False):
	my_date = str(my_date)
	year = int(my_date[0:4])
	month = int(my_date[5:7])
	day = int(my_date[8:10])
	if set_hour:
		hour = 0
	else:
		hour = int(my_date[11:13])
		hour = hour - (hour % 3)
	unaware = datetime.datetime(year, month, day, hour)
	return pytz.utc.localize(unaware)
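# A minimal usage sketch (hypothetical timestamp): a tweet posted at 14:32 is
# bucketed into the 12:00 observation slot, since 14 - (14 % 3) == 12:
# >>> to_datetimeNLP('2018-06-15 14:32:00')
# datetime.datetime(2018, 6, 15, 12, 0, tzinfo=<UTC>)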


# Take a time series DataFrame (df) and return its last size_window rows
# (fewer if the frame is shorter) as a numpy array, dropping the timestamp
# column. This array becomes the 'Window' feature attached to a tweet.
def window_creation(df, size_window, timestamp_column):
	good_columns = list(df.columns)
	good_columns.remove(timestamp_column)
	window_df = df[good_columns].reset_index(drop=True)
	window_df = window_df.tail(size_window)
	return window_df.to_numpy()
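# A minimal sketch of the expected behaviour on a toy DataFrame (hypothetical
# values): only the last size_window rows survive, and the timestamp column is
# dropped before conversion to a numpy array:
# >>> toy = pd.DataFrame({'date': [1, 2, 3], 't': [0.1, 0.2, 0.3]})
# >>> window_creation(toy, 2, 'date')
# array([[0.2],
#        [0.3]])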

# Link the NLP data and the time series data for the French dataset.
# text_data: the French NLP dataset as a DataFrame
# crisis_knowledge: a DataFrame loaded from the crisis knowledge CSV
# my_features: the time series DataFrame (weather observations)
# window_size: number of time steps kept in each window
# label_column_1, label_column_2, date_column: column names (accepted but not used below)
# fillNA: the value used to replace missing readings
def linker(text_data: object, crisis_knowledge: object, my_features: object, window_size: int, label_column_1: str, label_column_2: str, date_column: str, fillNA: int) -> object:

	# Join crisis knowledge and NLP data on the name of the crisis (many tweets per crisis)
	text_data = text_data.join(crisis_knowledge.set_index('Crisis Name'), on='event', validate='m:1')
	text_data['date'] = list(map(to_datetimeNLP, list(text_data['created_at'])))
	# Time series features to keep: station id, date, and the six weather readings
	features = ['numer_sta', 'date', 'pmer', 'tend', 'ff', 't', 'u', 'n']
	features_clean = ['pmer', 'tend', 'ff', 't', 'u', 'n']
	list_of_date = []
	list_of_text = []
	list_of_window = []
	list_of_crisis_type = []
	list_of_label_uti = []
	list_of_label_urg = []
	list_of_label_int = []
	list_of_label = []
	# Replace missing values (including the 'mq' marker used for missing readings)
	# and map the label strings to numbers
	my_features = my_features[features].fillna(fillNA).replace({'mq': fillNA, 'No_Crisis': 0, 'Crisis': 1, 'Ecological': 1, 'Sudden': 2})
	# Min-max normalize each weather feature to [0, 1]
	for clean_f in features_clean:
		my_features[clean_f] = pd.to_numeric(my_features[clean_f])
		my_features[clean_f] = (my_features[clean_f] - my_features[clean_f].min()) / (my_features[clean_f].max() - my_features[clean_f].min())
	# Convert the raw timestamps and collect the set of observation dates
	my_features['date'] = list(map(to_datetime, list(my_features['date'])))
	set_of_date = set(my_features['date'])
	for index, text_line in text_data.iterrows():
		# Keep the tweet only if its (3-hour-rounded) date exists in the time series
		if text_line['date'] in set_of_date:
			# Stations related to the crisis, stored as a string-encoded list
			list_of_station = [int(s) for s in eval(text_line['Related station'])]
			# Observations of the first related station, strictly before the tweet date
			current_station = my_features[my_features['numer_sta'] == list_of_station[0]]
			current_station = current_station.sort_values(by=['date'])
			good_date = current_station[current_station['date'] < text_line['date']]
			list_of_date.append(text_line['date'])
			list_of_text.append(text_line['text'])
			list_of_crisis_type.append(text_line['type_crisis'])
			window = window_creation(good_date, window_size, 'date')
			list_of_window.append(window)
			list_of_label_uti.append(text_line['utility'])
			list_of_label_int.append(text_line['humanitarian'])
			list_of_label_urg.append(text_line['urgency'])
			if text_line['utility'] == 'Message-NonUtilisable':
				list_of_label.append('Not_Crisis_period')
			elif text_line['type_crisis'] in ('Flood', 'Hurricane', 'Storms'):
				list_of_label.append('Ecological_crisis')
			else:
				list_of_label.append('Sudden_Crisis')

	df_return = pd.DataFrame({'Date': list_of_date, 'Text': list_of_text, 'Crisis_Type': list_of_crisis_type, 'Window': list_of_window, 'intention': list_of_label_int, 'urgency': list_of_label_urg, 'utility': list_of_label_uti, 'label': list_of_label})
	return df_return
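# A hedged sketch of linker's output (column names read off the code above; the
# Window shape is an inference, not verified against the real data): one row per
# tweet whose rounded date matches an observation slot, where 'Window' holds at
# most window_size rows over 7 columns (station id plus the six normalized readings).
# >>> out = linker(nlp_csv, Knowledge, time_series, 16, 'Crisis_Predictability', 'label', 'date', 0)
# >>> list(out.columns)
# ['Date', 'Text', 'Crisis_Type', 'Window', 'intention', 'urgency', 'utility', 'label']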

parser = argparse.ArgumentParser()
parser.add_argument("-w", "--window_size", type=int, default=16, help="size (in number of time steps) of the time series window attached to each tweet")
parser.add_argument("-o", "--output_file", type=str, default='./Multi_modal_dataset/CrisisTS_FR_Personnalized.csv', help="name of your output file")
args = parser.parse_args()

# Load the crisis knowledge base, the French tweet corpus, and the weather time series
Knowledge = pd.read_csv('./utils/crisis_knowledge_fr.csv', sep='\t')
nlp_csv = pd.read_csv('./Textual_Data/French_Corpus/Kozlwoski.csv', sep='\t')
time_series = pd.read_csv('./Time_Series/FrenchTS/french_time_series_MERGED.csv', sep='\t')

# Drop tweets with a missing date or missing humanitarian label
nlp_csv = nlp_csv.dropna(subset=['date', 'humanitarian'])

multi_modal_dataset = linker(nlp_csv, Knowledge, time_series, args.window_size, 'Crisis_Predictability', 'label', 'date', 0)
multi_modal_dataset.to_csv(args.output_file, sep='\t')
print('Done !')
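# Example invocation (this_script.py is a placeholder for whatever name this
# file is saved under; the flag values shown are the defaults):
#   python this_script.py -w 16 -o ./Multi_modal_dataset/CrisisTS_FR_Personnalized.csv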