update main script
SciGraph.py  +33 -3  CHANGED
@@ -111,6 +111,16 @@ class SciGraph(datasets.GeneratorBasedBuilder):
                     "function": data_dir['function'],
                     "topic": data_dir['topic']
                 },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "split": "valid",
+                    "classes": data_dir['classes'],
+                    "function": data_dir['function'],
+                    "topic": data_dir['topic']
+                },
             )
         ]

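The first hunk registers a third SplitGenerator so the builder exposes a validation split alongside train and test. One naming detail worth flagging: consumers address the split as "validation" (the string value of datasets.Split.VALIDATION), while _generate_examples branches on the literal "valid" passed through gen_kwargs. A minimal sketch of the object this hunk constructs, with placeholder file paths standing in for the real downloaded data_dir:

import datasets

# Placeholder paths; in the script these come from the download manager.
data_dir = {"classes": "classes.json", "function": "function.json", "topic": "topic.json"}

gen = datasets.SplitGenerator(
    name=datasets.Split.VALIDATION,
    gen_kwargs={
        "split": "valid",
        "classes": data_dir["classes"],
        "function": data_dir["function"],
        "topic": data_dir["topic"],
    },
)
print(gen.name)        # "validation": the key users see in the DatasetDict
print(gen.gen_kwargs)  # forwarded verbatim to _generate_examples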
@@ -125,7 +135,10 @@ class SciGraph(datasets.GeneratorBasedBuilder):
             data = data[['_id', 'abstract', 'label']]


-            train_data, test_data = train_test_split(data, test_size=0.1, random_state=42)
+            train_data, valid_data = train_test_split(data, test_size=0.1, random_state=42)
+
+            test_data = pd.read_json(function)
+            test_data = test_data.loc[test_data[functions].sum(axis=1) == 0]
             if split == 'train':
                 for idx, row in train_data.iterrows():
                     yield idx, {
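In the classes config, the validation set is now carved out of the former training portion, while the test set is rebuilt from the function file: only rows whose function label columns are all zero are kept, i.e. abstracts with no gold function label. A self-contained pandas sketch of that mask; the column names in functions are hypothetical stand-ins for the list the script defines elsewhere:

import pandas as pd

# Hypothetical stand-in for the script's `functions` list of label columns.
functions = ["background", "method", "result"]

test_data = pd.DataFrame({
    "_id": ["a1", "a2", "a3"],
    "abstract": ["...", "...", "..."],
    "background": [1, 0, 0],
    "method": [0, 0, 1],
    "result": [0, 0, 0],
})

# Keep rows where every function column is 0: the unlabeled abstracts.
unlabeled = test_data.loc[test_data[functions].sum(axis=1) == 0]
print(unlabeled["_id"].tolist())  # ['a2']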
@@ -133,19 +146,29 @@ class SciGraph(datasets.GeneratorBasedBuilder):
                         "abstract": row.abstract,
                         "label": row.label
                     }
+            elif split == 'valid':
+                for idx, row in valid_data.iterrows():
+                    yield idx, {
+                        "id": row._id,
+                        "abstract": row.abstract,
+                        "label": row.label
+                    }
             elif split == 'test':
                 for idx, row in test_data.iterrows():
                     yield idx, {
                         "id": row._id,
                         "abstract": row.abstract,
-                        "label": row.label
+                        "label": -1
                     }
+
+

         if self.config.name == 'topic':
             data = pd.read_json(topic)
             data = data.replace(to_replace=r'^\s*$', value=np.nan, regex=True).dropna(subset=['keywords'], axis=0)

             train_data, test_data = train_test_split(data, test_size=0.1, random_state=42)
+
             if split == 'train':
                 for idx, row in train_data.iterrows():
                     yield idx, {
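Yielding "label": -1 for the test split matches the common Hub convention for withheld test labels (GLUE does the same), and -1 is the value datasets.ClassLabel treats as "no label". A short sketch with hypothetical class names:

from datasets import ClassLabel

# Hypothetical class names; the script's real labels live in the data files.
label = ClassLabel(names=["background", "method", "result"])
print(label.int2str(0))  # 'background'
# -1 is stored untouched and simply marks an example as unlabeled,
# so downstream code can filter: [ex for ex in ds if ex["label"] != -1]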
@@ -153,10 +176,17 @@ class SciGraph(datasets.GeneratorBasedBuilder):
                         "abstract": row.abstract,
                         "keywords": row.keywords.split('#%#')
                     }
-            elif split == 'test':
+            elif split == 'valid':
                 for idx, row in test_data.iterrows():
                     yield idx, {
                         "id": row._id,
                         "abstract": row.abstract,
                         "keywords": row.keywords.split('#%#')
                     }
+            elif split == 'test':
+                for idx, row in data.iterrows():
+                    yield idx, {
+                        "id": row._id,
+                        "abstract": row.abstract,
+                        "keywords": row.keywords.split('#%#')
+                    }
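Taken together, both configs should now expose three splits. A hedged usage sketch; the repo id "scigraph" is a placeholder for the dataset's actual path on the Hub:

from datasets import load_dataset

ds = load_dataset("scigraph", "classes")  # placeholder repo id

print(ds)                      # expected: train, validation, and test
print(ds["validation"][0])     # {'id': ..., 'abstract': ..., 'label': ...}
print(ds["test"][0]["label"])  # -1: test abstracts ship unlabeled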