---
size_categories:
- 10K<n<100K
---

# Virus-Host-Genomes Dataset

## Dataset Summary

The dataset contains the following fields:

| Field | Type | Description | Example |
|---------------|--------|--------------------------|---------|
| segment_label | string | Label for genome segment | "NA" |

#### Data Splits

The dataset contains train and test splits:

| Split Name | Number of Instances |
|------------|---------------------|
| train      | 51,935              |
| test       | 6,111               |
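
The splits can be loaded and inspected directly with the Hugging Face `datasets` library. A minimal sketch (the `host`, `sequence`, and `segment_label` field names follow the field table above and the usage examples below):

```python
from datasets import load_dataset

# Load both splits of the dataset
dataset = load_dataset("hiyata/Virus-Host-Genomes")

# Confirm the split sizes reported above (train: 51,935 / test: 6,111)
print({split: len(dataset[split]) for split in dataset})

# Inspect the fields of a single record
example = dataset["train"][0]
print(example["host"], example["segment_label"], example["sequence"][:60])
```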

## Dataset Creation

### Source Data

This dataset compiles virus sequences from multiple public repositories, including:
- NCBI Virus
- GenBank

### Data Processing

The dataset has undergone several processing steps:
- Sequence standardization (using only unambiguous IUPAC nucleotide characters)
- Host information standardization

Host labels were generated through a tier-based approach. Some sequences were annotated using the Gemini AI system to provide additional metadata where information was incomplete.
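
As an illustration of the sequence-standardization step, sequences can be screened against the unambiguous nucleotide alphabet before feature extraction. This is a minimal sketch of the idea, not the exact pipeline code; the `is_standardized` helper and the DNA-only alphabet are assumptions:

```python
# Assumed alphabet: unambiguous IUPAC DNA characters (the actual pipeline
# may handle additional cases, e.g. RNA 'U')
UNAMBIGUOUS = set("ACGT")

def is_standardized(seq: str) -> bool:
    """Return True if the sequence uses only unambiguous nucleotide characters."""
    return set(seq.upper()) <= UNAMBIGUOUS

# Example: keep only standardized sequences
sequences = ["ACGTACGT", "ACGRNNGT"]
print([s for s in sequences if is_standardized(s)])  # ['ACGTACGT']
```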

## Considerations for Using the Data

### Limitations and Biases

- **Sampling Bias**: The dataset may overrepresent viruses of clinical importance and underrepresent environmental viruses.
- **Temporal Distribution**: More recent viruses (especially those causing outbreaks) may be overrepresented.
- **Geographic Bias**: Samples from regions with stronger research infrastructure may be overrepresented.

## Usage Examples

### Data Preparation and K-mer Vectorization

Load the dataset, extract k-mer frequencies from the viral sequences to use as features, then scale the features and encode the host labels:

```python
import numpy as np
from datasets import load_dataset
from itertools import product
from sklearn.preprocessing import StandardScaler, LabelEncoder
import joblib
from tqdm import tqdm

# Load dataset
virus_dataset = load_dataset("hiyata/Virus-Host-Genomes")
train_dataset = virus_dataset['train']
test_dataset = virus_dataset['test']

# Generate k-mer dictionary once
def generate_kmer_dict(k):
    return {''.join(kmer): i for i, kmer in enumerate(product('ACGT', repeat=k))}

# Calculate k-mer frequency for a single sequence
def calculate_kmer_freq(seq, k, kmer_dict):
    freq = np.zeros(4**k)
    total_kmers = len(seq) - k + 1
    for i in range(total_kmers):
        kmer = seq[i:i+k]
        if 'N' not in kmer and all(base in 'ACGT' for base in kmer):
            freq[kmer_dict[kmer]] += 1
    return freq / total_kmers if total_kmers > 0 else freq

# Vectorize a dataset split into features X and binary host labels y
def vectorize_dataset(dataset, k=4):
    kmer_dict = generate_kmer_dict(k)
    num_samples = len(dataset['sequence'])
    X = np.zeros((num_samples, 4**k))
    y = np.array(['human' if host.lower() == 'human' else 'non-human' for host in dataset['host']])

    for idx, seq in enumerate(tqdm(dataset['sequence'], desc="Vectorizing sequences")):
        X[idx] = calculate_kmer_freq(seq.upper(), k, kmer_dict)

    return X, y

X_train, y_train = vectorize_dataset(train_dataset)
X_test, y_test = vectorize_dataset(test_dataset)

# Standardize features (fit the scaler on train only)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Save the fitted scaler for later inference
joblib.dump(scaler, 'standard_scaler.joblib')

# Encode host labels as integers
le = LabelEncoder()
y_train_enc = le.fit_transform(y_train)
y_test_enc = le.transform(y_test)

print("Vectorization complete.")
```

### Neural Network Training for Host Classification

Train a classifier to predict whether a virus infects humans or non-humans:

```python
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

# Define your neural network
class VirusClassifier(nn.Module):
    def __init__(self, input_shape: int):
        super(VirusClassifier, self).__init__()
        self.network = nn.Sequential(
            nn.Linear(input_shape, 64),
            nn.GELU(),
            nn.BatchNorm1d(64),
            nn.Dropout(0.3),

            nn.Linear(64, 32),
            nn.GELU(),
            nn.BatchNorm1d(32),
            nn.Dropout(0.3),

            nn.Linear(32, 32),
            nn.GELU(),

            nn.Linear(32, 2)
        )

    def forward(self, x):
        return self.network(x)

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# DataLoader setup
train_loader = DataLoader(TensorDataset(
    torch.tensor(X_train, dtype=torch.float32),
    torch.tensor(y_train_enc, dtype=torch.long)
), batch_size=64, shuffle=True)

test_loader = DataLoader(TensorDataset(
    torch.tensor(X_test, dtype=torch.float32),
    torch.tensor(y_test_enc, dtype=torch.long)
), batch_size=64, shuffle=False)

# Initialize the model
model = VirusClassifier(input_shape=X_train.shape[1]).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training loop
epochs = 15
for epoch in range(epochs):
    model.train()
    epoch_loss = 0
    for X_batch, y_batch in train_loader:
        X_batch, y_batch = X_batch.to(device), y_batch.to(device)
        optimizer.zero_grad()
        outputs = model(X_batch)
        loss = criterion(outputs, y_batch)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    avg_loss = epoch_loss / len(train_loader)
    print(f"Epoch [{epoch+1}/{epochs}], Loss: {avg_loss:.4f}")

# Save the trained model
torch.save(model.state_dict(), 'virus_classifier_model.pth')
```

### Model Evaluation with Matthews Correlation Coefficient

```python
from sklearn.metrics import classification_report, matthews_corrcoef

model.eval()
y_preds = []
y_true = []

with torch.no_grad():
    for X_batch, y_batch in test_loader:
        X_batch = X_batch.to(device)
        outputs = model(X_batch)
        preds = torch.argmax(outputs, dim=1).cpu().numpy()
        y_preds.extend(preds)
        y_true.extend(y_batch.numpy())

# Classification Report
report = classification_report(y_true, y_preds, target_names=['human', 'non-human'])
print("Classification Report:\n", report)

# MCC Score
mcc = matthews_corrcoef(y_true, y_preds)
print(f"Matthews Correlation Coefficient (MCC): {mcc:.4f}")
```
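
To reuse the saved scaler and model weights on a new sequence, something like the following sketch should work. It assumes the `generate_kmer_dict`, `calculate_kmer_freq`, `VirusClassifier`, `le`, `device`, and `test_dataset` objects from the earlier examples are still in scope; the `predict_host` helper itself is illustrative, not part of the dataset tooling:

```python
import joblib
import torch

# Reload the fitted scaler and the trained weights
scaler = joblib.load('standard_scaler.joblib')
model = VirusClassifier(input_shape=4**4).to(device)  # 256 features for k=4
model.load_state_dict(torch.load('virus_classifier_model.pth', map_location=device))
model.eval()

def predict_host(sequence, k=4):
    """Predict 'human' or 'non-human' for one nucleotide sequence."""
    kmer_dict = generate_kmer_dict(k)
    vec = calculate_kmer_freq(sequence.upper(), k, kmer_dict)
    X_new = scaler.transform(vec.reshape(1, -1))
    with torch.no_grad():
        logits = model(torch.tensor(X_new, dtype=torch.float32).to(device))
    pred = torch.argmax(logits, dim=1).item()
    return le.inverse_transform([pred])[0]

# Example prediction
example_seq = test_dataset[0]['sequence']
print(f"Predicted host: {predict_host(example_seq)}")
```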

## Additional Information

### Citation Information

```
@article
author = {},
|