Update README.md
Browse files
README.md
CHANGED
@@ -91,8 +91,8 @@ We encode protein sequence with single amino acid resolution with 44 vocabularie
|
|
91 |
For more information, visit: [Model Generator](https://github.com/genbio-ai/modelgenerator)
|
92 |
|
93 |
```bash
|
94 |
-
mgen fit --model SequenceClassification --model.backbone aido_ragprotein_16b --data SequenceClassificationDataModule --data.path <hf_or_local_path_to_your_dataset>
|
95 |
-
mgen test --model SequenceClassification --model.backbone aido_ragprotein_16b --data SequenceClassificationDataModule --data.path <hf_or_local_path_to_your_dataset>
|
96 |
```
|
97 |
|
98 |
### Or use directly in Python
|
@@ -102,7 +102,7 @@ mgen test --model SequenceClassification --model.backbone aido_ragprotein_16b --
|
|
102 |
```python
|
103 |
import torch
|
104 |
from modelgenerator.tasks import Embed
|
105 |
-
model = Embed.from_config({"model.backbone": "aido_ragprotein_16b"}).eval()
|
106 |
model.backbone.max_length = 12800
|
107 |
data = torch.load("examples.pt", 'cpu')[0]
|
108 |
transformed_batch = model.transform(data)
|
@@ -117,7 +117,7 @@ print(embedding.shape)
|
|
117 |
```python
|
118 |
import torch
|
119 |
from modelgenerator.tasks import SequenceClassification
|
120 |
-
model = SequenceClassification.from_config({"model.backbone": "aido_ragprotein_16b", "model.n_classes": 2}).eval()
|
121 |
model.backbone.max_length = 12800
|
122 |
data = torch.load("examples.pt", 'cpu')[0]
|
123 |
transformed_batch = model.transform(data)
|
@@ -133,7 +133,7 @@ print(torch.argmax(logits, dim=-1))
|
|
133 |
```python
|
134 |
import torch
|
135 |
from modelgenerator.tasks import TokenClassification
|
136 |
-
model = TokenClassification.from_config({"model.backbone": "aido_ragprotein_16b", "model.n_classes": 3}).eval()
|
137 |
model.backbone.max_length = 12800
|
138 |
data = torch.load("examples.pt", 'cpu')[0]
|
139 |
transformed_batch = model.transform(data)
|
@@ -148,7 +148,7 @@ print(torch.argmax(logits, dim=-1))
|
|
148 |
|
149 |
```python
|
150 |
from modelgenerator.tasks import SequenceRegression
|
151 |
-
model = SequenceRegression.from_config({"model.backbone": "aido_ragprotein_16b"}).eval()
|
152 |
model.backbone.max_length = 12800
|
153 |
data = torch.load("examples.pt", 'cpu')[0]
|
154 |
transformed_batch = model.transform(data)
|
|
|
91 |
For more information, visit: [Model Generator](https://github.com/genbio-ai/modelgenerator)
|
92 |
|
93 |
```bash
|
94 |
+
mgen fit --model SequenceClassification --model.backbone aido_protein_rag_16b --data SequenceClassificationDataModule --data.path <hf_or_local_path_to_your_dataset>
|
95 |
+
mgen test --model SequenceClassification --model.backbone aido_protein_rag_16b --data SequenceClassificationDataModule --data.path <hf_or_local_path_to_your_dataset>
|
96 |
```
|
97 |
|
98 |
### Or use directly in Python
|
|
|
102 |
```python
|
103 |
import torch
|
104 |
from modelgenerator.tasks import Embed
|
105 |
+
model = Embed.from_config({"model.backbone": "aido_protein_rag_16b"}).eval()
|
106 |
model.backbone.max_length = 12800
|
107 |
data = torch.load("examples.pt", 'cpu')[0]
|
108 |
transformed_batch = model.transform(data)
|
|
|
117 |
```python
|
118 |
import torch
|
119 |
from modelgenerator.tasks import SequenceClassification
|
120 |
+
model = SequenceClassification.from_config({"model.backbone": "aido_protein_rag_16b", "model.n_classes": 2}).eval()
|
121 |
model.backbone.max_length = 12800
|
122 |
data = torch.load("examples.pt", 'cpu')[0]
|
123 |
transformed_batch = model.transform(data)
|
|
|
133 |
```python
|
134 |
import torch
|
135 |
from modelgenerator.tasks import TokenClassification
|
136 |
+
model = TokenClassification.from_config({"model.backbone": "aido_protein_rag_16b", "model.n_classes": 3}).eval()
|
137 |
model.backbone.max_length = 12800
|
138 |
data = torch.load("examples.pt", 'cpu')[0]
|
139 |
transformed_batch = model.transform(data)
|
|
|
148 |
|
149 |
```python
|
150 |
from modelgenerator.tasks import SequenceRegression
|
151 |
+
model = SequenceRegression.from_config({"model.backbone": "aido_protein_rag_16b"}).eval()
|
152 |
model.backbone.max_length = 12800
|
153 |
data = torch.load("examples.pt", 'cpu')[0]
|
154 |
transformed_batch = model.transform(data)
|