yash
committed on
Commit
·
609c11f
1
Parent(s):
4d0d76c
gpu to cpu
Browse files- app.py +2 -1
- transformer.py +2 -2
app.py
CHANGED
|
@@ -50,7 +50,8 @@ drop_prob = 0.1
|
|
| 50 |
num_layers = 6
|
| 51 |
max_sequence_length = 200
|
| 52 |
kn_vocab_size = len(gujarati_vocabulary)
|
| 53 |
-
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
|
|
|
|
| 54 |
|
| 55 |
transformer = Transformer(d_model,
|
| 56 |
ffn_hidden,
|
|
|
|
| 50 |
num_layers = 6
|
| 51 |
max_sequence_length = 200
|
| 52 |
kn_vocab_size = len(gujarati_vocabulary)
|
| 53 |
+
# device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
|
| 54 |
+
device = torch.device('cpu')
|
| 55 |
|
| 56 |
transformer = Transformer(d_model,
|
| 57 |
ffn_hidden,
|
transformer.py
CHANGED
|
@@ -5,8 +5,8 @@ from torch import nn
|
|
| 5 |
import torch.nn.functional as F
|
| 6 |
|
| 7 |
def get_device():
|
| 8 |
-
return torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
|
| 9 |
-
|
| 10 |
|
| 11 |
def scaled_dot_product(q, k, v, mask=None):
|
| 12 |
d_k = q.size()[-1]
|
|
|
|
| 5 |
import torch.nn.functional as F
|
| 6 |
|
| 7 |
def get_device():
|
| 8 |
+
# return torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
|
| 9 |
+
return torch.device('cpu')
|
| 10 |
|
| 11 |
def scaled_dot_product(q, k, v, mask=None):
|
| 12 |
d_k = q.size()[-1]
|