Fouzi Takelait committed on
Commit 2f1a594 · 1 Parent(s): 0dfd702

Update transformer_mt_roberta/modeling_transformer_final.py

transformer_mt_roberta/modeling_transformer_final.py CHANGED
@@ -8,7 +8,7 @@ import torch.nn.functional as F
 
 from transformer_mt.modeling_attention import MultiHeadAttention
 from transformer_mt.utils import pad
-from transformers import AutoTokenizer, AutoModelForMaskedML
+from transformers import AutoTokenizer, AutoModelForMaskedLM
 
 Hypothesis = namedtuple("Hypothesis", ["value", "score"])
 
@@ -91,7 +91,7 @@ class TransfomerEncoderDecoderModel(nn.Module):
 
         self.dropout = nn.Dropout(self.dropout_rate)
 
-        self.encoder = AutoModelForMaskedML.from_pretrained("flax-community/roberta_base_danish", output_hidden_states=True)
+        self.encoder = AutoModelForMaskedLM.from_pretrained("flax-community/roberta_base_danish", output_hidden_states=True)
 
         self.decoder_layers = nn.ModuleList([TransformerDecoderLayer(hidden = self.hidden,
                                                                      num_heads = self.num_heads,
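
For context, a minimal sketch of what the corrected line does: AutoModelForMaskedLM (note the fixed "LM" suffix; AutoModelForMaskedML does not exist in transformers) loads the pretrained Danish RoBERTa checkpoint, and output_hidden_states=True makes the forward pass return every layer's hidden states instead of only the masked-LM logits. The tokenizer call and the use of hidden_states[-1] below are illustrative assumptions about how the encoder states are consumed, not code from this commit.

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Mirrors the committed encoder setup (model id taken from the diff above).
# Note: if the hub repo only ships Flax weights, from_flax=True may be needed.
tokenizer = AutoTokenizer.from_pretrained("flax-community/roberta_base_danish")
encoder = AutoModelForMaskedLM.from_pretrained(
    "flax-community/roberta_base_danish", output_hidden_states=True
)

batch = tokenizer("Hej verden!", return_tensors="pt")
with torch.no_grad():
    outputs = encoder(**batch)

# With output_hidden_states=True, outputs.hidden_states is a tuple of
# (num_layers + 1) tensors of shape (batch, seq_len, hidden). The last
# entry is the final-layer representation a decoder would typically
# cross-attend to.
encoder_states = outputs.hidden_states[-1]
print(encoder_states.shape)  # torch.Size([1, seq_len, 768]) for a base model

This also explains why the commit matters beyond the import: the model is used purely as a feature encoder, so the hidden states, not the MLM prediction head, are the output the rest of the encoder-decoder model consumes.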