|
from typing import List |
|
from transformers import PretrainedConfig |
|
|
|
class LidirlLSTMConfig(PretrainedConfig):
    """Hyperparameter container for a Lidirl LSTM classifier.

    Plugs into the Hugging Face ``PretrainedConfig`` machinery so the model
    can be saved/loaded with the standard ``from_pretrained`` tooling.
    Any keyword arguments not listed below are forwarded to the base class.
    """

    model_type = "LidirlLSTM"

    def __init__(self,
                 embed_dim: int = 128,
                 hidden_dim: int = 128,
                 num_layers: int = 2,
                 vocab_size: int = 256,
                 label_size: int = 200,
                 dropout: float = 0.1,
                 bidirectional: bool = False,
                 max_length: int = 1024,
                 multilabel: bool = False,
                 montecarlo_layer: bool = False,
                 **kwargs,
                 ):
        """Store every hyperparameter as an instance attribute.

        Args:
            embed_dim: Size of the token embedding vectors.
            hidden_dim: Hidden state size of each LSTM layer.
            num_layers: Number of stacked LSTM layers.
            vocab_size: Number of entries in the input vocabulary.
            label_size: Number of output labels.
            dropout: Dropout probability.
            bidirectional: Whether the LSTM runs in both directions.
            max_length: Maximum supported input sequence length.
            multilabel: Whether the task is multilabel classification
                (presumably consumed by the model head — not used here).
            montecarlo_layer: Whether to use a Monte Carlo output layer
                (presumably consumed by the model head — not used here).
            **kwargs: Passed through to ``PretrainedConfig``.
        """
        # Input / output dimensions.
        self.vocab_size = vocab_size
        self.label_size = label_size
        self.max_length = max_length

        # Recurrent-encoder hyperparameters.
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional

        # Task / head options.
        self.multilabel = multilabel
        self.montecarlo_layer = montecarlo_layer

        # Hand any remaining options to the Hugging Face base class.
        super().__init__(**kwargs)