eyad-silx committed (verified)
Commit 026d6eb · Parent: 3c33c20

Update modeling_quasarv4.py

Files changed (1):
modeling_quasarv4.py +5 -5
modeling_quasarv4.py CHANGED
@@ -1143,8 +1143,8 @@ class QuasarV4ForCausalLM(QuasarV4PreTrainedModel):
     def get_decoder(self):
         return self.model
 
-    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
-    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+    @add_start_docstrings_to_model_forward(QUASAR_V4_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=QuasarV4Config)
     def forward(
         self,
         input_ids: torch.LongTensor = None,
@@ -1314,13 +1314,13 @@ class QuasarV4ForCausalLM(QuasarV4PreTrainedModel):
     padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
     each row of the batch).
     """,
-    QWEN2_START_DOCSTRING,
+    QUASAR_V4_START_DOCSTRING,
 )
 class QuasarV4ForSequenceClassification(QuasarV4PreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
-        self.model = Qwen2Model(config)
+        self.model = QuasarV4Model(config)
         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
 
         # Initialize weights and apply final processing
@@ -1332,7 +1332,7 @@ class QuasarV4ForSequenceClassification(QuasarV4PreTrainedModel):
     def set_input_embeddings(self, value):
         self.model.embed_tokens = value
 
-    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
+    @add_start_docstrings_to_model_forward(QUASAR_V4_INPUTS_DOCSTRING)
     def forward(
         self,
         input_ids: torch.LongTensor = None,
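
For context: the commit retargets the Hugging Face docstring decorators and the sequence-classification backbone from the leftover Qwen2 names to the QuasarV4 equivalents (QUASAR_V4_INPUTS_DOCSTRING, QUASAR_V4_START_DOCSTRING, QuasarV4Config, QuasarV4Model), which are presumably defined elsewhere in modeling_quasarv4.py. Below is a minimal, self-contained sketch of how these decorators attach such constants to a model class. The constant text and ToyQuasarV4ForCausalLM are illustrative stand-ins, not code from the repository, and config_class is passed as a plain string here to keep the sketch runnable, whereas the commit passes the QuasarV4Config class itself.

import torch
from torch import nn
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)

# Illustrative stand-ins for the module-level constants referenced in the diff;
# the real text lives elsewhere in modeling_quasarv4.py.
QUASAR_V4_START_DOCSTRING = r"""
    This model inherits from `PreTrainedModel`; see the superclass documentation
    for the generic methods it implements.
"""

QUASAR_V4_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
"""


@add_start_docstrings(
    "Toy stand-in showing how the class-level docstring constant is attached.",
    QUASAR_V4_START_DOCSTRING,
)
class ToyQuasarV4ForCausalLM(nn.Module):
    @add_start_docstrings_to_model_forward(QUASAR_V4_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="QuasarV4Config")
    def forward(self, input_ids: torch.LongTensor = None) -> CausalLMOutputWithPast:
        """
        Returns:

        """
        # Dummy logits; a real model would run the decoder and LM head here.
        batch, seq = input_ids.shape
        return CausalLMOutputWithPast(logits=torch.zeros(batch, seq, 8))


# The decorators rewrite the docstrings in place at import time:
print(ToyQuasarV4ForCausalLM.forward.__doc__[:200])

The decorators only edit __doc__ (prepending the inputs docstring and expanding the empty "Returns:" placeholder from output_type), so renaming the constants changes generated documentation, not runtime behavior; the functional change in this commit is the switch from Qwen2Model to QuasarV4Model as the classification backbone.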