Update modeling_codefuse_cge_large.py
modeling_codefuse_cge_large.py
CHANGED
@@ -81,7 +81,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 import math
 import re
-
+from .configuration_codefuse_cge_large import CodeFuseCGELargeConfig
 
 class MAB_POST(nn.Module):
     def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
@@ -144,7 +144,7 @@ class PMA(nn.Module):
 
 
 class CodeFuse_CGE_Large(PreTrainedModel):
-
+    config_class = CodeFuseCGELargeConfig
     def __init__(self, config):
         super().__init__(config)
        self.plm_model = Qwen2ForCausalLM(config)
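
This change imports the custom CodeFuseCGELargeConfig from configuration_codefuse_cge_large.py and registers it on CodeFuse_CGE_Large via the config_class attribute, which is how a PreTrainedModel subclass tells transformers which configuration class from_pretrained should load and validate for this model. A minimal usage sketch of the effect, assuming the checkpoint directory and these custom modules are importable; the checkpoint path below is a placeholder, not taken from this commit:

    from modeling_codefuse_cge_large import CodeFuse_CGE_Large

    # With config_class = CodeFuseCGELargeConfig set, from_pretrained reads the
    # checkpoint's config.json as a CodeFuseCGELargeConfig (rather than a generic
    # base config) before constructing the wrapped Qwen2ForCausalLM.
    model = CodeFuse_CGE_Large.from_pretrained("path/to/CodeFuse-CGE-Large")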