tclf90 committed
Commit ad5c24d · verified · 1 Parent(s): b691205

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,85 @@
1
+ {
2
+ "name_or_path": "tclf90/GLM-4.5V-AWQ",
3
+ "architectures": [
4
+ "Glm4vMoeForConditionalGeneration"
5
+ ],
6
+ "model_type": "glm4v_moe",
7
+ "text_config": {
8
+ "pad_token_id": 151329,
9
+ "vocab_size": 151552,
10
+ "eos_token_id": [
11
+ 151329,
12
+ 151336,
13
+ 151338
14
+ ],
15
+ "image_end_token_id": 151340,
16
+ "image_start_token_id": 151339,
17
+ "image_token_id": 151363,
18
+ "head_dim": 128,
19
+ "attention_bias": true,
20
+ "attention_dropout": 0.0,
21
+ "first_k_dense_replace": 1,
22
+ "hidden_act": "silu",
23
+ "hidden_size": 4096,
24
+ "initializer_range": 0.02,
25
+ "intermediate_size": 11264,
26
+ "max_position_embeddings": 65536,
27
+ "model_type": "glm4v_moe_text",
28
+ "moe_intermediate_size": 1408,
29
+ "n_group": 1,
30
+ "n_routed_experts": 128,
31
+ "n_shared_experts": 1,
32
+ "norm_topk_prob": true,
33
+ "num_attention_heads": 96,
34
+ "num_experts_per_tok": 8,
35
+ "num_hidden_layers": 46,
36
+ "num_key_value_heads": 8,
37
+ "partial_rotary_factor": 0.5,
38
+ "rms_norm_eps": 1e-05,
39
+ "torch_dtype": "bfloat16",
40
+ "rope_scaling": {
41
+ "rope_type": "default",
42
+ "mrope_section": [
43
+ 8,
44
+ 12,
45
+ 12
46
+ ]
47
+ },
48
+ "rope_theta": 10000.0,
49
+ "routed_scaling_factor": 1.0,
50
+ "topk_group": 1,
51
+ "use_cache": true,
52
+ "use_qk_norm": false
53
+ },
54
+ "torch_dtype": "float16",
55
+ "transformers_version": "4.55.0.dev0",
56
+ "video_end_token_id": 151342,
57
+ "video_start_token_id": 151341,
58
+ "video_token_id": 151364,
59
+ "vision_config": {
60
+ "attention_bias": false,
61
+ "attention_dropout": 0.0,
62
+ "depth": 24,
63
+ "hidden_act": "silu",
64
+ "hidden_size": 1536,
65
+ "image_size": 336,
66
+ "in_channels": 3,
67
+ "initializer_range": 0.02,
68
+ "intermediate_size": 11264,
69
+ "model_type": "glm4v_moe",
70
+ "num_heads": 12,
71
+ "out_hidden_size": 4096,
72
+ "patch_size": 14,
73
+ "rms_norm_eps": 1e-05,
74
+ "spatial_merge_size": 2,
75
+ "temporal_patch_size": 2
76
+ },
77
+ "quantization_config": {
78
+ "quant_method": "awq_marlin",
79
+ "bits": 4,
80
+ "group_size": 128,
81
+ "version": "gemm",
82
+ "zero_point": true,
83
+ "modules_to_not_convert": ["blocks.", "model.embed_tokens", "model.layers.0.mlp.shared_experts.", "model.layers.1.mlp.shared_experts.", "model.layers.2.mlp.shared_experts.", "model.layers.3.mlp.shared_experts.", "model.layers.4.mlp.shared_experts.", "model.layers.5.mlp.shared_experts.", "model.layers.6.mlp.shared_experts.", "model.layers.7.mlp.shared_experts.", "model.layers.8.mlp.shared_experts.", "model.layers.9.mlp.shared_experts.", "model.layers.10.mlp.shared_experts.", "model.layers.11.mlp.shared_experts.", "model.layers.12.mlp.shared_experts.", "model.layers.13.mlp.shared_experts.", "model.layers.14.mlp.shared_experts.", "model.layers.15.mlp.shared_experts.", "model.layers.16.mlp.shared_experts.", "model.layers.17.mlp.shared_experts.", "model.layers.18.mlp.shared_experts.", "model.layers.19.mlp.shared_experts.", "model.layers.20.mlp.shared_experts.", "model.layers.21.mlp.shared_experts.", "model.layers.22.mlp.shared_experts.", "model.layers.23.mlp.shared_experts.", "model.layers.24.mlp.shared_experts.", "model.layers.25.mlp.shared_experts.", "model.layers.26.mlp.shared_experts.", "model.layers.27.mlp.shared_experts.", "model.layers.28.mlp.shared_experts.", "model.layers.29.mlp.shared_experts.", "model.layers.30.mlp.shared_experts.", "model.layers.31.mlp.shared_experts.", "model.layers.32.mlp.shared_experts.", "model.layers.33.mlp.shared_experts.", "model.layers.34.mlp.shared_experts.", "model.layers.35.mlp.shared_experts.", "model.layers.36.mlp.shared_experts.", "model.layers.37.mlp.shared_experts.", "model.layers.38.mlp.shared_experts.", "model.layers.39.mlp.shared_experts.", "model.layers.40.mlp.shared_experts.", "model.layers.41.mlp.shared_experts.", "model.layers.42.mlp.shared_experts.", "model.layers.43.mlp.shared_experts.", "model.layers.44.mlp.shared_experts.", "model.layers.45.mlp.shared_experts.", "lm_head"]
84
+ }
85
+ }
.ipynb_checkpoints/model.safetensors.index-checkpoint.json ADDED
The diff for this file is too large to render. See raw diff
 
README.md ADDED
@@ -0,0 +1,112 @@
+ ---
+ license: mit
+ language:
+ - zh
+ - en
+ base_model:
+ - zai-org/GLM-4.5-Air-Base
+ pipeline_tag: image-text-to-text
+ library_name: transformers
+ ---
+
+ # GLM-4.5V
+
+ <div align="center">
+ <img src=https://raw.githubusercontent.com/zai-org/GLM-V/refs/heads/main/resources/logo.svg width="40%"/>
+ </div>
+ <p align="center">
+ 👋 Join our <a href="https://discord.com/invite/8cnQKdAprg" target="_blank">Discord</a> communities.
+ <br>
+ 📖 Check out the <a href="https://github.com/zai-org/GLM-V/blob/main/resources/GLM-4.5V_technical_report.pdf" target="_blank">paper</a>.
+ <br>
+ 📍 Access the GLM-V series models via API on the <a href="https://docs.z.ai/guides/vlm/glm-4.5v">ZhipuAI Open Platform</a>.
+ </p>
+
+ ## Introduction
+
+ Vision-language models (VLMs) have become a key cornerstone of intelligent systems. As real-world AI tasks grow increasingly complex, VLMs urgently need to enhance reasoning capabilities beyond basic multimodal perception — improving accuracy, comprehensiveness, and intelligence — to enable complex problem solving, long-context understanding, and multimodal agents.
+
+ Through our open-source work, we aim to explore the technological frontier together with the community while empowering more developers to create exciting and innovative applications.
+
+ GLM-4.5V is based on ZhipuAI’s next-generation flagship text foundation model GLM-4.5-Air (106B parameters, 12B active). It continues the technical approach of GLM-4.1V-Thinking, achieving SOTA performance among models of the same scale on 42 public vision-language benchmarks. It covers common tasks such as image, video, and document understanding, as well as GUI agent operations.
+
+ ![bench_45](https://raw.githubusercontent.com/zai-org/GLM-V/refs/heads/main/resources/bench_45v.jpeg)
+
+ Beyond benchmark performance, GLM-4.5V focuses on real-world usability. Through efficient hybrid training, it can handle diverse types of visual content, enabling full-spectrum vision reasoning, including:
+ - **Image reasoning** (scene understanding, complex multi-image analysis, spatial recognition)
+ - **Video understanding** (long video segmentation and event recognition)
+ - **GUI tasks** (screen reading, icon recognition, desktop operation assistance)
+ - **Complex chart & long document parsing** (research report analysis, information extraction)
+ - **Grounding** (precise visual element localization)
+
+ The model also introduces a **Thinking Mode** switch that lets users trade off quick responses against deep reasoning. The switch works the same way as in the `GLM-4.5` language model; a usage sketch follows the Quick Start example below.
+
+ ## Quick Start
+
+ To use the model with `transformers`, first install the preview package:
+
+ ```shell
+ pip install transformers-v4.55.0-GLM-4.5V-preview
+ ```
+
+ and then run:
+
+ ```python
+ from transformers import AutoProcessor, Glm4vMoeForConditionalGeneration
+ import torch
+
+ MODEL_PATH = "zai-org/GLM-4.5V"
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
+             },
+             {
+                 "type": "text",
+                 "text": "describe this image"
+             }
+         ],
+     }
+ ]
+ processor = AutoProcessor.from_pretrained(MODEL_PATH)
+ model = Glm4vMoeForConditionalGeneration.from_pretrained(
+     pretrained_model_name_or_path=MODEL_PATH,
+     torch_dtype="auto",
+     device_map="auto",
+ )
+ inputs = processor.apply_chat_template(
+     messages,
+     tokenize=True,
+     add_generation_prompt=True,
+     return_dict=True,
+     return_tensors="pt"
+ ).to(model.device)
+ inputs.pop("token_type_ids", None)
+ generated_ids = model.generate(**inputs, max_new_tokens=8192)
+ output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
+ print(output_text)
+ ```
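+
+ The **Thinking Mode** switch is applied when the prompt is built. A minimal sketch, assuming `apply_chat_template` forwards extra keyword arguments such as `enable_thinking` to the chat template bundled with this repository (which then appends `/nothink` to the user turn and an empty `<think></think>` block to the generation prompt):
+
+ ```python
+ from transformers import AutoProcessor
+
+ MODEL_PATH = "zai-org/GLM-4.5V"
+ processor = AutoProcessor.from_pretrained(MODEL_PATH)
+
+ messages = [{"role": "user", "content": [{"type": "text", "text": "describe this image"}]}]
+
+ # Render the prompt text only (no tokenization) with thinking disabled.
+ # Assumption: `enable_thinking` is passed through to the Jinja chat template.
+ prompt = processor.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True,
+     enable_thinking=False,
+ )
+ print(prompt)
+ ```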
+
+ The special tokens `<|begin_of_box|>` and `<|end_of_box|>` in the response mark the answer's bounding box in the image. The bounding box is given as four numbers, for example `[x1, y1, x2, y2]`, where `(x1, y1)` is the top-left corner and `(x2, y2)` is the bottom-right corner. The bracket style may vary ([], [[]], (), <>, etc.), but the meaning is the same: it encloses the coordinates of the box. These coordinates are relative values between 0 and 1000, normalized to the image size.
+
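+ A minimal post-processing sketch, assuming the boxes should be mapped back to pixel space: it extracts whatever numbers appear between the box tokens (regardless of bracket style) and rescales the 0-1000 normalized values to the image width and height.
+
+ ```python
+ import re
+
+ def extract_boxes(text: str, width: int, height: int):
+     """Pull <|begin_of_box|>...<|end_of_box|> spans out of the decoded text and
+     rescale the 0-1000 normalized coordinates to pixels for a width x height image."""
+     boxes = []
+     for span in re.findall(r"<\|begin_of_box\|>(.*?)<\|end_of_box\|>", text, re.S):
+         nums = [float(n) for n in re.findall(r"\d+(?:\.\d+)?", span)]
+         for i in range(0, len(nums) - 3, 4):  # take coordinates in groups of four
+             x1, y1, x2, y2 = nums[i:i + 4]
+             boxes.append((x1 / 1000 * width, y1 / 1000 * height,
+                           x2 / 1000 * width, y2 / 1000 * height))
+     return boxes
+
+ print(extract_boxes("<|begin_of_box|>[12, 34, 567, 890]<|end_of_box|>", 1920, 1080))
+ ```
+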
95
+ For more code information, please visit our [GitHub](https://github.com/zai-org/GLM-V/).
96
+
97
+ ## Citation
98
+
99
+ If you use this model, please cite the following paper:
100
+
101
+ ```bibtex
102
+ @misc{glmvteam2025glm41vthinkingversatilemultimodalreasoning,
103
+ title={GLM-4.1V-Thinking: Towards Versatile Multimodal Reasoning with Scalable Reinforcement Learning},
104
+ author={GLM-V Team and Wenyi Hong and Wenmeng Yu and Xiaotao Gu and Guo Wang and Guobing Gan and Haomiao Tang and Jiale Cheng and Ji Qi and Junhui Ji and Lihang Pan and Shuaiqi Duan and Weihan Wang and Yan Wang and Yean Cheng and Zehai He and Zhe Su and Zhen Yang and Ziyang Pan and Aohan Zeng and Baoxu Wang and Boyan Shi and Changyu Pang and Chenhui Zhang and Da Yin and Fan Yang and Guoqing Chen and Jiazheng Xu and Jiali Chen and Jing Chen and Jinhao Chen and Jinghao Lin and Jinjiang Wang and Junjie Chen and Leqi Lei and Letian Gong and Leyi Pan and Mingzhi Zhang and Qinkai Zheng and Sheng Yang and Shi Zhong and Shiyu Huang and Shuyuan Zhao and Siyan Xue and Shangqin Tu and Shengbiao Meng and Tianshu Zhang and Tianwei Luo and Tianxiang Hao and Wenkai Li and Wei Jia and Xin Lyu and Xuancheng Huang and Yanling Wang and Yadong Xue and Yanfeng Wang and Yifan An and Yifan Du and Yiming Shi and Yiheng Huang and Yilin Niu and Yuan Wang and Yuanchang Yue and Yuchen Li and Yutao Zhang and Yuxuan Zhang and Zhanxiao Du and Zhenyu Hou and Zhao Xue and Zhengxiao Du and Zihan Wang and Peng Zhang and Debing Liu and Bin Xu and Juanzi Li and Minlie Huang and Yuxiao Dong and Jie Tang},
105
+ year={2025},
106
+ eprint={2507.01006},
107
+ archivePrefix={arXiv},
108
+ primaryClass={cs.CV},
109
+ url={https://arxiv.org/abs/2507.01006},
110
+ }
111
+ ```
112
+
awq_marlin.py ADDED
@@ -0,0 +1,526 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ from typing import Any, Callable, Optional
5
+
6
+ import torch
7
+ from torch.nn import Parameter
8
+
9
+ import vllm.model_executor.layers.fused_moe # noqa
10
+ from vllm import _custom_ops as ops
11
+ from vllm.logger import init_logger
12
+ from vllm.model_executor.layers.fused_moe.layer import (
13
+ FusedMoE, FusedMoEMethodBase, FusedMoeWeightScaleSupported,
14
+ UnquantizedFusedMoEMethod)
15
+ from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
16
+ UnquantizedLinearMethod,
17
+ set_weight_attrs)
18
+ from vllm.model_executor.layers.quantization import QuantizationMethods
19
+ from vllm.model_executor.layers.quantization.awq import (AWQConfig,
20
+ is_layer_skipped_awq)
21
+ from vllm.model_executor.layers.quantization.base_config import (
22
+ QuantizationConfig, QuantizeMethodBase)
23
+ from vllm.model_executor.layers.quantization.utils import replace_parameter
24
+ from vllm.model_executor.layers.quantization.utils.marlin_utils import (
25
+ apply_awq_marlin_linear, awq_to_marlin_zero_points, check_marlin_supported,
26
+ check_marlin_supports_layer, check_moe_marlin_supports_layer,
27
+ marlin_make_empty_g_idx, marlin_make_workspace_new,
28
+ marlin_moe_permute_scales, marlin_permute_scales,
29
+ moe_awq_to_marlin_zero_points, verify_marlin_supported,
30
+ verify_marlin_supports_shape)
31
+ from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
32
+ from vllm.model_executor.parameter import (GroupQuantScaleParameter,
33
+ PackedvLLMParameter)
34
+ from vllm.platforms import current_platform
35
+ from vllm.scalar_type import scalar_types
36
+
37
+ logger = init_logger(__name__)
38
+
39
+
40
+ class AWQMarlinConfig(QuantizationConfig):
41
+ """Config class for AWQ Marlin"""
42
+
43
+ # num_bits -> type
44
+ TYPE_MAP = {
45
+ 4: scalar_types.uint4,
46
+ 8: scalar_types.uint8,
47
+ }
48
+
49
+ def __init__(self, weight_bits: int, group_size: int, zero_point: bool,
50
+ lm_head_quantized: bool,
51
+ modules_to_not_convert: Optional[list[str]],
52
+ full_config: dict[str, Any]) -> None:
53
+ super().__init__()
54
+ self.pack_factor = 32 // weight_bits # packed into int32
55
+ self.group_size = group_size
56
+ self.zero_point = zero_point
57
+ self.lm_head_quantized = lm_head_quantized
58
+ self.weight_bits = weight_bits
59
+ self.modules_to_not_convert = modules_to_not_convert or []
60
+ self.full_config = full_config
61
+
62
+ if self.weight_bits not in self.TYPE_MAP:
63
+ raise ValueError(f"Unsupported num_bits = {self.weight_bits}. "
64
+ f"Supported num_bits = {self.TYPE_MAP.keys()}")
65
+
66
+ self.quant_type = self.TYPE_MAP[self.weight_bits]
67
+
68
+ verify_marlin_supported(self.quant_type,
69
+ group_size=self.group_size,
70
+ has_zp=self.zero_point)
71
+
72
+ def __repr__(self) -> str:
73
+ return (f"AWQMarlinConfig(quant_type={self.quant_type}, "
74
+ f"group_size={self.group_size}, "
75
+ f"zero_point={self.zero_point}, "
76
+ f"lm_head_quantized={self.lm_head_quantized}, "
77
+ f"modules_to_not_convert={self.modules_to_not_convert})")
78
+
79
+ @classmethod
80
+ def get_name(cls) -> QuantizationMethods:
81
+ return "awq_marlin"
82
+
83
+ @classmethod
84
+ def get_supported_act_dtypes(cls) -> list[torch.dtype]:
85
+ return [torch.half, torch.bfloat16]
86
+
87
+ @classmethod
88
+ def get_min_capability(cls) -> int:
89
+ return 80
90
+
91
+ @classmethod
92
+ def get_config_filenames(cls) -> list[str]:
93
+ return ["quantize_config.json"]
94
+
95
+ @classmethod
96
+ def from_config(cls, config: dict[str, Any]) -> "AWQMarlinConfig":
97
+ weight_bits = cls.get_from_keys(config, ["bits"])
98
+ group_size = cls.get_from_keys(config, ["group_size"])
99
+ zero_point = cls.get_from_keys(config, ["zero_point"])
100
+ lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"],
101
+ default=False)
102
+ modules_to_not_convert = cls.get_from_keys_or(
103
+ config, ["modules_to_not_convert"], None)
104
+ return cls(weight_bits, group_size, zero_point, lm_head_quantized,
105
+ modules_to_not_convert, config)
106
+
107
+ @classmethod
108
+ def override_quantization_method(
109
+ cls, hf_quant_cfg, user_quant) -> Optional[QuantizationMethods]:
110
+ can_convert = cls.is_awq_marlin_compatible(hf_quant_cfg)
111
+ is_valid_user_quant = (user_quant is None or user_quant == "marlin"
112
+ or user_quant == "awq_marlin")
113
+
114
+ if can_convert and is_valid_user_quant:
115
+ msg = ("The model is convertible to {} during runtime."
116
+ " Using {} kernel.".format(cls.get_name(), cls.get_name()))
117
+ logger.info(msg)
118
+ return cls.get_name()
119
+
120
+ if can_convert and user_quant == "awq":
121
+ logger.info("Detected that the model can run with awq_marlin"
122
+ ", however you specified quantization=awq explicitly,"
123
+ " so forcing awq. Use quantization=awq_marlin for"
124
+ " faster inference")
125
+ return None
126
+
127
+ def get_quant_method(self, layer: torch.nn.Module,
128
+ prefix: str) -> Optional["QuantizeMethodBase"]:
129
+ if (isinstance(layer, LinearBase) or
130
+ (isinstance(layer, ParallelLMHead) and self.lm_head_quantized)):
131
+ if is_layer_skipped_awq(prefix, self.modules_to_not_convert):
132
+ return UnquantizedLinearMethod()
133
+ # Check if the layer is supported by AWQMarlin.
134
+ if not check_marlin_supports_layer(layer, self.group_size):
135
+ logger.warning_once(
136
+ "Layer '%s' is not supported by AWQMarlin. Falling back to unoptimized AWQ kernels.", # noqa: E501
137
+ prefix,
138
+ )
139
+ return AWQConfig.from_config(
140
+ self.full_config).get_quant_method(layer, prefix)
141
+ return AWQMarlinLinearMethod(self)
142
+ elif isinstance(layer, FusedMoE):
143
+ if is_layer_skipped_awq(prefix, getattr(self, "modules_to_not_convert", [])):
144
+ return UnquantizedFusedMoEMethod(layer.moe_config)
145
+ from vllm.model_executor.layers.quantization.moe_wna16 import (
146
+ MoeWNA16Config)
147
+ if not check_moe_marlin_supports_layer(layer, self.group_size):
148
+ logger.warning_once(
149
+ f"Layer '{prefix}' is not supported by AWQMoeMarlin. "
150
+ "Falling back to Moe WNA16 kernels.")
151
+ return MoeWNA16Config.from_config(
152
+ self.full_config).get_quant_method(layer, prefix)
153
+ return AWQMoEMethod(self)
154
+ return None
155
+
156
+ @classmethod
157
+ def is_awq_marlin_compatible(cls, quant_config: dict[str, Any]):
158
+ # Extract data from quant config.
159
+ quant_method = quant_config.get("quant_method", "").lower()
160
+ num_bits = quant_config.get("bits")
161
+ group_size = quant_config.get("group_size")
162
+ zero_point = quant_config.get("zero_point")
163
+
164
+ if not current_platform.is_cuda():
165
+ return False
166
+
167
+ if quant_method != "awq":
168
+ return False
169
+
170
+ # If we cannot find the info needed in the config, cannot convert.
171
+ if (num_bits is None or group_size is None or zero_point is None):
172
+ return False
173
+
174
+ if num_bits not in cls.TYPE_MAP:
175
+ return False
176
+
177
+ return check_marlin_supported(quant_type=cls.TYPE_MAP[num_bits],
178
+ group_size=group_size,
179
+ has_zp=zero_point)
180
+
181
+
182
+ class AWQMarlinLinearMethod(LinearMethodBase):
183
+ """Linear method for AWQ Marlin.
184
+
185
+ Args:
186
+ quant_config: The AWQ Marlin quantization config.
187
+ """
188
+
189
+ def __init__(self, quant_config: AWQMarlinConfig) -> None:
190
+ self.quant_config = quant_config
191
+
192
+ def create_weights(
193
+ self,
194
+ layer: torch.nn.Module,
195
+ input_size_per_partition: int,
196
+ output_partition_sizes: list[int],
197
+ input_size: int,
198
+ output_size: int,
199
+ params_dtype: torch.dtype,
200
+ **extra_weight_attrs,
201
+ ) -> None:
202
+ del output_size
203
+ output_size_per_partition = sum(output_partition_sizes)
204
+ weight_loader = extra_weight_attrs.get("weight_loader")
205
+
206
+ # Normalize group_size
207
+ if self.quant_config.group_size != -1:
208
+ group_size = self.quant_config.group_size
209
+ else:
210
+ group_size = input_size
211
+
212
+ verify_marlin_supports_shape(
213
+ output_size_per_partition=output_size_per_partition,
214
+ input_size_per_partition=input_size_per_partition,
215
+ input_size=input_size,
216
+ group_size=group_size)
217
+
218
+ qweight = PackedvLLMParameter(
219
+ data=torch.empty(
220
+ input_size_per_partition,
221
+ output_size_per_partition // self.quant_config.pack_factor,
222
+ dtype=torch.int32,
223
+ ),
224
+ input_dim=0,
225
+ output_dim=1,
226
+ packed_dim=1,
227
+ packed_factor=self.quant_config.pack_factor,
228
+ weight_loader=weight_loader)
229
+
230
+ num_groups = input_size_per_partition // group_size
231
+
232
+ qzeros = PackedvLLMParameter(
233
+ data=torch.empty(
234
+ num_groups,
235
+ output_size_per_partition // self.quant_config.pack_factor,
236
+ dtype=torch.int32,
237
+ ),
238
+ input_dim=0,
239
+ output_dim=1,
240
+ packed_dim=1,
241
+ packed_factor=self.quant_config.pack_factor,
242
+ weight_loader=weight_loader)
243
+
244
+ scales = GroupQuantScaleParameter(data=torch.empty(
245
+ num_groups,
246
+ output_size_per_partition,
247
+ dtype=params_dtype,
248
+ ),
249
+ input_dim=0,
250
+ output_dim=1,
251
+ weight_loader=weight_loader)
252
+
253
+ layer.register_parameter("qweight", qweight)
254
+ layer.register_parameter("qzeros", qzeros)
255
+ layer.register_parameter("scales", scales)
256
+
257
+ layer.input_size_per_partition = input_size_per_partition
258
+ layer.output_size_per_partition = output_size_per_partition
259
+ layer.num_groups = num_groups
260
+
261
+ # TODO: Update this docs
262
+ # Checkpoints are serialized in AutoAWQ format, which is different from the
263
+ # marlin format. This function is called after the weights are loaded.
264
+ # Here, we handle the repacking
265
+ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
266
+ device = layer.qweight.device
267
+ layer.qweight = torch.nn.Parameter(layer.qweight.data,
268
+ requires_grad=False)
269
+ layer.qzeros = torch.nn.Parameter(layer.qzeros.data,
270
+ requires_grad=False)
271
+ layer.scales = torch.nn.Parameter(layer.scales.data,
272
+ requires_grad=False)
273
+
274
+ # Allocate marlin workspace
275
+ layer.workspace = marlin_make_workspace_new(device)
276
+
277
+ # Repack weights from AWQ format to marlin format.
278
+ marlin_qweight = ops.awq_marlin_repack(
279
+ layer.qweight,
280
+ size_k=layer.input_size_per_partition,
281
+ size_n=layer.output_size_per_partition,
282
+ num_bits=self.quant_config.quant_type.size_bits)
283
+ replace_parameter(layer, "qweight", marlin_qweight)
284
+
285
+ # Permute scales from AWQ format to marlin format.
286
+ marlin_scales = marlin_permute_scales(
287
+ layer.scales,
288
+ size_k=layer.input_size_per_partition,
289
+ size_n=layer.output_size_per_partition,
290
+ group_size=self.quant_config.group_size)
291
+ replace_parameter(layer, "scales", marlin_scales)
292
+
293
+ # Permute zero-points from AWQ format to marlin format.
294
+ marlin_zp = awq_to_marlin_zero_points(
295
+ layer.qzeros,
296
+ size_k=layer.num_groups,
297
+ size_n=layer.output_size_per_partition,
298
+ num_bits=self.quant_config.quant_type.size_bits)
299
+ replace_parameter(layer, "qzeros", marlin_zp)
300
+
301
+ # Not-used
302
+ layer.g_idx = marlin_make_empty_g_idx(device)
303
+ layer.g_idx_sort_indices = marlin_make_empty_g_idx(device)
304
+
305
+ def apply(
306
+ self,
307
+ layer: torch.nn.Module,
308
+ x: torch.Tensor,
309
+ bias: Optional[torch.Tensor] = None,
310
+ ) -> torch.Tensor:
311
+ return apply_awq_marlin_linear(
312
+ input=x,
313
+ weight=layer.qweight,
314
+ weight_scale=layer.scales,
315
+ weight_zp=layer.qzeros,
316
+ g_idx=layer.g_idx,
317
+ g_idx_sort_indices=layer.g_idx_sort_indices,
318
+ workspace=layer.workspace,
319
+ quant_type=self.quant_config.quant_type,
320
+ output_size_per_partition=layer.output_size_per_partition,
321
+ input_size_per_partition=layer.input_size_per_partition,
322
+ bias=bias)
323
+
324
+
325
+ class AWQMoEMethod(FusedMoEMethodBase):
326
+
327
+ def __init__(self, quant_config: AWQMarlinConfig):
328
+ self.quant_config = quant_config
329
+ if self.quant_config.weight_bits != 4:
330
+ raise ValueError("AWQMoEMethod only supports 4bit now.")
331
+ self.quant_type = scalar_types.uint4
332
+
333
+ def create_weights(self, layer: torch.nn.Module, num_experts: int,
334
+ hidden_size: int, intermediate_size_per_partition: int,
335
+ params_dtype: torch.dtype, **extra_weight_attrs):
336
+ extra_weight_attrs.update({
337
+ "is_transposed":
338
+ True,
339
+ "quant_method":
340
+ FusedMoeWeightScaleSupported.GROUP.value,
341
+ })
342
+
343
+ w13_qweight = Parameter(
344
+ torch.empty(num_experts,
345
+ hidden_size,
346
+ 2 * intermediate_size_per_partition //
347
+ self.quant_config.pack_factor,
348
+ dtype=torch.int32),
349
+ requires_grad=False)
350
+ layer.register_parameter("w13_qweight", w13_qweight)
351
+ set_weight_attrs(w13_qweight, extra_weight_attrs)
352
+
353
+ w2_qweight = Parameter(torch.empty(num_experts,
354
+ intermediate_size_per_partition,
355
+ hidden_size //
356
+ self.quant_config.pack_factor,
357
+ dtype=torch.int32),
358
+ requires_grad=False)
359
+ layer.register_parameter("w2_qweight", w2_qweight)
360
+ set_weight_attrs(w2_qweight, extra_weight_attrs)
361
+
362
+ num_groups_w13 = hidden_size // self.quant_config.group_size
363
+ num_groups_w2 = (intermediate_size_per_partition //
364
+ self.quant_config.group_size)
365
+
366
+ # WEIGHT_SCALES
367
+ # Allocate 2 scales for w1 and w3 respectively.
368
+ w13_scales = Parameter(torch.empty(num_experts,
369
+ num_groups_w13,
370
+ intermediate_size_per_partition * 2,
371
+ dtype=params_dtype),
372
+ requires_grad=False)
373
+ layer.register_parameter("w13_scales", w13_scales)
374
+ set_weight_attrs(w13_scales, extra_weight_attrs)
375
+
376
+ w2_scales = Parameter(torch.empty(num_experts,
377
+ num_groups_w2,
378
+ hidden_size,
379
+ dtype=params_dtype),
380
+ requires_grad=False)
381
+ layer.register_parameter("w2_scales", w2_scales)
382
+ set_weight_attrs(w2_scales, extra_weight_attrs)
383
+
384
+ # WEIGHT_ZERO_POINT
385
+ # Allocate 2 zero points for w1 and w3 respectively.
386
+ w13_qzeros = Parameter(
387
+ torch.empty(num_experts,
388
+ num_groups_w13,
389
+ 2 * intermediate_size_per_partition //
390
+ self.quant_config.pack_factor,
391
+ dtype=torch.int32),
392
+ requires_grad=False)
393
+ layer.register_parameter("w13_qzeros", w13_qzeros)
394
+ set_weight_attrs(w13_qzeros, extra_weight_attrs)
395
+
396
+ w2_qzeros = Parameter(torch.empty(num_experts,
397
+ num_groups_w2,
398
+ hidden_size //
399
+ self.quant_config.pack_factor,
400
+ dtype=torch.int32),
401
+ requires_grad=False)
402
+ layer.register_parameter("w2_qzeros", w2_qzeros)
403
+ set_weight_attrs(w2_qzeros, extra_weight_attrs)
404
+
405
+ device = layer.w13_qweight.device
406
+ layer.workspace = marlin_make_workspace_new(device, 4)
407
+
408
+ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
409
+ num_experts = layer.w13_qweight.shape[0]
410
+ device = layer.w13_qweight.device
411
+
412
+ layer.w13_g_idx_sort_indices = torch.nn.Parameter(
413
+ torch.empty((num_experts, 0), dtype=torch.int32, device=device),
414
+ requires_grad=False,
415
+ )
416
+ layer.w2_g_idx_sort_indices = torch.nn.Parameter(
417
+ torch.empty((num_experts, 0), dtype=torch.int32, device=device),
418
+ requires_grad=False,
419
+ )
420
+
421
+ marlin_w13_qweight = ops.awq_marlin_moe_repack(
422
+ layer.w13_qweight,
423
+ layer.w13_g_idx_sort_indices,
424
+ size_k=layer.w13_qweight.shape[1],
425
+ size_n=layer.w13_qweight.shape[2] * self.quant_config.pack_factor,
426
+ num_bits=self.quant_config.weight_bits,
427
+ )
428
+ replace_parameter(layer, "w13_qweight", marlin_w13_qweight)
429
+
430
+ marlin_w2_qweight = ops.awq_marlin_moe_repack(
431
+ layer.w2_qweight,
432
+ layer.w2_g_idx_sort_indices,
433
+ size_k=layer.w2_qweight.shape[1],
434
+ size_n=layer.w2_qweight.shape[2] * self.quant_config.pack_factor,
435
+ num_bits=self.quant_config.weight_bits,
436
+ )
437
+ replace_parameter(layer, "w2_qweight", marlin_w2_qweight)
438
+
439
+ # Why does this take the intermediate size for size_k?
440
+ marlin_w13_scales = marlin_moe_permute_scales(
441
+ s=layer.w13_scales,
442
+ size_k=layer.intermediate_size_per_partition,
443
+ size_n=layer.w13_scales.shape[2],
444
+ group_size=self.quant_config.group_size,
445
+ )
446
+
447
+ replace_parameter(layer, "w13_scales", marlin_w13_scales)
448
+
449
+ marlin_w2_scales = marlin_moe_permute_scales(
450
+ s=layer.w2_scales,
451
+ size_k=layer.intermediate_size_per_partition,
452
+ size_n=layer.w2_scales.shape[2],
453
+ group_size=self.quant_config.group_size,
454
+ )
455
+ replace_parameter(layer, "w2_scales", marlin_w2_scales)
456
+
457
+ marlin_w13_zp = moe_awq_to_marlin_zero_points(
458
+ layer.w13_qzeros,
459
+ size_k=layer.w13_qzeros.shape[1],
460
+ size_n=layer.w13_qzeros.shape[2] * self.quant_config.pack_factor,
461
+ num_bits=self.quant_config.weight_bits)
462
+ replace_parameter(layer, "w13_qzeros", marlin_w13_zp)
463
+
464
+ marlin_w2_zp = moe_awq_to_marlin_zero_points(
465
+ layer.w2_qzeros,
466
+ size_k=layer.w2_qzeros.shape[1],
467
+ size_n=layer.w2_qzeros.shape[2] * self.quant_config.pack_factor,
468
+ num_bits=self.quant_config.weight_bits)
469
+ replace_parameter(layer, "w2_qzeros", marlin_w2_zp)
470
+
471
+ def apply(
472
+ self,
473
+ layer: torch.nn.Module,
474
+ x: torch.Tensor,
475
+ router_logits: torch.Tensor,
476
+ top_k: int,
477
+ renormalize: bool,
478
+ use_grouped_topk: bool = False,
479
+ topk_group: Optional[int] = None,
480
+ num_expert_group: Optional[int] = None,
481
+ global_num_experts: int = -1,
482
+ expert_map: Optional[torch.Tensor] = None,
483
+ custom_routing_function: Optional[Callable] = None,
484
+ scoring_func: str = "softmax",
485
+ e_score_correction_bias: Optional[torch.Tensor] = None,
486
+ apply_router_weight_on_input: bool = False,
487
+ activation: str = "silu",
488
+ enable_eplb: bool = False,
489
+ expert_load_view: Optional[torch.Tensor] = None,
490
+ logical_to_physical_map: Optional[torch.Tensor] = None,
491
+ logical_replica_count: Optional[torch.Tensor] = None,
492
+ ) -> torch.Tensor:
493
+ if enable_eplb:
494
+ raise NotImplementedError(
495
+ "EPLB not supported for `AWQMoEMethod` yet.")
496
+
497
+ assert activation == "silu", "Only SiLU activation is supported."
498
+
499
+ topk_weights, topk_ids = FusedMoE.select_experts(
500
+ hidden_states=x,
501
+ router_logits=router_logits,
502
+ use_grouped_topk=use_grouped_topk,
503
+ top_k=top_k,
504
+ renormalize=renormalize,
505
+ topk_group=topk_group,
506
+ num_expert_group=num_expert_group,
507
+ custom_routing_function=custom_routing_function,
508
+ scoring_func=scoring_func,
509
+ e_score_correction_bias=e_score_correction_bias)
510
+
511
+ return torch.ops.vllm.fused_marlin_moe(
512
+ x,
513
+ layer.w13_qweight,
514
+ layer.w2_qweight,
515
+ layer.w13_scales,
516
+ layer.w2_scales,
517
+ router_logits,
518
+ topk_weights,
519
+ topk_ids,
520
+ quant_type_id=self.quant_type.id,
521
+ apply_router_weight_on_input=apply_router_weight_on_input,
522
+ global_num_experts=global_num_experts,
523
+ expert_map=expert_map,
524
+ w1_zeros=layer.w13_qzeros,
525
+ w2_zeros=layer.w2_qzeros,
526
+ workspace=layer.workspace)
chat_template.jinja ADDED
@@ -0,0 +1,118 @@
1
+ [gMASK]<sop>
2
+ {%- if tools -%}
3
+ <|system|>
4
+ # Tools
5
+
6
+ You may call one or more functions to assist with the user query.
7
+
8
+ You are provided with function signatures within <tools></tools> XML tags:
9
+ <tools>
10
+ {% for tool in tools %}
11
+ {{ tool | tojson(ensure_ascii=False) }}
12
+ {% endfor %}
13
+ </tools>
14
+
15
+ For each function call, output the function name and arguments within the following XML format:
16
+ <tool_call>{function-name}
17
+ <arg_key>{arg-key-1}</arg_key>
18
+ <arg_value>{arg-value-1}</arg_value>
19
+ <arg_key>{arg-key-2}</arg_key>
20
+ <arg_value>{arg-value-2}</arg_value>
21
+ ...
22
+ </tool_call>{%- endif -%}
23
+ {%- macro visible_text(content) -%}
24
+ {%- if content is string -%}
25
+ {{- content }}
26
+ {%- elif content is iterable and content is not mapping -%}
27
+ {%- for item in content -%}
28
+ {%- if item is mapping and item.type == 'text' -%}
29
+ {{- item.text }}
30
+ {%- elif item is mapping and (item.type == 'image' or 'image' in item) -%}
31
+ <|begin_of_image|><|image|><|end_of_image|>
32
+ {%- elif item is mapping and (item.type == 'video' or 'video' in item) -%}
33
+ <|begin_of_video|><|video|><|end_of_video|>
34
+ {%- elif item is string -%}
35
+ {{- item }}
36
+ {%- endif -%}
37
+ {%- endfor -%}
38
+ {%- else -%}
39
+ {{- content }}
40
+ {%- endif -%}
41
+ {%- endmacro -%}
42
+ {%- set ns = namespace(last_user_index=-1) %}
43
+ {%- for m in messages %}
44
+ {%- if m.role == 'user' %}
45
+ {% set ns.last_user_index = loop.index0 -%}
46
+ {%- endif %}
47
+ {%- endfor %}
48
+ {% for m in messages %}
49
+ {%- if m.role == 'user' -%}<|user|>
50
+ {% if m.content is string %}
51
+ {{ m.content }}
52
+ {%- else %}
53
+ {%- for item in m.content %}
54
+ {% if item.type == 'video' or 'video' in item %}
55
+ <|begin_of_video|><|video|><|end_of_video|>{% elif item.type == 'image' or 'image' in item %}
56
+ <|begin_of_image|><|image|><|end_of_image|>{% elif item.type == 'text' %}
57
+ {{ item.text }}
58
+ {%- endif %}
59
+ {%- endfor %}
60
+ {%- endif %}
61
+ {{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
62
+ {%- elif m.role == 'assistant' -%}
63
+ <|assistant|>
64
+ {%- set reasoning_content = '' %}
65
+ {%- set content = visible_text(m.content) %}
66
+ {%- if m.reasoning_content is string %}
67
+ {%- set reasoning_content = m.reasoning_content %}
68
+ {%- else %}
69
+ {%- if '</think>' in content %}
70
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
71
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
72
+ {%- endif %}
73
+ {%- endif %}
74
+ {%- if loop.index0 > ns.last_user_index and reasoning_content -%}
75
+ {{ '\n<think>' + reasoning_content.strip() + '</think>'}}
76
+ {%- else -%}
77
+ {{ '\n<think></think>' }}
78
+ {%- endif -%}
79
+ {%- if content.strip() -%}
80
+ {{ '\n' + content.strip() }}
81
+ {%- endif -%}
82
+ {% if m.tool_calls %}
83
+ {% for tc in m.tool_calls %}
84
+ {%- if tc.function %}
85
+ {%- set tc = tc.function %}
86
+ {%- endif %}
87
+ {{ '\n<tool_call>' + tc.name }}
88
+ {% set _args = tc.arguments %}
89
+ {% for k, v in _args.items() %}
90
+ <arg_key>{{ k }}</arg_key>
91
+ <arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
92
+ {% endfor %}
93
+ </tool_call>{% endfor %}
94
+ {% endif %}
95
+ {%- elif m.role == 'tool' -%}
96
+ {%- if m.content is string -%}
97
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
98
+ {{- '<|observation|>' }}
99
+ {%- endif %}
100
+ {{- '\n<tool_response>\n' }}
101
+ {{- m.content }}
102
+ {{- '\n</tool_response>' }}
103
+ {%- else -%}
104
+ <|observation|>{% for tr in m.content %}
105
+
106
+ <tool_response>
107
+ {{ tr.output if tr.output is defined else tr }}
108
+ </tool_response>{% endfor -%}
109
+ {% endif -%}
110
+ {%- elif m.role == 'system' -%}
111
+ <|system|>
112
+ {{ visible_text(m.content) }}
113
+ {%- endif -%}
114
+ {%- endfor -%}
115
+ {%- if add_generation_prompt -%}
116
+ <|assistant|>
117
+ {{'<think></think>\n' if (enable_thinking is defined and not enable_thinking) else ''}}
118
+ {%- endif -%}
config.json ADDED
@@ -0,0 +1,85 @@
1
+ {
2
+ "name_or_path": "tclf90/GLM-4.5V-AWQ",
3
+ "architectures": [
4
+ "Glm4vMoeForConditionalGeneration"
5
+ ],
6
+ "model_type": "glm4v_moe",
7
+ "text_config": {
8
+ "pad_token_id": 151329,
9
+ "vocab_size": 151552,
10
+ "eos_token_id": [
11
+ 151329,
12
+ 151336,
13
+ 151338
14
+ ],
15
+ "image_end_token_id": 151340,
16
+ "image_start_token_id": 151339,
17
+ "image_token_id": 151363,
18
+ "head_dim": 128,
19
+ "attention_bias": true,
20
+ "attention_dropout": 0.0,
21
+ "first_k_dense_replace": 1,
22
+ "hidden_act": "silu",
23
+ "hidden_size": 4096,
24
+ "initializer_range": 0.02,
25
+ "intermediate_size": 11264,
26
+ "max_position_embeddings": 65536,
27
+ "model_type": "glm4v_moe_text",
28
+ "moe_intermediate_size": 1408,
29
+ "n_group": 1,
30
+ "n_routed_experts": 128,
31
+ "n_shared_experts": 1,
32
+ "norm_topk_prob": true,
33
+ "num_attention_heads": 96,
34
+ "num_experts_per_tok": 8,
35
+ "num_hidden_layers": 46,
36
+ "num_key_value_heads": 8,
37
+ "partial_rotary_factor": 0.5,
38
+ "rms_norm_eps": 1e-05,
39
+ "torch_dtype": "bfloat16",
40
+ "rope_scaling": {
41
+ "rope_type": "default",
42
+ "mrope_section": [
43
+ 8,
44
+ 12,
45
+ 12
46
+ ]
47
+ },
48
+ "rope_theta": 10000.0,
49
+ "routed_scaling_factor": 1.0,
50
+ "topk_group": 1,
51
+ "use_cache": true,
52
+ "use_qk_norm": false
53
+ },
54
+ "torch_dtype": "float16",
55
+ "transformers_version": "4.55.0.dev0",
56
+ "video_end_token_id": 151342,
57
+ "video_start_token_id": 151341,
58
+ "video_token_id": 151364,
59
+ "vision_config": {
60
+ "attention_bias": false,
61
+ "attention_dropout": 0.0,
62
+ "depth": 24,
63
+ "hidden_act": "silu",
64
+ "hidden_size": 1536,
65
+ "image_size": 336,
66
+ "in_channels": 3,
67
+ "initializer_range": 0.02,
68
+ "intermediate_size": 11264,
69
+ "model_type": "glm4v_moe",
70
+ "num_heads": 12,
71
+ "out_hidden_size": 4096,
72
+ "patch_size": 14,
73
+ "rms_norm_eps": 1e-05,
74
+ "spatial_merge_size": 2,
75
+ "temporal_patch_size": 2
76
+ },
77
+ "quantization_config": {
78
+ "quant_method": "awq_marlin",
79
+ "bits": 4,
80
+ "group_size": 128,
81
+ "version": "gemm",
82
+ "zero_point": true,
83
+ "modules_to_not_convert": ["blocks.", "model.embed_tokens", "model.layers.0.mlp.shared_experts.", "model.layers.1.mlp.shared_experts.", "model.layers.2.mlp.shared_experts.", "model.layers.3.mlp.shared_experts.", "model.layers.4.mlp.shared_experts.", "model.layers.5.mlp.shared_experts.", "model.layers.6.mlp.shared_experts.", "model.layers.7.mlp.shared_experts.", "model.layers.8.mlp.shared_experts.", "model.layers.9.mlp.shared_experts.", "model.layers.10.mlp.shared_experts.", "model.layers.11.mlp.shared_experts.", "model.layers.12.mlp.shared_experts.", "model.layers.13.mlp.shared_experts.", "model.layers.14.mlp.shared_experts.", "model.layers.15.mlp.shared_experts.", "model.layers.16.mlp.shared_experts.", "model.layers.17.mlp.shared_experts.", "model.layers.18.mlp.shared_experts.", "model.layers.19.mlp.shared_experts.", "model.layers.20.mlp.shared_experts.", "model.layers.21.mlp.shared_experts.", "model.layers.22.mlp.shared_experts.", "model.layers.23.mlp.shared_experts.", "model.layers.24.mlp.shared_experts.", "model.layers.25.mlp.shared_experts.", "model.layers.26.mlp.shared_experts.", "model.layers.27.mlp.shared_experts.", "model.layers.28.mlp.shared_experts.", "model.layers.29.mlp.shared_experts.", "model.layers.30.mlp.shared_experts.", "model.layers.31.mlp.shared_experts.", "model.layers.32.mlp.shared_experts.", "model.layers.33.mlp.shared_experts.", "model.layers.34.mlp.shared_experts.", "model.layers.35.mlp.shared_experts.", "model.layers.36.mlp.shared_experts.", "model.layers.37.mlp.shared_experts.", "model.layers.38.mlp.shared_experts.", "model.layers.39.mlp.shared_experts.", "model.layers.40.mlp.shared_experts.", "model.layers.41.mlp.shared_experts.", "model.layers.42.mlp.shared_experts.", "model.layers.43.mlp.shared_experts.", "model.layers.44.mlp.shared_experts.", "model.layers.45.mlp.shared_experts.", "lm_head"]
84
+ }
85
+ }
configuration.json ADDED
@@ -0,0 +1 @@
+ {"framework":"Pytorch","task":"image-text-to-text"}
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_from_model_config": true,
+   "do_sample": true,
+   "eos_token_id": [
+     151329,
+     151336,
+     151338
+   ],
+   "pad_token_id": 151329,
+   "temperature": 1.0,
+   "top_k": 1,
+   "top_p": 0.0001,
+   "transformers_version": "4.55.0.dev"
+ }
model-00001-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd4c85fbb499a58ffd06ce52729a7f0be18f8089b5c04532d4b2ea4e3d64db1a
3
+ size 2998317112
model-00002-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69960b4e8403009cf6563dd56e94d466772dfda49b4957781b927ecd62d3bd40
3
+ size 2998549936
model-00003-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a522fe22ae1930a3f5add3f4d99ff854ff7b1936f70db759d35f8e250d08833e
3
+ size 2998010304
model-00004-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbb2355675d38d695fe01755c938dcbe4c443f4bd56d9db05232b14e5ceec0e1
3
+ size 2998550688
model-00005-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1732bb579bd643b9c18fd7c69750e03a3d20fb9b2ea44390a70fae3c0e5a2c4c
3
+ size 2997439264
model-00006-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e38a7349a6ba5093010091947537ca81da523dcddc00ff1c78ab2aee67534be
3
+ size 2999126848
model-00007-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aba3f758896d4180c9ded0f69790c5149af043f7544bd58ec96e21d31152f396
3
+ size 2998552792
model-00008-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3433599f8939e6bf74625c50d010f10d61d5860f4b51448cb99b797a8e64a794
3
+ size 2998013176
model-00009-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b56a1fa3e14ed35bcda0381039353d7906b3da0ab823a3a47e0ffa0b29bd583
3
+ size 2998552864
model-00010-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6e4e32b56aa910fe758c9477039eff1c32a3661b7aa85bd348df48322582e42
3
+ size 2998012944
model-00011-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be3f6f0c69833df2ea44922af76007aa00f825019573692a7110e24ba5cea979
3
+ size 2998553016
model-00012-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c2039acb9a345e71e808b02810b842bd99a10653c63124adf783317ecfd5056b
3
+ size 2998552800
model-00013-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11c2a1eed091394e4f07141874aca42fa4827e591760d249d243cab0f6f1a5c4
3
+ size 2998013224
model-00014-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6cdd344bcdb2d0317bde77f32be4e0a69f339edd61e9e1316880f8108f9976f
3
+ size 2998552800
model-00015-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1da929b5e622a26f2481fc6a57dab58b81f2a01d6b81222bfe0e261be1c2ab0
3
+ size 2998013032
model-00016-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:155fa8798ea6b4b93fc67c372609c11f7030dcca8f3fb8f809a90fa3959259d1
3
+ size 2998552944
model-00017-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc4237ea97c14fafd482da3f04b3ed7d35dcb0277c1e5dd6bdaf8a3f44f1235b
3
+ size 2998552800
model-00018-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a17d30f0a5348c18bfcb9fe8f10c6197affb04781683a2d6a7222cbf8b064ec3
3
+ size 2998013288
model-00019-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:386a445a7ff7bc9e89512fb81d5f857bffbac409446c8b6ff1cb7e9be2b94cc4
3
+ size 2776117192
model-00020-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75882d3df123c824ae9d61c4e13e647b4930e7053562f57d37f9e36ef10fde99
3
+ size 2997425744
model-00021-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d94cd20831a9380e80ec17b587c4040461127752ebffbdfdad27560d927a6e13
3
+ size 531298968
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "size": {"shortest_edge": 12544, "longest_edge": 9633792},
+   "do_rescale": true,
+   "patch_size": 14,
+   "temporal_patch_size": 2,
+   "merge_size": 2,
+   "image_mean": [0.48145466, 0.4578275, 0.40821073],
+   "image_std": [0.26862954, 0.26130258, 0.27577711],
+   "image_processor_type": "Glm4vImageProcessor",
+   "processor_class": "Glm4vProcessor"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9340665016419c825c4bdabbcc9acc43b7ca2c68ce142724afa829abb1be5efd
3
+ size 19970699
tokenizer_config.json ADDED
@@ -0,0 +1,325 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "151329": {
4
+ "content": "<|endoftext|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "151330": {
12
+ "content": "[MASK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "151331": {
20
+ "content": "[gMASK]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "151332": {
28
+ "content": "[sMASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "151333": {
36
+ "content": "<sop>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "151334": {
44
+ "content": "<eop>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "151335": {
52
+ "content": "<|system|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "151336": {
60
+ "content": "<|user|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "151337": {
68
+ "content": "<|assistant|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "151338": {
76
+ "content": "<|observation|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "151339": {
84
+ "content": "<|begin_of_image|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "151340": {
92
+ "content": "<|end_of_image|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "151341": {
100
+ "content": "<|begin_of_video|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "151342": {
108
+ "content": "<|end_of_video|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "151343": {
116
+ "content": "<|begin_of_audio|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "151344": {
124
+ "content": "<|end_of_audio|>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "151345": {
132
+ "content": "<|begin_of_transcription|>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "151346": {
140
+ "content": "<|end_of_transcription|>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "151347": {
148
+ "content": "<|code_prefix|>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "151348": {
156
+ "content": "<|code_middle|>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "151349": {
164
+ "content": "<|code_suffix|>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "151350": {
172
+ "content": "<think>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": false
178
+ },
179
+ "151351": {
180
+ "content": "</think>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": false
186
+ },
187
+ "151352": {
188
+ "content": "<tool_call>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": false
194
+ },
195
+ "151353": {
196
+ "content": "</tool_call>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": false
202
+ },
203
+ "151354": {
204
+ "content": "<tool_response>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": false
210
+ },
211
+ "151355": {
212
+ "content": "</tool_response>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": false
218
+ },
219
+ "151356": {
220
+ "content": "<arg_key>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": false
226
+ },
227
+ "151357": {
228
+ "content": "</arg_key>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": false
234
+ },
235
+ "151358": {
236
+ "content": "<arg_value>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": false
242
+ },
243
+ "151359": {
244
+ "content": "</arg_value>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": false
250
+ },
251
+ "151360": {
252
+ "content": "/nothink",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "151361": {
260
+ "content": "<|begin_of_box|>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": false
266
+ },
267
+ "151362": {
268
+ "content": "<|end_of_box|>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": false
274
+ },
275
+ "151363": {
276
+ "content": "<|image|>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": false
282
+ },
283
+ "151364": {
284
+ "content": "<|video|>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": false
290
+ }
291
+ },
292
+ "additional_special_tokens": [
293
+ "<|endoftext|>",
294
+ "[MASK]",
295
+ "[gMASK]",
296
+ "[sMASK]",
297
+ "<sop>",
298
+ "<eop>",
299
+ "<|system|>",
300
+ "<|user|>",
301
+ "<|assistant|>",
302
+ "<|observation|>",
303
+ "<|begin_of_image|>",
304
+ "<|end_of_image|>",
305
+ "<|begin_of_video|>",
306
+ "<|end_of_video|>",
307
+ "<|begin_of_audio|>",
308
+ "<|end_of_audio|>",
309
+ "<|begin_of_transcription|>",
310
+ "<|end_of_transcription|>",
311
+ "<|code_prefix|>",
312
+ "<|code_middle|>",
313
+ "<|code_suffix|>",
314
+ "/nothink"
315
+ ],
316
+ "clean_up_tokenization_spaces": false,
317
+ "do_lower_case": false,
318
+ "eos_token": "<|endoftext|>",
319
+ "extra_special_tokens": {},
320
+ "model_max_length": 128000,
321
+ "pad_token": "<|endoftext|>",
322
+ "padding_side": "left",
323
+ "remove_space": false,
324
+ "tokenizer_class": "PreTrainedTokenizer"
325
+ }
video_preprocessor_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "size": {"shortest_edge": 12544, "longest_edge": 47040000},
+   "do_rescale": true,
+   "patch_size": 14,
+   "temporal_patch_size": 2,
+   "merge_size": 2,
+   "image_mean": [0.48145466, 0.4578275, 0.40821073],
+   "image_std": [0.26862954, 0.26130258, 0.27577711],
+   "video_processor_type": "Glm4vVideoProcessor",
+   "processor_class": "Glm4vProcessor"
+ }