{ "_name_or_path": "/home/chentianqi/model/GSAI-ML/LLaDA-8B-Instruct", "activation_type": "silu", "alibi": false, "alibi_bias_max": 8.0, "architectures": [ "LLaDAModelLM" ], "attention_dropout": 0.0, "attention_layer_norm": false, "attention_layer_norm_with_affine": true, "auto_map": { "AutoConfig": "configuration_llada.LLaDAConfig", "AutoModel": "modeling_llada.LLaDAModelLM", "AutoModelForCausalLM": "modeling_llada.LLaDAModelLM" }, "bias_for_layer_norm": false, "block_group_size": 1, "block_type": "llama", "d_model": 4096, "embedding_dropout": 0.0, "embedding_size": 126464, "eos_token_id": 126081, "flash_attention": false, "include_bias": false, "include_qkv_bias": false, "init_cutoff_factor": null, "init_device": "meta", "init_fn": "mitchell", "init_std": 0.02, "input_emb_norm": false, "layer_norm_type": "rms", "layer_norm_with_affine": true, "mask_token_id": 126336, "max_sequence_length": 4096, "mlp_hidden_size": 12288, "mlp_ratio": 4, "model_type": "llada", "multi_query_attention": null, "n_heads": 32, "n_kv_heads": 32, "n_layers": 32, "pad_token_id": 126081, "precision": "amp_bf16", "quantization_config": { "bits": 4, "checkpoint_format": "gptq", "desc_act": true, "group_size": 128, "lm_head": false, "meta": { "damp_auto_increment": 0.0025, "damp_percent": 0.01, "mse": 0.0, "quantizer": [ "gptqmodel:2.0.0-dev" ], "static_groups": false, "true_sequential": true, "uri": "https://github.com/modelcloud/gptqmodel" }, "pack_dtype": "int32", "quant_method": "gptq", "sym": false }, "residual_dropout": 0.0, "rms_norm_eps": 1e-05, "rope": true, "rope_full_precision": true, "rope_theta": 500000.0, "scale_logits": false, "torch_dtype": "bfloat16", "transformers_version": "4.38.2", "use_cache": false, "vocab_size": 126464, "weight_tying": false }