# list_modules.py
"""List module names of a causal LM that look like projection/attention layers.

Handy for discovering candidate ``target_modules`` names (e.g. for LoRA
fine-tuning) before writing a training config.

Usage:
    python list_modules.py [model_path]

If no path is given, falls back to the hard-coded default checkpoint.
"""
import sys

from transformers import AutoModelForCausalLM

# Default checkpoint; override by passing a path as the first CLI argument.
DEFAULT_MODEL_PATH = "/home/yq238/project_pi_aaa247/yq238/qwen_training/models/Qwen-7B-Chat"


def main(model_path: str = DEFAULT_MODEL_PATH) -> None:
    """Load the model at *model_path* and print matching module names.

    Prints two groups: names containing 'proj', and names containing 'attn'
    that also contain one of the q_/k_/v_/o_ prefixes.
    """
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype="auto",
    )

    # Walk the module tree once and keep only the names; the module objects
    # themselves are never used by either filter below.
    names = [name for name, _ in model.named_modules()]

    print("🔍 模型中包含 'proj' 的模块名:")
    for name in names:
        if "proj" in name.lower():
            print(name)

    print("\n🔍 模型中包含 'attn' 的模块名(可能包含注意力层):")
    # NOTE(review): Qwen-7B-Chat names its attention projections c_attn/c_proj
    # rather than q_proj/k_proj/... — this filter may print nothing for that
    # model; verify against the 'proj' listing above.
    for name in names:
        if "attn" in name.lower() and any(x in name for x in ("q_", "k_", "v_", "o_")):
            print(name)


if __name__ == "__main__":
    # Guarded entry point: importing this file must not trigger a multi-GB
    # model load as a side effect.
    main(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_MODEL_PATH)