Error: type object 'ModelConfig' has no attribute '__match_args__'

#2
by Sakura77 - opened

I receive the error `type object 'ModelConfig' has no attribute '__match_args__'`

when running this from a Jupyter notebook:

```python
from hf_olmo import *  # registers the Auto* classes
from transformers import AutoModelForCausalLM, AutoTokenizer

olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-7B")
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B")

message = ["Language modeling is "]
inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
response = olmo.generate(**inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
print(tokenizer.batch_decode(response, skip_special_tokens=True)[0])
```

I get the same error when running it locally:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Path to the locally saved model
local_model_path = "G:/LLM/olmo"

# Check for CUDA availability, and use CPU as fallback
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cpu":
    print("CUDA not available, using CPU instead.")
else:
    print("Using CUDA.")

# Load the model and tokenizer from the local path with trust_remote_code=True
model = AutoModelForCausalLM.from_pretrained(local_model_path, trust_remote_code=True).to(device)
tokenizer = AutoTokenizer.from_pretrained(local_model_path, trust_remote_code=True)

message = ["Language modeling is "]
inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False).to(device)
response = model.generate(**inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
print(tokenizer.batch_decode(response, skip_special_tokens=True)[0])
```

Ai2 org

What is your compute environment? Is it MPS?
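If it helps to answer that, here is a generic sketch for gathering the environment details (not an official command, just one way to collect them):

```python
import sys
import platform
import torch

# Print basic environment info to include in a report.
print("OS:", platform.platform())
print("Python:", sys.version.split()[0])
print("PyTorch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
# MPS is the Apple-silicon backend; guard the check for older torch builds.
mps = getattr(torch.backends, "mps", None)
print("MPS available:", bool(mps) and torch.backends.mps.is_available())
```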

Ai2 org
This comment has been hidden

Thanks for the above link, it works perfectly. I initially tried from Anaconda 3 with Python 3.9 (Windows env), and it looks like there is an issue with my current environment; running now on Ubuntu, it works great :)
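For anyone hitting this later, a plausible explanation (my inference, not confirmed in this thread): `__match_args__` is only auto-generated for dataclasses on Python 3.10+, so code that reads it from a config dataclass fails under Python 3.9. A quick check you can run in the failing environment, using a hypothetical stand-in class:

```python
import sys
from dataclasses import dataclass

@dataclass
class DummyConfig:  # stand-in for illustration, not the real hf_olmo ModelConfig
    d_model: int = 0

print(sys.version_info[:3])
# Auto-generated for dataclasses only on Python >= 3.10:
print(getattr(DummyConfig, "__match_args__", "missing on this Python version"))
```

On Python 3.9 this prints the fallback string; on 3.10+ it prints `('d_model',)`, which matches the reporter's experience that moving off Python 3.9 resolved the error.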
