# NOTE(review): removed Hugging Face Spaces status banner ("Spaces:" /
# "Runtime error" x2) that was pasted into the source file — UI residue, not code.
"""Local reasoning agent using TinyLlama.""" | |
from transformers import AutoTokenizer, AutoModelForCausalLM | |
import torch | |
class ReasoningAgent:
    """Local reasoning agent using TinyLlama with chain-of-thought prompting."""
def __init__(self, model_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0"): | |
"""Initialize the agent with local model.""" | |
# Load model and tokenizer | |
self.tokenizer = AutoTokenizer.from_pretrained(model_name) | |
self.model = AutoModelForCausalLM.from_pretrained( | |
model_name, | |
torch_dtype=torch.float16, | |
device_map="auto" | |
) | |
def get_response(self, message: str) -> str: | |
"""Generate response using local model with chain of thought prompting.""" | |
try: | |
# Format prompt with chain of thought structure | |
prompt = f"""<|system|> | |
You are a helpful AI assistant that uses chain of thought reasoning to answer questions. | |
For each response, break down your thinking into steps before giving the final answer. | |
</s> | |