INFERENCE
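The snippet below loads the model and tokenizer, then runs an interactive chat loop using ChatML-style turn markers (the format visible in the prompt strings). Type 'break' at the question prompt to exit.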
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("AquilaX-AI/QnA-prev")
model = AutoModelForCausalLM.from_pretrained("AquilaX-AI/QnA-prev")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# ChatML system prompt; every conversation turn is appended after this header.
prompt = """<|im_start|>system
You are Securitron, a helpful AI assistant specialized in providing accurate and professional responses. Always prioritize clarity and precision in your answers.<|im_end|>
"""

conversation_history = []
while True:
    user_prompt = input("\nUser Question: ")
    if user_prompt.lower() == 'break':
        break

    # Wrap the question in ChatML tags and open the assistant turn.
    user = f"""<|im_start|>user
{user_prompt}<|im_end|>
<|im_start|>assistant"""
    conversation_history.append(user)

    # Keep only the last 5 history entries to bound the prompt length.
    conversation_history = conversation_history[-5:]
    current_prompt = prompt + "\n".join(conversation_history)

    encodeds = tokenizer(current_prompt, return_tensors="pt", truncation=True).input_ids.to(device)

    # Stream tokens to stdout as they are generated, skipping the prompt echo.
    text_streamer = TextStreamer(tokenizer, skip_prompt=True)

    response = model.generate(
        input_ids=encodeds,
        streamer=text_streamer,
        max_new_tokens=512,
        use_cache=True,
        pad_token_id=151645,  # <|im_end|> in the Qwen-style vocabulary
        eos_token_id=151645,
        num_return_sequences=1,
    )

    # Extract the new assistant reply and append it to the history.
    reply = tokenizer.decode(response[0]).split('<|im_start|>assistant')[-1].split('<|im_end|>')[0].strip()
    conversation_history.append(reply + "<|im_end|>")
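If the tokenizer ships a ChatML chat template (an assumption worth verifying; the hand-built tags above work regardless), the same prompt can be assembled with transformers' apply_chat_template instead of manual string formatting. A minimal single-turn sketch:

# Minimal sketch, assuming AquilaX-AI/QnA-prev's tokenizer defines a
# ChatML chat template; verify before relying on it.
messages = [
    {"role": "system", "content": "You are Securitron, a helpful AI assistant specialized in providing accurate and professional responses. Always prioritize clarity and precision in your answers."},
    {"role": "user", "content": "What is SQL injection?"},  # example question
]
encodeds = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # appends the opening <|im_start|>assistant tag
    return_tensors="pt",
).to(device)
response = model.generate(input_ids=encodeds, max_new_tokens=512)
# Decode only the newly generated tokens, dropping the prompt.
print(tokenizer.decode(response[0][encodeds.shape[-1]:], skip_special_tokens=True))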