import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Path to the fine-tuned model
model_path = "./fine_tuned_model"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)
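# Optional sketch (not part of the original script): on memory-constrained
# GPUs the model can instead be loaded in half precision, e.g.
# model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16)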
# Create chatbot pipeline
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,  # use GPU if available
)
# Example usage: max_length caps the total token count (prompt + completion);
# do_sample with temperature=0.7 enables mildly randomized generation
prompt = "Hello, can you tell me some fun facts about European legislation?"
response = chatbot(prompt, max_length=100, do_sample=True, temperature=0.7)
print(response[0]["generated_text"])
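
# Sketch, under an assumption: if the fine-tuned tokenizer defines a chat
# template (check tokenizer.chat_template), recent transformers versions let
# the pipeline accept a list of chat messages, so the model sees the same
# format it was fine-tuned on:
messages = [{"role": "user", "content": prompt}]
chat_response = chatbot(messages, max_new_tokens=100, do_sample=True, temperature=0.7)
# The pipeline returns the full conversation; the last message is the reply
print(chat_response[0]["generated_text"][-1]["content"])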