import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
# Load the fine-tuned model and tokenizer
model_name = "ibrahimgiki/qa_facebook_bart_base_new"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
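
# Note (suggestion, not part of the original file): Streamlit reruns this
# script on every user interaction, so the tokenizer and model are reloaded
# each time. A common pattern is to wrap the loading in a cached function,
# e.g.:
#
#     @st.cache_resource
#     def load_model():
#         tok = AutoTokenizer.from_pretrained(model_name)
#         mdl = AutoModelForSeq2SeqLM.from_pretrained(model_name)
#         return tok, mdl
#
# (sketch only; the names load_model, tok, and mdl are illustrative)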
# Set up a text2text-generation pipeline for question answering
qa_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
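# Note (assumption, not part of the original file): generation here uses the
# model's default settings; the pipeline also accepts generation keyword
# arguments, e.g. qa_pipeline(question, max_length=128) for longer answers
# (128 is an illustrative value, not taken from the original app).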
# Streamlit app layout
st.title("Ask anything about crop production, animal husbandry, soil management, and farming practices")
# Text area for the user to input a question
question = st.text_area("Enter your question:")
# Submit button
if st.button("Submit"):
    if question:
        # Perform inference using the pipeline
        result = qa_pipeline(question)
        answer = result[0]['generated_text']
        # Display the answer
        st.write("**Answer:**", answer)
    else:
        st.write("Please enter a question.")