# Streamlit Space: agriculture question-answering app built on a
# fine-tuned facebook/bart-base model (ibrahimgiki/qa_facebook_bart_base_new).
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline


@st.cache_resource(show_spinner=True)
def _load_qa_pipeline(name: str):
    """Load the fine-tuned seq2seq model and tokenizer, wrapped in a pipeline.

    Decorated with ``st.cache_resource`` so the model is loaded only once per
    process: Streamlit re-executes the entire script on every user
    interaction, and without caching the (large) BART checkpoint would be
    re-loaded from disk/network on each button click.

    Args:
        name: Hugging Face Hub model id (or local path) to load.

    Returns:
        A ``transformers`` "text2text-generation" pipeline ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSeq2SeqLM.from_pretrained(name)
    return pipeline("text2text-generation", model=model, tokenizer=tokenizer)


# Fine-tuned question-answering model hosted on the Hugging Face Hub.
model_name = "ibrahimgiki/qa_facebook_bart_base_new"
qa_pipeline = _load_qa_pipeline(model_name)
# --- Streamlit app layout ---------------------------------------------------
st.title("Ask anything about crop production, animal husbandry, soil management, and farming practices")

# Text area for the user to input a question.
question = st.text_area("Enter your question:")

# Submit button: run inference only on click, and only for non-blank input.
if st.button("Submit"):
    # `question.strip()` rather than bare `question`: a whitespace-only
    # string is truthy and would otherwise be sent to the model.
    if question.strip():
        # Perform inference using the pipeline; result is a list of dicts
        # with a 'generated_text' key (transformers text2text-generation).
        result = qa_pipeline(question)
        answer = result[0]['generated_text']
        # Display the answer.
        st.write("**Answer:**", answer)
    else:
        st.write("Please enter a question.")