Spaces: Build error
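On Spaces, a build error usually means dependency installation failed before the app even starts, most often because `requirements.txt` is missing or incomplete; every import in the app needs a matching entry. A minimal sketch (unpinned; the exact versions are assumptions to adjust for your Space):

```
# requirements.txt, placed next to app.py
streamlit
python-dotenv
langchain-community
huggingface_hub
```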
If the build succeeds, there is a separate runtime problem in the code as posted: `task="text-generation"` is passed for `google/flan-t5-small`, but flan-t5 is a seq2seq model, so the Hub inference API expects `task="text2text-generation"` for it. Corrected code:

```python
import os

import streamlit as st
from dotenv import load_dotenv
from langchain_community.llms import HuggingFaceHub

# Load .env if running locally
load_dotenv()

# Hugging Face API token (must be set in Hugging Face Spaces secrets for online use)
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HUGGINGFACE_API_TOKEN:
    st.error("⚠️ Hugging Face API token not found. Please add it as a secret named 'HUGGINGFACEHUB_API_TOKEN'.")
    st.stop()

# Models for each task: flan-t5 is a seq2seq (text2text-generation) model,
# falcon-rw-1b is a causal (text-generation) model.
SUMMARY_MODEL = "google/flan-t5-small"
QUESTIONS_MODEL = "tiiuae/falcon-rw-1b"
KEYWORDS_MODEL = "google/flan-t5-small"

# Build a LangChain LLM for the given model and task
def get_llm(model_id, task):
    return HuggingFaceHub(
        repo_id=model_id,
        task=task,  # must match the model type, see above
        model_kwargs={"temperature": 0.5, "max_new_tokens": 150},
        huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
    )
```
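Note that `HuggingFaceHub` is deprecated in recent `langchain-community` releases. A minimal sketch of the same helper on the replacement class, assuming the `langchain-huggingface` package is added to `requirements.txt` (`get_llm_v2` is a hypothetical name for illustration):

```python
from langchain_huggingface import HuggingFaceEndpoint

def get_llm_v2(model_id: str, task: str):
    # Same idea as get_llm above, but on the non-deprecated class;
    # the generation settings move to top-level parameters.
    return HuggingFaceEndpoint(
        repo_id=model_id,
        task=task,
        temperature=0.5,
        max_new_tokens=150,
        huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
    )

# Usage mirrors the original; invoke() replaces the deprecated predict():
# summary = get_llm_v2("google/flan-t5-small", "text2text-generation").invoke(prompt)
```

The Streamlit UI then calls the helper once per step, passing the task that matches each model: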
```python
# Streamlit app UI
st.set_page_config(page_title="🧠 Multi-LLM Research Assistant")
st.title("🧠 Research Assistant using Multiple LLMs via LangChain")

topic = st.text_input("🔍 Enter your research topic")

if st.button("Run Multi-LLM Analysis"):
    if not topic.strip():
        st.warning("Please enter a topic to continue.")
    else:
        with st.spinner("Generating..."):
            # Step 1: Summary (flan-t5 -> text2text-generation)
            summary_prompt = f"Provide a short summary about: {topic}"
            summary_model = get_llm(SUMMARY_MODEL, task="text2text-generation")
            summary = summary_model.predict(summary_prompt)

            # Step 2: Research questions (falcon-rw-1b -> text-generation)
            questions_prompt = f"Give three research questions about: {topic}"
            questions_model = get_llm(QUESTIONS_MODEL, task="text-generation")
            questions = questions_model.predict(questions_prompt)

            # Step 3: Keywords (flan-t5 -> text2text-generation)
            keywords_prompt = f"List five keywords related to: {topic}"
            keywords_model = get_llm(KEYWORDS_MODEL, task="text2text-generation")
            keywords = keywords_model.predict(keywords_prompt)

            # Display results
            st.success("✅ Done! Here's your research output:")
            st.subheader("📄 Summary")
            st.write(summary)
            st.subheader("❓ Research Questions")
            st.write(questions)
            st.subheader("🔑 Keywords")
            st.write(keywords)
```
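For local runs, `load_dotenv()` reads the token from a `.env` file in the project root; on Spaces, set the same name in the Space's secrets instead of committing a file. The token value here is a placeholder:

```
# .env (local development only; do not commit this file)
HUGGINGFACEHUB_API_TOKEN=hf_your_token_here
```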