Update app.py
app.py CHANGED
@@ -18,11 +18,11 @@ QUESTIONS_MODEL = "tiiuae/falcon-rw-1b"
 KEYWORDS_MODEL = "google/flan-t5-small"

 # Function to get LangChain LLM
-def get_llm(model_id):
+def get_llm(model_id, task):
     return HuggingFaceHub(
         repo_id=model_id,
         model_kwargs={"temperature": 0.5, "max_new_tokens": 150},
-        task=
+        task=task,  # Changed to a generic task for text generation
         huggingfacehub_api_token=HUGGINGFACE_API_TOKEN
     )

@@ -40,17 +40,17 @@ if st.button("Run Multi-LLM Analysis"):

     # Step 1: Summary
     summary_prompt = f"Provide a short summary about: {topic}"
-    summary_model = get_llm(SUMMARY_MODEL)
+    summary_model = get_llm(SUMMARY_MODEL, task="text-generation")  # Corrected task
     summary = summary_model.predict(summary_prompt)

     # Step 2: Research Questions
     questions_prompt = f"Give three research questions about: {topic}"
-    questions_model = get_llm(QUESTIONS_MODEL)
+    questions_model = get_llm(QUESTIONS_MODEL, task="text-generation")  # Corrected task
     questions = questions_model.predict(questions_prompt)

     # Step 3: Keywords
     keywords_prompt = f"List five keywords related to: {topic}"
-    keywords_model = get_llm(KEYWORDS_MODEL)
+    keywords_model = get_llm(KEYWORDS_MODEL, task="text-generation")  # Corrected task
     keywords = keywords_model.predict(keywords_prompt)

     # Display results
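For reference, below is a minimal standalone sketch of how the patched get_llm helper could be exercised outside the Streamlit app. It is a sketch only: the langchain_community import path, the HUGGINGFACEHUB_API_TOKEN environment variable, and the fixed example prompt are assumptions not shown in this diff; the constructor arguments and the predict call mirror the changed code.

# Minimal usage sketch of the patched helper (assumptions flagged in comments).
import os

# Assumption: the wrapper comes from langchain_community; the diff does not show the import.
from langchain_community.llms import HuggingFaceHub

# Assumption: the API token is read from the environment rather than defined inline.
HUGGINGFACE_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

QUESTIONS_MODEL = "tiiuae/falcon-rw-1b"  # same repo id as in the first hunk header

def get_llm(model_id, task):
    # Same shape as the patched function: explicit task plus generation kwargs.
    return HuggingFaceHub(
        repo_id=model_id,
        model_kwargs={"temperature": 0.5, "max_new_tokens": 150},
        task=task,
        huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
    )

if __name__ == "__main__":
    llm = get_llm(QUESTIONS_MODEL, task="text-generation")
    # Mirrors the "Research Questions" step with a fixed topic instead of Streamlit input.
    print(llm.predict("Give three research questions about: renewable energy"))

Keeping task as a function parameter, instead of the dangling task= in the old code, lets each call site pass whichever task its repo supports, which is exactly what the three corrected call sites now do.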