Commit aa8691d · chatbot with UI
Files changed:
- .gitignore +1 -0
- README.md +39 -0
- __pycache__/chatbot.cpython-314.pyc +0 -0
- __pycache__/ui.cpython-314.pyc +0 -0
- app.py +96 -0
- chatbot.py +158 -0
- requirements.txt +8 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+config.yaml
README.md
ADDED
@@ -0,0 +1,39 @@
+# Demo RAG Chatbot
+
+A Python demo chatbot that:
+
+- loads `config.yaml` with `sambanova_api_key` and `website`
+- scrapes the configured website
+- builds embeddings using HuggingFace models
+- retrieves relevant chunks (RAG)
+- generates answers using the SambaNova API
+- returns formatted text output with citations
+
+## Setup
+
+1. Install dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+2. Configure `config.yaml`:
+   - `sambanova_api_key`: your SambaNova API key
+   - `website`: the URL to scrape
+   - `embedding_model`: HuggingFace model (default: `sentence-transformers/all-MiniLM-L6-v2`)
+   - `system_prompt`: optional system prompt controlling the assistant's behavior
+
+## Run CLI Mode
+
+```bash
+python chatbot.py
+```
+
+Type a question and press Enter. Type `exit` (or `quit`) to stop.
+
+## Run with Gradio UI
+
+```bash
+python app.py
+```
+
+Interactive web interface with real-time answers and context display.
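For reference, the four keys listed under Setup map to a file like the one below. This is a minimal `config.yaml` sketch: the key names and the embedding-model default come from the code and README above, while every value shown is a placeholder to substitute.

```yaml
# Example config.yaml — all values are placeholders, not real credentials.
# config.yaml is listed in .gitignore, so the API key stays out of git.
sambanova_api_key: "YOUR_SAMBANOVA_API_KEY"
website: "https://example.com"
embedding_model: "sentence-transformers/all-MiniLM-L6-v2"
system_prompt: "You are a helpful assistant."
```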
__pycache__/chatbot.cpython-314.pyc
ADDED
Binary file (11.7 kB)

__pycache__/ui.cpython-314.pyc
ADDED
Binary file (4.68 kB)
app.py
ADDED
@@ -0,0 +1,96 @@
+import gradio as gr
+from pathlib import Path
+from sambanova import SambaNova
+from langchain_huggingface import HuggingFaceEmbeddings
+
+from chatbot import (
+    load_config,
+    build_rag_corpus,
+    retrieve_relevant_chunks,
+    build_prompt,
+    ask_model,
+    format_answer,
+)
+
+CONFIG_PATH = Path(__file__).parent / "config.yaml"
+RESOURCE_STATE = {}  # module-level cache so the corpus is built only once
+
+
+def init_resources():
+    if RESOURCE_STATE:
+        return RESOURCE_STATE
+
+    if not CONFIG_PATH.exists():
+        raise FileNotFoundError(f"Missing config file: {CONFIG_PATH}")
+
+    config = load_config(CONFIG_PATH)
+    llm_api_key = config.get("sambanova_api_key")
+    website = config.get("website")
+    system_prompt = config.get("system_prompt", "You are a helpful assistant.")
+
+    if not llm_api_key or not website:
+        raise ValueError("Please set sambanova_api_key and website in config.yaml")
+
+    embed_model = HuggingFaceEmbeddings(model_name=config.get("embedding_model"))
+    corpus = build_rag_corpus(config, embed_model, website)
+    client = SambaNova(
+        api_key=llm_api_key,
+        base_url="https://api.sambanova.ai/v1",
+        timeout=30,
+    )
+
+    RESOURCE_STATE.update(
+        config=config,
+        website=website,
+        system_prompt=system_prompt,
+        embed_model=embed_model,
+        corpus=corpus,
+        client=client,
+    )
+    return RESOURCE_STATE
+
+
+def answer_question(question: str):
+    resources = init_resources()
+    selected = retrieve_relevant_chunks(
+        resources["corpus"],
+        question,
+        resources["embed_model"],
+        top_k=4,
+    )
+    prompt = build_prompt(resources["system_prompt"], question, selected)
+    raw_answer = ask_model(prompt, resources["client"])
+    response = format_answer(raw_answer, selected)
+    citations = "\n\n".join(
+        [f"Chunk {i+1}: {chunk.text[:300]}..." for i, chunk in enumerate(selected)]
+    )
+    return response, citations
+
+
+def main():
+    resources = init_resources()
+
+    with gr.Blocks(title="RAG Chatbot") as demo:
+        gr.Markdown("# 🤖 RAG-Powered Chatbot")
+        gr.Markdown(f"**Website:** {resources['website']}  \n**Chunks:** {len(resources['corpus'])}")
+
+        with gr.Row():
+            with gr.Column(scale=3):
+                question_input = gr.Textbox(label="Ask a question", placeholder="What services do you provide?", lines=2)
+                submit_button = gr.Button("Ask")
+                answer_output = gr.Textbox(label="Answer", lines=12, interactive=False)
+
+            with gr.Column(scale=1):
+                citations_output = gr.Textbox(label="Citations", lines=20, interactive=False)
+
+        submit_button.click(
+            answer_question,
+            inputs=[question_input],
+            outputs=[answer_output, citations_output],
+        )
+
+    demo.launch()
+
+
+if __name__ == "__main__":
+    main()
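Because `answer_question` returns plain `(response, citations)` strings, the pipeline in app.py can be smoke-tested without launching the Gradio UI. A minimal sketch, assuming a valid `config.yaml` and network access; importing `app` is safe since `demo.launch()` only runs under `__main__`, but calling the function triggers scraping, embedding, and one API call:

```python
# Hypothetical headless check of app.py's question-answering pipeline.
from app import answer_question

response, citations = answer_question("What services do you provide?")
print(response)
print("--- citations ---")
print(citations)
```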
chatbot.py
ADDED
@@ -0,0 +1,158 @@
+import re
+import sys
+from dataclasses import dataclass
+from pathlib import Path
+
+import bs4
+import numpy as np
+import requests
+import yaml
+from langchain_huggingface import HuggingFaceEmbeddings
+from sambanova import SambaNova
+
+
+@dataclass
+class DocumentChunk:
+    text: str
+    source: str
+    vector: np.ndarray
+
+
+def load_config(path: Path) -> dict:
+    with path.open("r", encoding="utf-8") as f:
+        return yaml.safe_load(f)
+
+
+def scrape_website(url: str) -> str:
+    response = requests.get(url, timeout=15)
+    response.raise_for_status()
+    soup = bs4.BeautifulSoup(response.text, "html.parser")
+    # Drop non-content elements before extracting text.
+    for tag in soup(["script", "style", "header", "footer", "nav", "aside"]):
+        tag.decompose()
+    text = soup.get_text(separator="\n")
+    text = re.sub(r"\n{2,}", "\n", text).strip()
+    return text
+
+
+def split_into_chunks(text: str, chunk_size: int = 400, overlap: int = 100) -> list[str]:
+    # Sentence-aware chunking: pack sentences up to chunk_size characters,
+    # carrying the last `overlap` characters into the next chunk for context.
+    sentences = [s.strip() for s in re.split(r"(?<=[\.\?\!])\s+", text) if s.strip()]
+    chunks = []
+    current = ""
+    for sentence in sentences:
+        if len(current) + len(sentence) + 1 > chunk_size and current:
+            chunks.append(current.strip())
+            current = current[-overlap:] if overlap < len(current) else current
+        current += " " + sentence
+    if current.strip():
+        chunks.append(current.strip())
+    return chunks
+
+
+def embed_texts(texts: list[str], embed_model: HuggingFaceEmbeddings | None = None) -> list[list[float]]:
+    # Return an empty list when there is nothing (or no model) to embed.
+    if not texts or embed_model is None:
+        return []
+    return embed_model.embed_documents(texts)
+
+
+def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
+    if np.linalg.norm(a) == 0 or np.linalg.norm(b) == 0:
+        return 0.0
+    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
+
+
+def build_rag_corpus(config: dict, embed_model: HuggingFaceEmbeddings, url: str) -> list[DocumentChunk]:
+    print(f"Scraping website: {url}")
+    page_text = scrape_website(url)
+    chunks = split_into_chunks(page_text)
+    print(f"Split content into {len(chunks)} chunks")
+    embeddings = embed_texts(chunks, embed_model)
+    return [
+        DocumentChunk(text=chunk, source=url, vector=np.array(vector))
+        for chunk, vector in zip(chunks, embeddings)
+    ]
+
+
+def retrieve_relevant_chunks(chunks: list[DocumentChunk], question: str, embed_model: HuggingFaceEmbeddings, top_k: int = 4) -> list[DocumentChunk]:
+    question_embeddings = embed_texts([question], embed_model)
+    if not question_embeddings:
+        # Fallback: without a question embedding, return the first chunks.
+        return chunks[:top_k]
+    question_vector = np.array(question_embeddings[0])
+    scored = [
+        (chunk, cosine_similarity(question_vector, chunk.vector))
+        for chunk in chunks
+    ]
+    scored.sort(key=lambda item: item[1], reverse=True)
+    return [chunk for chunk, _ in scored[:top_k]]
+
+
+def build_prompt(system_prompt: str, question: str, context_chunks: list[DocumentChunk]) -> str:
+    context_text = "\n---\n".join(chunk.text for chunk in context_chunks)
+    return (
+        f"{system_prompt}\n\n"
+        "Use the following extracted website text to answer the question clearly.\n"
+        f"Context:\n{context_text}\n\n"
+        f"Question: {question}\n"
+    )
+
+
+def create_llm_client(config: dict) -> SambaNova:
+    return SambaNova(
+        api_key=config.get("sambanova_api_key"),
+        base_url="https://api.sambanova.ai/v1",
+        timeout=30,
+    )
+
+
+def ask_model(prompt: str, client: SambaNova) -> str:
+    response = client.chat.completions.create(
+        model="DeepSeek-V3.1",
+        messages=[{"role": "user", "content": prompt}],
+        max_tokens=1056,
+        temperature=0.2,
+    )
+    return response.choices[0].message.content.strip()
+
+
+def format_answer(raw: str, chunks: list[DocumentChunk]) -> str:
+    # Currently a pass-through; citations are rendered by the caller.
+    return raw
+
+
+def main() -> int:
+    config_path = Path(__file__).parent / "config.yaml"
+    if not config_path.exists():
+        print(f"Missing config file: {config_path}")
+        return 1
+
+    config = load_config(config_path)
+    llm_api_key = config.get("sambanova_api_key")
+    website = config.get("website")
+    system_prompt = config.get("system_prompt", "You are a helpful assistant.")
+
+    if not llm_api_key or not website:
+        print("Please set sambanova_api_key and website in config.yaml")
+        return 1
+
+    embed_model = HuggingFaceEmbeddings(model_name=config.get("embedding_model"))
+    chunks = build_rag_corpus(config, embed_model, website)
+    client = create_llm_client(config)
+    print("RAG corpus ready. Ask a question or type 'exit'.")
+
+    while True:
+        try:
+            question = input("Question> ").strip()
+        except (EOFError, KeyboardInterrupt):
+            break
+        if not question:
+            continue
+        if question.lower() in {"exit", "quit"}:
+            break
+
+        selected = retrieve_relevant_chunks(chunks, question, embed_model)
+        prompt = build_prompt(system_prompt, question, selected)
+        raw_answer = ask_model(prompt, client)
+        response = format_answer(raw_answer, selected)
+
+        print(response)
+        print()
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
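The chunking and scoring helpers above are pure functions, so they can be sanity-checked on toy data with no scraping, model download, or API key. A small sketch (importing `chatbot` still requires its dependencies, e.g. `langchain-huggingface`, to be installed):

```python
# Toy check of split_into_chunks and cosine_similarity from chatbot.py.
import numpy as np
from chatbot import split_into_chunks, cosine_similarity

text = "First sentence here. Second sentence follows. A third one ends it."
# Small chunk_size forces several chunks; each carries a 10-char overlap tail.
for chunk in split_into_chunks(text, chunk_size=40, overlap=10):
    print(repr(chunk))

a = np.array([1.0, 0.0])
b = np.array([1.0, 1.0])
print(cosine_similarity(a, b))  # ~0.7071 for vectors 45 degrees apart
```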
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+PyYAML>=6.0
+requests>=2.30.0
+beautifulsoup4>=4.12.2
+numpy>=1.25.0
+openai>=1.0.0
+sambanova>=0.1.0
+gradio>=3.0.0
+langchain-huggingface>=0.0.1