import gradio as gr
from huggingface_hub import InferenceClient
from typing import List, Tuple
import fitz  # PyMuPDF

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


# Holds the app's state: the text extracted from the PDF
class MyApp:
    def __init__(self) -> None:
        self.documents = []
        self.load_pdf("THEDIA1.pdf")

    def load_pdf(self, file_path: str) -> None:
        """Extracts text from a PDF file and stores it in the app's documents."""
        doc = fitz.open(file_path)
        self.documents = []
        for page_num in range(len(doc)):
            page = doc[page_num]
            text = page.get_text()
            self.documents.append({"page": page_num + 1, "content": text})
        print("PDF processed successfully!")

    def search_documents(self, query: str, k: int = 3) -> List[str]:
        """Searches for documents whose text contains the query string."""
        results = [
            doc["content"]
            for doc in self.documents
            if query.lower() in doc["content"].lower()
        ]
        return results[:k] if results else ["No relevant documents found."]


app = MyApp()


def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    # The system prompt is fixed here and overrides the value passed in from the UI.
    system_message = (
        "You are a concise, knowledgeable DBT coach. You never ask more than one "
        "follow-up question. You avoid presenting too many options and ask for the "
        "user's consent when needed. Use relevant documents to guide users through "
        "DBT exercises and provide helpful information."
    )
    messages = [{"role": "system", "content": system_message}]

    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    # RAG - retrieve relevant documents and append them as extra context
    retrieved_docs = app.search_documents(message)
    context = "\n".join(retrieved_docs)
    messages.append({"role": "system", "content": "Relevant documents: " + context})

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final streamed chunk may carry no content
            response += token
        yield response


demo = gr.Blocks()

with demo:
    gr.Markdown("🧘‍♀️ **Dialectical Behaviour Therapy**")
    gr.Markdown(
        "Disclaimer: This chatbot is based on a DBT exercise book that is publicly available. "
        "We are not medical practitioners, and the use of this chatbot is at your own responsibility."
    )
    chatbot = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Textbox(
                value=(
                    "You are a knowledgeable DBT coach. Use relevant documents to guide users "
                    "through DBT exercises and provide helpful information."
                ),
                label="System message",
            ),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
        examples=[
            ["I feel overwhelmed with work. Help me to feel relaxed"],
            ["Can you guide me through a quick meditation?"],
            ["How do I stop worrying about things I can't control?"],
            ["What are some DBT skills for managing anxiety?"],
            ["Can you guide me through a mindfulness exercise?"],
            ["What is radical acceptance?"],
        ],
        title="DBT Coach 🧘‍♀️",
    )

if __name__ == "__main__":
    demo.launch()