"""Gradio chatbot that answers history questions with retrieval-augmented generation.

Retrieves supporting passages from a vector-search service, then asks the
Cohere chat API to answer the question from that context, appending numbered
source citations to each answer.
"""

import json
import logging
import os

import gradio as gr
import requests
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

logger = logging.getLogger(__name__)

# Cohere API configuration
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
if not COHERE_API_KEY:
    raise ValueError("COHERE_API_KEY not found in environment variables")

COHERE_API_URL = "https://api.cohere.ai/v1/chat"
MODEL_NAME = "command-r-08-2024"

# Vector database configuration
API_URL = "https://sendthat.cc"
HISTORY_INDEX = "history"

# requests has no default timeout; without one a stalled connection hangs forever.
REQUEST_TIMEOUT = 30


def search_document(query, k):
    """Search the vector database for the top-*k* passages matching *query*.

    Args:
        query: Free-text search query.
        k: Number of results to request.

    Returns:
        A ``(results, failed_query, k)`` tuple. On success ``results`` is the
        decoded JSON response and ``failed_query`` is ``""``; on failure
        ``results`` is ``{"error": message}`` and ``failed_query`` echoes the
        query that failed.
    """
    try:
        url = f"{API_URL}/search/{HISTORY_INDEX}"
        payload = {"text": query, "k": k}
        headers = {"Content-Type": "application/json"}
        response = requests.post(
            url, json=payload, headers=headers, timeout=REQUEST_TIMEOUT
        )
        response.raise_for_status()
        return response.json(), "", k
    except requests.exceptions.RequestException as e:
        logger.error("Error in search: %s", e)
        return {"error": str(e)}, query, k


def generate_answer(question, context, citations):
    """Generate an answer to *question* from *context* via the Cohere chat API.

    Args:
        question: The user's question.
        context: Concatenated retrieved passages to ground the answer.
        citations: Human-readable source strings, listed after the answer
            as ``[1]``, ``[2]``, ... in the order given.

    Returns:
        The generated answer text with a "Sources:" section appended, or an
        error message string if the API call fails.
    """
    prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer the question based on the given context. At the end of your answer, provide citations for the sources you used, referencing them as [1], [2], etc.:"
    headers = {
        "Authorization": f"Bearer {COHERE_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "message": prompt,
        "model": MODEL_NAME,
        # NOTE: this string was previously broken across a physical line,
        # which is a syntax error; implicit concatenation keeps it one value.
        "preamble": (
            "You are an AI-assistant chatbot. You are trained to assist users "
            "by providing thorough and helpful responses to their queries "
            "based on the given context. Always include citations at the end "
            "of your answer."
        ),
        "chat_history": [],  # You can add chat history here if needed
    }
    try:
        response = requests.post(
            COHERE_API_URL, headers=headers, json=payload, timeout=REQUEST_TIMEOUT
        )
        response.raise_for_status()
        answer = response.json()["text"]
        # Append citations only when we actually have sources, so the answer
        # never ends with a dangling "Sources:" header.
        if citations:
            answer += "\n\nSources:"
            for i, citation in enumerate(citations, 1):
                answer += f"\n[{i}] {citation}"
        return answer
    except requests.exceptions.RequestException as e:
        logger.error("Error in generate_answer: %s", e)
        return f"An error occurred: {str(e)}"


def answer_question(question, k=3):
    """Answer *question* using the top-*k* retrieved passages as context.

    Falls back to an empty context (and no citations) when the search fails
    or returns no results, so the model still produces a best-effort reply.
    """
    # Search the vector database
    search_results, _, _ = search_document(question, k)

    # Extract and combine the retrieved contexts
    if "results" in search_results:
        contexts = []
        citations = []
        for item in search_results["results"]:
            contexts.append(item["metadata"]["content"])
            citations.append(
                f"{item['metadata'].get('title', 'Unknown Source')} - "
                f"{item['metadata'].get('source', 'No source provided')}"
            )
        combined_context = " ".join(contexts)
    else:
        logger.error(
            "Error in database search or no results found: %s", search_results
        )
        combined_context = ""
        citations = []

    # Generate answer using the Cohere LLM
    answer = generate_answer(question, combined_context, citations)
    return answer


def chatbot(message, history):
    """Gradio ChatInterface callback: answer *message* (history is unused)."""
    response = answer_question(message)
    return response


# Create Gradio interface
iface = gr.ChatInterface(
    chatbot,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(
        placeholder="Ask a question about history...", container=False, scale=7
    ),
    title="History Chatbot",
    description="Ask me anything about history, and I'll provide answers with citations!",
    theme="soft",
    examples=[
        "Why was Anne Hutchinson banished from Massachusetts?",
        "What were the major causes of World War I?",
        "Who was the first President of the United States?",
        "What was the significance of the Industrial Revolution?",
    ],
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)

# Launch the app
if __name__ == "__main__":
    iface.launch()