import os

import gradio as gr
import openai
from llama_index.core.indices import VectorStoreIndex
from llama_index.readers.file import PagedCSVReader

# Read the OpenAI API key from the environment
openai.api_key = os.getenv('OPENAI_API_KEY')


def load_data():
    """Load the talent CSV and build a query engine over a vector index."""
    try:
        loader = PagedCSVReader()
        documents = loader.load_data('aitalents.csv')
        index = VectorStoreIndex.from_documents(documents)
        query_engine = index.as_query_engine()
        return query_engine
    except Exception as e:
        print(f"Error loading data or creating index: {e}")
        return None


query_engine = load_data()  # Call load_data() to create the query engine


def chat(message, history):
    """Answer a chat message by querying the index; fall back gracefully on errors."""
    if query_engine is None:
        return "An error occurred while loading data. Please try again later."
    try:
        response = query_engine.query(message)
        return str(response)
    except Exception as e:
        print(f"Error generating response: {e}")
        return "I'm still learning how to answer that question. Please try asking something else."


# Create the chatbot interface
interface = gr.ChatInterface(fn=chat, title="AI Talent Matchmaker")

# Launch the interface
interface.launch()