# app_pure_llm.py
"""Gradio helper module that answers questions with a plain (no-retrieval) LLM call.

The corpus in ``gen_agents.txt`` is loaded and chunked for parity with a
RAG variant of this app, even though the pure-LLM path does not use it.
"""

import os
import re

import gradio as gr
import openai
from openai import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer

# Fail fast at import time if the key is missing, rather than on the first request.
DARTMOUTH_CHAT_API_KEY = os.getenv("DARTMOUTH_CHAT_API_KEY")
if DARTMOUTH_CHAT_API_KEY is None:
    raise ValueError("DARTMOUTH_CHAT_API_KEY not set.")

MODEL = "openai.gpt-4o-2024-08-06"

client = OpenAI(
    base_url="https://chat.dartmouth.edu/api",  # Replace with your endpoint URL
    api_key=DARTMOUTH_CHAT_API_KEY,  # Replace with your API key, if required
)

# --- Load and Prepare Data ---
# (Even if not used by the pure LLM function, we load the file to maintain consistency.)
with open("gen_agents.txt", "r", encoding="utf-8") as f:
    full_text = f.read()

text_splitter = CharacterTextSplitter(separator="\n\n", chunk_size=512, chunk_overlap=20)
docs = text_splitter.create_documents([full_text])
# You might not need passages for the pure LLM output, but we'll load them for completeness.
passages = [doc.page_content for doc in docs]


# --- Provided Function for Pure LLM ---
def generate_plain_answer(query):
    """
    Generate an answer using GPT-4 without additional context.

    Args:
        query: The user's question (a string).

    Returns:
        The model's answer text with surrounding whitespace stripped.
    """
    messages = [
        {"role": "system", "content": "You are a knowledgeable teaching assistant."},
        {"role": "user", "content": f"Answer the question: {query}"},
    ]
    response = client.chat.completions.create(
        model=MODEL,
        messages=messages,
    )
    answer = response.choices[0].message.content.strip()
    return answer


# --- Gradio App Function ---
def get_pure_llm_output(query):
    """Gradio callback: return the plain-LLM answer for *query*."""
    answer = generate_plain_answer(query)
    # NOTE(review): the original source was truncated mid f-string here
    # (`return f"` with no closing quote). Returning the bare answer is the
    # minimal syntactically-valid completion — restore any intended
    # formatting (e.g. a markdown heading) from the original file.
    return f"{answer}"