#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LLM Client Module
-----------------
Handles communication with the LLM API using credentials from .env.
"""
import os
import logging

from dotenv import load_dotenv
import openai

logger = logging.getLogger(__name__)
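
# Expected .env contents (illustrative values only; the variable names are the
# ones this module actually reads via os.getenv in LLMClient.__init__):
#
#   OPENAI_API_KEY=sk-...your-key...
#   OPENAI_BASE_URL=https://api.openai.com/v1
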
class LLMClient:
    """Client for interacting with OpenAI-compatible LLM APIs."""

    def __init__(self, model="gpt-3.5-turbo"):
        """
        Initialize the LLM client with API credentials from .env.

        Args:
            model: Default LLM model to use
        """
        # Ensure environment variables are loaded
        load_dotenv()

        self.api_key = os.getenv("OPENAI_API_KEY")
        self.base_url = os.getenv("OPENAI_BASE_URL")
        self.default_model = model

        if not self.api_key:
            raise ValueError("OPENAI_API_KEY not found in environment variables")
        if not self.base_url:
            raise ValueError("OPENAI_BASE_URL not found in environment variables")

        # Initialize the OpenAI client
        self.client = openai.OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )
        logger.info(f"LLM client initialized with base URL: {self.base_url} and default model: {model}")
    def identify_intent(self, prompt, model=None, temperature=0.3):
        """
        Call the LLM API to identify the intent from the prompt.

        Args:
            prompt: The prompt to send to the LLM
            model: The model to use for the API call (defaults to self.default_model)
            temperature: Sampling temperature for the model

        Returns:
            Identified intent as a string
        """
        try:
            # Use the specified model, or fall back to the default
            model_to_use = model or self.default_model
            logger.info(f"Calling LLM API with model: {model_to_use}")

            # Call the Chat Completions API via the OpenAI client
            response = self.client.chat.completions.create(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=temperature,
                max_tokens=50  # We only need a short response for the intent
            )

            # Extract the content from the response
            content = response.choices[0].message.content.strip()
            logger.info(f"LLM identified intent: {content}")
            return content
        except Exception as e:
            logger.error(f"Error identifying intent: {e}", exc_info=True)
            return f"Error: {str(e)}"
    def chat(self, messages, model=None, temperature=0.7, max_tokens=1000):
        """
        Run a chat conversation.

        Args:
            messages: List of messages, each containing a role and content
            model: The model to use (defaults to self.default_model)
            temperature: Sampling temperature
            max_tokens: Maximum number of tokens to generate

        Returns:
            The generated reply content
        """
        try:
            # Use the specified model, or fall back to the default
            model_to_use = model or self.default_model
            logger.info(f"Calling LLM chat API with model: {model_to_use}")

            # Call the Chat Completions API via the OpenAI client
            response = self.client.chat.completions.create(
                model=model_to_use,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens
            )

            # Extract the content from the response
            content = response.choices[0].message.content
            return content
        except Exception as e:
            logger.error(f"Error in chat: {e}", exc_info=True)
            return f"Error: {str(e)}"
# For testing
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    # Load environment variables
    load_dotenv()
    client = LLMClient()

    test_prompt = """
    Based on the user's input, determine which of the following categories the user's intent belongs to.

    User input: "Please help me clarify the problems in the current scenario"

    Possible intent categories and their examples:

    Intent: 1. Requirements clarification
    Examples:
    - "I am not yet sure what decision problem I am facing; please help me clarify my optimization needs."
    - "Please guide me step by step to identify the key problems in the modeling scenario."
    - "Please help me determine which parts of the problem require constraints."

    Intent: 2. Modeling
    Examples:
    - "Please automatically generate a mathematical model from the scenario description I provided."
    - "Help me build a model that includes an objective function and constraints."
    - "Please generate a mathematical model and automatically add the necessary decision variables."

    Given the information above, which intent best matches the user's input? Reply with the intent name only.
    """

    intent = client.identify_intent(test_prompt)
    print(f"Identified intent: {intent}")

    # Test the chat functionality
    chat_messages = [
        {"role": "system", "content": "You are a professional mathematical modeling assistant."},
        {"role": "user", "content": "Please help me clarify the problems in the current scenario"}
    ]
    chat_response = client.chat(chat_messages)
    print("\nChat response:")
    print(chat_response)