#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
LLM Client Module
-----------------
Handles communication with the LLM API using credentials from .env.
"""

import os
import logging
from dotenv import load_dotenv
import openai

logger = logging.getLogger(__name__)

class LLMClient:
    """Client for interacting with OpenAI-compatible LLM APIs."""
    
    def __init__(self, model="gpt-3.5-turbo"):
        """
        Initialize the LLM client with API credentials from .env.
        
        Args:
            model: Default LLM model to use
        """
        # Ensure environment variables are loaded
        load_dotenv()
        
        self.api_key = os.getenv("OPENAI_API_KEY")
        self.base_url = os.getenv("OPENAI_BASE_URL")
        self.default_model = model
        
        if not self.api_key:
            raise ValueError("OPENAI_API_KEY not found in environment variables")
        
        if not self.base_url:
            raise ValueError("OPENAI_BASE_URL not found in environment variables")
        
        # Initialize the OpenAI client
        self.client = openai.OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )
        
        logger.info(f"LLM client initialized with base URL: {self.base_url} and default model: {model}")
    
    def identify_intent(self, prompt, model=None, temperature=0.3):
        """
        Call the LLM API to identify the intent from the prompt.
        
        Args:
            prompt: The prompt to send to the LLM
            model: The model to use for the API call (defaults to self.default_model)
            temperature: Sampling temperature for the model
            
        Returns:
            Identified intent as a string
        """
        try:
            # Use the caller-specified model, or fall back to the default
            model_to_use = model or self.default_model
            
            logger.info(f"Calling LLM API with model: {model_to_use}")
            
            # Call the Chat Completions API through the OpenAI client
            response = self.client.chat.completions.create(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=temperature,
                max_tokens=50  # We only need a short response for the intent
            )
            
            # Extract the message content from the response
            content = response.choices[0].message.content.strip()
            logger.info(f"LLM identified intent: {content}")
            return content
                
        except Exception as e:
            logger.error(f"Error identifying intent: {e}", exc_info=True)
            return f"Error: {str(e)}"
    
    def chat(self, messages, model=None, temperature=0.7, max_tokens=1000):
        """
        Run a multi-turn chat conversation.
        
        Args:
            messages: List of message dicts, each with "role" and "content" keys
            model: The model to use for the API call (defaults to self.default_model)
            temperature: Sampling temperature for the model
            max_tokens: Maximum number of tokens to generate
            
        Returns:
            The generated reply content as a string
        """
        try:
            # Use the caller-specified model, or fall back to the default
            model_to_use = model or self.default_model
            
            logger.info(f"Calling LLM chat API with model: {model_to_use}")
            
            # Call the Chat Completions API through the OpenAI client
            response = self.client.chat.completions.create(
                model=model_to_use,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens
            )
            
            # Extract the message content from the response
            content = response.choices[0].message.content
            return content
                
        except Exception as e:
            logger.error(f"Error in chat: {e}", exc_info=True)
            return f"Error: {str(e)}"

# For testing
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    
    # Load environment variables
    load_dotenv()
    
    client = LLMClient()
    
    test_prompt = """
    请根据用户的输入,判断用户的意图属于下列哪一类。

    用户输入: "请帮我理清当前场景中的问题"

    可能的意图类别及其示例:

    意图: 1. 需求梳理
    示例:
    - "我目前不确定面临的决策问题,请帮我理清我的优化需求。"
    - "请引导我一步步明确建模场景中的关键问题。"
    - "请协助我明确问题中哪些部分需要约束条件。"

    意图: 2. 建模
    示例:
    - "请根据我提供的场景描述自动生成一个数学模型。"
    - "帮我构建一个模型,包含目标函数和约束条件。"
    - "请生成数学模型,并自动添加必要的决策变量。"

    根据以上信息,用户的输入最符合哪个意图?请只回复意图名称。
    """
    
    intent = client.identify_intent(test_prompt)
    print(f"Identified intent: {intent}")
    
    # Test the chat functionality
    chat_messages = [
        {"role": "system", "content": "You are a professional mathematical modeling assistant."},
        {"role": "user", "content": "Please help me clarify the problems in the current scenario"}
    ]
    
    chat_response = client.chat(chat_messages)
    print("\nChat response:")
    print(chat_response)
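
    # A minimal usage sketch from another module (assuming this file is saved
    # as llm_client.py and a valid .env is present; the model name is just an
    # example):
    #
    #     from llm_client import LLMClient
    #
    #     client = LLMClient(model="gpt-4o-mini")
    #     reply = client.chat([{"role": "user", "content": "Hello"}])
    #     print(reply)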