import os
from collections import deque

import openai


class LLM:
    """Thin wrapper around an OpenAI-compatible chat-completion endpoint.

    Targets the DashScope compatible-mode API with the qwen-turbo-latest
    model; `generate_response` never raises, returning an error string
    instead so callers can display it directly.
    """

    def __init__(self):
        # Fix: do not hard-code the credential in source. Read it from the
        # environment; the original placeholder is kept as the fallback so
        # behavior is unchanged when the variable is unset.
        api_key = os.getenv('DASHSCOPE_API_KEY', '<用户 API Key>')
        base_url = 'https://dashscope.aliyuncs.com/compatible-mode/v1'
        self.model = 'qwen-turbo-latest'
        self.client = openai.OpenAI(
            api_key=api_key,
            base_url=base_url,
        )

    def generate_response(self, messages):
        """Send *messages* (list of role/content dicts) and return the reply text.

        On any failure (network, auth, malformed request) a Chinese error
        string is returned instead of propagating the exception — a
        deliberate best-effort contract relied on by the caller.
        """
        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                temperature=0.7,
                max_tokens=2000,
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # Surface the error as the reply text rather than crashing.
            return f'生成响应时出错：{str(e)}'

class MemoryManager:
    """Bounded conversation memory: keeps only the most recent turns."""

    def __init__(self, max_history=5):
        # deque with maxlen silently drops the oldest entry on overflow.
        self.history = deque(maxlen=max_history)

    def add_history(self, role, content):
        """Append one turn as a {'role': ..., 'content': ...} dict."""
        entry = {'role': role, 'content': content}
        self.history.append(entry)

    def get_history(self):
        """Return a list copy of the stored turns, oldest first."""
        return [turn for turn in self.history]

class SimpleKnowledgeBase:
    """Simplified knowledge-base module backed by hard-coded entries."""

    def __init__(self):
        # Hard-coded knowledge entries: (content, keywords)
        self.entries = [
            ('Python使用动态类型，运行时确定变量类型', ['python', '动态类型', '变量']),
            ('RAG结合检索和生成，减少模型幻觉', ['RAG', '检索', '生成', '幻觉']),
            ('HTTP状态码200表示成功，404未找到', ['HTTP', '状态码', '200', '404'])
        ]

    def retrieve(self, query):
        """Return up to 2 entry contents whose keywords occur in *query*.

        Bug fix: the original computed a character-level set intersection
        (`set(query) & set(''.join(keywords))`), which counted shared
        *characters* rather than shared keywords — producing spurious
        matches (e.g. '200404') and missing single exact keyword hits.
        Now an entry matches when at least one keyword is a
        case-insensitive substring of the query; entries are ranked by
        the number of matching keywords (ties keep definition order).
        Returns '无相关信息' when nothing matches.
        """
        q = query.casefold()
        scored = []
        for content, keywords in self.entries:
            hits = sum(1 for kw in keywords if kw.casefold() in q)
            if hits:
                scored.append((hits, content))
        # Most relevant first; sort is stable, so ties keep entry order.
        scored.sort(key=lambda pair: pair[0], reverse=True)
        return '\n'.join(c for _, c in scored[:2]) if scored else '无相关信息'

class ConversationAgent:
    """Conversational agent wiring together an LLM, memory and a knowledge base."""

    def __init__(self):
        self.llm = LLM()
        self.memory = MemoryManager()
        self.knowledge_base = SimpleKnowledgeBase()
        self.system_prompt = {'role': 'system', 'content': '你是一个 FGAI 专业助手，可以根据上下文与历史信息进行回答，请给出专业、准确的回答，如信息不足请明确说明。回答不要超过50个字。'}

    def _build_messages(self, knowledge, history, prompt):
        """Assemble the chat payload: system prompt + history + current user turn.

        When *knowledge* holds retrieved context (i.e. is not the sentinel
        '无相关信息'), it is appended to the user prompt inside a
        [Context] block.
        """
        has_context = knowledge != '无相关信息'
        if has_context:
            prompt = f'{prompt}\n\n以下为知识库上下文\n[Context]\n{knowledge}\n[/Context]'
        history.append({'role': 'user', 'content': prompt})
        messages = [self.system_prompt, *history]
        print('messages: ', messages)
        return messages

    def process_input(self, user_input):
        """Run one turn: retrieve knowledge, query the LLM, update memory."""
        context = self.knowledge_base.retrieve(user_input)   # knowledge lookup
        past_turns = self.memory.get_history()               # memory lookup

        # Generate the model's reply for this turn.
        reply = self.llm.generate_response(
            self._build_messages(context, past_turns, user_input))

        # Record the raw user input (without injected context) and the reply.
        self.memory.add_history('user', user_input)
        self.memory.add_history('assistant', reply)
        return reply

def main():
    """Run a short scripted conversation against the agent and print each turn."""
    agent = ConversationAgent()
    sample_queries = (
        '请解释Python的动态类型系统',
        '请介绍 React Agent？',
        'RAG技术的主要优势是什么？',
        '我之前问过关于Python的问题吗？',
    )
    separator = '-' * 60
    for query in sample_queries:
        print(f'[用户] {query}')
        print(f"[助手] {agent.process_input(query)}\n{separator}")
# Smoke test for the LLM wrapper.
def llm_test():
    """Send a single greeting to the model and print the reply."""
    print(LLM().generate_response([{'role': 'user', 'content': '你好'}]))

# Smoke test for the bounded conversation memory.
def memory_test():
    """Fill a 4-slot memory past capacity and print it after each turn."""
    mm = MemoryManager(max_history=4)
    print(mm.get_history())  # empty before any turn is recorded
    for i in range(4):
        mm.add_history('user', f'user {i}')
        mm.add_history('assistant', f'assistant {i}')
        print(mm.get_history())

# Smoke test for the knowledge base: mix of hit and miss queries.
def knowledge_base_test():
    """Print the retrieval result for a fixed set of probe queries."""
    kb = SimpleKnowledgeBase()
    probes = (
        '动态类型',
        '你好',
        '检索',
        'HTTP 404',
        'HTTP 200',
        'HTTP 200 404',
        'HTTP 200404 500',
    )
    for probe in probes:
        print(kb.retrieve(probe))


if __name__ == '__main__':
    # Test the LLM wrapper
    # llm_test()

    # Test the memory manager
    # memory_test()

    # Test the knowledge base
    # knowledge_base_test()

    # Run the conversation-agent demo
    main()