import asyncio
import os
from typing import Dict, List, Optional, Tuple

from openai import OpenAI

import utils

class EvolveAIEngine:
    """Self-evolving optimization engine.

    Repeatedly generates a solution for a task, collects interactive user
    feedback, distills that feedback into long-term principles (kept for
    future tasks) and session demands (current round only), and feeds both
    back into the next generation round until the user accepts a solution.
    """

    def __init__(self):
        self.operational_principles: List[str] = []  # long-term principles, accumulated across rounds
        self.session_demands: List[str] = []         # demands valid for the current round only
        self.feedback_history: List[Dict] = []       # raw feedback plus its classification
        self._clients: Dict[str, "OpenAI"] = {}      # API clients cached per llm_type (created lazily)

    def optimize(self, task: str, max_iterations: Optional[int] = None) -> str:
        """Main optimization loop.

        Args:
            task: Description of the task to solve.
            max_iterations: Optional cap on refinement rounds; when reached,
                the most recent solution is returned even without user
                approval. ``None`` (default) iterates until the user accepts.

        Returns:
            The accepted (or final) solution text.
        """
        iteration = 0
        while True:
            iteration += 1
            solution = self._generate_solution(task, iteration)
            print(f"\n◇ 第{iteration}次迭代方案 ◇\n{solution}")

            if self._process_feedback():
                return solution
            # Safety valve: stop after the configured number of rounds.
            if max_iterations is not None and iteration >= max_iterations:
                return solution

    def _generate_solution(self, task: str, iteration: int) -> str:
        """Build the generation prompt (task + accumulated context) and query the LLM."""
        prompt = f"""
        核心任务 {task}
        长期原则 {self.operational_principles}
        本次需求 {self.session_demands}
        历史反馈 {self.feedback_history[-3:]}
        迭代次数 第{iteration}次
        """
        return self._call_llm(prompt)

    def _process_feedback(self) -> bool:
        """Read user feedback from stdin; return True when the user accepts.

        Non-accepting feedback is mined for demands, classified into
        long-term principles vs. session demands, stored, and then
        conflict-checked against existing principles.
        """
        print("\n请输入反馈（输入Y结束）：")
        feedback = input("> ").strip()
        if feedback.lower() in ('y', 'yes', '满意'):
            return True

        # Extract candidate demands from the feedback, then classify them.
        demands = self._extract_demands(feedback)
        principles, demands = self._classify_demands(demands)

        # Persist: principles accumulate, session demands are replaced.
        self.operational_principles.extend(principles)
        self.session_demands = demands  # keep only the latest demands
        self.feedback_history.append({
            "raw_feedback": feedback,
            "principles": principles,
            "demands": demands
        })

        # Drop newly introduced principles that contradict earlier ones.
        self._resolve_conflicts()
        return False

    def _extract_demands(self, feedback: str) -> List[str]:
        """Ask the LLM to list candidate demands hidden in raw feedback.

        Returns the bullet items of the response; tolerates the `•` marker
        with or without a trailing space and ignores blank/unmarked lines.
        """
        prompt = f"""
        分析用户反馈，列出所有潜在需求：
        反馈内容：{feedback}
        现有原则：{self.operational_principles}
        
        输出要求：
        - 每个需求用「•」开头
        - 保持原始语义
        - 最多5条
        """
        raw = self._call_llm(prompt)
        demands = []
        for line in raw.split('\n'):
            line = line.strip()
            if line.startswith('•'):
                demand = line[1:].strip()
                if demand:
                    demands.append(demand)
        return demands

    def _classify_demands(self, demands: List[str]) -> Tuple[List[str], List[str]]:
        """Split extracted demands into (long-term principles, session demands).

        Lines are stripped before the prefix check so indented LLM output
        is still recognized.
        """
        prompt = f"""
        对需求进行分类：
        需求列表：{demands}
        
        判断标准：
        1. 长期原则：适用于未来类似场景
        2. 临时需求：仅限本次任务
        
        输出格式：
        - 原则：<需求内容>
        - 临时：<需求内容>
        """
        raw = self._call_llm(prompt)

        principles = []
        session_demands = []
        for line in raw.split('\n'):
            line = line.strip()
            # Both prefixes are 5 characters long: "- 原则：" / "- 临时：".
            if line.startswith('- 原则：'):
                principles.append(line[5:].strip())
            elif line.startswith('- 临时：'):
                session_demands.append(line[5:].strip())
        return principles, session_demands

    def _resolve_conflicts(self):
        """Remove principles that conflict with an earlier one.

        Pairwise LLM check (O(n^2) calls in the worst case); on conflict
        the earlier principle wins and the later one is discarded.
        """
        conflicted = set()
        count = len(self.operational_principles)
        for i in range(count):
            for j in range(i + 1, count):
                if j in conflicted:
                    continue  # already marked for removal; skip the redundant LLM call
                if self._is_conflicting(
                    self.operational_principles[i],
                    self.operational_principles[j]
                ):
                    conflicted.add(j)  # keep the earlier principle
        self.operational_principles = [
            p for idx, p in enumerate(self.operational_principles)
            if idx not in conflicted
        ]

    def _is_conflicting(self, a: str, b: str) -> bool:
        """LLM-based conflict check between two principles.

        Tolerates replies like "Y." or "y\\n" by matching only the leading
        character after normalization.
        """
        prompt = f"""
        判断两个原则是否矛盾：
        1. {a}
        2. {b}
        只需回复Y/N
        """
        reply = self._call_llm(prompt).strip().lower()
        return reply.startswith('y')

    def _call_llm(self, prompt: str, llm_type: str = "deepseek", llm_model: str = "deepseek-chat") -> str:
        """Send *prompt* to the configured LLM backend and return the raw reply text."""
        client = self._get_client(llm_type)
        return utils.call_ai(client, prompt, system_prompt=None, model=llm_model)

    def _get_client(self, llm_type: str) -> "OpenAI":
        """Return a cached API client for *llm_type*, creating it on first use."""
        if llm_type not in self._clients:
            if llm_type == "openai":
                self._clients[llm_type] = OpenAI(
                    api_key=os.getenv("OPENAI_API_KEY"),
                    base_url="https://api.openai.com/v1"
                )
            else:  # default: DeepSeek's OpenAI-compatible endpoint
                self._clients[llm_type] = OpenAI(
                    api_key=os.getenv("DEEPSEEK_API_KEY"),
                    base_url="https://api.deepseek.com/v1"
                )
        return self._clients[llm_type]


# Shared module-level engine instance for importers of this module.
evolve_ai_engine = EvolveAIEngine()

if __name__ == "__main__":
    # Demo: interactively refine a travel plan until the user accepts it.
    demo_task = "制定一个到西安的旅行计划"
    solution = evolve_ai_engine.optimize(demo_task)
