Spaces:
				
			
			
	
			
			
		Sleeping
		
	
	
	
			
			
	
	
	
	
		
		
		Sleeping
		
	Create orchestrator.py
Browse files- orchestrator.py +208 -0
    	
        orchestrator.py
    ADDED
    
    | @@ -0,0 +1,208 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            import json
         | 
| 2 | 
            +
            import logging
         | 
| 3 | 
            +
            from enum import Enum, auto
         | 
| 4 | 
            +
            from llm_client import LLMClient
         | 
| 5 | 
            +
            from prompts import Prompts
         | 
| 6 | 
            +
             | 
| 7 | 
            +
            # ==============================================================================
         | 
| 8 | 
            +
            # --- 日志系统配置 (双日志系统) ---
         | 
| 9 | 
            +
            # ==============================================================================
         | 
| 10 | 
            +
             | 
| 11 | 
            +
            # 1. 调试日志 (orchestrator.log) - 记录所有技术细节
         | 
| 12 | 
            +
            #    用于开发者调试,包含LLM返回的原始信息、错误堆栈等。
         | 
| 13 | 
            +
            debug_logger = logging.getLogger('orchestrator_logger')
         | 
| 14 | 
            +
            debug_logger.setLevel(logging.INFO)
         | 
| 15 | 
            +
            if not debug_logger.handlers:
         | 
| 16 | 
            +
                # 使用 mode='a' 来追加日志,而不是覆盖
         | 
| 17 | 
            +
                file_handler = logging.FileHandler('orchestrator.log', mode='a', encoding='utf-8')
         | 
| 18 | 
            +
                formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
         | 
| 19 | 
            +
                file_handler.setFormatter(formatter)
         | 
| 20 | 
            +
                debug_logger.addHandler(file_handler)
         | 
| 21 | 
            +
                # 阻止日志向上传播,避免在控制台输出
         | 
| 22 | 
            +
                debug_logger.propagate = False
         | 
| 23 | 
            +
                # 在每次程序启动时写入一个分隔符,方便区分不同的运行会话
         | 
| 24 | 
            +
                debug_logger.info("\n" + "="*20 + " APPLICATION STARTED " + "="*20)
         | 
| 25 | 
            +
             | 
| 26 | 
            +
             | 
| 27 | 
            +
            # 2. 演示日志 (demo_show.log) - 只记录用户输入和状态变化
         | 
| 28 | 
            +
            #    用于展示和快速回顾对话流程。
         | 
| 29 | 
            +
            demo_logger = logging.getLogger('demo_logger')
         | 
| 30 | 
            +
            demo_logger.setLevel(logging.INFO)
         | 
| 31 | 
            +
            if not demo_logger.handlers:
         | 
| 32 | 
            +
                # 使用 mode='a' 来追加日志
         | 
| 33 | 
            +
                demo_file_handler = logging.FileHandler('demo_show.log', mode='a', encoding='utf-8')
         | 
| 34 | 
            +
                # 使用更简洁的格式,只包含时间戳和消息
         | 
| 35 | 
            +
                demo_formatter = logging.Formatter('%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
         | 
| 36 | 
            +
                demo_file_handler.setFormatter(demo_formatter)
         | 
| 37 | 
            +
                demo_logger.addHandler(demo_file_handler)
         | 
| 38 | 
            +
                # 同样阻止日志向上传播
         | 
| 39 | 
            +
                demo_logger.propagate = False
         | 
| 40 | 
            +
                # 在演示日志中也添加会话启动分隔符
         | 
| 41 | 
            +
                demo_logger.info("\n" + "="*20 + " NEW DEMO SESSION STARTED " + "="*20)
         | 
| 42 | 
            +
             | 
| 43 | 
            +
             | 
| 44 | 
            +
            # ==============================================================================
         | 
| 45 | 
            +
            # --- 核心代码 ---
         | 
| 46 | 
            +
            # ==============================================================================
         | 
| 47 | 
            +
             | 
class DialogueState(Enum):
    """The two phases of the dialogue flow."""

    # Phase 1: gather and clarify requirements from the user.
    REQUIREMENT_ELICITATION = auto()
    # Phase 2: hand over to the AI modeling step.
    AI_MODELING = auto()

class Orchestrator:
    """
    Dialogue orchestrator.

    Owns the conversation history and dialogue state, and runs the
    "discriminator" that decides when to switch from requirement
    elicitation (REQUIREMENT_ELICITATION) to AI modeling (AI_MODELING).
    """

    # Character-count threshold above which a single user message counts as
    # a "long text" and is analyzed on its own. Class-level so it can be
    # tuned per subclass/instance without editing the method body.
    LONG_TEXT_THRESHOLD = 200

    def __init__(self, model="gpt-4-turbo"):
        """Create an orchestrator backed by the given LLM model name."""
        self.llm_client = LLMClient(model=model)
        self.conversation_history = []  # list of {"role": ..., "content": ...} dicts
        self.state = DialogueState.REQUIREMENT_ELICITATION
        self.prompts = Prompts()

        # Record initialization in both logs.
        debug_logger.info(f"Orchestrator initialized. Initial state: {self.state.name}")
        demo_logger.info(f"Initial State: {self.state.name}")

    def _format_history_for_prompt(self) -> str:
        """Render the conversation history as a plain "role: content" transcript."""
        if not self.conversation_history:
            return "对话尚未开始。"
        return "\n".join(f"{msg['role']}: {msg['content']}" for msg in self.conversation_history)

    def _llm_sufficiency_verdict(self, prompt: str, label: str, subject: str = "") -> bool:
        """
        Send a sufficiency-check prompt to the LLM and parse its JSON verdict.

        Shared backend for the history-based and single-text sufficiency
        checks (previously duplicated verbatim in both methods).

        Args:
            prompt: fully formatted prompt expected to yield a JSON object
                like {"sufficient": bool, "reason": ..., "missing_elements": ...}.
            label: lowercase check name used in the log/error messages.
            subject: extra phrase spliced into the "Information..." log lines
                (e.g. " in single text"); empty for the history-based check.

        Returns:
            True only when the model answers {"sufficient": true}. Any
            transport or parsing error is logged and treated as "not
            sufficient" so the caller's flow never breaks (best-effort).
        """
        try:
            response_str = self.llm_client.chat(
                messages=[{"role": "user", "content": prompt}], temperature=0.1
            )
            debug_logger.info(f"{label[0].upper() + label[1:]} response: {response_str}")

            json_response = json.loads(response_str.strip())

            if json_response.get("sufficient") is True:
                debug_logger.info(f"Information{subject} deemed sufficient. Reason: {json_response.get('reason')}")
                return True
            debug_logger.info(f"Information{subject} insufficient. Missing: {json_response.get('missing_elements')}")
            return False
        # The original caught (json.JSONDecodeError, KeyError, Exception);
        # Exception already subsumes the others, so the tuple was redundant.
        except Exception as e:
            debug_logger.error(f"Error during {label}: {e}")
            return False

    def _check_information_sufficiency(self) -> bool:
        """
        [Discriminator - implicit trigger]
        Ask the LLM whether the conversation history already contains
        enough information to start modeling.
        """
        # Fewer than two messages can never be sufficient; skip the
        # LLM round-trip entirely.
        if len(self.conversation_history) < 2:
            return False

        history_str = self._format_history_for_prompt()
        prompt = self.prompts.INFORMATION_SUFFICIENCY_CHECK.format(conversation_history=history_str)
        return self._llm_sufficiency_verdict(prompt, "sufficiency check")

    def _check_single_text_sufficiency(self, text_block: str) -> bool:
        """
        [Discriminator - single-text analysis]
        Ask the LLM whether one large block of text, on its own, contains
        enough information to start modeling.
        """
        prompt = self.prompts.SINGLE_TEXT_SUFFICIENCY_CHECK.format(text_block=text_block)
        return self._llm_sufficiency_verdict(
            prompt, "single text sufficiency check", subject=" in single text"
        )

    def _check_explicit_trigger(self, user_input: str) -> bool:
        """
        [Discriminator - explicit trigger]
        Ask the LLM whether the user explicitly requested to start modeling.
        Errors are logged and treated as "no trigger".
        """
        prompt = self.prompts.EXPLICIT_TRIGGER_CHECK.format(user_input=user_input)
        try:
            intent = self.llm_client.identify_intent(prompt, temperature=0.1)
            debug_logger.info(f"Explicit trigger check intent: '{intent}'")
            if "StartModeling" in intent:
                debug_logger.info("Explicit trigger to model detected.")
                return True
            return False
        except Exception as e:
            debug_logger.error(f"Error during explicit trigger check: {e}")
            return False

    def _discriminator(self, user_input: str) -> bool:
        """
        Main discriminator: should we switch from requirement elicitation
        to AI modeling?

        Rules, in priority order:
          1. The user explicitly asked to start modeling.
          2. The input is a long text that is sufficient on its own.
          3. The accumulated conversation history is sufficient.
        """
        # Rule 1 (highest priority): an explicit request wins outright.
        if self._check_explicit_trigger(user_input):
            return True

        # Rule 2: long inputs get the dedicated single-text analyzer.
        if len(user_input) > self.LONG_TEXT_THRESHOLD:
            debug_logger.info(f"Long input detected (length: {len(user_input)}), using single text sufficiency check.")
            if self._check_single_text_sufficiency(user_input):
                return True

        # Rule 3: for regular incremental dialogue -- or when the long-text
        # analysis was inconclusive -- fall back to the history-based check.
        if self._check_information_sufficiency():
            return True

        return False

    def process_user_message(self, user_input: str) -> str:
        """
        Process one turn of user input and return the assistant's reply.

        Side effects: appends the user message (and, outside the modeling
        phase, the assistant reply) to the conversation history, may flip
        the state to AI_MODELING, and writes to both log files.
        """
        # Record the user input in the demo log.
        demo_logger.info(f"User Input: {user_input}")

        self.conversation_history.append({"role": "user", "content": user_input})

        # Already modeling: just tell the user to wait; no state change and
        # no assistant message is appended to the history in this branch.
        if self.state == DialogueState.AI_MODELING:
            response_content = "当前已处于建模阶段,请等待模型生成结果。"
            demo_logger.info(f"Current State: {self.state.name} (No change)")
            return response_content

        # Ask the discriminator whether to switch state.
        should_switch = self._discriminator(user_input)

        if should_switch:
            self.state = DialogueState.AI_MODELING
            # Record the transition in both logs.
            debug_logger.info(f"State changed to: {self.state.name}")
            demo_logger.info(f"State Changed To: {self.state.name}")
            response_content = self.prompts.AI_MODELING_NOTICE
        else:
            # Still eliciting requirements: log the unchanged state and let
            # the LLM continue the conversation under the system prompt.
            debug_logger.info(f"State remains: {self.state.name}")
            demo_logger.info(f"State Remains: {self.state.name}")
            system_prompt = self.prompts.REQUIREMENT_ELICITATION_SYSTEM_PROMPT
            messages = [{"role": "system", "content": system_prompt}] + self.conversation_history
            response_content = self.llm_client.chat(messages=messages)

        self.conversation_history.append({"role": "assistant", "content": response_content})
        return response_content

    def reset(self):
        """Reset the conversation history and return to the initial state."""
        self.conversation_history = []
        self.state = DialogueState.REQUIREMENT_ELICITATION

        # Record the reset event in both logs.
        debug_logger.info("Orchestrator has been reset.")
        demo_logger.info("="*20 + " SESSION RESET " + "="*20)
        demo_logger.info(f"Initial State: {self.state.name}")