import sys
import os
import time
import copy # 仍然可能在其他地方需要，但要小心使用
import logging
from multiprocessing import Pool, Manager # Manager might not be needed with current structure
from functools import partial
import datetime as dt
import json
import openai # 保持导入，即使当前未使用
from openai import OpenAI # 确保OpenAI v1.x.x的用法

from dotenv import load_dotenv
import os

# Load environment variables from a local .env file (if present)
load_dotenv()
# Read the OpenAI API key; NOTE: os.getenv returns None when the variable is unset
api_key = os.getenv('OPENAI_API_KEY')

# Force UTF-8 on any already-installed stream log handlers so that Chinese
# log messages render correctly regardless of the console's default encoding.
# (This snippet existed in earlier versions and is kept as-is.)
for handler in logging.root.handlers:
    if isinstance(handler, logging.StreamHandler) and hasattr(handler.stream, 'reconfigure'):
        try:
            handler.stream.reconfigure(encoding='utf-8')
        except Exception as e:
            # reconfigure() can fail in some environments (e.g. background
            # processes without a console); ignore and keep the handler as-is.
            pass


# Project root directory, added to sys.path so local packages import cleanly
ROOT_DIR = r"D:\code\MAS_DA" # adjust to your actual checkout path
sys.path.append(ROOT_DIR)

import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np

import pcse
from pcse.input import CABOFileReader, YAMLAgroManagementReader, ExcelWeatherDataProvider
from pcse.base import ParameterProvider
from pcse.models import Wofost72_WLP_FD
from pcse.input import DummySoilDataProvider, WOFOST72SiteDataProvider

# --- Global and PCSE logging configuration ---
# Main application logger
logger = logging.getLogger("AssimilationSystem")
logger.setLevel(logging.INFO) # level of the main logger
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# File handler (explicit UTF-8 so Chinese messages are written correctly)
file_handler = logging.FileHandler('multi_agent_assimilation.log', mode='w', encoding='utf-8') # explicit encoding
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)

# Console handler
stream_handler = logging.StreamHandler(sys.stdout) # explicitly use sys.stdout
stream_handler.setFormatter(log_formatter)
logger.addHandler(stream_handler)

# PCSE-specific logging configuration: keep the crop model quiet except errors
pcse_logger = logging.getLogger('pcse')
pcse_logger.handlers.clear() # drop any handlers PCSE may have installed itself
pcse_logger.setLevel(logging.ERROR) # only record PCSE errors
pcse_console_handler = logging.StreamHandler(sys.stderr) # PCSE errors go to stderr
pcse_console_handler.setFormatter(logging.Formatter('PCSE %(levelname)s: %(message)s'))
pcse_logger.addHandler(pcse_console_handler)
pcse_logger.propagate = False # keep PCSE messages from reaching the root logger

# Disable PCSE's built-in file logging (if it tries to create one)
if hasattr(pcse, 'base') and hasattr(pcse.base, 'VariableKiosk'):
    # PCSE's internal log file is normally controlled via VariableKiosk's
    # logfile attribute, but the more direct way is to ensure its logger
    # never writes to a file. Currently a deliberate no-op placeholder.
    pass


# --- OpenAI API configuration (kept even though not always used) ---
# Fix: os.getenv returns None when OPENAI_API_KEY is unset; normalize to ""
# so the startswith()/len()/slice operations below cannot raise
# AttributeError/TypeError at import time.
OPENAI_API_KEY = api_key or ""
OPENAI_API_BASE = "https://api.openai.com/v1" # or your proxy endpoint

if OPENAI_API_KEY.startswith("sk-") and len(OPENAI_API_KEY) > 50 : # Basic check
    openai.api_key = OPENAI_API_KEY
    openai.api_base = OPENAI_API_BASE
else:
    logger.warning("OpenAI API key seems invalid or not set. LLM calls will likely fail. Key: %s", OPENAI_API_KEY[:10]+"...")


# --- LLM call logic (kept; ObservationQualityAgent does not call it directly) ---
def construct_llm_prompt(base_prompt: str, context_data: dict = None, query_type: str = "general") -> tuple[str, str]:
    """Build the (system, user) message pair for an LLM query.

    Parameters
    ----------
    base_prompt : str
        Free-form request text. For query_type == "obs_quality" it is
        intentionally ignored and replaced by a dedicated prompt template.
    context_data : dict, optional
        Extra data; serialized as JSON into the generic prompt, and read
        field-by-field for the "obs_quality" template.
    query_type : str
        "general" or "obs_quality".

    Returns
    -------
    tuple[str, str]
        (system message content, user prompt text).
    """
    def _fmt(value) -> str:
        # Fix: the original f"{...:.2f}" raised TypeError when a context key
        # was missing/None. Format numbers to two decimals, otherwise "N/A".
        try:
            return f"{float(value):.2f}"
        except (TypeError, ValueError):
            return "N/A"

    system_message_content = """你是一位专门从事作物模型（特别是WOFOST）、遥感数据同化和农业数据分析的专家。
    请用中文提供简洁、可操作的建议。回答应该专业但易于理解。"""
    user_prompt = f"{base_prompt}\n"
    if context_data:
        user_prompt += f"参考数据:\n{json.dumps(context_data, indent=2, default=str, ensure_ascii=False)}\n"

    if query_type == "obs_quality":
        ctx = context_data or {}  # guard: original crashed on .get when context_data was None
        crop_stage_guess = "未知"
        lai_history = ctx.get('lai_history', []) or []
        if lai_history:
            # Heuristic growth-stage guess from the LAI trajectory shape.
            if len(lai_history) < 3:
                crop_stage_guess = "早期生长期"
            elif lai_history[-1] > max(lai_history[:-1] or [0]) and lai_history[-1] < 3:
                crop_stage_guess = "营养生长期"
            elif lai_history[-1] >= 3:
                crop_stage_guess = "生长高峰期或成熟期"
            elif lai_history[-1] < (lai_history[-2] if len(lai_history) > 1 else lai_history[-1]):
                crop_stage_guess = "衰老期"

        # NOTE: this deliberately replaces (not appends to) the generic prompt.
        user_prompt = (
            "请评估当前LAI观测值是否适合同化到WOFOST模型中。"
            "这是冬小麦，请考虑其典型生长模式。"
            "具体建议使用以下决策之一：'ASSIMILATE'（同化）, 'SKIP'（跳过）, 或 'ADJUST_UNCERTAINTY'（调整不确定性）。"
            "如果选择'ADJUST_UNCERTAINTY'，请建议一个新的相对不确定性值（例如：0.2表示20%）。"
            "请简要说明理由。\n"
            f"观测日期: {ctx.get('current_date')}\n"
            f"观测LAI值: {_fmt(ctx.get('current_obs_lai'))}\n"
            f"上次观测LAI值: {ctx.get('prev_obs_lai', 'N/A')}\n"
            f"模型预报LAI值（集合平均）: {_fmt(ctx.get('model_forecast_lai'))}\n"
            f"LAI观测历史（按时间顺序）: {ctx.get('lai_history')}\n"
            f"当前观测不确定性（标准差）: {_fmt(ctx.get('current_obs_std'))}\n"
            f"估计作物生长阶段: {crop_stage_guess}\n"
            "请以决策关键词开始回复（ASSIMILATE, SKIP, ADJUST_UNCERTAINTY），后跟理由和数值（如适用）。"
            "例如：'ASSIMILATE: 观测值合理。' 或 'SKIP: 观测值异常偏低。' 或 'ADJUST_UNCERTAINTY 0.3: 变异性较大，建议增加不确定性。'"
        )

    return system_message_content, user_prompt


def llm_call(base_prompt: str, context_data: dict = None, agent_name: str = "LLM", query_type: str = "general") -> str:
    """Send one chat-completion request to the OpenAI API.

    Never raises: when the key is not configured, the client cannot be
    created, or the request fails, a fallback string is returned. For
    query_type == "obs_quality" every fallback starts with a valid decision
    keyword so downstream parsing still works.

    Returns:
        str: the model's response text, or a fallback message.
    """
    # Fix: OPENAI_API_KEY may be None/"" when the env var is unset — check
    # truthiness before calling str methods (original raised AttributeError).
    if not (OPENAI_API_KEY and OPENAI_API_KEY.startswith("sk-") and len(OPENAI_API_KEY) > 50):
        logger.warning(f"LLM call for {agent_name} ({query_type}) skipped: API key not configured.")
        if query_type == "obs_quality": return "ASSIMILATE: LLM not configured, defaulting to assimilate."
        return "LLM not configured."
    try:
        client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)  # OpenAI v1.x client
    except Exception as e:
        logger.error(f"Failed to initialize OpenAI client: {e}")
        if query_type == "obs_quality": return "ASSIMILATE: LLM client init failed, defaulting to assimilate."
        return "LLM client init error."

    logger.info(f"[{agent_name}-LLMQuery-{query_type}] PROMPT (base): {base_prompt}")
    if context_data:
        logger.debug(f"[{agent_name}-LLMQuery-{query_type}] CONTEXT: {json.dumps(context_data, indent=2, default=str, ensure_ascii=False)}")

    system_message, user_message = construct_llm_prompt(base_prompt, context_data, query_type)

    try:
        response = client.chat.completions.create(
            model="gpt-4o", # Or your preferred model
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": user_message}
            ],
            temperature=0.5,
            max_tokens=1000  # headroom so Chinese answers are not truncated
        )
        response_text = response.choices[0].message.content.strip()
        logger.info(f"[{agent_name}-LLMResponse-{query_type}] RESPONSE: {response_text}")
        return response_text

    except Exception as e:
        logger.error(f"OpenAI API call failed for {agent_name} ({query_type}): {e}")
        if query_type == "obs_quality":
            return "ASSIMILATE: LLM call failed, defaulting to assimilate."
        return f"LLM call error: {e}"



# --- Agent base class ---
class BaseAgent:
    """Common plumbing for all agents: a display name, a per-agent logger
    (child logger, propagation enabled so the root config applies) and a
    shared-knowledge dict used as a blackboard between agents."""

    def __init__(self, name: str, shared_knowledge: dict = None):
        self.name = name
        self.logger = logging.getLogger(f"Agent.{name}")
        self.logger.propagate = True
        # Avoid the shared-mutable-default pitfall: each agent gets its own
        # fresh dict unless one is explicitly supplied.
        if shared_knowledge is None:
            shared_knowledge = {}
        self.shared_knowledge = shared_knowledge

    def log(self, message: str, level: int = logging.INFO, **kwargs):
        """Log *message* at *level*, forwarding extra logging kwargs."""
        self.logger.log(level, message, **kwargs)

class LLMAssistantAgent(BaseAgent):
    """Agent dedicated to LLM interaction: issues queries, parses the raw
    responses into structured dicts, and supplies safe defaults when the
    LLM is unavailable or a call fails."""

    def __init__(self, shared_knowledge: dict):
        super().__init__("LLMAssistant", shared_knowledge)
        # Fix: guard against OPENAI_API_KEY being None (os.getenv returns
        # None when the env var is unset) before calling str methods.
        self.llm_enabled = bool(OPENAI_API_KEY) and OPENAI_API_KEY.startswith("sk-") and len(OPENAI_API_KEY) > 50

    def query_llm(self, query_type: str, context: dict) -> dict:
        """Run one LLM query and return a structured result dict.

        Never raises: any failure falls back to _get_default_response().
        """
        if not self.llm_enabled:
            return self._get_default_response(query_type)

        try:
            response = llm_call(
                base_prompt="",  # prompt text is generated by construct_llm_prompt
                context_data=context,
                agent_name=self.name,
                query_type=query_type
            )
            parsed_response = self._parse_llm_response(response, query_type)
            self.log(f"LLM响应解析结果: {parsed_response}")
            return parsed_response
        except Exception as e:
            self.log(f"LLM查询失败: {e}", logging.ERROR)
            return self._get_default_response(query_type)

    def _parse_llm_response(self, response: str, query_type: str) -> dict:
        """Parse raw LLM text into a structured dict.

        For "obs_quality" the expected shapes are 'ASSIMILATE: reason',
        'SKIP: reason' or 'ADJUST_UNCERTAINTY 0.3: reason'.
        """
        if query_type == "obs_quality":
            parts = response.split(":", 1)
            decision = parts[0].strip()
            reason = parts[1].strip() if len(parts) > 1 else ""

            if "ADJUST_UNCERTAINTY" in decision:
                # Fix: the original did decision.split()[1] + float() unguarded,
                # which raised IndexError/ValueError when the LLM omitted or
                # misplaced the numeric value and silently fell through to the
                # generic exception handler. Scan tokens for the first number.
                new_std = None
                for token in decision.split()[1:]:
                    try:
                        new_std = float(token)
                        break
                    except ValueError:
                        continue
                if new_std is None:
                    # No usable number: fall back to assimilation explicitly
                    # instead of crashing into the default handler.
                    return {
                        'action': 'ASSIMILATE',
                        'reason': f'ADJUST_UNCERTAINTY无法解析数值，默认同化。原始回复: {response}'
                    }
                return {
                    'action': 'ADJUST_UNCERTAINTY',
                    'new_obs_std': new_std,
                    'reason': reason
                }
            else:
                return {
                    'action': decision,
                    'reason': reason
                }
        elif query_type == "report_section":
            # Report-section responses are plain text content.
            return {
                'content': response.strip() if response else "暂无内容"
            }
        elif query_type == "report_enhancement":
            # Report-enhancement responses are plain text analysis.
            return {
                'enhanced_analysis': response.strip() if response else "暂无增强分析"
            }

        # Unknown query types: hand back the raw text.
        return {'response': response}

    def _get_default_response(self, query_type: str) -> dict:
        """Fallback responses used when the LLM is unavailable or failed."""
        if query_type == "obs_quality":
            return {'action': 'ASSIMILATE', 'reason': 'LLM不可用，默认同化'}
        elif query_type == "report_section":
            return {'content': '由于LLM服务不可用，无法生成详细分析。'}
        elif query_type == "report_enhancement":
            return {'enhanced_analysis': '由于LLM服务不可用，无法提供增强分析。'}
        return {'response': 'LLM服务不可用'}

# --- Configuration agent ---
class ConfigAgent(BaseAgent):
    """Builds the run configuration and publishes it to shared knowledge."""

    def __init__(self, shared_knowledge: dict):
        super().__init__("ConfigAgent", shared_knowledge)
        # Populated by load_initial_config().
        self.config = None

    def load_initial_config(self) -> dict:
        """Assemble paths, observation dates and ensemble settings.

        Side effects: creates the 'excel' and 'png' output sub-directories
        and stores the dict under shared_knowledge['config'].

        Returns:
            dict: the configuration consumed by all other agents.

        Raises:
            FileNotFoundError: if ROOT_DIR does not exist on this machine.
        """
        self.log("Loading initial configuration...")
        if not os.path.exists(ROOT_DIR):
             self.log(f"ROOT_DIR '{ROOT_DIR}' does not exist. Please check the path.", logging.ERROR)
             raise FileNotFoundError(f"ROOT_DIR '{ROOT_DIR}' does not exist.")

        config = {
            'data_dir': os.path.join(ROOT_DIR, "data"),
            'crop_file': os.path.join(ROOT_DIR, "data/crop/winterwheat.crop"),
            'soil_file': os.path.join(ROOT_DIR, "data/soil/ec3.soil"),
            'agro_file': os.path.join(ROOT_DIR, "data/agro/dachang.agro"),
            'weather_file': os.path.join(ROOT_DIR, "data/meteo/dc.xlsx"),
            'observation_file': os.path.join(ROOT_DIR, "high.xlsx"), # per-pixel LAI observation file
            'ensemble_size': 50,
            'random_seed': 10000,
            'output_dir': os.path.join(os.getcwd(), 'output_agents_fixed_v2'), # renamed output directory
            # One entry per observation column in the Excel file, in order.
            'obs_dates': [
                            dt.date(2022,11,3),
                            dt.date(2022,11,5),dt.date(2022,11,10),dt.date(2022,11,15),
                            dt.date(2022,12,5),dt.date(2022,12,8),dt.date(2022,12,10),
                            dt.date(2022,12,28),dt.date(2023,1,4),dt.date(2023,1,9),
                            dt.date(2023,1,17),dt.date(2023,1,22),dt.date(2023,1,29),
                            dt.date(2023,2,3),dt.date(2023,2,8),dt.date(2023, 2, 26),
                            dt.date(2023,2,28),dt.date(2023,3,5),dt.date(2023,3,10),
                            dt.date(2023,3,25),dt.date(2023,3,30),
                            dt.date(2023,4,9),dt.date(2023,4,19),dt.date(2023,4,29),
                            dt.date(2023,5,2), dt.date(2023,5,7),dt.date(2023,5,22),
                            dt.date(2023,5,29),dt.date(2023,6,6),dt.date(2023,6,8),
                            dt.date(2023,6,13),dt.date(2023,6,16)],
            # 'obs_dates': [dt.date(2022,11,3),dt.date(2023,3,25),dt.date(2023,5,7),dt.date(2023,6,6)],
            'num_processes': 1, # kept at 1 for now while debugging deepcopy issues
            'max_pixels_to_process': 10, # Limit for testing
        }
        
        os.makedirs(os.path.join(config['output_dir'], 'excel'), exist_ok=True)
        os.makedirs(os.path.join(config['output_dir'], 'png'), exist_ok=True)
        
        self.config = config
        self.shared_knowledge['config'] = self.config 
        self.log("Initial configuration loaded.")
        return self.config

# --- Data preparation agent ---
class DataPreparationAgent(BaseAgent):
    """Loads the WOFOST model inputs and the per-pixel LAI observations."""

    def __init__(self, shared_knowledge: dict):
        super().__init__("DataPrepAgent", shared_knowledge)
        # Holds 'parameters', 'agromanagement' and 'wdp' after load_wofost_inputs().
        self.wofost_inputs = {}

    def load_wofost_inputs(self):
        """Read crop/soil/site parameters, agromanagement and weather data.

        Publishes the loaded providers under shared_knowledge['wofost_inputs'].

        Returns:
            bool: True on success, False on any error (details are logged).
        """
        config = self.shared_knowledge.get('config')
        if not config:
            self.log("Configuration not found.", logging.ERROR); return False
        try:
            self.log("Loading WOFOST model inputs...")
            cropd = CABOFileReader(config['crop_file'])
            soild = CABOFileReader(config['soil_file'])
            # Fixed site settings passed to the WOFOST site data provider.
            site_params_dict = {"WAV": 100., "SMLIM": 0.4, "IFUNRN": 0, "NOTINF": 0, "SSI": 0, "SSMAX": 0.}
            sited = WOFOST72SiteDataProvider(**site_params_dict)
            
            self.wofost_inputs['parameters'] = ParameterProvider(cropdata=cropd, soildata=soild, sitedata=sited)
            self.wofost_inputs['agromanagement'] = YAMLAgroManagementReader(config['agro_file'])
            self.wofost_inputs['wdp'] = ExcelWeatherDataProvider(config['weather_file'])
            self.shared_knowledge['wofost_inputs'] = self.wofost_inputs
            self.log("WOFOST inputs loaded.")
            return True
        except Exception as e:
            self.log(f"Error loading WOFOST inputs: {e}", logging.ERROR, exc_info=True); return False

    def load_observation_data(self):
        """Read the observation Excel file into per-pixel task dicts.

        Assumed row layout: column 0 = pixel id, column 1 skipped
        (presumably metadata — TODO confirm against the file), columns
        2 .. 2+N-1 = LAI values matching config['obs_dates'] in order.
        NaNs are replaced with 0.0; rows with the wrong column count are
        skipped. Publishes the tasks under shared_knowledge['pixel_tasks'].

        Returns:
            bool: True when at least one valid pixel task was built.
        """
        config = self.shared_knowledge.get('config')
        if not config: self.log("配置未找到。", logging.ERROR); return False
        try:
            obs_file = config['observation_file']
            self.log(f"尝试加载观测数据: {obs_file}")
            if not os.path.exists(obs_file):
                self.log(f"观测数据文件不存在: {obs_file}", logging.ERROR); return False
            
            obs_df = pd.read_excel(obs_file)
            pixel_tasks = []
            num_expected_obs = len(config['obs_dates'])
            self.log(f"预期观测点数量: {num_expected_obs}")

            for row_idx, row in enumerate(obs_df.itertuples(index=False)): 
                pixel_id_val = str(row[0]) 
                try:
                    # Slice out exactly the expected number of LAI columns.
                    lai_values_raw = row[2 : 2 + num_expected_obs]
                    lai_values = pd.Series(lai_values_raw).astype(float)
                    
                    if len(lai_values) != num_expected_obs:
                        self.log(f"像素 {pixel_id_val} (行 {row_idx+1}) LAI数据列数({len(lai_values)})与观测日期数({num_expected_obs})不匹配. 跳过.", logging.WARNING)
                        continue
                        
                    if lai_values.isnull().any():
                        self.log(f"像素 {pixel_id_val} (行 {row_idx+1}) 包含无效值，将被替换为0.0", logging.WARNING)
                        lai_values = lai_values.fillna(0.0) 
                        
                    pixel_tasks.append({
                        'pixel_id': pixel_id_val, 
                        'lai_series': lai_values, 
                        'obs_dates': config['obs_dates']
                    })
                except Exception as e: 
                    self.log(f"处理行 {row_idx+1} (像素ID尝试为 {pixel_id_val}) 时出错: {e}. 跳过.", logging.WARNING, exc_info=True)
                
            self.log(f"成功准备了 {len(pixel_tasks)} 个有效像素任务")
            if not pixel_tasks:
                self.log("没有有效的像素数据", logging.ERROR); return False
            
            self.shared_knowledge['pixel_tasks'] = pixel_tasks
            return True
        except Exception as e:
            self.log(f"加载观测数据时出错: {e}", logging.ERROR, exc_info=True); return False

# --- Parameter advisor agent ---
class ParameterAdvisorAgent(BaseAgent):
    """Supplies ensemble perturbation draws and observation-error heuristics."""

    def __init__(self, shared_knowledge: dict):
        super().__init__("ParamAdvisorAgent", shared_knowledge)

    def get_ensemble_perturbation_strategy(self, ensemble_size: int) -> dict:
        """Draw per-member overrides for TDWI and SPAN, seeded for reproducibility."""
        self.log("Defining ensemble perturbation strategy...")
        seed = self.shared_knowledge.get('config', {}).get('random_seed', 10000)
        np.random.seed(seed)
        # NOTE: the draw order (TDWI first, then SPAN) fixes the RNG stream
        # and hence the ensemble; do not reorder.
        overrides = {}
        overrides["TDWI"] = np.random.normal(150., 30., ensemble_size)
        overrides["SPAN"] = np.random.normal(31, 2, ensemble_size)
        self.log(f"Perturbation strategy defined: {list(overrides.keys())}")
        return overrides

    def get_initial_observation_uncertainty(self, observed_lai_value: float) -> float:
        """Observation std-dev: 15% relative, floored at an absolute 0.05
        for near-zero LAI values."""
        if observed_lai_value > 0.01:
            return observed_lai_value * 0.15
        return 0.05


class ObservationQualityAgent(BaseAgent):
    """Decides per LAI observation whether to assimilate it, skip it, or
    assimilate with inflated uncertainty.

    A rule engine handles clear-cut cases; ambiguous cases are intended to
    be escalated to the LLM assistant (see NOTE in assess_observation).
    """

    def __init__(self, shared_knowledge: dict):
        super().__init__("ObsQualityAgent", shared_knowledge)
        self.llm_assistant = LLMAssistantAgent(shared_knowledge)
        # Config switch; defaults to True when absent.
        self.use_llm = shared_knowledge.get('config', {}).get('use_llm', True)

    def assess_observation(self, current_obs_lai: float, current_obs_std: float,
                         current_date: dt.date, model_forecast_lai: float,
                         lai_history: list, prev_obs_lai: float = None) -> dict:
        """Assess one observation.

        Returns a dict with at least 'action' ('ASSIMILATE' | 'SKIP' |
        'ADJUST_UNCERTAINTY') and 'reason'; 'ADJUST_UNCERTAINTY' results
        also carry 'new_obs_std'.
        """
        context = {
            'current_obs_lai': current_obs_lai,
            'current_obs_std': current_obs_std,
            'current_date': current_date,
            'model_forecast_lai': model_forecast_lai,
            'lai_history': lai_history,
            'prev_obs_lai': prev_obs_lai
        }

        # Rules first for the unambiguous cases.
        rule_based_result = self._rule_based_assessment(context)
        if rule_based_result['action'] != 'UNCERTAIN':
            return rule_based_result

        # NOTE(review): _rule_based_assessment currently never returns
        # 'UNCERTAIN' (its default is 'ASSIMILATE'), so this LLM branch is
        # unreachable in practice — confirm whether the default rule should
        # escalate borderline cases instead.
        if self.use_llm:
            llm_result = self.llm_assistant.query_llm("obs_quality", context)
            self.log(f"LLM决策: {llm_result}")
            return llm_result

        return {'action': 'ASSIMILATE', 'reason': '未使用LLM，默认同化'}

    def _rule_based_assessment(self, context: dict) -> dict:
        """Rule engine for clear-cut observation-quality decisions."""
        current_obs_lai = context['current_obs_lai']

        # Rule 1: invalid value. (Fix: None guard added — np.isnan(None)
        # raises TypeError.)
        if current_obs_lai is None or np.isnan(current_obs_lai) or current_obs_lai < 0:
            return {'action': 'SKIP', 'reason': '无效的LAI值'}

        # Rule 2: implausibly high value — keep it but triple the uncertainty.
        # (Fix: added the 'reason' key for consistency with the other rules.)
        if current_obs_lai > 10:
            return {'action': 'ADJUST_UNCERTAINTY',
                    'new_obs_std': context['current_obs_std'] * 3,
                    'reason': f'观测LAI({current_obs_lai:.2f})异常偏高(>10)，增加不确定性.'}

        # Rule 3: extreme drop (>75%) vs the previous observation that the
        # model contradicts — discard the observation.
        if context['prev_obs_lai'] is not None and context['prev_obs_lai'] > 0.3:
            if current_obs_lai < context['prev_obs_lai'] * 0.25 and context['model_forecast_lai'] > current_obs_lai * 2.0:
                return {'action': 'SKIP',
                        'reason': f'LAI相较前次({context["prev_obs_lai"]:.2f})发生极剧烈下降至{current_obs_lai:.2f}(减少超过75%)，且与模型预测({context["model_forecast_lai"]:.2f})严重不符，跳过此观测点.'}

        # Rule 4: significant drop (>50%) vs the previous observation that the
        # model contradicts — inflate the uncertainty strongly.
        if context['prev_obs_lai'] is not None and context['prev_obs_lai'] > 0.5:
            if current_obs_lai < context['prev_obs_lai'] * 0.50 and context['model_forecast_lai'] > current_obs_lai * 1.5:
                return {'action': 'ADJUST_UNCERTAINTY', 'new_obs_std': context['current_obs_std'] * 2.5,
                        'reason': f'LAI相较前次({context["prev_obs_lai"]:.2f})大幅下降至{current_obs_lai:.2f}(减少超过50%)且与模型({context["model_forecast_lai"]:.2f})不符, 显著增加不确定性.'}

        # Rule 5: near-zero observation while the model predicts clear growth.
        if current_obs_lai < 0.1 and context['model_forecast_lai'] > 0.5:
            if context['model_forecast_lai'] > current_obs_lai * 5.0:
                return {'action': 'ADJUST_UNCERTAINTY', 'new_obs_std': context['current_obs_std'] * 2.0,
                        'reason': f'观测LAI({current_obs_lai:.2f})极低，但模型预测LAI({context["model_forecast_lai"]:.2f})已显著增长，观测值可疑，增加不确定性.'}

        # Rule 6: >3x disagreement in either direction with the model forecast
        # (small absolute values are excluded from this check).
        if context['model_forecast_lai'] > 0.2:
            if not (current_obs_lai < 0.15 and context['model_forecast_lai'] < 0.45):
                if current_obs_lai > context['model_forecast_lai'] * 3.0 or context['model_forecast_lai'] > current_obs_lai * 3.0:
                    return {'action': 'ADJUST_UNCERTAINTY', 'new_obs_std': context['current_obs_std'] * 2.0,
                            'reason': f'观测值({current_obs_lai:.2f})与模型预测({context["model_forecast_lai"]:.2f})差异超过3倍, 增加不确定性.'}

        # Default: no rule triggered — assimilate.
        return {'action': 'ASSIMILATE', 'reason': '观测值在可接受范围内.'}
    

#         # Rule 7: LAI stagnation or slight decrease during expected peak growth (winter wheat specific)
#         # This needs careful consideration of crop calendar knowledge.
#         # For example, if in April for winter wheat and LAI is not increasing or slightly drops from a mid-range value.
#         # This is a more advanced rule and might require phenology estimation.
#         # month = current_date.month
#         # if prev_obs_lai and 0.5 < prev_obs_lai < 3.0 and month in [3, 4]: # March, April for Northern Hemisphere Winter Wheat
#         #     if current_obs_lai <= prev_obs_lai * 1.05: # Stagnation or slight decrease when rapid growth expected
#         #         self.log(f"Decision for {current_date}: ADJUST_UNCERTAINTY (Stagnation/slight decrease in peak growth: {current_obs_lai:.2f} from {prev_obs_lai:.2f})")
#         #         return {'action': 'ADJUST_UNCERTAINTY', 'new_obs_std': current_obs_std * 1.5,
#         #                 'reason': f'在预期的快速生长期({current_date.strftime("%B")})LAI停滞或轻微下降({current_obs_lai:.2f} from {prev_obs_lai:.2f}), 增加不确定性.'}


#         # Default: Assimilate if no other rule triggered
#         self.log(f"Decision for {current_date}: ASSIMILATE (Obs={current_obs_lai:.2f}, Model={model_forecast_lai:.2f})")
#         return {'action': 'ASSIMILATE', 'reason': '观测值在可接受范围内.'}


# --- WOFOST assimilation agent ---
class WofostAssimilationAgent(BaseAgent):
    # Per-member ParameterProvider objects, keyed by id(model instance).
    # Class-level storage is a workaround for pickling issues: if models are
    # directly modified and then passed around in multiprocessing, their
    # internal state might not be fully captured, so the parameter objects
    # are kept here instead.
    ensemble_params_store = {} # Use a class variable if Manager is not used/problematic

    @staticmethod
    def _initialize_model_components(config, wofost_inputs_global):
        """Return deep copies of (parameters, agromanagement, wdp) so each
        run works on private instances. *config* is accepted for interface
        compatibility but not read here."""
        keys = ('parameters', 'agromanagement', 'wdp')
        return tuple(copy.deepcopy(wofost_inputs_global[key]) for key in keys)

    @staticmethod
    def _create_wofost_model_instance(params, wdp, agro):
        """Thin factory wrapper around the water-limited WOFOST 7.2 model."""
        model = Wofost72_WLP_FD(params, wdp, agro)
        return model

    @staticmethod
    def _create_ensemble(base_parameters, wdp, agromanagement, perturbation_params, ensemble_size, pixel_id):
        """Build an ensemble of WOFOST models with per-member parameter overrides.

        Each member's ParameterProvider is kept in ensemble_params_store,
        keyed by id(model), for later reuse when forecasting.

        Raises:
            ValueError: if not a single valid member could be created.
        """
        # Drop parameters from a previously processed pixel (matters when
        # several pixels are run sequentially in the same process).
        WofostAssimilationAgent.ensemble_params_store.clear()

        # Private copies so the shared inputs are never mutated.
        params_template = copy.deepcopy(base_parameters)
        weather_copy = copy.deepcopy(wdp)  # wdp is often read-only; copied for safety
        agro_copy = copy.deepcopy(agromanagement)

        ensemble = []
        for idx in range(ensemble_size):
            try:
                member_params = copy.deepcopy(params_template)
                # Apply this member's draw for every perturbed parameter.
                for par_name, draws in perturbation_params.items():
                    member_params.set_override(par_name, float(draws[idx]))

                model = WofostAssimilationAgent._create_wofost_model_instance(member_params, weather_copy, agro_copy)
                # Remember the parameter object for this model instance.
                WofostAssimilationAgent.ensemble_params_store[id(model)] = member_params
                ensemble.append(model)
            except Exception as e:
                # exc_info=False keeps the log readable when many members fail
                logger.warning(f"[Pixel-{pixel_id}] Failed to create ensemble member {idx}: {e}", exc_info=False)

        if not ensemble:
            logger.error(f"[Pixel-{pixel_id}] Failed to create ANY valid ensemble members.")
            raise ValueError("Ensemble creation failed completely.")

        logger.info(f"[Pixel-{pixel_id}] Successfully created {len(ensemble)} ensemble members.")
        return ensemble

    @staticmethod
    def _get_ensemble_mean_forecast(ensemble, target_date, 
                                   wdp_for_forecast, agro_for_forecast, 
                                   pixel_id, variable="LAI"):
        """Return the ensemble-mean forecast of *variable* at *target_date*.

        For each member, a fresh temporary model is rebuilt from the member's
        stored parameters (ensemble_params_store) and re-run from its start
        date to target_date — no model state is copied (see the inline
        discussion below). Members without stored parameters, without a
        determinable start date, or that fail to run are skipped. Returns 0.0
        when no member produced a valid value.
        """
        forecasts = []
        
        for i, original_member in enumerate(ensemble):
            try:
                member_params = WofostAssimilationAgent.ensemble_params_store.get(id(original_member))
                if member_params is None:
                    logger.warning(f"[Pixel-{pixel_id}] Forecast: No stored params found for member {i} (id: {id(original_member)})")
                    continue
                    
                temp_model = WofostAssimilationAgent._create_wofost_model_instance(
                    member_params, 
                    wdp_for_forecast, # Use passed wdp, assumed to be a fresh copy for this forecast context
                    agro_for_forecast # Use passed agro, assumed to be a fresh copy
                )
                
                # Determine start date for this temporary run
                # If the original member has a 'day', it means it has run. Use that.
                # Otherwise, it's a fresh model, use the campaign start from agro.
                start_date_for_temp_run = getattr(original_member, 'day', None)
                current_model_day_is_valid = isinstance(start_date_for_temp_run, dt.date)

                if not current_model_day_is_valid: # Model hasn't run, get start date from agro
                    if isinstance(agro_for_forecast, list) and agro_for_forecast: # YAMLAgroManagementReader
                         campaign_events = agro_for_forecast[0].get('events', [])
                         if campaign_events:
                             start_date_for_temp_run = campaign_events[0].get('date')
                    elif hasattr(agro_for_forecast, 'agro_management_data'): # Old style?
                         start_date_for_temp_run = agro_for_forecast.agro_management_data[0]['CampaignStartDate']
                
                if not isinstance(start_date_for_temp_run, dt.date):
                    logger.error(f"[Pixel-{pixel_id}] Forecast: Cannot determine valid start_date_for_temp_run for member {i}.")
                    continue

                # If the original model had run, its state (variables) needs to be copied to the temp_model
                # This is tricky as PCSE models don't have a simple state copy mechanism.
                # The safest is to re-run the temp_model from its start up to original_member.day
                # then continue to target_date.
                # For LAI forecast, we usually run from the *current state* of the ensemble member.
                # The `original_member` IS the current state.
                # We need to make a temporary copy of its state if we run it further.
                
                # Simplification: Assume original_member is at the correct state *before* target_date
                # and we just run it forward to target_date.
                # However, PCSE model.run_till() modifies in place.
                # So, create a forecast model, set its state to match original_member, then run.
                # This is complex. The current approach re-runs from start with perturbed params.
                # Let's stick to re-running the temp_model from its actual start_date.
                
                if target_date >= start_date_for_temp_run:
                    # If original_member has run and its current day is *before* target_date,
                    # we can potentially start the temp_model from original_member.day.
                    # This requires copying state, which is hard.
                    # The current logic re-runs the temp_model from its initial conditions
                    # with its specific perturbed parameters up to target_date.
                    # This is computationally more expensive but safer from state corruption.

                    # If the original member has already run and has state variables,
                    # we need to ensure the temp_model starts "fresh" but with the same params.
                    # The temp_model *is* fresh here.
                    
                    # If original_member has already run to a certain point (e.g., day_before_target_date)
                    # we should copy that state to temp_model and run temp_model for one day.
                    # This is the most complex part of EnKF if not handled by the framework.
                    # For now, the simple re-run from campaign start with member's params:
                    temp_model.run_till(target_date)
                    val = temp_model.get_variable(variable)
                    if val is not None and not np.isnan(val):
                        forecasts.append(val)
                # else:
                #     logger.debug(f"[Pixel-{pixel_id}] Forecast: Target date {target_date} is before member's start date {start_date_for_temp_run}.")

            except Exception as e:
                logger.warning(f"[Pixel-{pixel_id}] Forecast: Error processing member {i} for target {target_date}: {e}", exc_info=False)
        
        if not forecasts:
            logger.warning(f"[Pixel-{pixel_id}] Forecast: No valid forecasts for {variable} at {target_date} from {len(ensemble)} members.")
            return 0.0 # Default to 0 if no forecast, or handle as error
        
        mean_fcst = np.mean(forecasts)
        return mean_fcst


    @staticmethod
    def _perform_assimilation_step(ensemble, current_obs_date, observation_value, obs_std, pixel_id, variable_to_assimilate="LAI"):
        """Apply one scalar EnKF-style analysis update to every ensemble member.

        Each member is advanced to ``current_obs_date``, forecast states for
        ``variable_to_assimilate`` are collected, a scalar Kalman gain is
        computed from the ensemble spread and the observation error, and each
        valid member's state is nudged toward a perturbed copy of the
        observation (stochastic EnKF).

        Parameters
        ----------
        ensemble : list
            PCSE model instances; updated in place.
        current_obs_date : datetime.date
            Observation date; members are run up to this date.
        observation_value : float
            Observed value of the assimilated variable.
        obs_std : float
            Observation error standard deviation.
        pixel_id : hashable
            Identifier used only in log messages.
        variable_to_assimilate : str
            Name of the PCSE state variable to update (default ``"LAI"``).

        Returns
        -------
        list
            The same ensemble list (members updated in place).
        """
        # Advance every member to the observation date; individual failures
        # are logged but do not abort the whole update.
        for member_idx, member in enumerate(ensemble):
            try:
                if member.day <= current_obs_date:
                    member.run_till(current_obs_date)
            except Exception as e:
                logger.warning(f"[Pixel-{pixel_id}] AssimStep: Member {member_idx} failed run_till({current_obs_date}): {e}", exc_info=False)

        # Collect forecast states, remembering which members produced a
        # usable (non-None, non-NaN) value.
        model_states = []
        valid_indices_for_update = []
        for member_idx, member in enumerate(ensemble):
            try:
                state = member.get_variable(variable_to_assimilate)
                if state is not None and not np.isnan(state):
                    model_states.append(state)
                    valid_indices_for_update.append(member_idx)
            except Exception as e:
                logger.warning(f"[Pixel-{pixel_id}] AssimStep: Error getting state from member {member_idx}: {e}", exc_info=False)

        if not model_states:
            logger.error(f"[Pixel-{pixel_id}] AssimStep: No valid model states collected for {variable_to_assimilate} at {current_obs_date}. Skipping update.")
            return ensemble

        num_valid_states = len(model_states)
        X_f = np.array(model_states).reshape(num_valid_states, 1)

        # Scalar observation operator and observation-error variance, floored
        # so the gain stays well defined.
        H = np.array([[1.0]])
        R = np.array([[obs_std**2]])
        if R[0, 0] < 1e-9:
            R[0, 0] = 1e-9

        # Forecast error variance from the ensemble spread; small floor when
        # the ensemble has collapsed or only one member is valid.
        P_f_scalar = np.var(X_f) if num_valid_states > 1 else 1e-6
        if P_f_scalar < 1e-9:
            P_f_scalar = 1e-9
        P_f_matrix = np.array([[P_f_scalar]])

        denominator = (H @ P_f_matrix @ H.T + R)[0, 0]
        if abs(denominator) < 1e-9:  # effectively singular innovation covariance
            logger.warning(f"[Pixel-{pixel_id}] AssimStep: Kalman gain denominator close to zero (singular). P_f_scalar={P_f_scalar:.2e}, R={R[0,0]:.2e}. Skipping update for {current_obs_date}.")
            return ensemble

        # For a 1x1 system the matrix inverse is simply 1/denominator, so the
        # gain is computed directly instead of rebuilding the innovation
        # covariance and calling np.linalg.inv on it a second time.
        K_gain_scalar = (P_f_matrix @ H.T)[0, 0] / denominator

        # Perturbed observations (stochastic EnKF). When the observation is
        # virtually exact, use the raw value for every member and skip the
        # random draw entirely (the original drew and then discarded it).
        if obs_std < 1e-6:
            perturbed_observations = np.full(num_valid_states, observation_value)
        else:
            perturbed_observations = np.random.normal(observation_value, obs_std, num_valid_states)

        updated_states_count = 0
        for i, member_original_idx in enumerate(valid_indices_for_update):
            member_to_update = ensemble[member_original_idx]
            forecast_state_scalar = X_f[i, 0]

            innovation = perturbed_observations[i] - (H[0, 0] * forecast_state_scalar)
            analysis_state_scalar = forecast_state_scalar + K_gain_scalar * innovation

            # Physical constraint: the assimilated variable cannot go negative.
            analysis_state_scalar = max(0.0, analysis_state_scalar)
            if np.isnan(analysis_state_scalar):
                logger.debug(f"[Pixel-{pixel_id}] AssimStep: Member {member_original_idx} analysis state is NaN. Skipping update for this member.")
                continue

            try:
                member_to_update.set_variable(variable_to_assimilate, analysis_state_scalar)
                updated_states_count += 1
            except Exception as e:
                logger.warning(f"[Pixel-{pixel_id}] AssimStep: Failed to set variable for member {member_original_idx}: {e}", exc_info=False)

        logger.debug(f"[Pixel-{pixel_id}] AssimStep: Updated {updated_states_count}/{num_valid_states} members at {current_obs_date} with K={K_gain_scalar:.3f}.")
        return ensemble

    @staticmethod
    def _save_results(pixel_id, dates, mean_lai, mean_sm,
                      original_observed_lai_series,
                      accepted_obs_dates, accepted_obs_lai, accepted_obs_std,
                      original_lai_series,
                      config, obs_dates_dt_all):
        """Save a pixel's assimilation results as an Excel table and a LAI plot.

        Parameters
        ----------
        pixel_id : hashable
            Pixel identifier used in file names and plot titles.
        dates : sequence or pd.Index
            Dates of the assimilated time series.
        mean_lai, mean_sm : array-like
            Ensemble-mean assimilated LAI and soil moisture series.
        original_observed_lai_series : pd.Series
            All raw remote-sensing LAI observations (plotted as crosses).
        accepted_obs_dates, accepted_obs_lai, accepted_obs_std : list
            Observations actually assimilated, with their uncertainties.
        original_lai_series : array-like or None
            Baseline (no-assimilation) WOFOST LAI series; skipped if None.
        config : dict
            Run configuration; must contain 'output_dir'.
        obs_dates_dt_all : sequence
            Dates matching ``original_observed_lai_series``.
        """
        output_dir = config['output_dir']
        excel_dir = os.path.join(output_dir, 'excel')
        png_dir = os.path.join(output_dir, 'png')
        # Create output subdirectories up-front; the original code assumed
        # they already existed and the saves failed otherwise.
        os.makedirs(excel_dir, exist_ok=True)
        os.makedirs(png_dir, exist_ok=True)

        # Normalise dates to datetime. pd.to_datetime handles pd.Index and
        # plain sequences alike, so the original duplicated if/else branches
        # collapse into a single call.
        try:
            dates_dt = pd.to_datetime(dates)
        except Exception as e:
            logger.warning(f"[Pixel-{pixel_id}] 日期转换失败: {e}, 使用原始日期")
            dates_dt = dates

        result_df = pd.DataFrame({
            'date': dates_dt,
            'mean_lai_assimilated': mean_lai,
            'mean_sm_assimilated': mean_sm
        })

        excel_path = os.path.join(excel_dir, f'{pixel_id}_assim_results.xlsx')
        try:
            result_df.to_excel(excel_path, index=False)
        except Exception as e:
            logger.error(f"[Pixel-{pixel_id}] Failed to save Excel: {e}")

        # Single-panel LAI figure: assimilated mean, baseline run, all raw
        # observations, and the accepted (assimilated) observations.
        fig, ax1 = plt.subplots(figsize=(12, 6))

        ax1.plot(dates_dt, mean_lai, 'b-', linewidth=2, label='Assimilated LAI (Ensemble Mean)')
        if original_lai_series is not None:
            ax1.plot(dates_dt, original_lai_series, 'g--', linewidth=2, label='Original WOFOST')
        ax1.plot(obs_dates_dt_all, original_observed_lai_series.values, 'kx', markersize=7, label='Original RS Observations')
        if accepted_obs_dates:
            ax1.errorbar(accepted_obs_dates, accepted_obs_lai, yerr=accepted_obs_std,
                         fmt='ro', capsize=5, markersize=5, label='Accepted Observations (+- StdDev)')

        ax1.set_title(f'Pixel {pixel_id} - Leaf Area Index (LAI)', fontsize=12)
        ax1.set_ylabel("LAI (m2/m2)")
        ax1.legend(fontsize=16)
        ax1.grid(True)

        fig.autofmt_xdate()
        plt.tight_layout()

        png_path = os.path.join(png_dir, f'{pixel_id}_assim_plot.png')
        try:
            plt.savefig(png_path)
        except Exception as e:
            logger.error(f"[Pixel-{pixel_id}] Failed to save PNG: {e}")
        plt.close(fig)

    @staticmethod
    def process_pixel_task(pixel_task_data, static_config, static_wofost_inputs_global, 
                           perturbation_strategy_global, param_advisor_config, obs_quality_config):
        """Run the full assimilation workflow for a single pixel.

        Builds a perturbed WOFOST ensemble, loops over the pixel's remote
        sensing observations (quality-screening each one and assimilating the
        accepted ones), runs the ensemble to the campaign end, saves the
        Excel/PNG results, and returns a summary dict.

        Parameters
        ----------
        pixel_task_data : dict
            Must contain 'pixel_id', 'lai_series' (pd.Series of observed LAI)
            and 'obs_dates' (sequence of datetime.date).
        static_config : dict
            Global run configuration ('ensemble_size', 'output_dir', ...).
        static_wofost_inputs_global : dict
            Shared WOFOST inputs: 'parameters', 'wdp', 'agromanagement'.
        perturbation_strategy_global : dict
            Parameter perturbation settings for ensemble creation.
        param_advisor_config, obs_quality_config : dict
            Configurations for the advisor/quality-assessment agents.

        Returns
        -------
        dict
            {'pixel_id', 'status', ...}; on success includes output dates,
            mean LAI/SM series, mean TAGP and the plot path.
        """
        pixel_id = pixel_task_data['pixel_id']
        original_observed_lai_series = pixel_task_data['lai_series']
        all_obs_dates_dt = pixel_task_data['obs_dates']

        logger.info(f"[Pixel-{pixel_id}] Starting assimilation.")
        
        param_advisor = ParameterAdvisorAgent(param_advisor_config)
        obs_quality_assessor = ObservationQualityAgent(obs_quality_config)

        try:
            current_wofost_params = copy.deepcopy(static_wofost_inputs_global['parameters'])
            # WDP and agromanagement are deep-copied defensively: they are
            # normally read-only, but copying guarantees no cross-pixel state
            # pollution when tasks run in parallel processes.
            current_wofost_wdp = copy.deepcopy(static_wofost_inputs_global['wdp'])
            current_wofost_agro = copy.deepcopy(static_wofost_inputs_global['agromanagement'])
            
            ensemble = WofostAssimilationAgent._create_ensemble(
                current_wofost_params, 
                current_wofost_wdp, 
                current_wofost_agro, 
                perturbation_strategy_global, 
                static_config['ensemble_size'],
                pixel_id 
            )

            accepted_lai_history = [] 
            accepted_observations_for_plotting = {'dates': [], 'lai_values': [], 'std_devs': []}
            prev_raw_obs_lai = None 
            
            # Reporting agent used for mid-season stage reports.
            reporting_agent_config = {'config': static_config}
            reporting_agent = ReportingAgent(reporting_agent_config)
            
            # Key phenological dates at which interim reports are produced.
            report_dates = [
                dt.date(2022, 12, 1),  # overwintering
                dt.date(2023, 3, 1),   # green-up
                dt.date(2023, 4, 1),   # jointing/booting
                dt.date(2023, 4, 25),  # heading/flowering
                dt.date(2023, 5, 15)   # grain filling
            ]
            pixel_results_by_stage = {date: [] for date in report_dates}
            current_pixel_result = None

            for i, obs_date in enumerate(all_obs_dates_dt):
                current_original_obs_lai = original_observed_lai_series.iloc[i]
                
                if np.isnan(current_original_obs_lai):  # should be caught upstream (fillna), but double check
                    logger.warning(f"[Pixel-{pixel_id}] Original LAI at {obs_date} is NaN. Skipping this date.")
                    prev_raw_obs_lai = current_original_obs_lai 
                    continue

                # Forecast on temporary copies so the forecast run cannot
                # pollute the WDP/agromanagement used by the main ensemble.
                temp_wdp_for_forecast = copy.deepcopy(current_wofost_wdp)
                temp_agro_for_forecast = copy.deepcopy(current_wofost_agro)
                
                model_forecast_lai = WofostAssimilationAgent._get_ensemble_mean_forecast(
                    ensemble, 
                    obs_date,
                    temp_wdp_for_forecast,
                    temp_agro_for_forecast,
                    pixel_id,  # for logging context
                    variable="LAI"
                )

                current_original_obs_std = param_advisor.get_initial_observation_uncertainty(current_original_obs_lai)

                assessment_result = obs_quality_assessor.assess_observation(
                    current_obs_lai=current_original_obs_lai, current_obs_std=current_original_obs_std,
                    current_date=obs_date, model_forecast_lai=model_forecast_lai,
                    lai_history=list(accepted_lai_history), prev_obs_lai=prev_raw_obs_lai 
                )
                
                action = assessment_result['action']; reason = assessment_result['reason']
                logger.info(f"[Pixel-{pixel_id}] Obs @ {obs_date}: OrigLAI={current_original_obs_lai:.2f}(Std={current_original_obs_std:.2f}), ModelFcst={model_forecast_lai:.2f}. Decision: {action}. Reason: {reason}")
                
                prev_raw_obs_lai = current_original_obs_lai 

                if action == 'SKIP': 
                    logger.info(f"[Pixel-{pixel_id}] Skipping assimilation for {obs_date} based on quality assessment.")
                    continue  # move to the next observation date
                
                lai_to_assimilate = current_original_obs_lai
                std_for_assimilation = current_original_obs_std
                if action == 'ADJUST_UNCERTAINTY':
                    # Fall back to 1.5x the original std when the suggested
                    # std is missing or non-positive.
                    adjusted_std = assessment_result.get('new_obs_std', current_original_obs_std * 1.5)
                    if adjusted_std > 0:
                        std_for_assimilation = adjusted_std
                    else:
                        logger.warning(f"[Pixel-{pixel_id}] Invalid new_obs_std ({adjusted_std}) suggested for {obs_date}. Using {current_original_obs_std * 1.5:.2f} instead.")
                        std_for_assimilation = current_original_obs_std * 1.5
                    logger.info(f"[Pixel-{pixel_id}] Uncertainty for {obs_date} was {current_original_obs_std:.2f}, adjusted to {std_for_assimilation:.2f}")

                ensemble = WofostAssimilationAgent._perform_assimilation_step(
                    ensemble, obs_date, lai_to_assimilate, std_for_assimilation, pixel_id, "LAI"
                )
                
                accepted_lai_history.append(lai_to_assimilate)  # the LAI value actually used
                accepted_observations_for_plotting['dates'].append(obs_date)
                accepted_observations_for_plotting['lai_values'].append(lai_to_assimilate)
                accepted_observations_for_plotting['std_devs'].append(std_for_assimilation)

                if not ensemble:  # defensive: _perform_assimilation_step should never empty it
                    logger.error(f"[Pixel-{pixel_id}] Ensemble collapse after assimilation at {obs_date}.")
                    return {'pixel_id': pixel_id, 'status': 'failed', 'error': 'Ensemble collapse'}
            
                # Snapshot the pixel state for stage reporting.
                # NOTE(review): 'mean_lai' here is the list of per-member LAI
                # values at this date, not a time series — confirm downstream
                # consumers (e.g. _analyze_pixel_growth) expect this shape.
                current_pixel_result = {
                    'pixel_id': pixel_id,
                    'status': 'success',
                    'mean_lai': [model.get_variable('LAI') for model in ensemble],
                    'mean_tagp': np.mean([model.get_variable('TAGP') for model in ensemble if model.get_variable('TAGP') is not None]),
                    'output_dates': [d.isoformat() for d in all_obs_dates_dt[:i+1]]
                }

                # Emit a stage report when this observation is the last one on
                # or before a configured report date.
                for report_date in report_dates:
                    if obs_date <= report_date < (all_obs_dates_dt[i+1] if i+1 < len(all_obs_dates_dt) else dt.date.max):
                        pixel_results_by_stage[report_date].append(current_pixel_result)
                        
                        logger.info(f"生成{report_date}的生长期报告...")
                        reporting_agent.generate_summary_report(
                            pixel_results_by_stage[report_date],
                            report_date=report_date
                        )

            logger.info(f"[Pixel-{pixel_id}] Finished assimilation loop. Running final simulation for {len(ensemble)} members.")
            final_outputs = []
            
            # Determine how far to run the final simulation: use the latest
            # event date in the agromanagement, otherwise fall back to the
            # last observation date.
            campaign_end_date = None
            try:
                agro_data_list = current_wofost_agro  # structure from YAMLAgroManagementReader
                if isinstance(agro_data_list, list) and agro_data_list:
                    # YAMLAgroManagementReader yields a list whose first element
                    # is the campaign dict containing an 'events' list.
                    agro_management_dict = agro_data_list[0]
                    all_event_dates = [event['date'] for event in agro_management_dict.get('events', []) if 'date' in event]
                    if all_event_dates:
                        campaign_end_date = max(all_event_dates)
                    
                    if campaign_end_date is None:
                        # BUG FIX: the original called a fictional WDP method
                        # here, which always raised AttributeError; fall back
                        # to the last observation date directly.
                        campaign_end_date = all_obs_dates_dt[-1]
                        logger.warning(f"[Pixel-{pixel_id}] Could not determine campaign end from agro, using last obs date: {campaign_end_date}")
                else:  # fallback if the agro structure is not as expected
                    campaign_end_date = all_obs_dates_dt[-1]
                    logger.warning(f"[Pixel-{pixel_id}] Agro management structure not recognized for end date, using last obs date: {campaign_end_date}")

            except Exception as e:
                campaign_end_date = all_obs_dates_dt[-1]  # ultimate fallback
                logger.warning(f"[Pixel-{pixel_id}] Error getting campaign end date: {e}. Using last observation date: {campaign_end_date}", exc_info=False)

            # Ensure campaign_end_date is a date object.
            if not isinstance(campaign_end_date, dt.date):
                 campaign_end_date = all_obs_dates_dt[-1]
                 logger.warning(f"[Pixel-{pixel_id}] Campaign end date was invalid, reset to last obs date: {campaign_end_date}")

            # Final forward run of every member; run_till handles members that
            # terminate naturally (crop maturity) before campaign_end_date.
            for member_idx, member in enumerate(ensemble):
                try:
                    if member.day <= campaign_end_date:
                        member.run_till(campaign_end_date) 
                    
                    output_df = pd.DataFrame(member.get_output()).set_index("day")
                    if not output_df.empty:
                        final_outputs.append(output_df)
                    else:
                        logger.warning(f"[Pixel-{pixel_id}] Member {member_idx} produced empty output after final run.")
                except pcse.exceptions.WeatherDataProviderError as e:  # weather range exceeded
                    logger.warning(f"[Pixel-{pixel_id}] Member {member_idx} weather data error during final run (possibly beyond wdp range): {e}", exc_info=False)
                except Exception as e:
                    logger.warning(f"[Pixel-{pixel_id}] Member {member_idx} failed final run: {e}", exc_info=False)
            
            if not final_outputs:
                logger.error(f"[Pixel-{pixel_id}] No successful final runs for any member yielding output.")
                if accepted_observations_for_plotting['dates']:
                    # BUG FIX: the original referenced common_index/mean_lai/
                    # mean_sm/original_lai_series here, none of which exist yet
                    # (NameError). Plot the observations alone instead.
                    obs_index = pd.DatetimeIndex(pd.to_datetime(all_obs_dates_dt))
                    nan_series = np.full(len(obs_index), np.nan)
                    WofostAssimilationAgent._save_results(
                        pixel_id,
                        obs_index,
                        nan_series, nan_series,
                        original_observed_lai_series,
                        accepted_observations_for_plotting['dates'],
                        accepted_observations_for_plotting['lai_values'],
                        accepted_observations_for_plotting['std_devs'],
                        None,  # no baseline simulated LAI series available
                        static_config,
                        all_obs_dates_dt
                    )
                return {'pixel_id': pixel_id, 'status': 'failed', 'error': 'No successful final runs with output'}

            all_indices = [df.index for df in final_outputs if not df.empty]
            if not all_indices:
                 logger.error(f"[Pixel-{pixel_id}] All final_outputs are empty. Cannot determine common_index.")
                 return {'pixel_id': pixel_id, 'status': 'failed', 'error': 'All final_outputs empty'}

            # Union of all member indexes so every member can be reindexed
            # onto a common date axis.
            common_index = all_indices[0]
            for idx in all_indices[1:]:
                common_index = common_index.union(idx)
            
            if common_index.empty:
                logger.error(f"[Pixel-{pixel_id}] Common index is empty after union. Cannot proceed with results aggregation.")
                return {'pixel_id': pixel_id, 'status': 'failed', 'error': 'Common index empty'}

            reindexed_lais = [df_out['LAI'].reindex(common_index).values for df_out in final_outputs if 'LAI' in df_out.columns and not df_out.empty]
            reindexed_sms = [df_out['SM'].reindex(common_index).values for df_out in final_outputs if 'SM' in df_out.columns and not df_out.empty]

            if not reindexed_lais:
                 logger.error(f"[Pixel-{pixel_id}] No LAI data in final outputs after reindexing.")
                 return {'pixel_id': pixel_id, 'status': 'failed', 'error': 'No LAI in reindexed final outputs'}

            mean_lai = np.nanmean(np.array(reindexed_lais), axis=0)
            mean_sm = np.nanmean(np.array(reindexed_sms), axis=0) if reindexed_sms else np.full_like(mean_lai, np.nan)
            
            # Final TAGP (total aboveground production) per member, averaged.
            tagp_values = []
            for df_out in final_outputs:
                if 'TAGP' in df_out.columns and not df_out.empty and not df_out['TAGP'].empty:
                    # Coerce to numeric in case the model did not finish cleanly.
                    last_tagp = pd.to_numeric(df_out['TAGP'].iloc[-1], errors='coerce')
                    if not np.isnan(last_tagp):
                        tagp_values.append(last_tagp)
            mean_tagp = np.nanmean(tagp_values) if tagp_values else np.nan

            # Baseline (no-assimilation) WOFOST run for comparison plotting.
            original_wofost = WofostAssimilationAgent._create_wofost_model_instance(
                copy.deepcopy(static_wofost_inputs_global['parameters']),
                copy.deepcopy(static_wofost_inputs_global['wdp']),
                copy.deepcopy(static_wofost_inputs_global['agromanagement'])
            )
            
            original_wofost.run_till(campaign_end_date)
            original_output = pd.DataFrame(original_wofost.get_output()).set_index("day")
            # Reindex onto common_index so the plot arrays line up even if the
            # baseline run covers a different date span than the ensemble.
            original_lai = original_output['LAI'].reindex(common_index).values if 'LAI' in original_output.columns else None

            WofostAssimilationAgent._save_results(
                pixel_id, 
                common_index,  # _save_results handles datetime conversion
                mean_lai, mean_sm,
                original_observed_lai_series, 
                accepted_observations_for_plotting['dates'],
                accepted_observations_for_plotting['lai_values'],
                accepted_observations_for_plotting['std_devs'],
                original_lai,  # full baseline simulated LAI series
                static_config,
                all_obs_dates_dt
            )
            logger.info(f"[Pixel-{pixel_id}] Assimilation completed and results saved.")
            
            return {
                'pixel_id': pixel_id, 'status': 'success', 
                'output_dates': [d.isoformat() for d in common_index.to_list()],
                'mean_lai': mean_lai.tolist() if isinstance(mean_lai, np.ndarray) else list(mean_lai), 
                'mean_sm': mean_sm.tolist() if isinstance(mean_sm, np.ndarray) else list(mean_sm),
                'mean_tagp': float(mean_tagp) if not np.isnan(mean_tagp) else None,
                'plot_path_png': os.path.join(static_config['output_dir'], 'png', f'{pixel_id}_assim_plot.png'),
            }

        except Exception as e:
            logger.error(f"[Pixel-{pixel_id}] Critical Error in process_pixel_task: {e}", exc_info=True)
            return {'pixel_id': pixel_id, 'status': 'failed', 'error': str(e)}


# --- Reporting Agent ---
class ReportingAgent(BaseAgent):
    """Agent that turns pixel-level assimilation results into LLM-generated
    decision-support reports keyed to winter-wheat growth stages.

    The Chinese strings in ``growth_periods``, prompts, and report templates
    are user-facing content and are kept verbatim.
    """

    def __init__(self, shared_knowledge: dict):
        super().__init__("ReportingAgent", shared_knowledge)
        self.llm_assistant = LLMAssistantAgent(shared_knowledge)
        # Management knowledge per growth stage (time range, key actions,
        # temperature requirements) injected into LLM prompts.
        self.growth_periods = {
            "越冬期": {
                "time_range": "11月下旬至12月上旬",
                "key_points": ["封冻水", "防冻"],
                "temp_range": "日均温3-5℃"
            },
            "返青期": {
                "time_range": "2月下旬至3月中旬",
                "key_points": ["返青水", "促进分蘖"],
                "temp_range": "稳定气温>3℃"
            },
            "拔节孕穗期": {
                "time_range": "3月下旬至4月中旬",
                "key_points": ["拔节孕穗水", "肥水调控"],
                "temp_range": "适宜生长温度"
            },
            "抽穗开花期": {
                "time_range": "4月下旬至5月初",
                "key_points": ["抽穗扬花水", "防止干热风"],
                "temp_range": "适宜开花温度"
            },
            "灌浆期": {
                "time_range": "5月上中旬",
                "key_points": ["灌浆水", "籽粒充实"],
                "temp_range": "适宜灌浆温度"
            }
        }

    def _get_current_growth_period(self, date=None):
        """Map a calendar date onto a named growth stage (None if outside all
        key stages). Defaults to the current time when no date is given."""
        if date is None:
            date = dt.datetime.now()
        
        month = date.month
        day = date.day
        
        # 'and' binds tighter than 'or', so each branch reads as
        # (month==A and day>=X) or (month==B and day<=Y).
        if month == 9 and day >= 20 or month == 10 and day <= 10:
            return "播种期"
        elif month == 11 and day >= 20 or month == 12 and day <= 10:
            return "越冬期"
        elif month == 2 and day >= 20 or month == 3 and day <= 15:
            return "返青期"
        elif month == 3 and day > 15 or month == 4 and day <= 15:
            return "拔节孕穗期"
        elif month == 4 and day > 15 or month == 5 and day <= 10:
            return "抽穗开花期"
        elif month == 5 and day > 10 and day <= 20:
            return "灌浆期"
        else:
            return None

    def _generate_period_specific_advice(self, growth_period, pixel_analysis, mean_tagp):
        """Build rule-based management advice (irrigation, fertilization,
        pest control, special care) for the given growth stage."""
        advice = {
            "irrigation": [],
            "fertilization": [],
            "pest_control": [],
            "special_care": []
        }
        
        if growth_period == "播种期":
            advice["irrigation"].append("确保10-20cm土层墒情适宜，适时进行底墒水灌溉")
            advice["special_care"].extend([
                "播种深度控制在3-5cm",
                "确保播种质量和均匀度"
            ])
            
        elif growth_period == "越冬期":
            advice["irrigation"].append("适时浇封冻水，水层深度3-4cm")
            advice["special_care"].extend([
                "注意防寒保温",
                "控制冬前旺长"
            ])
            
        elif growth_period == "返青期":
            advice["irrigation"].append("及时浇返青水，促进分蘖")
            advice["fertilization"].append("适量追施氮肥，促进分蘖生长")
            
        elif growth_period == "拔节孕穗期":
            # High LAI signals lodging risk; limit irrigation in that case.
            if pixel_analysis['max_lai'] > 4:
                advice["irrigation"].append("控制灌溉量，防止倒伏")
            else:
                advice["irrigation"].append("保证充足水分供应，促进穗分化")
                
        elif growth_period == "抽穗开花期":
            advice["irrigation"].append("适时浇水，保证开花授粉")
            advice["special_care"].extend([
                "防止干热风危害",
                "注意病虫害防控"
            ])
            
        elif growth_period == "灌浆期":
            advice["irrigation"].append("后期控水，提高籽粒品质")
            advice["special_care"].append("注意防止倒伏")
            
        return advice

    def generate_summary_report(self, all_pixel_results: list, report_date=None):
        """Generate an LLM-written decision report for the stage containing
        ``report_date`` (or now); returns the assembled report text."""
        self.log(f"生成{report_date if report_date else '最终'}决策支持报告...")
        
        current_period = self._get_current_growth_period(report_date)
        if not current_period:
            self.log("当前时间不在关键生长期内", logging.WARNING)
            return self._save_empty_report()
            
        period_info = self.growth_periods[current_period]
        
        # Gather the statistics and pixel samples the LLM prompts need.
        context = self._prepare_report_context(all_pixel_results, current_period, period_info)
        
        # Produce each report section via the LLM assistant.
        report_sections = self._generate_report_sections(context)
        
        # Assemble, persist, and return the final report text.
        report = self._assemble_final_report(report_sections, context)
        self._save_report(report, report_date)
        
        return report

    def _prepare_report_context(self, all_pixel_results, current_period, period_info):
        """Compute summary statistics and JSON-serializable sample analyses
        for the report prompts."""
        successful_pixels = [res for res in all_pixel_results if res and res.get('status') == 'success']
        
        # Basic yield statistics across successful pixels.
        yields = [p['mean_tagp'] for p in successful_pixels if p.get('mean_tagp') is not None and not np.isnan(p['mean_tagp'])]
        avg_yield = np.nanmean(yields) if yields else np.nan
        yield_std = np.nanstd(yields) if yields else np.nan

        # BUG FIX: guard against an empty result list, which previously
        # raised ZeroDivisionError when computing success_rate.
        total_pixels = len(all_pixel_results)
        success_rate = float(len(successful_pixels) / total_pixels * 100) if total_pixels else 0.0
        
        # Analyze a small sample of pixels, coercing values to plain
        # JSON-serializable types.
        pixel_analyses = []
        for pixel in successful_pixels[:3]:
            analysis = self._analyze_pixel_growth(pixel)
            if analysis:
                # Booleans become Chinese yes/no strings for the LLM prompt.
                analysis['is_stable'] = "是" if analysis['is_stable'] else "否"
                pixel_analyses.append({
                    'pixel_id': pixel['pixel_id'],
                    'analysis': {
                        'stability': float(analysis['stability']),
                        'is_stable': analysis['is_stable'],
                        'growth_stage': analysis['growth_stage'],
                        'max_lai': float(analysis['max_lai']),
                        'current_lai': float(analysis['current_lai']),
                        'lai_trend': analysis['lai_trend']
                    },
                    'mean_tagp': float(pixel.get('mean_tagp', 0))
                })

        return {
            'current_period': current_period,
            'period_info': period_info,
            'statistics': {
                'total_pixels': int(total_pixels),
                'successful_pixels': int(len(successful_pixels)),
                'avg_yield': float(avg_yield),
                'yield_std': float(yield_std),
                'success_rate': success_rate
            },
            'pixel_analyses': pixel_analyses,
            'generation_time': dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

    def _generate_report_sections(self, context):
        """Query the LLM for the three report sections (situation analysis,
        management advice, risk warnings); failures are captured under the
        'error' key rather than raised."""
        sections = {}
        
        try:
            # Overall situation analysis.
            situation_prompt = f"""
            基于以下数据生成小麦{context['current_period']}的整体形势分析：
            1. 当前生长期：{context['current_period']}（{context['period_info']['time_range']}）
            2. 关键管理点：{', '.join(context['period_info']['key_points'])}
            3. 温度要求：{context['period_info']['temp_range']}
            4. 平均预期产量：{context['statistics']['avg_yield']:.2f} kg/ha
            5. 产量变异系数：{(context['statistics']['yield_std']/context['statistics']['avg_yield']*100):.1f}%

            请从生长势、产量潜力、关键管理措施等方面进行分析。
            """
            
            response = self.llm_assistant.query_llm(
                "report_section",
                {"section_type": "situation", "prompt": situation_prompt}
            )
            sections['situation'] = response.get('content', '暂无分析')
            self.log(f"形势分析生成完成: {len(sections['situation'])}字符")

            # Field-management advice.
            management_prompt = f"""
            基于以下像素分析数据，生成具体的田间管理建议：
            {json.dumps(context['pixel_analyses'], ensure_ascii=False, indent=2, default=str)}

            请针对当前生长期（{context['current_period']}）的特点，从以下方面提供建议：
            1. 水分管理
            2. 肥料管理
            3. 病虫害防控
            4. 其他关键管理措施
            """
            
            response = self.llm_assistant.query_llm(
                "report_section",
                {"section_type": "management", "prompt": management_prompt}
            )
            sections['management'] = response.get('content', '暂无建议')
            self.log(f"管理建议生成完成: {len(sections['management'])}字符")

            # Risk warnings.
            risk_prompt = f"""
            基于当前生长期（{context['current_period']}）的特点和以下数据：
            1. 生长期信息：{json.dumps(context['period_info'], ensure_ascii=False, default=str)}
            2. 像素分析：{json.dumps(context['pixel_analyses'], ensure_ascii=False, indent=2, default=str)}

            请分析可能存在的主要风险，并提供防范建议。
            """
            
            response = self.llm_assistant.query_llm(
                "report_section",
                {"section_type": "risks", "prompt": risk_prompt}
            )
            sections['risks'] = response.get('content', '暂无风险提示')
            self.log(f"风险提示生成完成: {len(sections['risks'])}字符")

        except Exception as e:
            self.log(f"生成报告部分时出错: {e}", logging.ERROR, exc_info=True)
            sections['error'] = f"生成报告时发生错误: {str(e)}"

        return sections

    def _assemble_final_report(self, sections, context):
        """Assemble the generated sections into the final report text."""
        report = f"""
        ====== 小麦{context['current_period']}智能农业决策支持报告 ======
        生成时间: {context['generation_time']}

        一、整体形势分析
        ---------------
        {sections.get('situation', '暂无分析')}

        二、田间管理建议
        ---------------
        {sections.get('management', '暂无建议')}

        三、风险提示
        -----------
        {sections.get('risks', '暂无风险提示')}
        """
        return report

    def _save_report(self, report, report_date=None):
        """Write the report to disk; dated reports go into a per-date
        subdirectory, undated ones into the output root."""
        if report_date:
            report_dir = os.path.join(self.shared_knowledge['config']['output_dir'], 
                                    f"report_{report_date.strftime('%Y%m%d')}")
            os.makedirs(report_dir, exist_ok=True)
            report_path = os.path.join(report_dir, "agricultural_decision_support.txt")
        else:
            report_path = os.path.join(self.shared_knowledge['config']['output_dir'], 
                                     "agricultural_decision_support.txt")
        
        try:
            with open(report_path, 'w', encoding='utf-8') as f:
                f.write(report)
            self.log(f"决策支持报告已保存至: {report_path}")
        except Exception as e:
            self.log(f"报告保存失败: {e}", logging.ERROR)

    def _save_empty_report(self):
        """Write and return a placeholder report when no pixel data exists."""
        # BUG FIX: the original string lacked the f-prefix, so the timestamp
        # placeholder was written out literally instead of being evaluated.
        report = f"""
        ====== 智能农业决策支持报告 ======
        生成时间: {dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
        
        警告：未处理任何像素数据，无法生成决策建议。
        请检查数据输入和处理流程。
        """
        
        report_path = os.path.join(self.shared_knowledge['config']['output_dir'], "agricultural_decision_support.txt")
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)
        return report

    def _analyze_pixel_growth(self, pixel_result):
        """Derive stability/stage/trend features from a pixel's LAI series;
        returns None when fewer than two LAI values are available or on error.

        NOTE(review): assumes pixel_result['mean_lai'] is an ordered LAI
        time series — confirm against the producer, which may supply a
        per-member snapshot instead.
        """
        try:
            lai_values = np.array(pixel_result.get('mean_lai', []))
            if len(lai_values) < 2:
                return None
                
            # Stability: standard deviation of day-to-day LAI changes.
            lai_changes = np.diff(lai_values)
            stability = np.std(lai_changes)
            is_stable = stability < 0.5  # empirical threshold on LAI-change std
            
            max_lai = np.max(lai_values)
            max_lai_idx = np.argmax(lai_values)
            current_lai = lai_values[-1]
            
            # Rough stage classification by where the LAI peak falls in the series.
            if max_lai_idx < len(lai_values) * 0.3:
                growth_stage = "营养生长期"
            elif max_lai_idx < len(lai_values) * 0.6:
                growth_stage = "拔节抽穗期"
            else:
                growth_stage = "灌浆成熟期"
            
            return {
                'stability': stability,
                'is_stable': is_stable,
                'growth_stage': growth_stage,
                'max_lai': max_lai,
                'current_lai': current_lai,
                'lai_trend': 'increasing' if current_lai > lai_values[-2] else 'decreasing'
            }
        except Exception as e:
            self.log(f"像素生长分析失败: {e}", logging.ERROR)
            return None

class EnhancedReportingAgent(ReportingAgent):
    """ReportingAgent variant that appends an LLM-generated analysis section."""

    def generate_summary_report(self, all_pixel_results: list):
        """Build the base summary report and, when LLM usage is enabled in
        the shared config, append an LLM-enhanced analysis section."""
        report = super().generate_summary_report(all_pixel_results)

        # Honour the config switch; LLM enhancement is on by default.
        use_llm = self.shared_knowledge.get('config', {}).get('use_llm', True)
        if not use_llm:
            return report

        # Assemble the context handed to the LLM assistant.
        context = {
            'technical_indicators': self._extract_indicators(all_pixel_results),
            'pixel_analysis_samples': self._sample_pixel_analyses(all_pixel_results[:3]),
            'base_report': report,
        }
        response = self.llm_assistant.query_llm("report_enhancement", context)
        enhanced = response.get('enhanced_analysis', '')
        return f"{report}\n\n===== LLM增强分析 =====\n{enhanced}"
    
# --- 协调智能体 ---
class CoordinatorAgent(BaseAgent):
    """Top-level agent that wires the other agents together and drives the
    full pipeline: config -> data prep -> per-pixel assimilation -> report."""

    def __init__(self):
        # Shared knowledge is a plain dict: sub-agents mutate it in the
        # parent process, and worker processes receive explicit copies via
        # `partial` below. (The original used the dead conditional
        # `Manager().dict() if False else {}` — a Manager dict is only
        # needed if workers had to write back into shared state.)
        # Initialized BEFORE super().__init__ so BaseAgent can see it.
        self.shared_knowledge = {}
        super().__init__("CoordinatorAgent", self.shared_knowledge)

        # All agents share the same knowledge object.
        self.config_agent = ConfigAgent(self.shared_knowledge)
        self.data_prep_agent = DataPreparationAgent(self.shared_knowledge)
        self.param_advisor_agent = ParameterAdvisorAgent(self.shared_knowledge)
        self.reporting_agent = ReportingAgent(self.shared_knowledge)
        self.config = None  # populated from shared_knowledge in run_pipeline()

    def run_pipeline(self):
        """Run the whole assimilation pipeline.

        Aborts early (with a CRITICAL log) on config/data errors; any other
        exception is logged rather than propagated.
        """
        self.log("Starting data assimilation pipeline...")
        start_time = time.time()
        try:
            # ConfigAgent populates self.shared_knowledge['config'].
            if not self.config_agent.load_initial_config():
                self.log("Pipeline aborted: Config error.", logging.CRITICAL)
                return
            self.config = self.shared_knowledge['config']  # local alias

            if not self.data_prep_agent.load_wofost_inputs():
                self.log("Pipeline aborted: WOFOST input error.", logging.CRITICAL)
                return
            if not self.data_prep_agent.load_observation_data():
                self.log("Pipeline aborted: Observation data error.", logging.CRITICAL)
                return

            pixel_tasks = self.shared_knowledge.get('pixel_tasks', [])
            if not pixel_tasks:
                self.log("No pixel tasks found. Exiting.", logging.INFO)
                return

            max_pixels = self.config.get('max_pixels_to_process', len(pixel_tasks))
            pixel_tasks_to_process = pixel_tasks[:max_pixels]

            # ParamAdvisorAgent reads shared_knowledge['config'] internally.
            perturb_strategy = self.param_advisor_agent.get_ensemble_perturbation_strategy(
                self.config['ensemble_size'])

            # Plain-dict config copies are picklable and safe to hand to
            # worker processes; used to init agents inside each worker.
            param_advisor_knowledge_for_worker = {'config': dict(self.config)}
            obs_quality_knowledge_for_worker = {'config': dict(self.config)}

            process_pixel_partial = partial(
                WofostAssimilationAgent.process_pixel_task,
                static_config=dict(self.config),
                static_wofost_inputs_global=self.shared_knowledge['wofost_inputs'],
                perturbation_strategy_global=perturb_strategy,
                param_advisor_config=param_advisor_knowledge_for_worker,
                obs_quality_config=obs_quality_knowledge_for_worker
            )

            num_processes = self.config.get('num_processes', os.cpu_count() or 1)
            # Never spin up more worker processes than there are tasks.
            if num_processes > 1 and len(pixel_tasks_to_process) < num_processes:
                num_processes = len(pixel_tasks_to_process)

            self.log(f"Processing {len(pixel_tasks_to_process)} pixel tasks with {num_processes} process(es).")

            if num_processes > 1:
                if sys.platform == "win32":
                    logger.warning("Multiprocessing (num_processes > 1) on Windows can be sensitive to pickling. If errors occur, try num_processes = 1 for easier debugging.")
                # Each Pool worker gets its own copy of the agent classes;
                # class-level state (e.g. ensemble_params_store) is NOT
                # shared across processes.
                with Pool(processes=num_processes) as pool:
                    all_results = pool.map(process_pixel_partial, pixel_tasks_to_process)
            else:
                all_results = []
                for i, task in enumerate(pixel_tasks_to_process):
                    self.log(f"Processing task {i+1}/{len(pixel_tasks_to_process)}: Pixel {task['pixel_id']} (Single Process Mode)")
                    all_results.append(process_pixel_partial(task))

            self.log(f"Finished processing {len(pixel_tasks_to_process)} pixels.")
            self.reporting_agent.generate_summary_report(all_results)

        except Exception as e:
            self.log(f"Critical pipeline error: {e}", logging.CRITICAL, exc_info=True)
        finally:
            # BUGFIX: the original called Manager().dict() here merely to
            # probe its type, which spawned (and leaked) a manager server
            # process on every run. shared_knowledge is a plain dict, so no
            # cleanup is required.
            self.log(f"Pipeline finished in {time.time() - start_time:.2f} seconds.")


# --- 主执行逻辑 ---
def cleanup_pcse_default_log():
    """Best-effort removal of the default log file PCSE drops in ~/.pcse/logs.

    Failures (e.g. file locked, no permissions) are logged at DEBUG only.
    """
    default_log = os.path.join(os.path.expanduser("~"), ".pcse", "logs", "pcse.log")
    try:
        if os.path.exists(default_log):
            os.remove(default_log)
            logger.info(f"Removed default PCSE log: {default_log}")
    except Exception as e:
        logger.debug(f"Could not remove default PCSE log: {e}")


def main():
    """Entry point: validate the environment, then run the coordinator pipeline."""
    logger.info("="*50 + "\nMulti-Agent Data Assimilation System - Startup\n" + "="*50)
    cleanup_pcse_default_log()

    if not os.path.isdir(ROOT_DIR):
        logger.critical(f"ROOT_DIR '{ROOT_DIR}' is not a valid directory. Please check the path and ensure it's accessible.")
        sys.exit(1)

    # BUGFIX: the original referenced the undefined name OPENAI_API_KEY
    # (the module loads the key into `api_key`), raising NameError here;
    # it would also crash on None.startswith when the env var is unset.
    # Basic sanity check only — LLM features are currently peripheral.
    if not (api_key and api_key.startswith("sk-") and len(api_key) > 50):
        logger.warning("OpenAI API key is not set or appears invalid. LLM functionalities will be skipped or use defaults.")
        logger.warning("To enable LLM features, set the OPENAI_API_KEY environment variable or edit the script.")

    coordinator = CoordinatorAgent()
    coordinator.run_pipeline()

    logger.info("="*50 + "\nMulti-Agent Data Assimilation System - Shutdown\n" + "="*50)

# Standard script entry guard — also required so multiprocessing workers
# (which re-import this module, notably on Windows spawn) do not re-run
# the pipeline on import.
if __name__ == "__main__":
    main()