use super::cognitive::CognitiveAgent;
use crate::research_engine::{ResearchEngine, ResearchResult};
use std::collections::HashMap;

/// Advanced agent: wraps a [`CognitiveAgent`] with a set of weighted
/// strategy factors and a self-adjusting adaptation rate.
pub struct AdvancedAgent {
    // Unique agent identifier (also passed to the inner cognitive agent's constructor).
    pub id: u32,
    // Underlying cognitive decision-maker that produces the base decision string.
    pub cognitive_agent: CognitiveAgent,
    // Strategic factor weights, keyed by factor name (e.g. "market_trend");
    // used to score a decision context and updated by research results.
    pub strategy_weights: HashMap<String, f32>,
    // Learning step size; rescaled by `adapt` and kept within [0.01, 0.5].
    pub adaptation_rate: f32,
}

impl AdvancedAgent {
    /// Creates an agent with default strategy weights and a 0.1 adaptation rate.
    pub fn new(id: u32) -> Self {
        AdvancedAgent {
            id,
            cognitive_agent: CognitiveAgent::new(id),
            strategy_weights: HashMap::from([
                ("market_trend".to_string(), 0.7),
                ("risk_analysis".to_string(), 0.8),
                ("competitor_behavior".to_string(), 0.6),
            ]),
            adaptation_rate: 0.1,
        }
    }

    /// Integrated decision-making: combines the cognitive agent's decision
    /// with a weighted strategic score, optionally running a research pass
    /// that updates memory and strategy weights first.
    ///
    /// Returns the cognitive decision, prefixed with `enhanced_` when the
    /// strategic score exceeds 0.7.
    pub fn make_integrated_decision(&mut self, context: &HashMap<String, f32>) -> String {
        // Base decision from the cognitive layer.
        let cognitive_decision = self.cognitive_agent.make_decision(context);

        // Weighted strategic score over the factors present in `context`.
        let strategy_score = self.analyze_strategic_factors(context);

        // Research pass. BUG FIX: the context values are f32, so the original
        // `context.get("research_keywords")` produced an `&f32` that was then
        // passed where `conduct_research` expects a `&str` keyword list — a
        // type error. We now treat the mere presence of the key as a trigger
        // and research the agent's known strategy factors.
        // NOTE(review): if callers are meant to supply literal keyword text,
        // the context type must change — confirm intended usage upstream.
        if context.contains_key("research_keywords") {
            let keywords = self
                .strategy_weights
                .keys()
                .cloned()
                .collect::<Vec<_>>()
                .join(",");
            let research_result = self.conduct_research(&keywords);
            self.process_research(research_result);
        }

        // Combine: boost the decision label when strategy strongly agrees.
        if strategy_score > 0.7 {
            format!("enhanced_{}", cognitive_decision)
        } else {
            cognitive_decision
        }
    }

    /// Runs in-depth research over a comma-separated keyword list.
    pub fn conduct_research(&mut self, keywords: &str) -> ResearchResult {
        let engine = ResearchEngine::new();
        // Split "a, b, c" into trimmed owned keywords; skip empty tokens so
        // inputs like "" or "a,,b" don't yield blank research terms.
        let keywords: Vec<String> = keywords
            .split(',')
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty())
            .collect();

        engine.analyze(&keywords, self)
    }

    /// Folds a research result back into the agent: insights go to cognitive
    /// memory, related concepts adjust the strategy weights.
    fn process_research(&mut self, result: ResearchResult) {
        // Store each insight with a fixed high salience of 0.9.
        for insight in result.key_insights {
            self.cognitive_agent.process_memory(&insight, 0.9);
        }

        // Nudge existing weights by the concept weight (kept in [0.1, 1.0]);
        // brand-new concepts enter with a tighter [0.3, 0.8] clamp.
        for (concept, weight) in result.related_concepts {
            self.strategy_weights
                .entry(concept)
                .and_modify(|w| *w = (*w + weight).clamp(0.1, 1.0))
                .or_insert(weight.clamp(0.3, 0.8));
        }
    }

    /// Sum of `context[factor] * weight` over every strategy factor that
    /// appears in the context; absent factors contribute nothing.
    fn analyze_strategic_factors(&self, context: &HashMap<String, f32>) -> f32 {
        self.strategy_weights
            .iter()
            .filter_map(|(factor, weight)| context.get(factor).map(|v| v * weight))
            .sum()
    }

    /// Adaptive learning: rescales the adaptation rate by observed
    /// `performance`, propagates the signal to the cognitive agent, and
    /// adjusts all strategy weights proportionally.
    pub fn adapt(&mut self, performance: f32) {
        // Positive performance grows the rate, negative shrinks it; bounded
        // to [0.01, 0.5] so it never vanishes or explodes.
        self.adaptation_rate = (self.adaptation_rate * (1.0 + performance)).clamp(0.01, 0.5);
        self.cognitive_agent.learn_from_experience(performance);

        // Scale every weight by the performance-modulated rate, bounded to [0.1, 1.0].
        for weight in self.strategy_weights.values_mut() {
            *weight = (*weight * (1.0 + self.adaptation_rate * performance)).clamp(0.1, 1.0);
        }
    }
}