use std::collections::{HashMap, VecDeque};
use serde::{Serialize, Deserialize};
use rand::Rng;

/// 记忆单元
/// A single memory unit recorded by an agent.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Memory {
    /// Raw text of the observation that produced this memory.
    pub content: String,
    /// Subjective importance; compared against the consolidation threshold
    /// (presumably in [0, 1], but the range is not enforced anywhere — TODO confirm).
    pub importance: f32,
    /// Unix timestamp (seconds) at which the memory was recorded.
    pub timestamp: u64,
    /// Keywords detected in `content` by `find_associations`.
    pub associations: Vec<String>,
}

/// The agent's cognitive state: what it wants, what it believes, how it feels.
#[derive(Debug, Clone)]
pub struct CognitiveState {
    /// Active goals, e.g. "maximize_utility".
    pub goals: Vec<String>,
    /// Named beliefs mapped to a belief strength.
    pub beliefs: HashMap<String, f32>, // belief strength
    /// Current emotional state; biases decision scoring.
    pub emotional_state: EmotionalState,
}

/// Emotional state of an agent. The payload is the intensity of the emotion.
#[derive(Debug, Clone, Serialize)]
pub enum EmotionalState {
    Neutral,
    Optimistic(f32),  // degree of optimism
    Pessimistic(f32), // degree of pessimism
    Stressed(f32),    // degree of stress
}

/// An advanced cognitive agent combining short/long-term memory,
/// an emotional model, and weighted multi-factor decision making.
pub struct CognitiveAgent {
    /// Unique agent identifier.
    pub id: u32,
    /// Short-term memory buffer (working memory).
    pub memory_buffer: VecDeque<Memory>,
    /// Consolidated long-term memories.
    pub long_term_memory: Vec<Memory>,
    /// Goals, beliefs, and emotional state.
    pub cognitive_state: CognitiveState,
    /// Per-factor weights (e.g. "profit", "risk") used to score decisions.
    pub decision_weights: HashMap<String, f32>,
}

impl CognitiveAgent {
    pub fn new(id: u32) -> Self {
        let mut rng = rand::thread_rng();
        
        // 初始化决策权重
        let mut weights = HashMap::new();
        weights.insert("profit".to_string(), rng.gen_range(0.7..1.0));
        weights.insert("risk".to_string(), rng.gen_range(0.3..0.6));
        weights.insert("reputation".to_string(), rng.gen_range(0.4..0.8));
        
        CognitiveAgent {
            id,
            memory_buffer: VecDeque::with_capacity(100),
            long_term_memory: Vec::new(),
            cognitive_state: CognitiveState {
                goals: vec!["maximize_utility".to_string()],
                beliefs: HashMap::new(),
                emotional_state: EmotionalState::Neutral,
            },
            decision_weights: weights,
        }
    }

    /// 记忆处理流程
    pub fn process_memory(&mut self, observation: &str, importance: f32) {
        let memory = Memory {
            content: observation.to_string(),
            importance,
            timestamp: chrono::Utc::now().timestamp() as u64,
            associations: self.find_associations(observation),
        };
        
        self.memory_buffer.push_back(memory);
        self.consolidate_memories();
    }

    /// 记忆关联分析
    fn find_associations(&self, content: &str) -> Vec<String> {
        // 实现基于内容的关联分析
        let keywords = vec!["price", "market", "trade", "cost"];
        keywords.iter()
            .filter(|&kw| content.contains(kw))
            .map(|s| s.to_string())
            .collect()
    }

    /// 记忆巩固机制
    fn consolidate_memories(&mut self) {
        let consolidation_threshold = 0.7;
        while let Some(mem) = self.memory_buffer.pop_front() {
            if mem.importance >= consolidation_threshold {
                self.long_term_memory.push(mem);
            }
        }
    }

    /// 基于认知的决策
    pub fn make_decision(&mut self, context: &HashMap<String, f32>) -> String {
        // 情感状态更新
        self.update_emotional_state(context);
        
        // 多因素决策计算
        let mut scores = HashMap::new();
        for (factor, weight) in &self.decision_weights {
            if let Some(value) = context.get(factor) {
                scores.insert(factor, value * weight);
            }
        }
        
        // 添加认知偏差
        self.apply_cognitive_biases(&mut scores);
        
        // 选择最佳决策
        self.select_best_option(&scores)
    }

    fn update_emotional_state(&mut self, context: &HashMap<String, f32>) {
        let risk = context.get("risk").unwrap_or(&0.0);
        let profit = context.get("profit").unwrap_or(&0.0);
        
        self.cognitive_state.emotional_state = match (risk, profit) {
            (r, p) if *r > 0.8 => EmotionalState::Stressed(0.9),
            (r, p) if *r > 0.5 => EmotionalState::Pessimistic(0.6),
            (r, p) if *p > 0.7 => EmotionalState::Optimistic(0.8),
            _ => EmotionalState::Neutral
        };
    }

    fn apply_cognitive_biases(&self, scores: &mut HashMap<&String, f32>) {
        // 实现常见认知偏差
        match self.cognitive_state.emotional_state {
            EmotionalState::Optimistic(_) => {
                for score in scores.values_mut() {
                    *score *= 1.2;
                }
            },
            EmotionalState::Pessimistic(_) => {
                for score in scores.values_mut() {
                    *score *= 0.8;
                }
            },
            EmotionalState::Stressed(_) => {
                if let Some(risk_score) = scores.get_mut(&"risk".to_string()) {
                    *risk_score *= 1.5;
                }
            },
            _ => {}
        }
    }

    fn select_best_option(&self, scores: &HashMap<&String, f32>) -> String {
        // 实现基于权重的决策选择
        if let Some((_, score)) = scores.iter().max_by(|a, b| a.1.partial_cmp(b.1).unwrap()) {
            if score > &0.7 {
                "aggressive".to_string()
            } else if score > &0.4 {
                "moderate".to_string()
            } else {
                "conservative".to_string()
            }
        } else {
            "neutral".to_string()
        }
    }

    /// 学习机制
    pub fn learn_from_experience(&mut self, outcome: f32) {
        // 根据结果调整决策权重
        let learning_rate = 0.1;
        for (_, weight) in self.decision_weights.iter_mut() {
            *weight = (*weight + learning_rate * outcome).clamp(0.1, 1.0);
        }
        
        // 更新信念系统
        if outcome > 0.7 {
            self.cognitive_state.beliefs.insert("success_tendency".to_string(), 0.8);
        } else if outcome < 0.3 {
            self.cognitive_state.beliefs.insert("failure_risk".to_string(), 0.6);
        }
    }
}