use arrow::array::{ArrayRef, Float64Array};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;
use anyhow::{Context, Result};
use crate::compute::indicators::technical_indicators::*;
use crate::compute::parallel::{
    ComputeConfig, 
    VectorizeStrategy, 
    ParallelStrategy,
    VectorizedProcessor,
    WindowProcessor
};
use std::sync::Arc;
use std::time::Instant;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

/// Cache key for memoized indicator results.
///
/// Two keys are equal when they were built from the same input-data hash,
/// window size and indicator name. `Hash`/`PartialEq`/`Eq` are derived —
/// the previous manual impls reproduced exactly what the derives generate.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct CacheKey {
    /// Hash of the input price series (see `calculate_data_hash`).
    data_hash: u64,
    /// Indicator window length, e.g. 14 for RSI14.
    window_size: usize,
    /// Indicator name, e.g. "ma", "rsi", "atr", "adx".
    indicator_type: String,
}

/// Feature-engineering module: computes the technical indicators consumed by
/// the trading model, with optional advanced features and a memoization cache.
pub struct FeatureEngineering {
    /// Whether advanced (beyond-basic) indicators are computed as well.
    use_advanced_features: bool,
    /// Default rolling-window length for indicator computation.
    window_size: usize,
    /// Parallelism/vectorization settings used by the compute kernels.
    compute_config: ComputeConfig,
    /// Memoized indicator results keyed by (data hash, window, indicator name).
    cache: HashMap<CacheKey, Vec<f64>>,
}

impl FeatureEngineering {
    /// 创建新的特征工程实例
    pub fn new(window_size: usize, use_advanced_features: bool) -> Self {
        let mut compute_config = ComputeConfig::new();
        compute_config.auto_configure(); // 自动配置并行和向量化策略

        Self {
            use_advanced_features,
            window_size,
            compute_config,
            cache: HashMap::new(),
        }
    }

    /// Creates a feature-engineering instance with a caller-supplied compute
    /// configuration instead of the auto-detected one.
    pub fn with_config(
        window_size: usize,
        use_advanced_features: bool,
        compute_config: ComputeConfig,
    ) -> Self {
        Self {
            window_size,
            use_advanced_features,
            compute_config,
            cache: HashMap::new(),
        }
    }

    /// 计算数据的哈希值
    fn calculate_data_hash(&self, data: &[f64]) -> u64 {
        use std::hash::Hasher;
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        for value in data {
            value.to_bits().hash(&mut hasher);
        }
        hasher.finish()
    }

    /// 从缓存获取或计算指标
    fn get_or_compute<F>(&mut self, key: CacheKey, compute_fn: F) -> Result<Vec<f64>>
    where
        F: FnOnce() -> Result<Vec<f64>>,
    {
        if let Some(cached_result) = self.cache.get(&key) {
            return Ok(cached_result.clone());
        }

        let result = compute_fn()?;
        self.cache.insert(key, result.clone());
        Ok(result)
    }

    /// Computes the Average True Range (ATR) over `period` bars in parallel.
    ///
    /// Returns an empty Vec for empty input; previously `high[0]` would
    /// panic when called with empty slices.
    fn parallel_atr(&self, high: &[f64], low: &[f64], close: &[f64], period: usize) -> Result<Vec<f64>> {
        // Guard: the TR seed below indexes element 0 and would panic on empty input.
        if high.is_empty() || low.is_empty() || close.is_empty() {
            return Ok(Vec::new());
        }

        let window_processor = WindowProcessor::new(period, self.compute_config.clone());

        // True range per bar; the first bar has no previous close, so its TR
        // is simply the high-low span.
        let mut tr_values = Vec::with_capacity(close.len());
        tr_values.push(high[0] - low[0]);

        for i in 1..close.len() {
            let tr = (high[i] - low[i])
                .max(f64::abs(high[i] - close[i - 1]))
                .max(f64::abs(low[i] - close[i - 1]));
            tr_values.push(tr);
        }

        // ATR = rolling mean of TR over `period`.
        let atr_values = window_processor.process(&tr_values, |window| {
            Ok(window.iter().sum::<f64>() / window.len() as f64)
        })?;

        Ok(atr_values)
    }

    /// Computes the Average Directional Index (ADX) over `period` bars.
    ///
    /// Returns an empty Vec for empty input; previously `high[0]` would
    /// panic when called with empty slices.
    fn parallel_adx(&self, high: &[f64], low: &[f64], close: &[f64], period: usize) -> Result<Vec<f64>> {
        // Guard: the TR seed below indexes element 0 and would panic on empty input.
        if high.is_empty() || low.is_empty() || close.is_empty() {
            return Ok(Vec::new());
        }

        let window_processor = WindowProcessor::new(period, self.compute_config.clone());

        // Directional movement (+DM / -DM); the first bar has no predecessor,
        // so both series start at 0.
        let mut plus_dm = vec![0.0];
        let mut minus_dm = vec![0.0];

        for i in 1..high.len() {
            let high_diff = high[i] - high[i - 1];
            let low_diff = low[i - 1] - low[i];

            if high_diff > low_diff && high_diff > 0.0 {
                plus_dm.push(high_diff);
            } else {
                plus_dm.push(0.0);
            }

            if low_diff > high_diff && low_diff > 0.0 {
                minus_dm.push(low_diff);
            } else {
                minus_dm.push(0.0);
            }
        }

        // True range series, seeded with the first bar's high-low span.
        let mut tr_values = Vec::with_capacity(close.len());
        tr_values.push(high[0] - low[0]);

        for i in 1..close.len() {
            let tr = (high[i] - low[i])
                .max(f64::abs(high[i] - close[i - 1]))
                .max(f64::abs(low[i] - close[i - 1]));
            tr_values.push(tr);
        }

        // Windowed sums of +DM, -DM and TR.
        // NOTE(review): the per-index access below assumes `WindowProcessor::
        // process` returns exactly one value per input element — confirm
        // against its contract.
        let smooth_plus_dm = window_processor.process(&plus_dm, |window| {
            Ok(window.iter().sum::<f64>())
        })?;

        let smooth_minus_dm = window_processor.process(&minus_dm, |window| {
            Ok(window.iter().sum::<f64>())
        })?;

        let smooth_tr = window_processor.process(&tr_values, |window| {
            Ok(window.iter().sum::<f64>())
        })?;

        // Directional indicators +DI / -DI; pushed as 0 when TR is 0 to avoid
        // dividing by zero.
        let mut plus_di = Vec::with_capacity(close.len());
        let mut minus_di = Vec::with_capacity(close.len());

        for i in 0..close.len() {
            if smooth_tr[i] > 0.0 {
                plus_di.push(100.0 * smooth_plus_dm[i] / smooth_tr[i]);
                minus_di.push(100.0 * smooth_minus_dm[i] / smooth_tr[i]);
            } else {
                plus_di.push(0.0);
                minus_di.push(0.0);
            }
        }

        // DX = 100 * |+DI - -DI| / (+DI + -DI), or 0 when the sum is 0.
        let mut dx = Vec::with_capacity(close.len());
        for i in 0..close.len() {
            let di_sum = plus_di[i] + minus_di[i];
            if di_sum > 0.0 {
                dx.push(100.0 * f64::abs(plus_di[i] - minus_di[i]) / di_sum);
            } else {
                dx.push(0.0);
            }
        }

        // ADX is the moving average of DX over `period`.
        let adx = window_processor.process(&dx, |window| {
            Ok(window.iter().sum::<f64>() / window.len() as f64)
        })?;

        Ok(adx)
    }

    /// 计算基本技术指标，使用并行计算
    pub fn calculate_basic_indicators(&mut self, prices: &RecordBatch) -> Result<RecordBatch> {
        let start_time = Instant::now();

        // 提取价格数据
        let close = prices
            .column_by_name("close")
            .context("找不到收盘价列")?
            .as_any()
            .downcast_ref::<Float64Array>()
            .context("收盘价列不是Float64Array类型")?;

        let high = prices
            .column_by_name("high")
            .context("找不到最高价列")?
            .as_any()
            .downcast_ref::<Float64Array>()
            .context("最高价列不是Float64Array类型")?;

        let low = prices
            .column_by_name("low")
            .context("找不到最低价列")?
            .as_any()
            .downcast_ref::<Float64Array>()
            .context("最低价列不是Float64Array类型")?;

        // 转换Arrow数组为Vec
        let close_vec: Vec<f64> = close.values().to_vec();
        let high_vec: Vec<f64> = high.values().to_vec();
        let low_vec: Vec<f64> = low.values().to_vec();

        // 计算数据哈希值
        let data_hash = self.calculate_data_hash(&close_vec);

        // 创建缓存键
        let ma_key = CacheKey {
            data_hash,
            window_size: 5,
            indicator_type: "ma".to_string(),
        };

        let rsi_key = CacheKey {
            data_hash,
            window_size: 14,
            indicator_type: "rsi".to_string(),
        };

        let bb_key = CacheKey {
            data_hash,
            window_size: 20,
            indicator_type: "bollinger".to_string(),
        };

        let atr_key = CacheKey {
            data_hash,
            window_size: 14,
            indicator_type: "atr".to_string(),
        };

        let adx_key = CacheKey {
            data_hash,
            window_size: 14,
            indicator_type: "adx".to_string(),
        };

        // 从缓存获取或计算指标
        let ma5_vec = self.get_or_compute(ma_key, || {
            let window_processor = WindowProcessor::new(5, self.compute_config.clone());
            window_processor.process(&close_vec, |window| {
                Ok(window.iter().sum::<f64>() / window.len() as f64)
            })
        })?;

        let rsi14_vec = self.get_or_compute(rsi_key, || {
            self.parallel_rsi(&close_vec, 14)
        })?;

        let (upper_band_vec, middle_band_vec, lower_band_vec) = {
            let bb_result = self.get_or_compute(bb_key.clone(), || {
                self.parallel_bollinger_bands(&close_vec, 20, 2.0)
            })?;
            if let Ok((upper, middle, lower)) = self.parallel_bollinger_bands(&close_vec, 20, 2.0) {
                (upper, middle, lower)
            } else {
                (vec![0.0; close_vec.len()], vec![0.0; close_vec.len()], vec![0.0; close_vec.len()])
            }
        };

        let atr14_vec = self.get_or_compute(atr_key, || {
            self.parallel_atr(&high_vec, &low_vec, &close_vec, 14)
        })?;

        let adx14_vec = self.get_or_compute(adx_key, || {
            self.parallel_adx(&high_vec, &low_vec, &close_vec, 14)
        })?;

        // 创建Arrow数组
        let ma5 = Float64Array::from(ma5_vec);
        let rsi14 = Float64Array::from(rsi14_vec);
        let upper_band = Float64Array::from(upper_band_vec);
        let middle_band = Float64Array::from(middle_band_vec);
        let lower_band = Float64Array::from(lower_band_vec);
        let atr14 = Float64Array::from(atr14_vec);
        let adx14 = Float64Array::from(adx14_vec);

        // 创建输出数据结构
        let schema = Schema::new(vec![
            Field::new("close", DataType::Float64, false),
            Field::new("ma5", DataType::Float64, false),
            Field::new("rsi14", DataType::Float64, false),
            Field::new("bb_upper", DataType::Float64, false),
            Field::new("bb_middle", DataType::Float64, false),
            Field::new("bb_lower", DataType::Float64, false),
            Field::new("atr14", DataType::Float64, false),
            Field::new("adx14", DataType::Float64, false),
        ]);

        let columns: Vec<ArrayRef> = vec![
            Arc::new(close.clone()),
            Arc::new(ma5),
            Arc::new(rsi14),
            Arc::new(upper_band),
            Arc::new(middle_band),
            Arc::new(lower_band),
            Arc::new(atr14),
            Arc::new(adx14),
        ];

        let result = RecordBatch::try_new(Arc::new(schema), columns)
            .context("创建技术指标RecordBatch失败")?;

        let duration = start_time.elapsed();
        println!("计算基本技术指标耗时: {:?}", duration);

        Ok(result)
    }

    /// Computes RSI (Relative Strength Index) over `period` using Wilder
    /// smoothing: the first average gain/loss is a simple mean, subsequent
    /// ones are exponentially smoothed. The first `period` output slots are
    /// NaN because no RSI value is defined there.
    ///
    /// NOTE(review): the per-index access `gains_losses[i]` assumes
    /// `WindowProcessor::process` returns exactly one (gain, loss) pair per
    /// input element — confirm against its contract.
    fn parallel_rsi(&self, close: &[f64], period: usize) -> Result<Vec<f64>> {
        let window_processor = WindowProcessor::new(period, self.compute_config.clone());

        // Per-window single-step (gain, loss): the difference between the last
        // two elements of each window, split into its positive/negative part.
        let gains_losses = window_processor.process(close, |window| {
            if window.len() < 2 {
                return Ok((0.0, 0.0));
            }
            let diff = window[window.len() - 1] - window[window.len() - 2];
            if diff > 0.0 {
                Ok((diff, 0.0))
            } else {
                Ok((0.0, -diff))
            }
        })?;

        let (mut avg_gain, mut avg_loss) = (0.0, 0.0);
        // The first `period` values are undefined; mark them NaN.
        let mut rsi = vec![f64::NAN; period];

        for i in period..close.len() {
            let (gain, loss) = gains_losses[i];
            if i == period {
                // Seed with the simple mean over the first `period` steps.
                avg_gain = gains_losses[1..=period].iter().map(|(g, _)| g).sum::<f64>() / period as f64;
                avg_loss = gains_losses[1..=period].iter().map(|(_, l)| l).sum::<f64>() / period as f64;
            } else {
                // Wilder smoothing: previous average weighted (period-1)/period.
                avg_gain = (avg_gain * (period - 1) as f64 + gain) / period as f64;
                avg_loss = (avg_loss * (period - 1) as f64 + loss) / period as f64;
            }

            // Zero average loss means all-up movement: RS pinned to 100.
            let rs = if avg_loss == 0.0 { 100.0 } else { avg_gain / avg_loss };
            let rsi_value = 100.0 - (100.0 / (1.0 + rs));
            rsi.push(rsi_value);
        }

        Ok(rsi)
    }

    /// Computes Bollinger Bands: returns (upper, middle, lower) where middle
    /// is the rolling mean over `period` and upper/lower are `num_std_dev`
    /// population standard deviations away.
    ///
    /// Fix: `process` yields a Vec of (mean, std) tuples; the previous code
    /// tried to destructure that Vec directly into two bindings. The tuples
    /// are now split into parallel vectors with `unzip`.
    fn parallel_bollinger_bands(&self, close: &[f64], period: usize, num_std_dev: f64) -> Result<(Vec<f64>, Vec<f64>, Vec<f64>)> {
        let window_processor = WindowProcessor::new(period, self.compute_config.clone());

        // Per-window (mean, population std-dev) pairs.
        let stats = window_processor.process(close, |window| {
            let mean = window.iter().sum::<f64>() / window.len() as f64;
            let variance = window.iter().map(|&x| (x - mean).powi(2)).sum::<f64>() / window.len() as f64;
            Ok((mean, variance.sqrt()))
        })?;

        // Split the tuple stream into the middle band and its deviations.
        let (middle_band, std_dev): (Vec<f64>, Vec<f64>) = stats.into_iter().unzip();

        let upper_band: Vec<f64> = middle_band.iter().zip(std_dev.iter())
            .map(|(&m, &s)| m + num_std_dev * s)
            .collect();

        let lower_band: Vec<f64> = middle_band.iter().zip(std_dev.iter())
            .map(|(&m, &s)| m - num_std_dev * s)
            .collect();

        Ok((upper_band, middle_band, lower_band))
    }

    /// Computes advanced technical indicators (currently a placeholder).
    ///
    /// Returns an empty RecordBatch immediately when advanced features are
    /// disabled; otherwise validates the required price columns — so callers
    /// get the same errors the basic path would raise — and, until advanced
    /// indicators are implemented, also returns an empty batch.
    pub fn calculate_advanced_indicators(&mut self, prices: &RecordBatch) -> Result<RecordBatch> {
        if !self.use_advanced_features {
            return Ok(RecordBatch::new_empty(Arc::new(Schema::empty())));
        }

        // Validate column presence and type. The bindings are intentionally
        // unused (underscore-prefixed) until real indicators land; the dead
        // `to_vec` copies the old code made have been removed.
        let _close = prices
            .column_by_name("close")
            .context("找不到收盘价列")?
            .as_any()
            .downcast_ref::<Float64Array>()
            .context("收盘价列不是Float64Array类型")?;

        let _high = prices
            .column_by_name("high")
            .context("找不到最高价列")?
            .as_any()
            .downcast_ref::<Float64Array>()
            .context("最高价列不是Float64Array类型")?;

        let _low = prices
            .column_by_name("low")
            .context("找不到最低价列")?
            .as_any()
            .downcast_ref::<Float64Array>()
            .context("最低价列不是Float64Array类型")?;

        // TODO: add parallel computation of advanced indicators here.
        Ok(RecordBatch::new_empty(Arc::new(Schema::empty())))
    }

    /// Merges the basic-indicator and advanced-indicator batches column-wise
    /// into a single RecordBatch. When the advanced batch is empty, the basic
    /// batch is returned unchanged.
    pub fn merge_indicators(
        &self,
        basic_indicators: &RecordBatch,
        advanced_indicators: &RecordBatch,
    ) -> Result<RecordBatch> {
        // Nothing to merge — reuse the basic batch as-is.
        if advanced_indicators.num_columns() == 0 {
            return Ok(basic_indicators.clone());
        }

        // Collect fields and columns from both batches in order: basic first,
        // then advanced.
        let mut fields = Vec::new();
        let mut columns = Vec::new();
        for batch in [basic_indicators, advanced_indicators].iter() {
            let schema = batch.schema();
            for idx in 0..batch.num_columns() {
                fields.push(schema.field(idx).clone());
                columns.push(batch.column(idx).clone());
            }
        }

        RecordBatch::try_new(Arc::new(Schema::new(fields)), columns)
            .context("合并指标RecordBatch失败")
    }

    /// Auto-tunes the compute configuration by benchmarking several
    /// (vectorize, parallel) strategy combinations against `prices` and
    /// keeping the fastest one.
    pub fn auto_tune(&mut self, prices: &RecordBatch) -> Result<()> {
        let strategies = vec![
            (VectorizeStrategy::Auto, ParallelStrategy::Auto),
            (VectorizeStrategy::Avx512, ParallelStrategy::ThreadPool),
            (VectorizeStrategy::Avx2, ParallelStrategy::TaskSplit),
            (VectorizeStrategy::Sse4, ParallelStrategy::SingleThread),
            (VectorizeStrategy::Scalar, ParallelStrategy::Auto),
        ];

        let mut best_time = f64::INFINITY;
        let mut best_config = self.compute_config.clone();

        for (vectorize, parallel) in strategies {
            self.compute_config.vectorize = vectorize;
            self.compute_config.parallel = parallel;

            // Fix: clear the memoization cache so every strategy is timed on
            // a cold cache. Previously every run after the first only
            // measured cache hits, making the comparison meaningless.
            self.cache.clear();

            let start_time = Instant::now();
            let _ = self.calculate_basic_indicators(prices)?;
            let duration = start_time.elapsed();

            println!("Strategy: {:?}, {:?} - Duration: {:?}", vectorize, parallel, duration);

            if duration.as_secs_f64() < best_time {
                best_time = duration.as_secs_f64();
                best_config = self.compute_config.clone();
            }
        }

        self.compute_config = best_config;
        println!("Best configuration: {:?}", self.compute_config);

        Ok(())
    }

    /// Clears all memoized indicator results; subsequent computations will
    /// recompute from scratch.
    pub fn clear_cache(&mut self) {
        self.cache.clear();
    }

    /// Returns the number of cached indicator-result entries.
    pub fn get_cache_size(&self) -> usize {
        self.cache.len()
    }

    /// Returns a snapshot of runtime metrics: current cache size plus the
    /// active vectorization and parallelism strategies encoded as numbers
    /// (via their `u8` discriminants, as in the original implementation).
    pub fn get_performance_metrics(&self) -> HashMap<String, f64> {
        let entries = [
            ("cache_size", self.cache.len() as f64),
            ("vectorize_strategy", self.compute_config.vectorize as u8 as f64),
            ("parallel_strategy", self.compute_config.parallel as u8 as f64),
        ];
        entries.iter().map(|&(name, value)| (name.to_string(), value)).collect()
    }
}