// `is_x86_feature_detected!` only exists on x86/x86_64 targets, so an
// unconditional `use` breaks compilation on every other architecture and
// defeats the `cfg(not(target_arch = "x86_64"))` fallbacks in this file.
#[cfg(target_arch = "x86_64")]
use std::arch::is_x86_feature_detected;

/// Runtime check for the AVX support required by the SIMD routines in this
/// module.
///
/// All 256-bit double-precision intrinsics used here (`_mm256_*_pd`) are AVX,
/// not AVX2, so "avx" is the feature that actually matters; the previous
/// "avx2" check was needlessly strict and reported `false` on AVX-only CPUs.
#[cfg(target_arch = "x86_64")]
pub fn is_avx_supported() -> bool {
    is_x86_feature_detected!("avx")
}

/// Non-x86_64 fallback: the AVX code paths do not exist on this target,
/// so always report unsupported.
#[cfg(not(target_arch = "x86_64"))]
pub fn is_avx_supported() -> bool {
    false
}

/// Compile-time report of whether this build targets an architecture for
/// which this module provides SIMD paths (x86_64 or aarch64).
pub fn is_simd_supported() -> bool {
    cfg!(any(target_arch = "x86_64", target_arch = "aarch64"))
}

/// SIMD-accelerated dot product: `Σ a[i] * b[i]`.
///
/// # Panics
/// Panics if `a.len() != b.len()`. The previous version performed 4-wide
/// unaligned loads from `b` based solely on `a`'s length, silently reading
/// past the end of a shorter `b` (undefined behavior).
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
pub unsafe fn vector_dot_product(a: &[f64], b: &[f64]) -> f64 {
    use std::arch::x86_64::*;

    assert_eq!(a.len(), b.len(), "input slices must have equal length");

    let len = a.len();
    let simd_len = len - (len % 4);
    let mut acc = _mm256_setzero_pd();

    // Multiply-accumulate 4 doubles per iteration.
    for i in (0..simd_len).step_by(4) {
        let va = _mm256_loadu_pd(a.as_ptr().add(i));
        let vb = _mm256_loadu_pd(b.as_ptr().add(i));
        acc = _mm256_add_pd(acc, _mm256_mul_pd(va, vb));
    }

    // Horizontal reduction of the 4 accumulator lanes.
    let mut lanes = [0.0f64; 4];
    _mm256_storeu_pd(lanes.as_mut_ptr(), acc);
    let mut result = lanes.iter().sum::<f64>();

    // Scalar tail for the remaining < 4 elements.
    for i in simd_len..len {
        result += a[i] * b[i];
    }

    result
}

/// Scalar fallback dot product: `Σ a[i] * b[i]`, iterating over
/// `min(a.len(), b.len())` pairs.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_dot_product(a: &[f64], b: &[f64]) -> f64 {
    let mut acc = 0.0;
    for (x, y) in a.iter().zip(b.iter()) {
        acc += x * y;
    }
    acc
}

/// SIMD-accelerated element-wise addition: `result[i] = a[i] + b[i]`.
///
/// # Panics
/// Panics if `b` is not the same length as `a`, or `result` is shorter than
/// `a`. The previous version's 4-wide loads/stores read and wrote out of
/// bounds (undefined behavior) when `b` or `result` was shorter.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
pub unsafe fn vector_add(a: &[f64], b: &[f64], result: &mut [f64]) {
    use std::arch::x86_64::*;

    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    assert!(result.len() >= a.len(), "result slice too short");

    let len = a.len();
    let simd_len = len - (len % 4);

    for i in (0..simd_len).step_by(4) {
        let va = _mm256_loadu_pd(a.as_ptr().add(i));
        let vb = _mm256_loadu_pd(b.as_ptr().add(i));
        _mm256_storeu_pd(result.as_mut_ptr().add(i), _mm256_add_pd(va, vb));
    }

    // Scalar tail for the remaining < 4 elements.
    for i in simd_len..len {
        result[i] = a[i] + b[i];
    }
}

/// Scalar fallback element-wise addition: `result[i] = a[i] + b[i]` for every
/// index of `a`. Panics (via slice indexing) if `b` or `result` is shorter
/// than `a`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_add(a: &[f64], b: &[f64], result: &mut [f64]) {
    let mut i = 0;
    while i < a.len() {
        result[i] = a[i] + b[i];
        i += 1;
    }
}

/// SIMD-accelerated element-wise subtraction: `result[i] = a[i] - b[i]`.
///
/// # Panics
/// Panics if `b` is not the same length as `a`, or `result` is shorter than
/// `a`. The previous version's 4-wide loads/stores read and wrote out of
/// bounds (undefined behavior) when `b` or `result` was shorter.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
pub unsafe fn vector_sub(a: &[f64], b: &[f64], result: &mut [f64]) {
    use std::arch::x86_64::*;

    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    assert!(result.len() >= a.len(), "result slice too short");

    let len = a.len();
    let simd_len = len - (len % 4);

    for i in (0..simd_len).step_by(4) {
        let va = _mm256_loadu_pd(a.as_ptr().add(i));
        let vb = _mm256_loadu_pd(b.as_ptr().add(i));
        _mm256_storeu_pd(result.as_mut_ptr().add(i), _mm256_sub_pd(va, vb));
    }

    // Scalar tail for the remaining < 4 elements.
    for i in simd_len..len {
        result[i] = a[i] - b[i];
    }
}

/// Scalar fallback element-wise subtraction: `result[i] = a[i] - b[i]` for
/// every index of `a`. Panics (via slice indexing) if `b` or `result` is
/// shorter than `a`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_sub(a: &[f64], b: &[f64], result: &mut [f64]) {
    let mut i = 0;
    while i < a.len() {
        result[i] = a[i] - b[i];
        i += 1;
    }
}

/// SIMD-accelerated element-wise multiplication: `result[i] = a[i] * b[i]`.
///
/// # Panics
/// Panics if `b` is not the same length as `a`, or `result` is shorter than
/// `a`. The previous version's 4-wide loads/stores read and wrote out of
/// bounds (undefined behavior) when `b` or `result` was shorter.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
pub unsafe fn vector_mul(a: &[f64], b: &[f64], result: &mut [f64]) {
    use std::arch::x86_64::*;

    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    assert!(result.len() >= a.len(), "result slice too short");

    let len = a.len();
    let simd_len = len - (len % 4);

    for i in (0..simd_len).step_by(4) {
        let va = _mm256_loadu_pd(a.as_ptr().add(i));
        let vb = _mm256_loadu_pd(b.as_ptr().add(i));
        _mm256_storeu_pd(result.as_mut_ptr().add(i), _mm256_mul_pd(va, vb));
    }

    // Scalar tail for the remaining < 4 elements.
    for i in simd_len..len {
        result[i] = a[i] * b[i];
    }
}

/// Scalar fallback element-wise multiplication: `result[i] = a[i] * b[i]` for
/// every index of `a`. Panics (via slice indexing) if `b` or `result` is
/// shorter than `a`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_mul(a: &[f64], b: &[f64], result: &mut [f64]) {
    let mut i = 0;
    while i < a.len() {
        result[i] = a[i] * b[i];
        i += 1;
    }
}

/// SIMD-accelerated element-wise division: `result[i] = a[i] / b[i]`.
/// Division by zero follows IEEE-754 semantics (±inf / NaN), same as the
/// scalar fallback.
///
/// # Panics
/// Panics if `b` is not the same length as `a`, or `result` is shorter than
/// `a`. The previous version's 4-wide loads/stores read and wrote out of
/// bounds (undefined behavior) when `b` or `result` was shorter.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
pub unsafe fn vector_div(a: &[f64], b: &[f64], result: &mut [f64]) {
    use std::arch::x86_64::*;

    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    assert!(result.len() >= a.len(), "result slice too short");

    let len = a.len();
    let simd_len = len - (len % 4);

    for i in (0..simd_len).step_by(4) {
        let va = _mm256_loadu_pd(a.as_ptr().add(i));
        let vb = _mm256_loadu_pd(b.as_ptr().add(i));
        _mm256_storeu_pd(result.as_mut_ptr().add(i), _mm256_div_pd(va, vb));
    }

    // Scalar tail for the remaining < 4 elements.
    for i in simd_len..len {
        result[i] = a[i] / b[i];
    }
}

/// Scalar fallback element-wise division: `result[i] = a[i] / b[i]` for every
/// index of `a` (IEEE-754 semantics on division by zero). Panics (via slice
/// indexing) if `b` or `result` is shorter than `a`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_div(a: &[f64], b: &[f64], result: &mut [f64]) {
    let mut i = 0;
    while i < a.len() {
        result[i] = a[i] / b[i];
        i += 1;
    }
}

/// SIMD-accelerated maximum of a slice.
/// Returns `f64::NEG_INFINITY` for an empty slice.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_max(a: &[f64]) -> f64 {
    use std::arch::x86_64::*;

    if a.is_empty() {
        return f64::NEG_INFINITY;
    }

    // Full 4-element chunks go through the vector unit; the leftover
    // (< 4 elements) is handled by the scalar loop below.
    let chunks = a.chunks_exact(4);
    let tail = chunks.remainder();
    let mut best = _mm256_set1_pd(f64::NEG_INFINITY);
    for chunk in chunks {
        best = _mm256_max_pd(best, _mm256_loadu_pd(chunk.as_ptr()));
    }

    // Horizontal reduction of the 4 lanes.
    let mut lanes = [0.0; 4];
    _mm256_storeu_pd(lanes.as_mut_ptr(), best);
    let mut max_val = lanes.iter().fold(f64::NEG_INFINITY, |m, &x| m.max(x));

    for &x in tail {
        max_val = max_val.max(x);
    }

    max_val
}

/// Scalar fallback maximum of a slice; `f64::NEG_INFINITY` for an empty slice.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_max(a: &[f64]) -> f64 {
    let mut best = f64::NEG_INFINITY;
    for &x in a {
        best = best.max(x);
    }
    best
}

/// SIMD-accelerated minimum of a slice.
/// Returns `f64::INFINITY` for an empty slice.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_min(a: &[f64]) -> f64 {
    use std::arch::x86_64::*;

    if a.is_empty() {
        return f64::INFINITY;
    }

    // Full 4-element chunks go through the vector unit; the leftover
    // (< 4 elements) is handled by the scalar loop below.
    let chunks = a.chunks_exact(4);
    let tail = chunks.remainder();
    let mut best = _mm256_set1_pd(f64::INFINITY);
    for chunk in chunks {
        best = _mm256_min_pd(best, _mm256_loadu_pd(chunk.as_ptr()));
    }

    // Horizontal reduction of the 4 lanes.
    let mut lanes = [0.0; 4];
    _mm256_storeu_pd(lanes.as_mut_ptr(), best);
    let mut min_val = lanes.iter().fold(f64::INFINITY, |m, &x| m.min(x));

    for &x in tail {
        min_val = min_val.min(x);
    }

    min_val
}

/// Scalar fallback minimum of a slice; `f64::INFINITY` for an empty slice.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_min(a: &[f64]) -> f64 {
    let mut best = f64::INFINITY;
    for &x in a {
        best = best.min(x);
    }
    best
}

/// SIMD-accelerated arithmetic mean of a slice.
/// Returns `0.0` for an empty slice.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_mean(a: &[f64]) -> f64 {
    use std::arch::x86_64::*;

    let n = a.len();
    if n == 0 {
        return 0.0;
    }

    // Accumulate full 4-element chunks in the vector unit; the leftover
    // (< 4 elements) is added by the scalar loop below.
    let chunks = a.chunks_exact(4);
    let tail = chunks.remainder();
    let mut acc = _mm256_setzero_pd();
    for chunk in chunks {
        acc = _mm256_add_pd(acc, _mm256_loadu_pd(chunk.as_ptr()));
    }

    // Horizontal reduction of the 4 lanes.
    let mut lanes = [0.0; 4];
    _mm256_storeu_pd(lanes.as_mut_ptr(), acc);
    let mut total = lanes.iter().sum::<f64>();

    for &x in tail {
        total += x;
    }

    total / n as f64
}

/// Scalar fallback arithmetic mean; `0.0` for an empty slice.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_mean(a: &[f64]) -> f64 {
    match a.len() {
        0 => 0.0,
        n => {
            let mut total = 0.0;
            for &x in a {
                total += x;
            }
            total / n as f64
        }
    }
}

/// SIMD-accelerated sample standard deviation (denominator `n - 1`).
/// Returns `0.0` when the slice has fewer than two elements.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_std(a: &[f64]) -> f64 {
    use std::arch::x86_64::*;

    let n = a.len();
    if n <= 1 {
        return 0.0;
    }

    let mean = vector_mean(a);
    let mean_vec = _mm256_set1_pd(mean);

    // Accumulate squared deviations 4 at a time; the leftover (< 4 elements)
    // is handled by the scalar loop below.
    let chunks = a.chunks_exact(4);
    let tail = chunks.remainder();
    let mut acc = _mm256_setzero_pd();
    for chunk in chunks {
        let diff = _mm256_sub_pd(_mm256_loadu_pd(chunk.as_ptr()), mean_vec);
        acc = _mm256_add_pd(acc, _mm256_mul_pd(diff, diff));
    }

    // Horizontal reduction of the 4 lanes.
    let mut lanes = [0.0; 4];
    _mm256_storeu_pd(lanes.as_mut_ptr(), acc);
    let mut sum_sq = lanes.iter().sum::<f64>();

    for &x in tail {
        let d = x - mean;
        sum_sq += d * d;
    }

    (sum_sq / (n - 1) as f64).sqrt()
}

/// Scalar fallback sample standard deviation (denominator `n - 1`);
/// `0.0` when the slice has fewer than two elements.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_std(a: &[f64]) -> f64 {
    let n = a.len();
    if n <= 1 {
        return 0.0;
    }
    let mean = vector_mean(a);
    let mut sum_sq = 0.0;
    for &x in a {
        let d = x - mean;
        sum_sq += d * d;
    }
    (sum_sq / (n - 1) as f64).sqrt()
}

/// Exponential moving average with smoothing factor `alpha`:
/// `ema[0] = data[0]`, `ema[i] = alpha * data[i] + (1 - alpha) * ema[i-1]`.
///
/// The EMA recurrence is strictly sequential — every output depends on the
/// previous output — so it cannot be vectorized by broadcasting one
/// `prev_ema` across 4 lanes. The previous "SIMD" implementation did exactly
/// that, so lanes 1-3 of every group were computed from a stale previous
/// value and the results were wrong. This version computes the recurrence
/// correctly; the signature (including `unsafe` and the `cfg` pairing with
/// the scalar fallback) is unchanged.
///
/// # Safety
/// Contains no unsafe operations; callable with any inputs.
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_ema(data: &[f64], alpha: f64) -> Vec<f64> {
    let len = data.len();
    if len == 0 {
        return Vec::new();
    }

    let mut result = Vec::with_capacity(len);
    result.push(data[0]); // seed the EMA with the first observation
    let mut prev = data[0];

    for &value in &data[1..] {
        let ema = alpha * value + (1.0 - alpha) * prev;
        result.push(ema);
        prev = ema;
    }

    result
}

/// Scalar fallback exponential moving average:
/// `ema[0] = data[0]`, `ema[i] = alpha * data[i] + (1 - alpha) * ema[i-1]`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_ema(data: &[f64], alpha: f64) -> Vec<f64> {
    if data.is_empty() {
        return Vec::new();
    }

    let mut result = Vec::with_capacity(data.len());
    let mut prev = data[0];
    result.push(prev); // the first value seeds the EMA

    for i in 1..data.len() {
        prev = alpha * data[i] + (1.0 - alpha) * prev;
        result.push(prev);
    }

    result
}

/// Simple moving average over a sliding window of `period` elements.
/// Returns `len - period + 1` values; empty when `period == 0` or
/// `data.len() < period`.
///
/// The previous "SIMD" implementation pushed 4 values per window position
/// (so the output was roughly 4× too long) and derived each lane from a
/// stale running sum, so the values were wrong as well. The window-sum
/// recurrence `sum += data[i + period] - data[i]` is sequential; a scalar
/// O(n) loop is the correct implementation. Signature (including `unsafe`
/// and the `cfg` pairing with the scalar fallback) is unchanged.
///
/// # Safety
/// Contains no unsafe operations; callable with any inputs.
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_sma(data: &[f64], period: usize) -> Vec<f64> {
    let len = data.len();
    // period == 0 would divide by zero below; treat it as "no valid window".
    if period == 0 || len < period {
        return Vec::new();
    }

    let mut result = Vec::with_capacity(len - period + 1);
    let mut sum: f64 = data[..period].iter().sum();
    result.push(sum / period as f64);

    for i in 0..len - period {
        // Slide the window: drop the departing sample, add the arriving one.
        sum = sum - data[i] + data[i + period];
        result.push(sum / period as f64);
    }

    result
}

/// Scalar fallback simple moving average over a window of `period` elements;
/// empty result when `data.len() < period`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_sma(data: &[f64], period: usize) -> Vec<f64> {
    let len = data.len();
    if len < period {
        return Vec::new();
    }

    let mut result = Vec::with_capacity(len - period + 1);
    let mut window_sum: f64 = data[..period].iter().sum();
    result.push(window_sum / period as f64);

    // Slide the window: subtract the leaving sample, add the entering one.
    for (leaving, entering) in data.iter().zip(data[period..].iter()) {
        window_sum = window_sum - leaving + entering;
        result.push(window_sum / period as f64);
    }

    result
}

/// SIMD-accelerated weighted moving average: for every window start `i`,
/// `Σ_j data[i + j] * weights[j] / Σ weights`. Returns
/// `len - period + 1` values; empty when `weights` is empty or `data` is
/// shorter than `weights`.
///
/// Fixes two defects in the previous version:
/// * it issued 4-wide loads at `data[i + j]` for every window start up to
///   `len - period`, reading up to 3 elements past the end of `data`
///   (undefined behavior);
/// * it computed 4 windows per iteration but kept only lane 0, discarding
///   75% of the SIMD work.
/// This version steps 4 windows at a time, keeps all 4 lanes, and bounds the
/// vector loop so the highest index loaded is `len - 1`.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx")]
pub unsafe fn vector_wma(data: &[f64], weights: &[f64]) -> Vec<f64> {
    use std::arch::x86_64::*;

    let len = data.len();
    let period = weights.len();
    // period == 0 would produce 0/0 windows; treat it as "no valid window".
    if period == 0 || len < period {
        return Vec::new();
    }

    let windows = len - period + 1;
    let weights_sum: f64 = weights.iter().sum();
    let weights_sum_vec = _mm256_set1_pd(weights_sum);
    let mut result = Vec::with_capacity(windows);

    // Lane k of the accumulator holds the weighted sum for window start
    // i + k. The highest element read is data[i + 3 + period - 1], which for
    // i < simd_windows <= windows stays <= data[len - 1] — in bounds.
    let simd_windows = windows - (windows % 4);
    for i in (0..simd_windows).step_by(4) {
        let mut acc = _mm256_setzero_pd();
        for (j, &w) in weights.iter().enumerate() {
            let d = _mm256_loadu_pd(data.as_ptr().add(i + j));
            acc = _mm256_add_pd(acc, _mm256_mul_pd(d, _mm256_set1_pd(w)));
        }
        let wma = _mm256_div_pd(acc, weights_sum_vec);

        let mut lanes = [0.0; 4];
        _mm256_storeu_pd(lanes.as_mut_ptr(), wma);
        result.extend_from_slice(&lanes);
    }

    // Scalar tail for the remaining < 4 windows.
    for i in simd_windows..windows {
        let sum: f64 = data[i..i + period]
            .iter()
            .zip(weights.iter())
            .map(|(&d, &w)| d * w)
            .sum();
        result.push(sum / weights_sum);
    }

    result
}

/// Scalar fallback weighted moving average: for every window start,
/// `Σ data[start + j] * weights[j] / Σ weights`; empty result when `data`
/// is shorter than `weights`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_wma(data: &[f64], weights: &[f64]) -> Vec<f64> {
    let len = data.len();
    let period = weights.len();
    if len < period {
        return Vec::new();
    }

    let mut result = Vec::with_capacity(len - period + 1);
    let weights_sum: f64 = weights.iter().sum();

    let mut start = 0;
    while start + period <= len {
        let mut acc = 0.0;
        for (offset, w) in weights.iter().enumerate() {
            acc += data[start + offset] * w;
        }
        result.push(acc / weights_sum);
        start += 1;
    }

    result
}

/// Relative Strength Index (Wilder's smoothing) over `period` price changes.
/// Returns `len - period` values in `[0, 100]`; empty when `period == 0` or
/// `data.len() <= period`.
///
/// SIMD is used only for the embarrassingly-parallel gain/loss extraction;
/// the smoothed-average recurrence is sequential and stays scalar. Fixes
/// three issues in the previous version: dead `alpha_vec` /
/// `one_minus_alpha_vec` locals that were never used, an unguarded
/// `avg_gain / avg_loss` for the first value (a completely flat prefix
/// produced `NaN` from 0/0), and a `period - 1` underflow panic when
/// `period == 0`.
///
/// # Safety
/// The caller must ensure the CPU supports AVX (see `is_avx_supported`).
#[cfg(target_arch = "x86_64")]
pub unsafe fn vector_rsi(data: &[f64], period: usize) -> Vec<f64> {
    use std::arch::x86_64::*;

    let len = data.len();
    // period == 0 would underflow `period - 1` below and divide by zero.
    if period == 0 || len <= period {
        return Vec::new();
    }

    let mut gains = Vec::with_capacity(len - 1);
    let mut losses = Vec::with_capacity(len - 1);

    // Vectorized price-change split: gain = max(diff, 0), loss = -min(diff, 0).
    let simd_len = (len - 1) - ((len - 1) % 4);
    for i in (0..simd_len).step_by(4) {
        let prev = _mm256_loadu_pd(data.as_ptr().add(i));
        let next = _mm256_loadu_pd(data.as_ptr().add(i + 1));
        let diff = _mm256_sub_pd(next, prev);

        let zero = _mm256_setzero_pd();
        let gain = _mm256_max_pd(diff, zero);
        let loss = _mm256_mul_pd(_mm256_min_pd(diff, zero), _mm256_set1_pd(-1.0));

        let mut gain_arr = [0.0; 4];
        let mut loss_arr = [0.0; 4];
        _mm256_storeu_pd(gain_arr.as_mut_ptr(), gain);
        _mm256_storeu_pd(loss_arr.as_mut_ptr(), loss);
        gains.extend_from_slice(&gain_arr);
        losses.extend_from_slice(&loss_arr);
    }
    // Scalar tail for the remaining < 4 changes.
    for i in simd_len..len - 1 {
        let diff = data[i + 1] - data[i];
        gains.push(diff.max(0.0));
        losses.push((-diff).max(0.0));
    }

    // Seed the averages with a plain mean of the first `period` changes.
    let mut avg_gain = gains[0..period].iter().sum::<f64>() / period as f64;
    let mut avg_loss = losses[0..period].iter().sum::<f64>() / period as f64;

    let mut result = Vec::with_capacity(len - period);
    // Guard the zero-loss case here too; the old code only guarded it in
    // the loop below.
    result.push(if avg_loss == 0.0 {
        100.0
    } else {
        100.0 - (100.0 / (1.0 + avg_gain / avg_loss))
    });

    // Wilder smoothing: sequential recurrence, scalar on purpose.
    for i in period..gains.len() {
        avg_gain = (avg_gain * (period - 1) as f64 + gains[i]) / period as f64;
        avg_loss = (avg_loss * (period - 1) as f64 + losses[i]) / period as f64;

        if avg_loss == 0.0 {
            result.push(100.0);
        } else {
            let rs = avg_gain / avg_loss;
            result.push(100.0 - (100.0 / (1.0 + rs)));
        }
    }

    result
}

/// Scalar fallback Relative Strength Index (RSI) with Wilder's smoothing.
///
/// Returns `len - period` values; empty when `data.len() <= period`.
///
/// NOTE(review): `period == 0` underflows `period - 1` in the loop below
/// (panics in debug builds) — callers are expected to pass `period >= 1`.
/// NOTE(review): if the first `period` changes are all zero, the first value
/// is `NaN` (0/0); only the loop below guards `avg_loss == 0`.
#[cfg(not(target_arch = "x86_64"))]
pub fn vector_rsi(data: &[f64], period: usize) -> Vec<f64> {
    let len = data.len();
    if len <= period {
        return Vec::new();
    }
    
    // Split each price change into a gain (rise) and a loss (fall magnitude).
    let mut gains = Vec::with_capacity(len - 1);
    let mut losses = Vec::with_capacity(len - 1);
    for i in 0..len - 1 {
        let diff = data[i + 1] - data[i];
        gains.push(diff.max(0.0));
        losses.push((-diff).max(0.0));
    }
    
    // Seed the smoothed averages with a plain mean of the first `period` changes.
    let mut avg_gain = gains[0..period].iter().sum::<f64>() / period as f64;
    let mut avg_loss = losses[0..period].iter().sum::<f64>() / period as f64;
    
    let mut result = Vec::with_capacity(len - period);
    result.push(100.0 - (100.0 / (1.0 + avg_gain / avg_loss)));
    
    // Wilder smoothing: avg = (prev_avg * (period - 1) + current) / period,
    // then RSI = 100 - 100 / (1 + avg_gain / avg_loss).
    for i in period..gains.len() {
        avg_gain = (avg_gain * (period - 1) as f64 + gains[i]) / period as f64;
        avg_loss = (avg_loss * (period - 1) as f64 + losses[i]) / period as f64;
        
        if avg_loss == 0.0 {
            result.push(100.0);
        } else {
            let rs = avg_gain / avg_loss;
            result.push(100.0 - (100.0 / (1.0 + rs)));
        }
    }
    
    result
}