﻿using MathNet.Numerics.Statistics;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace DataAnalyticsTools.Core
{
    /// <summary>
    /// Feature importance analysis: static helpers that score how useful each
    /// feature (column) is for predicting a class label or numeric target,
    /// via one-way ANOVA, permutation testing on a KNN baseline, or Pearson
    /// correlation.
    /// </summary>
    public static class FeatureImportanceAnalyzer
    {
        // Shared RNG for permutation shuffles; System.Random is not
        // thread-safe, so all access is serialized through _lock.
        private static readonly Random _rng = new Random();
        private static readonly object _lock = new object();

        /// <summary>
        /// Computes feature importance via one-way ANOVA: for each feature the
        /// ratio of between-group to total variance (eta-squared) measures how
        /// well the feature separates the label classes.
        /// </summary>
        /// <param name="features">Feature matrix, shape [samples, features].</param>
        /// <param name="labels">Class label per sample; length must equal the row count.</param>
        /// <returns>
        /// Eta-squared per feature, each in [0, 1]; larger means the feature
        /// discriminates the classes better. Empty array for empty input.
        /// </returns>
        /// <exception cref="ArgumentException">
        /// Thrown when <paramref name="labels"/> is null or its length differs
        /// from the number of rows in <paramref name="features"/>.
        /// </exception>
        /// <example>
        /// Input:
        ///   features = [[1, 0.1], [2, 0.2], [3, 0.1], [4, 0.2]]
        ///   labels = [0, 0, 1, 1]
        /// Output: [0.8, 0.0]  // feature 0 separates the classes, feature 1 does not
        /// </example>
        public static float[] CalculateByANOVA(float[][] features, int[] labels)
        {
            if (features == null || features.Length == 0) return Array.Empty<float>();

            // Validate labels up front (consistent with CalculateByPermutation);
            // previously a null/short labels array surfaced as an unhelpful
            // NullReference/IndexOutOfRange exception inside the loop.
            if (labels == null || labels.Length != features.Length)
                throw new ArgumentException("标签数组长度必须与特征矩阵行数相同");

            int m = features[0].Length;
            var scores = new float[m];
            var distinctLabels = labels.Distinct().ToArray();

            for (int j = 0; j < m; j++)
            {
                var col = features.Select(r => (double)r[j]).ToArray();
                double overallMean = col.Average();
                double ssBetween = 0.0; // sum of squares between groups
                double ssWithin = 0.0;  // sum of squares within groups

                foreach (var lab in distinctLabels)
                {
                    var group = col.Where((_, idx) => labels[idx] == lab).ToArray();
                    if (group.Length == 0) continue;
                    double meanG = group.Average();
                    ssBetween += group.Length * Math.Pow(meanG - overallMean, 2);
                    ssWithin += group.Sum(x => Math.Pow(x - meanG, 2));
                }

                // eta^2 = SS_between / SS_total; a constant column (SS_total == 0)
                // carries no information, so it scores 0.
                double ssTotal = ssBetween + ssWithin;
                scores[j] = (float)(ssTotal <= 0 ? 0.0 : ssBetween / ssTotal);
            }

            return scores;
        }

        /// <summary>
        /// Computes feature importance by permutation: shuffle one feature
        /// column at a time and measure how much the leave-one-out accuracy of
        /// a KNN classifier drops relative to the unshuffled baseline.
        /// </summary>
        /// <param name="features">Feature matrix, shape [samples, features].</param>
        /// <param name="labels">Class label per sample; length must equal the row count.</param>
        /// <param name="permutations">Number of random shuffles per feature; must be >= 1 (default 10).</param>
        /// <param name="k">Neighbour count for KNN; clamped to [1, samples - 1] (default 5).</param>
        /// <returns>
        /// Min-max-normalized importance per feature in [0, 1]; larger means
        /// the model relies on the feature more. Empty array for empty input.
        /// </returns>
        /// <exception cref="ArgumentException">
        /// Thrown when <paramref name="labels"/> is null or its length differs
        /// from the number of rows in <paramref name="features"/>.
        /// </exception>
        /// <exception cref="ArgumentOutOfRangeException">
        /// Thrown when <paramref name="permutations"/> is less than 1
        /// (previously this silently produced NaN scores).
        /// </exception>
        public static float[] CalculateByPermutation(float[][] features, int[] labels, int permutations = 10, int k = 5)
        {
            if (features == null || features.Length == 0)
                return Array.Empty<float>();

            if (labels == null || labels.Length != features.Length)
                throw new ArgumentException("标签数组长度必须与特征矩阵行数相同");

            if (permutations < 1)
                throw new ArgumentOutOfRangeException(nameof(permutations));

            int featureCount = features[0].Length;

            // Clamp K to a valid range for leave-one-out (at most n - 1 neighbours).
            k = Math.Min(k, features.Length - 1);
            if (k < 1) k = 1;

            // Scale features so distances are not dominated by large-magnitude
            // columns. NOTE(review): MathHelper.MinMaxScale is assumed to map
            // each column into [0, 1] — defined elsewhere in the project.
            var scaledFeatures = MathHelper.MinMaxScale(features);

            // Baseline accuracy via leave-one-out cross-validation.
            var baseAccuracy = CalculateKNNAccuracy(scaledFeatures, labels, k);

            var importanceScores = new float[featureCount];

            // If the baseline never predicts correctly there is no accuracy to
            // lose; previously this path divided by zero and returned NaNs.
            if (baseAccuracy <= 0)
                return importanceScores;

            // Permutation test per feature.
            for (int j = 0; j < featureCount; j++)
            {
                double totalDrop = 0;

                for (int p = 0; p < permutations; p++)
                {
                    var permuted = PermuteFeature(scaledFeatures, j);
                    var permutedAcc = CalculateKNNAccuracy(permuted, labels, k);

                    // Relative accuracy drop, floored at 0 (shuffling can
                    // occasionally improve accuracy by chance).
                    totalDrop += Math.Max(0, baseAccuracy - permutedAcc) / baseAccuracy;
                }

                // Mean drop over all permutations of this feature.
                importanceScores[j] = (float)(totalDrop / permutations);
            }

            return MathHelper.MinMaxNormalize(importanceScores);
        }


        /// <summary>
        /// Computes feature importance from Pearson correlation: the squared
        /// correlation (r^2) between each feature and the target is the
        /// fraction of target variance the feature explains linearly.
        /// </summary>
        /// <param name="features">Feature matrix, shape [samples, features].</param>
        /// <param name="target">Numeric target per sample; length must equal the row count.</param>
        /// <returns>
        /// Min-max-normalized importance per feature in [0, 1]; larger means a
        /// stronger linear relationship with the target. Empty array for empty input.
        /// </returns>
        /// <exception cref="ArgumentException">
        /// Thrown when the feature matrix and target have different lengths.
        /// </exception>
        /// <example>
        /// Input:
        ///   features = [[1, 1], [2, 0], [3, 0], [4, 1]]
        ///   target = [2, 4, 6, 8]
        /// Output: [1.0, 0.0]  // feature 0 is perfectly correlated, feature 1 is uncorrelated
        /// </example>
        public static float[] CalculateByCorrelation(float[][] features, float[] target)
        {
            if (features == null || features.Length == 0 || target == null || target.Length == 0)
                return Array.Empty<float>();

            if (features.Length != target.Length)
                throw new ArgumentException("特征矩阵和目标变量长度必须相同");

            int featureCount = features[0].Length;
            var importanceScores = new float[featureCount];

            // The target vector does not change per feature — hoisted out of
            // the loop (previously rebuilt on every iteration).
            var targetValues = target.Select(t => (double)t).ToArray();

            for (int j = 0; j < featureCount; j++)
            {
                var featureValues = features.Select(row => (double)row[j]).ToArray();

                var r = Correlation.Pearson(featureValues, targetValues);

                // r^2 (explained-variance fraction), comparable to eta^2 in the
                // ANOVA method. Pearson yields NaN for a constant column;
                // treat that as "no information".
                var r2 = double.IsNaN(r) ? 0.0 : r * r;

                importanceScores[j] = (float)Math.Clamp(r2, 0.0, 1.0);
            }

            // Re-normalize so degenerate cases still span [0, 1].
            return MathHelper.MinMaxNormalize(importanceScores);
        }

        /// <summary>
        /// Returns a deep copy of <paramref name="features"/> in which the
        /// values of one column are uniformly shuffled across rows.
        /// </summary>
        /// <param name="features">Feature matrix to copy; never mutated.</param>
        /// <param name="featureIndex">Index of the column to shuffle.</param>
        /// <returns>New jagged array with the selected column permuted.</returns>
        private static float[][] PermuteFeature(float[][] features, int featureIndex)
        {
            var permuted = features.Select(row => row.ToArray()).ToArray();

            int n = features.Length;
            var indices = Enumerable.Range(0, n).ToArray();

            // Unbiased Fisher-Yates shuffle of the row indices; the shared RNG
            // is serialized behind _lock for thread safety.
            lock (_lock)
            {
                for (int i = n - 1; i > 0; i--)
                {
                    int swap = _rng.Next(i + 1);
                    (indices[i], indices[swap]) = (indices[swap], indices[i]);
                }
            }

            for (int i = 0; i < n; i++)
            {
                permuted[i][featureIndex] = features[indices[i]][featureIndex];
            }

            return permuted;
        }

        /// <summary>
        /// Leave-one-out KNN accuracy: each sample is classified by majority
        /// vote among its k nearest neighbours (Euclidean distance) within the
        /// remaining samples; returns the fraction classified correctly.
        /// </summary>
        /// <param name="features">Feature matrix, shape [samples, features].</param>
        /// <param name="labels">Class label per sample.</param>
        /// <param name="k">Number of neighbours to vote; expected in [1, samples - 1].</param>
        /// <returns>Accuracy in [0, 1]; 0 when fewer than two samples exist.</returns>
        private static double CalculateKNNAccuracy(float[][] features, int[] labels, int k)
        {
            int n = features.Length;

            // With fewer than two samples there are no neighbours to vote;
            // previously this case threw InvalidOperationException on First().
            if (n < 2) return 0.0;

            int correctPredictions = 0;

            // Leave-one-out: hold each sample out as the test point in turn.
            for (int i = 0; i < n; i++)
            {
                var testSample = features[i];
                var distances = new List<(double distance, int label)>(n - 1);

                for (int j = 0; j < n; j++)
                {
                    if (i == j) continue; // skip the held-out sample itself

                    distances.Add((MathHelper.EuclideanDistance(testSample, features[j]), labels[j]));
                }

                // Majority vote over the k nearest neighbours. GroupBy keeps
                // encounter order and OrderByDescending is a stable sort, so
                // vote ties go to the label seen among the nearer neighbours.
                var prediction = distances
                    .OrderBy(d => d.distance)
                    .Take(k)
                    .GroupBy(x => x.label)
                    .OrderByDescending(g => g.Count())
                    .First()
                    .Key;

                if (prediction == labels[i])
                    correctPredictions++;
            }

            return (double)correctPredictions / n;
        }

    }
}
