package main

import (
	"encoding/csv"
	"fmt"
	"log"
	"math"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

	"gonum.org/v1/gonum/stat"
)

// Analysis thresholds used throughout the pipeline.
const (
	sigma           = 3.0  // 3-sigma rule multiplier for outlier detection (mean ± sigma*stddev)
	confidenceLevel = 1.96 // two-sided 95% z-score used for normal-distribution confidence intervals
)

// Record is one input CSV row: free-form attribute columns, classification
// columns, measured values keyed by element (header) name, and per-element
// outlier annotations accumulated by the detection passes.
type Record struct {
	Attributes []string
	ClassVals  []string
	Measures   map[string]float64
	IsOutlier  map[string][]string // per measure: the reasons/dimensions in which it was flagged as an outlier
}
// DistributionType identifies which distribution family a sample was
// classified into by the normality tests (shapiroWilkTest / ksTest).
type DistributionType int

const (
	Normal DistributionType = iota // raw data passed a normality test
	LogNormal                      // log-transformed data passed a normality test
	Other                          // neither normal nor log-normal
)

// Stats bundles the descriptive statistics computed for one sample by
// calculateStats.
type Stats struct {
	N            int     // sample size
	Min          float64 // smallest value
	Max          float64 // largest value
	Mean         float64 // arithmetic mean
	StdDev       float64 // population standard deviation (divisor n)
	GeoMean      float64 // geometric mean (NaN if any value <= 0)
	GeoStdDev    float64 // geometric (multiplicative) standard deviation (NaN if any value <= 0)
	Distribution DistributionType
	Sorted       []float64           // ascending copy of the sample
	Quantiles    map[float64]float64 // percentile (0..100) -> interpolated value
}

// ClassGroup groups element measurements for one category of one
// classification dimension. NOTE(review): this type is not referenced
// anywhere in this file — confirm it is used elsewhere before removing.
type ClassGroup struct {
	ClassName string
	Category  string
	Elements  map[string][]float64
}

// swCoefficients holds Shapiro-Wilk a-coefficients indexed by sample size n
// (n = 3..50). NOTE(review): only the first three coefficients are tabulated
// per n, whereas the full test uses floor(n/2) of them — the resulting W is
// an approximation; confirm against the original table (Royston AS R94).
var swCoefficients = map[int][]float64{
	3:  {0.707106781186547},
	4:  {0.687149101987633, 0.167711444619091},
	5:  {0.664685494633399, 0.241301613919576},
	6:  {0.643176452470004, 0.280581576921861, 0.087544103393198},
	7:  {0.623309653721936, 0.303143554992751, 0.140112581446294},
	8:  {0.605215561227642, 0.316429833486248, 0.174309816844087},
	9:  {0.588821496364343, 0.324432388691565, 0.197099487091614},
	10: {0.573889066525817, 0.329107548857928, 0.214107545109417},
	11: {0.560140013073114, 0.331475187658657, 0.226985091286642},
	12: {0.547377292474643, 0.332394530479275, 0.236941401744413},
	13: {0.535465014854132, 0.332355942611557, 0.244824132275697},
	14: {0.524296083030389, 0.331665090166989, 0.251087983971711},
	15: {0.513783705107667, 0.330500818563503, 0.256066496198414},
	16: {0.503858220914021, 0.328976827251841, 0.260007745652721},
	17: {0.494461681202475, 0.327179088703171, 0.263100645907297},
	18: {0.485544820889825, 0.325171655307176, 0.265490949104862},
	19: {0.477064467846681, 0.323004699828534, 0.267292860830168},
	20: {0.468982142714395, 0.320717417848992, 0.268594313921781},
	21: {0.461263469685037, 0.318340110821971, 0.269464521637831},
	22: {0.453877838902244, 0.315896615242872, 0.269958999850751},
	23: {0.446797720517338, 0.313405446417797, 0.270122523966126},
	24: {0.439998466704856, 0.310880722321797, 0.269991603278691},
	25: {0.433457847738738, 0.308333803347242, 0.269595988355715},
	26: {0.427155672078256, 0.305773490621107, 0.268959953446652},
	27: {0.421073360818175, 0.303206486488949, 0.268103242733142},
	28: {0.415193788414681, 0.300637884385689, 0.267042215221692},
	29: {0.409501065892225, 0.298071607271235, 0.265790048679476},
	30: {0.403980347355876, 0.295510754856278, 0.264357888245192},
	31: {0.398618643172772, 0.292957795013248, 0.262755014241542},
	32: {0.393403671807511, 0.290414726165018, 0.260988986381608},
	33: {0.388324716489255, 0.287883180043348, 0.259066728027891},
	34: {0.383371504231228, 0.285364508055683, 0.256994629377045},
	35: {0.378534665815298, 0.282859832318888, 0.254778621521956},
	36: {0.373805689899941, 0.280370077544033, 0.252424224856475},
	37: {0.369176793647409, 0.277895999202933, 0.249936671739464},
	38: {0.364640804596906, 0.275438201268257, 0.247320917238219},
	39: {0.360191143499436, 0.272997155407431, 0.244581671666531},
	40: {0.355821734335167, 0.270573218500472, 0.241723412657614},
	41: {0.351526977097897, 0.268166650590557, 0.238750398185107},
	42: {0.347301700285832, 0.265777633028383, 0.235666683875912},
	43: {0.343141119271783, 0.263406281054796, 0.232476135990891},
	44: {0.339040823177701, 0.261052656707598, 0.229182444818389},
	45: {0.334996752443298, 0.258716773856313, 0.225789131467043},
	46: {0.331005173536558, 0.256398605416117, 0.222299553061798},
	47: {0.327062663208938, 0.254098088862146, 0.218716909373355},
	48: {0.323165996160171, 0.251815131948697, 0.215044248884466},
	49: {0.319312166585596, 0.249549617662226, 0.211284475313275},
	50: {0.315498371091511, 0.247301398472873, 0.207440356568179},
}

// swModifiers holds optional per-term correction factors for the
// Shapiro-Wilk numerator, indexed by sample size n (3..50). Every entry is
// currently empty (no corrections are applied); the table is kept because
// ShapiroWilkTest uses a map lookup on it to validate supported sample
// sizes. Built programmatically instead of a 48-line literal of identical
// empty slices.
var swModifiers = func() map[int][]float64 {
	m := make(map[int][]float64, 48)
	for n := 3; n <= 50; n++ {
		m[n] = []float64{}
	}
	return m
}()

// main drives the soil background-value analysis pipeline:
//
//	usage: ./soil_analysis <input.csv> <attribute column count> <classification column count>
//
// It reads the CSV, emits raw overall/per-category statistics, detects
// 3-sigma outliers (overall first, then per classification dimension),
// writes the outlier lists and cleaned statistics, and finally derives 95%
// confidence intervals / suggested background values.
func main() {
	if len(os.Args) < 4 {
		log.Fatal("用法：./soil_analysis <input.csv> <属性列数> <分类列数>")
	}

	// BUG FIX: Atoi errors were silently discarded, turning a mistyped
	// argument into a column count of 0; fail fast instead.
	attrCols, err := strconv.Atoi(os.Args[2])
	if err != nil {
		log.Fatalf("属性列数无效 %q: %v", os.Args[2], err)
	}
	classCols, err := strconv.Atoi(os.Args[3])
	if err != nil {
		log.Fatalf("分类列数无效 %q: %v", os.Args[3], err)
	}

	log.Printf("开始处理数据文件: %s", os.Args[1])
	log.Printf("属性列数: %d, 分类列数: %d", attrCols, classCols)

	// Load the data set.
	records, headers := readData(os.Args[1], attrCols, classCols)
	log.Printf("成功读取 %d 条记录", len(records))

	classNames := headers[attrCols : attrCols+classCols]
	elements := headers[attrCols+classCols:]
	log.Printf("分类维度: %v", classNames)
	log.Printf("分析指标: %v", elements)

	// Header shared by both outlier files. BUG FIX: it is built once into a
	// dedicated backing array; the original appended to the same attrHeaders
	// slice twice, which can make the second append clobber the first header
	// when spare capacity is shared.
	outlierHeader := make([]string, 0, attrCols+3)
	outlierHeader = append(outlierHeader, headers[:attrCols]...)
	outlierHeader = append(outlierHeader, "指标", "数值", "判断原则")

	// Derive the output file prefix from the input name (without ".csv").
	inputPrefix := strings.TrimSuffix(filepath.Base(os.Args[1]), ".csv")

	// Open all output files up front.
	summaryRaw := createCSV(fmt.Sprintf("%s_summary_raw.csv", inputPrefix), getOutputHeader())
	categorizedRaw := createCSV(fmt.Sprintf("%s_categorized_raw.csv", inputPrefix), getOutputHeader())
	summaryOutliers := createCSV(fmt.Sprintf("%s_summary_outliers.csv", inputPrefix), outlierHeader)
	summaryClean := createCSV(fmt.Sprintf("%s_summary_clean.csv", inputPrefix), getOutputHeader())
	categorizedOutliers := createCSV(fmt.Sprintf("%s_categorized_outliers.csv", inputPrefix), outlierHeader)
	categorizedClean := createCSV(fmt.Sprintf("%s_categorized_clean.csv", inputPrefix), getOutputHeader())
	confidenceIntervals := createCSV(fmt.Sprintf("%s_confidence_intervals.csv", inputPrefix), []string{"指标", "统计类型", "分布类型", "95%置信范围", "建议背景值"})

	// Flush buffered CSV output before exit (file handles themselves are
	// released at process exit).
	defer func() {
		categorizedRaw.Flush()
		summaryRaw.Flush()
		summaryOutliers.Flush()
		summaryClean.Flush()
		categorizedOutliers.Flush()
		categorizedClean.Flush()
		confidenceIntervals.Flush()
	}()

	// Raw overall statistics (outliers included).
	log.Println("开始生成summary_raw.csv（原始数据总体统计结果）...")
	generateSummaryRaw(summaryRaw, records, elements)
	log.Println("summary_raw.csv生成完成")

	// Raw per-category statistics (outliers included).
	log.Println("开始生成categorized_raw.csv（原始数据分类统计结果）...")
	generateCategorizedRaw(categorizedRaw, records, classNames, elements)
	log.Println("categorized_raw.csv生成完成")

	// Stage 1: overall (ungrouped) outlier detection.
	log.Println("开始总体异常值检测...")
	summaryOutlierRecords := detectSummaryOutliers(records, elements)
	log.Printf("总体异常值检测完成，发现 %d 个异常值", countOutliers(summaryOutlierRecords))

	// Emit every flagged (record, element) pair.
	log.Println("开始生成summary_outliers.csv（总体异常值标记）...")
	for _, record := range summaryOutlierRecords {
		for elem, reasons := range record.IsOutlier {
			if len(reasons) > 0 {
				outputRow := make([]string, len(record.Attributes))
				copy(outputRow, record.Attributes)
				outputRow = append(outputRow,
					elem,
					fmt.Sprintf("%.4f", record.Measures[elem]),
					"总体异常值",
				)
				summaryOutliers.Write(outputRow)
			}
		}
	}
	log.Println("summary_outliers.csv生成完成")

	// Overall statistics after dropping overall outliers.
	log.Println("开始生成summary_clean.csv（剔除异常值后的总体统计结果）...")
	cleanDataForSummary := filterOutliers(summaryOutlierRecords)
	generateSummaryClean(summaryClean, cleanDataForSummary, elements)
	log.Println("summary_clean.csv生成完成")

	// Stage 2: per-category outlier detection (resets the stage-1 marks).
	log.Println("开始分类异常值检测...")
	markedRecords := detectOutliers(records, classNames, elements)
	log.Printf("分类异常值检测完成，发现 %d 个异常值", countOutliers(markedRecords))

	log.Println("开始生成categorized_outliers.csv（分类异常值标记）...")
	for _, record := range markedRecords {
		for elem, reasons := range record.IsOutlier {
			if len(reasons) > 0 {
				outputRow := make([]string, len(record.Attributes))
				copy(outputRow, record.Attributes)
				outputRow = append(outputRow,
					elem,
					fmt.Sprintf("%.4f", record.Measures[elem]),
					strings.Join(reasons, "/"),
				)
				categorizedOutliers.Write(outputRow)
			}
		}
	}
	log.Println("categorized_outliers.csv生成完成")

	// Stage 3: cleaned per-category statistics and confidence intervals.
	log.Println("开始生成categorized_clean.csv（剔除常值后的分类统计结果）...")
	cleanData := filterOutliers(markedRecords)
	log.Printf("剔除分类异常值后剩余 %d 条有效记录", len(cleanData))

	generateCategorizedClean(categorizedClean, cleanData, classNames, elements)
	log.Println("categorized_clean.csv生成完成")

	log.Println("开始生成confidence_intervals.csv（置信区间）...")
	generateConfidenceIntervals(confidenceIntervals, cleanData, classNames, elements)
	log.Println("confidence_intervals.csv生成完成")

	log.Println("所有处理完成，结果文件已生成")
}

// calculateStats computes descriptive statistics for data and classifies its
// distribution (Shapiro-Wilk for n <= 50, Kolmogorov-Smirnov otherwise).
// The input slice is not modified; the zero Stats is returned for empty
// input.
func calculateStats(data []float64) Stats {
	if len(data) == 0 {
		return Stats{}
	}

	// Work on a sorted copy; the caller's slice is left untouched.
	sorted := make([]float64, len(data))
	copy(sorted, data)
	sort.Float64s(sorted)

	// Basic statistics.
	mean, stdDev := meanStd(sorted)
	geoMean, geoStd := geometricStats(sorted)

	stats := Stats{
		N: len(data),
		// BUG FIX: Min/Max were read from the unsorted input (data[0] and
		// data[len-1]); use the sorted copy so they are the true extremes.
		Min:       sorted[0],
		Max:       sorted[len(sorted)-1],
		Mean:      mean,
		StdDev:    stdDev,
		GeoMean:   geoMean,
		GeoStdDev: geoStd,
		Sorted:    sorted,
		// BUG FIX: quantiles were computed on the unsorted input, but
		// calculateQuantiles requires ascending-sorted data.
		Quantiles:    calculateQuantiles(sorted),
		Distribution: Other,
	}

	// Distribution detection: exact small-sample test up to n=50, K-S above.
	if stats.N <= 50 {
		stats.Distribution = shapiroWilkTest(data)
	} else {
		stats.Distribution = ksTest(data)
	}

	return stats
}

// meanStd returns the arithmetic mean and the population standard deviation
// (divisor n, not n-1) of data.
func meanStd(data []float64) (float64, float64) {
	n := float64(len(data))

	var total float64
	for _, x := range data {
		total += x
	}
	avg := total / n

	var sqSum float64
	for _, x := range data {
		sqSum += math.Pow(x-avg, 2)
	}
	return avg, math.Sqrt(sqSum / n)
}

// geometricStats returns the geometric mean and the geometric
// (multiplicative) standard deviation of data. The logarithm is undefined
// for non-positive values, so both results are NaN in that case.
func geometricStats(data []float64) (float64, float64) {
	n := float64(len(data))

	var logSum float64
	for _, x := range data {
		if x <= 0 {
			return math.NaN(), math.NaN()
		}
		logSum += math.Log(x)
	}
	gm := math.Exp(logSum / n)

	var sqSum float64
	for _, x := range data {
		sqSum += math.Pow(math.Log(x/gm), 2)
	}
	return gm, math.Exp(math.Sqrt(sqSum / n))
}

// calculateQuantiles linearly interpolates a fixed set of percentiles
// (0, 2.5, 5, 10, 25, 50, 75, 90, 95, 97.5, 100) from an ascending-sorted
// slice and returns them keyed by percentile.
func calculateQuantiles(sorted []float64) map[float64]float64 {
	last := len(sorted) - 1
	out := make(map[float64]float64, 11)

	for _, pct := range []float64{0, 2.5, 5, 10, 25, 50, 75, 90, 95, 97.5, 100} {
		idx := float64(last) * pct / 100
		lo := int(idx)
		frac := idx - float64(lo)

		switch {
		case lo >= last:
			// At or past the top: clamp to the maximum.
			out[pct] = sorted[last]
		case lo < 0:
			// Defensive clamp to the minimum.
			out[pct] = sorted[0]
		default:
			// Linear interpolation between the two surrounding order statistics.
			out[pct] = sorted[lo]*(1-frac) + sorted[lo+1]*frac
		}
	}
	return out
}

// isOutlier reports whether val lies outside mean ± sigma*stdDev (the
// 3-sigma rule; sigma is the package-level constant).
func isOutlier(val, mean, stdDev float64) bool {
	lo := mean - sigma*stdDev
	hi := mean + sigma*stdDev
	return val < lo || val > hi
}

// detectOutliers flags, for every classification dimension, each measurement
// that violates the 3-sigma rule within its category group. Reasons are
// recorded as "className-category". Records are mutated in place (IsOutlier
// is reset and repopulated) and the same slice is returned.
func detectOutliers(records []*Record, classNames []string, elements []string) []*Record {
	// Reset all outlier marks before re-detection.
	for _, rec := range records {
		rec.IsOutlier = make(map[string][]string)
	}

	for classIdx, className := range classNames {
		// Bucket records by their value in this classification dimension.
		groups := make(map[string][]*Record)
		for _, rec := range records {
			key := rec.ClassVals[classIdx]
			groups[key] = append(groups[key], rec)
		}

		for category, group := range groups {
			for _, elem := range elements {
				// Gather this element's values within the group.
				var values []float64
				for _, rec := range group {
					if v, ok := rec.Measures[elem]; ok {
						values = append(values, v)
					}
				}

				groupStats := calculateStats(values)

				// Mark every measurement outside mean ± sigma*stddev of its group.
				for _, rec := range group {
					if v, ok := rec.Measures[elem]; ok && isOutlier(v, groupStats.Mean, groupStats.StdDev) {
						rec.IsOutlier[elem] = append(rec.IsOutlier[elem], className+"-"+category)
					}
				}
			}
		}
	}
	return records
}

// contains reports whether str is an element of slice.
func contains(slice []string, str string) bool {
	for i := range slice {
		if slice[i] == str {
			return true
		}
	}
	return false
}

// generateOutput2 writes per-dimension, per-category descriptive statistics
// to writer, excluding measurements flagged as outliers within the current
// dimension+category group. Apparently a legacy twin of
// generateCategorizedClean; it is not called from main.
func generateOutput2(writer *csv.Writer, data []*Record, classNames []string, elements []string) {
	for classIdx, className := range classNames {
		// Group records by category within this classification dimension.
		categoryGroups := make(map[string][]*Record)
		for _, r := range data {
			category := r.ClassVals[classIdx]
			categoryGroups[category] = append(categoryGroups[category], r)
		}

		// Produce one row per (category, element) that still has data.
		for category, records := range categoryGroups {
			for _, elem := range elements {
				// Outlier reasons are recorded as "className-category" (see
				// detectOutliers). BUG FIX: the filter used to compare against
				// the bare className, which never matched, so flagged outliers
				// were silently kept in the "clean" statistics.
				reason := className + "-" + category
				values := make([]float64, 0)
				for _, r := range records {
					if val, exists := r.Measures[elem]; exists {
						if reasons, hasOutlier := r.IsOutlier[elem]; !hasOutlier || !contains(reasons, reason) {
							values = append(values, val)
						}
					}
				}

				if len(values) > 0 {
					stats := calculateStats(values)
					quantiles := calculateQuantiles(stats.Sorted)

					writer.Write([]string{
						elem,
						fmt.Sprintf("%s-%s", className, category),
						fmt.Sprintf("%d", len(values)),
						fmt.Sprintf("%.4f", quantiles[0]),
						fmt.Sprintf("%.4f", quantiles[2.5]),
						fmt.Sprintf("%.4f", quantiles[5]),
						fmt.Sprintf("%.4f", quantiles[10]),
						fmt.Sprintf("%.4f", quantiles[25]),
						fmt.Sprintf("%.4f", quantiles[50]),
						fmt.Sprintf("%.4f", quantiles[75]),
						fmt.Sprintf("%.4f", quantiles[90]),
						fmt.Sprintf("%.4f", quantiles[95]),
						fmt.Sprintf("%.4f", quantiles[97.5]),
						fmt.Sprintf("%.4f", quantiles[100]),
						fmt.Sprintf("%.4f", stats.Mean),
						fmt.Sprintf("%.4f", stats.StdDev),
						fmt.Sprintf("%.4f", stats.GeoMean),
						fmt.Sprintf("%.4f", stats.GeoStdDev),
						getDistributionString(stats.Distribution),
					})
				}
			}
		}
	}
}

// generateOutput3 writes per-dimension, per-category 95% confidence
// intervals to writer, choosing the interval form from the detected
// distribution. Apparently a legacy twin of generateConfidenceIntervals; it
// is not called from main.
func generateOutput3(writer *csv.Writer, data []*Record, classNames []string, elements []string) {
	for classIdx, className := range classNames {
		// Group records by category within this classification dimension.
		categoryGroups := make(map[string][]*Record)
		for _, r := range data {
			category := r.ClassVals[classIdx]
			categoryGroups[category] = append(categoryGroups[category], r)
		}

		for category, groupRecords := range categoryGroups {
			for _, elem := range elements {
				// Outlier reasons are recorded as "className-category" (see
				// detectOutliers). BUG FIX: the filter used to compare against
				// the bare className, which never matched, so flagged outliers
				// were silently retained.
				reason := className + "-" + category
				values := make([]float64, 0)
				for _, r := range groupRecords {
					if val, exists := r.Measures[elem]; exists {
						if reasons, hasOutlier := r.IsOutlier[elem]; !hasOutlier || !contains(reasons, reason) {
							values = append(values, val)
						}
					}
				}

				if len(values) > 0 {
					stats := calculateStats(values)

					// Interval form depends on the detected distribution.
					var lower, upper float64
					switch stats.Distribution {
					case Normal:
						// mean ± 1.96 sigma.
						lower = stats.Mean - confidenceLevel*stats.StdDev
						upper = stats.Mean + confidenceLevel*stats.StdDev
					case LogNormal:
						// Multiplicative interval around the geometric mean.
						lower = stats.GeoMean / stats.GeoStdDev
						upper = stats.GeoMean * stats.GeoStdDev
					default:
						// Distribution-free fallback: 2.5% and 97.5% quantiles.
						sort.Float64s(values)
						quantiles := calculateQuantiles(values)
						lower = quantiles[2.5]
						upper = quantiles[97.5]
					}

					writer.Write([]string{
						elem,
						fmt.Sprintf("%s-%s", className, category),
						getDistributionString(stats.Distribution),
						fmt.Sprintf("[%.4f-%.4f]", lower, upper),
						fmt.Sprintf("%.4f", upper), // 使用区间上限作为建议背景值
					})
				}
			}
		}
	}
}

// filterOutliers deep-copies records, dropping every measurement that
// carries at least one outlier mark (in any dimension). Attribute and class
// columns are always kept, so the result has the same length and order as
// the input; the copies start with empty IsOutlier maps.
func filterOutliers(records []*Record) []*Record {
	cleanRecords := make([]*Record, len(records))

	for i, src := range records {
		dst := &Record{
			Attributes: make([]string, len(src.Attributes)),
			ClassVals:  make([]string, len(src.ClassVals)),
			Measures:   make(map[string]float64),
			IsOutlier:  make(map[string][]string),
		}
		copy(dst.Attributes, src.Attributes)
		copy(dst.ClassVals, src.ClassVals)

		// Keep only measurements with no outlier marks at all.
		for elem, val := range src.Measures {
			if len(src.IsOutlier[elem]) == 0 {
				dst.Measures[elem] = val
			}
		}

		cleanRecords[i] = dst
	}

	return cleanRecords
}

// getOutputHeader returns the column header row shared by every statistics
// output file: element, group, count, the eleven quantile columns, the
// arithmetic/geometric moments, and the distribution type.
func getOutputHeader() []string {
	header := []string{"元素", "统计类型", "样本数量", "最小值"}
	header = append(header,
		"2.5%分位值", "5%分位值", "10%分位值", "25%分位值", "中位数",
		"75%分位值", "90%分位值", "95%分位值", "97.5%分位值", "最大值",
	)
	return append(header, "算术平均值", "算术标准差", "几何平均值", "几何标准差", "分布类型")
}

// readData loads the CSV at filename and splits each data row into attrCols
// attribute columns, classCols classification columns, and measurement
// columns parsed as float64 keyed by header name. Rows shorter than the
// header are skipped; unparsable cells are skipped with a warning. It
// returns the parsed records and the header row, and terminates the process
// on I/O errors, an empty file, or too few columns.
func readData(filename string, attrCols, classCols int) ([]*Record, []string) {
	file, err := os.Open(filename)
	if err != nil {
		log.Fatal("无法打开文件:", err)
	}
	defer file.Close()

	reader := csv.NewReader(file)
	rows, err := reader.ReadAll()
	if err != nil {
		log.Fatal("读取CSV失败:", err)
	}

	// BUG FIX: guard against an empty file — rows[0] below would panic.
	if len(rows) == 0 {
		log.Fatal("CSV文件为空，缺少表头")
	}

	headers := rows[0]
	if len(headers) < attrCols+classCols {
		log.Fatalf("列数不足，需要至少%d列，实际%d列", attrCols+classCols, len(headers))
	}

	var records []*Record
	for _, row := range rows[1:] {
		// Skip short rows rather than index out of range.
		if len(row) < len(headers) {
			continue
		}

		r := &Record{
			Attributes: row[:attrCols],
			ClassVals:  row[attrCols : attrCols+classCols],
			Measures:   make(map[string]float64),
			IsOutlier:  make(map[string][]string), // pre-initialized so callers can append immediately
		}

		for i, h := range headers[attrCols+classCols:] {
			// Trim whitespace so values like " 1.2 " still parse.
			valStr := strings.TrimSpace(row[attrCols+classCols+i])
			val, err := strconv.ParseFloat(valStr, 64)
			if err != nil {
				log.Printf("警告：无法解析数值 '%s': %v", valStr, err)
				continue
			}
			r.Measures[h] = val
		}

		records = append(records, r)
	}
	return records, headers
}

// shapiroWilkTest classifies a small sample (n <= 50): Normal if the raw
// data passes the Shapiro-Wilk check (W > 0.90, p > 0.05), LogNormal if all
// values are positive and their logarithms pass a slightly stricter check
// (W > 0.92, p > 0.05), otherwise Other.
func shapiroWilkTest(data []float64) DistributionType {
	// Normality test on the raw data.
	if W, p, ok := ShapiroWilkTest(data); ok && W > 0.90 && p > 0.05 {
		return Normal
	}

	// The log-normal check only makes sense for strictly positive samples.
	logData := make([]float64, len(data))
	for i, v := range data {
		if v <= 0 {
			return Other
		}
		logData[i] = math.Log(v)
	}

	// Normality test on the log-transformed data.
	if W, p, ok := ShapiroWilkTest(logData); ok && W > 0.92 && p > 0.05 {
		return LogNormal
	}

	return Other
}

// ksTest classifies a large sample (n > 50) with one-sample Kolmogorov-
// Smirnov tests: Normal if the raw data fits N(mean, sd) with D < 0.1 and
// p > 0.05, LogNormal if all values are positive and the logs fit a normal
// under the same criteria, otherwise Other.
func ksTest(data []float64) DistributionType {
	// Fit against a normal with the sample's own mean/stddev.
	D, p := KolmogorovSmirnovTest(data, stat.Mean(data, nil), stat.StdDev(data, nil))
	if D < 0.1 && p > 0.05 {
		return Normal
	}

	// The log-normal check requires strictly positive data.
	logData := make([]float64, len(data))
	for i, v := range data {
		if v <= 0 {
			return Other
		}
		logData[i] = math.Log(v)
	}

	// Fit the log-transformed data against a normal.
	D, p = KolmogorovSmirnovTest(logData, stat.Mean(logData, nil), stat.StdDev(logData, nil))
	if D < 0.1 && p > 0.05 {
		return LogNormal
	}

	return Other
}

// ShapiroWilkTest computes an approximate Shapiro-Wilk statistic W and
// p-value for samples of size 3..50; ok is false outside that range.
// NOTE(review): the data is standardized first (W is location/scale
// invariant, so this should not change W), and swCoefficients holds at most
// three a-values per n instead of the full floor(n/2) — treat W and p as
// approximations and confirm against Royston's AS R94 before relying on
// exact values.
func ShapiroWilkTest(data []float64) (W float64, pValue float64, ok bool) {
	n := len(data)
	if n < 3 || n > 50 {
		return 0, 0, false
	}

	// 1. Standardize a sorted copy of the data.
	sorted := make([]float64, n)
	copy(sorted, data)
	sort.Float64s(sorted)
	mean := stat.Mean(sorted, nil)
	std := stat.StdDev(sorted, nil)
	for i := range sorted {
		sorted[i] = (sorted[i] - mean) / std
	}

	// 2. Look up the coefficient and modifier tables for this sample size.
	a, aok := swCoefficients[n]
	m, mok := swModifiers[n]
	if !aok || !mok {
		return 0, 0, false
	}

	// 3. Numerator: weighted sum of symmetric order-statistic differences.
	var numerator float64
	k := len(a)
	for i := 0; i < k; i++ {
		j := n - 1 - i
		if i < j {
			term := a[i] * (sorted[j] - sorted[i])
			// Optional per-term correction (currently all modifiers are empty).
			if i < len(m) {
				term += m[i]
			}
			numerator += term
		}
	}

	// 4. W = numerator^2 / sum of squared standardized deviations.
	var denominator float64
	for _, v := range sorted {
		denominator += v * v
	}
	W = (numerator * numerator) / denominator

	// 5. P-value via the log-normal approximation in calculatePValue.
	pValue = calculatePValue(n, W)
	return W, pValue, true
}

// normalCDF evaluates the standard normal cumulative distribution function
// at x via the error function: Phi(x) = (1 + erf(x/sqrt(2))) / 2.
func normalCDF(x float64) float64 {
	z := x / math.Sqrt2
	return 0.5 * (1 + math.Erf(z))
}

// calculatePValue approximates the p-value for a Shapiro-Wilk statistic W at
// sample size n by normalizing log(1-W) against n and mapping it through a
// polynomial z-score. NOTE(review): the original comment claims exact parity
// with Scipy; the constants below are not obviously Royston's published
// coefficients — confirm against scipy.stats.shapiro before trusting exact
// p-values.
func calculatePValue(n int, W float64) float64 {
	// Approximation parameters.
	c := []float64{0.118898, 0.133414, 0.327907, 0.007633}
	d := []float64{0.318854, 0.187391, 0.104304}

	// Near-perfect fits: map W -> p through an exponential tail so p -> 1 as
	// W -> 1 (avoids log(1-W) blowing up).
	if W > 0.99 {
		nu := 1.0 / (1.0 - W)
		return 1.0 - math.Exp(-nu/2.0)
	}

	// Log transform of the distance from normality.
	y := math.Log(1.0 - W)
	mu := math.Log(float64(n))

	// Standardized value of y given n.
	// sigma := 1.0 / math.Sqrt(float64(n))
	x := (y - c[0]*mu) / (c[1] + c[2]*mu + c[3]*mu*mu)

	// Polynomial z-score, then the upper-tail standard normal probability.
	z := x * (d[0] + x*(d[1]+x*d[2]))
	p := normalCDF(-z)

	// Clamp to the valid probability range.
	return math.Max(0.0, math.Min(1.0, p))
}

// KolmogorovSmirnovTest runs a one-sample K-S test of data against a normal
// distribution with the given mean and stdDev. It returns the statistic D
// (the largest gap between the empirical and theoretical CDFs, checked on
// both sides of each step) and an approximate p-value from KSPValue.
func KolmogorovSmirnovTest(data []float64, mean, stdDev float64) (D, pValue float64) {
	n := len(data)
	if n == 0 {
		return 0, 0
	}

	// Sort a copy of the sample.
	sorted := make([]float64, n)
	copy(sorted, data)
	sort.Float64s(sorted)

	// Single pass: at each order statistic compare the theoretical CDF with
	// the empirical CDF just after (d1) and just before (d2) the step.
	prevECDF := 0.0
	for i, x := range sorted {
		theory := NormalCDF(x, mean, stdDev)
		ecdf := float64(i+1) / float64(n)

		d1 := math.Abs(ecdf - theory)
		d2 := 0.0
		if i > 0 {
			d2 = math.Abs(prevECDF - theory)
		}
		if gap := math.Max(d1, d2); gap > D {
			D = gap
		}
		prevECDF = ecdf
	}

	// Approximate p-value for the observed D.
	pValue = KSPValue(D, n)
	return
}

// NormalCDF returns P(X <= x) for X ~ N(mean, stdDev^2), computed with the
// error function.
func NormalCDF(x, mean, stdDev float64) float64 {
	z := (x - mean) / (stdDev * math.Sqrt2)
	return 0.5 * (1 + math.Erf(z))
}

// KSPValue approximates the p-value for a K-S statistic D at sample size n.
// For n > 50 it uses the asymptotic Kolmogorov distribution with the
// Stephens small-sample correction term; for n <= 50 a rough exponential
// approximation is used (a table or full Marsaglia-Tsang-Wang evaluation
// would be more accurate).
func KSPValue(D float64, n int) float64 {
	if n > 50 {
		// Large-sample asymptotic form.
		lambda := (math.Sqrt(float64(n)) + 0.12 + 0.11/math.Sqrt(float64(n))) * D
		if lambda < 0.2 {
			// Tiny deviations: effectively certain fit.
			return 1.0
		}
		return 2 * math.Exp(-2*lambda*lambda)
	}

	// Small-sample branch: crude approximation only.
	return 1 - math.Exp(-2*(D*D)*(float64(n)+0.5))
}

// getDistributionString maps a DistributionType to its Chinese display label
// used in the output CSVs.
func getDistributionString(d DistributionType) string {
	if d == Normal {
		return "正态分布"
	}
	if d == LogNormal {
		return "对数正态分布"
	}
	return "其他分布"
}

// createCSV creates (truncating if present) the named file, writes the
// header row, and returns a csv.Writer over it. The underlying *os.File is
// not exposed and never explicitly closed; callers Flush the writer and rely
// on process exit to release the handle.
func createCSV(name string, header []string) *csv.Writer {
	// BUG FIX: the os.Create error was ignored, so a failed create produced
	// a writer over a nil file that panicked on first use.
	file, err := os.Create(name)
	if err != nil {
		log.Fatalf("无法创建输出文件 %s: %v", name, err)
	}
	writer := csv.NewWriter(file)
	if err := writer.Write(header); err != nil {
		log.Fatalf("写入表头失败 %s: %v", name, err)
	}
	return writer
}

// countOutliers returns the number of (record, element) pairs that carry at
// least one outlier reason.
func countOutliers(records []*Record) int {
	total := 0
	for _, rec := range records {
		for _, reasons := range rec.IsOutlier {
			if len(reasons) != 0 {
				total++
			}
		}
	}
	return total
}

// generateOutput writes per-dimension, per-category descriptive statistics
// to writer using ALL values (no outlier filtering). NOTE(review): it is an
// apparently unused legacy twin of generateCategorizedRaw — confirm before
// removing.
func generateOutput(writer *csv.Writer, data []*Record, classNames []string, elements []string) {
	// Iterate over every classification dimension.
	for classIdx, className := range classNames {
		// Group records by their category value in this dimension.
		categoryGroups := make(map[string][]*Record)
		for _, r := range data {
			category := r.ClassVals[classIdx]
			categoryGroups[category] = append(categoryGroups[category], r)
		}

		// One output row per (category, element) pair that has data.
		for category, records := range categoryGroups {
			for _, elem := range elements {
				// Collect every available value (outliers included).
				values := make([]float64, 0)
				for _, r := range records {
					if val, exists := r.Measures[elem]; exists {
						values = append(values, val)
					}
				}

				// Compute and emit the statistics row.
				if len(values) > 0 {
					stats := calculateStats(values)
					quantiles := calculateQuantiles(stats.Sorted)

					// Human-readable distribution label.
					distType := getDistributionString(stats.Distribution)

					writer.Write([]string{
						elem,
						fmt.Sprintf("%s-%s", className, category),
						fmt.Sprintf("%d", len(values)),
						fmt.Sprintf("%.4f", quantiles[0]),
						fmt.Sprintf("%.4f", quantiles[2.5]),
						fmt.Sprintf("%.4f", quantiles[5]),
						fmt.Sprintf("%.4f", quantiles[10]),
						fmt.Sprintf("%.4f", quantiles[25]),
						fmt.Sprintf("%.4f", quantiles[50]),
						fmt.Sprintf("%.4f", quantiles[75]),
						fmt.Sprintf("%.4f", quantiles[90]),
						fmt.Sprintf("%.4f", quantiles[95]),
						fmt.Sprintf("%.4f", quantiles[97.5]),
						fmt.Sprintf("%.4f", quantiles[100]),
						fmt.Sprintf("%.4f", stats.Mean),
						fmt.Sprintf("%.4f", stats.StdDev),
						fmt.Sprintf("%.4f", stats.GeoMean),
						fmt.Sprintf("%.4f", stats.GeoStdDev),
						distType,
					})
				}
			}
		}
	}
}

// generateOutputSummary writes one overall (ungrouped) statistics row per
// element to writer. No outlier filtering happens here — callers are
// expected to pass pre-filtered data if clean statistics are wanted.
// NOTE(review): apparently unused; generateSummaryClean is its called twin.
func generateOutputSummary(writer *csv.Writer, data []*Record, elements []string) {
	// One row per element.
	for _, elem := range elements {
		// Collect every available value for this element.
		values := make([]float64, 0)
		for _, r := range data {
			if val, exists := r.Measures[elem]; exists {
				values = append(values, val)
			}
		}

		// Compute and emit the statistics row.
		if len(values) > 0 {
			stats := calculateStats(values)
			quantiles := calculateQuantiles(stats.Sorted)

			// Human-readable distribution label.
			distType := getDistributionString(stats.Distribution)

			writer.Write([]string{
				elem,
				"总体统计", // 不进行分类
				fmt.Sprintf("%d", len(values)),
				fmt.Sprintf("%.4f", quantiles[0]),
				fmt.Sprintf("%.4f", quantiles[2.5]),
				fmt.Sprintf("%.4f", quantiles[5]),
				fmt.Sprintf("%.4f", quantiles[10]),
				fmt.Sprintf("%.4f", quantiles[25]),
				fmt.Sprintf("%.4f", quantiles[50]),
				fmt.Sprintf("%.4f", quantiles[75]),
				fmt.Sprintf("%.4f", quantiles[90]),
				fmt.Sprintf("%.4f", quantiles[95]),
				fmt.Sprintf("%.4f", quantiles[97.5]),
				fmt.Sprintf("%.4f", quantiles[100]),
				fmt.Sprintf("%.4f", stats.Mean),
				fmt.Sprintf("%.4f", stats.StdDev),
				fmt.Sprintf("%.4f", stats.GeoMean),
				fmt.Sprintf("%.4f", stats.GeoStdDev),
				distType,
			})
		}
	}
}

// generateSummaryRaw writes one overall statistics row per element to
// writer, computed over the raw data (outliers included).
func generateSummaryRaw(writer *csv.Writer, data []*Record, elements []string) {
	// One row per element.
	for _, elem := range elements {
		// Collect every value, outliers included.
		values := make([]float64, 0)
		for _, r := range data {
			if val, exists := r.Measures[elem]; exists {
				values = append(values, val)
			}
		}

		// Compute and emit the statistics row.
		if len(values) > 0 {
			stats := calculateStats(values)
			quantiles := calculateQuantiles(stats.Sorted)

			// Human-readable distribution label.
			distType := getDistributionString(stats.Distribution)

			writer.Write([]string{
				elem,
				"原始数据总体统计", // 不进行分类，使用原始数据
				fmt.Sprintf("%d", len(values)),
				fmt.Sprintf("%.4f", quantiles[0]),
				fmt.Sprintf("%.4f", quantiles[2.5]),
				fmt.Sprintf("%.4f", quantiles[5]),
				fmt.Sprintf("%.4f", quantiles[10]),
				fmt.Sprintf("%.4f", quantiles[25]),
				fmt.Sprintf("%.4f", quantiles[50]),
				fmt.Sprintf("%.4f", quantiles[75]),
				fmt.Sprintf("%.4f", quantiles[90]),
				fmt.Sprintf("%.4f", quantiles[95]),
				fmt.Sprintf("%.4f", quantiles[97.5]),
				fmt.Sprintf("%.4f", quantiles[100]),
				fmt.Sprintf("%.4f", stats.Mean),
				fmt.Sprintf("%.4f", stats.StdDev),
				fmt.Sprintf("%.4f", stats.GeoMean),
				fmt.Sprintf("%.4f", stats.GeoStdDev),
				distType,
			})
		}
	}
}

// detectSummaryOutliers flags measurements that violate the 3-sigma rule
// against the statistics of the whole data set (no grouping). IsOutlier maps
// are reset and repopulated in place; the same slice is returned for
// convenience.
func detectSummaryOutliers(records []*Record, elements []string) []*Record {
	// Reset all outlier marks before detection.
	for _, rec := range records {
		rec.IsOutlier = make(map[string][]string)
	}

	for _, elem := range elements {
		// Gather every available value for this element.
		var values []float64
		for _, rec := range records {
			if v, ok := rec.Measures[elem]; ok {
				values = append(values, v)
			}
		}

		overall := calculateStats(values)

		// Flag each measurement outside mean ± sigma*stddev of the whole set.
		for _, rec := range records {
			if v, ok := rec.Measures[elem]; ok && isOutlier(v, overall.Mean, overall.StdDev) {
				rec.IsOutlier[elem] = append(rec.IsOutlier[elem], "总体异常")
			}
		}
	}
	return records
}

// generateCategorizedRaw writes per-dimension, per-category descriptive
// statistics to writer using ALL values (outliers included).
func generateCategorizedRaw(writer *csv.Writer, data []*Record, classNames []string, elements []string) {
	// Iterate over every classification dimension.
	for classIdx, className := range classNames {
		// Group records by their category value in this dimension.
		categoryGroups := make(map[string][]*Record)
		for _, r := range data {
			category := r.ClassVals[classIdx]
			categoryGroups[category] = append(categoryGroups[category], r)
		}

		// One output row per (category, element) pair that has data.
		for category, records := range categoryGroups {
			for _, elem := range elements {
				// Collect every available value (outliers included).
				values := make([]float64, 0)
				for _, r := range records {
					if val, exists := r.Measures[elem]; exists {
						values = append(values, val)
					}
				}

				// Compute and emit the statistics row.
				if len(values) > 0 {
					stats := calculateStats(values)
					quantiles := calculateQuantiles(stats.Sorted)

					// Human-readable distribution label.
					distType := getDistributionString(stats.Distribution)

					writer.Write([]string{
						elem,
						fmt.Sprintf("%s-%s", className, category),
						fmt.Sprintf("%d", len(values)),
						fmt.Sprintf("%.4f", quantiles[0]),
						fmt.Sprintf("%.4f", quantiles[2.5]),
						fmt.Sprintf("%.4f", quantiles[5]),
						fmt.Sprintf("%.4f", quantiles[10]),
						fmt.Sprintf("%.4f", quantiles[25]),
						fmt.Sprintf("%.4f", quantiles[50]),
						fmt.Sprintf("%.4f", quantiles[75]),
						fmt.Sprintf("%.4f", quantiles[90]),
						fmt.Sprintf("%.4f", quantiles[95]),
						fmt.Sprintf("%.4f", quantiles[97.5]),
						fmt.Sprintf("%.4f", quantiles[100]),
						fmt.Sprintf("%.4f", stats.Mean),
						fmt.Sprintf("%.4f", stats.StdDev),
						fmt.Sprintf("%.4f", stats.GeoMean),
						fmt.Sprintf("%.4f", stats.GeoStdDev),
						distType,
					})
				}
			}
		}
	}
}

// generateCategorizedClean writes per-dimension, per-category descriptive
// statistics to writer, excluding measurements that were flagged as outliers
// within the current dimension+category group. (In the main pipeline the
// data has already been through filterOutliers, so this filter is a second
// line of defense.)
func generateCategorizedClean(writer *csv.Writer, data []*Record, classNames []string, elements []string) {
	for classIdx, className := range classNames {
		// Group records by category within this classification dimension.
		categoryGroups := make(map[string][]*Record)
		for _, r := range data {
			category := r.ClassVals[classIdx]
			categoryGroups[category] = append(categoryGroups[category], r)
		}

		// One row per (category, element) pair that still has data.
		for category, records := range categoryGroups {
			for _, elem := range elements {
				// Outlier reasons are recorded as "className-category" (see
				// detectOutliers). BUG FIX: the filter used to compare against
				// the bare className, which never matched, so flagged outliers
				// were silently kept in the "clean" statistics.
				reason := className + "-" + category
				values := make([]float64, 0)
				for _, r := range records {
					if val, exists := r.Measures[elem]; exists {
						if reasons, hasOutlier := r.IsOutlier[elem]; !hasOutlier || !contains(reasons, reason) {
							values = append(values, val)
						}
					}
				}

				// Compute and emit the statistics row.
				if len(values) > 0 {
					stats := calculateStats(values)
					quantiles := calculateQuantiles(stats.Sorted)

					writer.Write([]string{
						elem,
						fmt.Sprintf("%s-%s", className, category),
						fmt.Sprintf("%d", len(values)),
						fmt.Sprintf("%.4f", quantiles[0]),
						fmt.Sprintf("%.4f", quantiles[2.5]),
						fmt.Sprintf("%.4f", quantiles[5]),
						fmt.Sprintf("%.4f", quantiles[10]),
						fmt.Sprintf("%.4f", quantiles[25]),
						fmt.Sprintf("%.4f", quantiles[50]),
						fmt.Sprintf("%.4f", quantiles[75]),
						fmt.Sprintf("%.4f", quantiles[90]),
						fmt.Sprintf("%.4f", quantiles[95]),
						fmt.Sprintf("%.4f", quantiles[97.5]),
						fmt.Sprintf("%.4f", quantiles[100]),
						fmt.Sprintf("%.4f", stats.Mean),
						fmt.Sprintf("%.4f", stats.StdDev),
						fmt.Sprintf("%.4f", stats.GeoMean),
						fmt.Sprintf("%.4f", stats.GeoStdDev),
						getDistributionString(stats.Distribution),
					})
				}
			}
		}
	}
}

// generateConfidenceIntervals writes per-dimension, per-category 95%
// confidence intervals (and the interval upper bound as the suggested
// background value) to writer, choosing the interval form from the detected
// distribution of the filtered sample.
func generateConfidenceIntervals(writer *csv.Writer, data []*Record, classNames []string, elements []string) {
	for classIdx, className := range classNames {
		// Group records by category within this classification dimension.
		categoryGroups := make(map[string][]*Record)
		for _, r := range data {
			category := r.ClassVals[classIdx]
			categoryGroups[category] = append(categoryGroups[category], r)
		}

		for category, groupRecords := range categoryGroups {
			for _, elem := range elements {
				// Outlier reasons are recorded as "className-category" (see
				// detectOutliers). BUG FIX: the filter used to compare against
				// the bare className, which never matched, so flagged outliers
				// were silently retained.
				reason := className + "-" + category
				values := make([]float64, 0)
				for _, r := range groupRecords {
					if val, exists := r.Measures[elem]; exists {
						if reasons, hasOutlier := r.IsOutlier[elem]; !hasOutlier || !contains(reasons, reason) {
							values = append(values, val)
						}
					}
				}

				if len(values) > 0 {
					stats := calculateStats(values)

					// Interval form depends on the detected distribution.
					var lower, upper float64
					switch stats.Distribution {
					case Normal:
						// mean ± 1.96 sigma.
						lower = stats.Mean - confidenceLevel*stats.StdDev
						upper = stats.Mean + confidenceLevel*stats.StdDev
					case LogNormal:
						// Multiplicative interval around the geometric mean.
						lower = stats.GeoMean / stats.GeoStdDev
						upper = stats.GeoMean * stats.GeoStdDev
					default:
						// Distribution-free fallback: 2.5% and 97.5% quantiles.
						sort.Float64s(values)
						quantiles := calculateQuantiles(values)
						lower = quantiles[2.5]
						upper = quantiles[97.5]
					}

					writer.Write([]string{
						elem,
						fmt.Sprintf("%s-%s", className, category),
						getDistributionString(stats.Distribution),
						fmt.Sprintf("[%.4f-%.4f]", lower, upper),
						fmt.Sprintf("%.4f", upper), // 使用区间上限作为建议背景值
					})
				}
			}
		}
	}
}

// generateSummaryClean writes one overall (ungrouped) statistics row per
// element to writer. It performs no outlier filtering itself; main passes
// it data already cleaned by filterOutliers.
func generateSummaryClean(writer *csv.Writer, data []*Record, elements []string) {
	// One row per element.
	for _, elem := range elements {
		// Collect every available value (data is pre-filtered by the caller).
		values := make([]float64, 0)
		for _, r := range data {
			if val, exists := r.Measures[elem]; exists {
				values = append(values, val)
			}
		}

		// Compute and emit the statistics row.
		if len(values) > 0 {
			stats := calculateStats(values)
			quantiles := calculateQuantiles(stats.Sorted)

			// Human-readable distribution label.
			distType := getDistributionString(stats.Distribution)

			writer.Write([]string{
				elem,
				"总体统计", // 不进行分类
				fmt.Sprintf("%d", len(values)),
				fmt.Sprintf("%.4f", quantiles[0]),
				fmt.Sprintf("%.4f", quantiles[2.5]),
				fmt.Sprintf("%.4f", quantiles[5]),
				fmt.Sprintf("%.4f", quantiles[10]),
				fmt.Sprintf("%.4f", quantiles[25]),
				fmt.Sprintf("%.4f", quantiles[50]),
				fmt.Sprintf("%.4f", quantiles[75]),
				fmt.Sprintf("%.4f", quantiles[90]),
				fmt.Sprintf("%.4f", quantiles[95]),
				fmt.Sprintf("%.4f", quantiles[97.5]),
				fmt.Sprintf("%.4f", quantiles[100]),
				fmt.Sprintf("%.4f", stats.Mean),
				fmt.Sprintf("%.4f", stats.StdDev),
				fmt.Sprintf("%.4f", stats.GeoMean),
				fmt.Sprintf("%.4f", stats.GeoStdDev),
				distType,
			})
		}
	}
}
