package codec

import (
	"compress/flate"
	"encoding/json"
	"fmt"
	"os"
	"sort"
	"strings"
	"time"
)

// ====================== Metrics 方法实现 ======================

// Metrics returns a snapshot of compression, network, and latency
// statistics as a map keyed by metric name, suitable for logging or
// JSON serialization (see MetricsJSON). The stats lock is held for the
// duration so the snapshot is internally consistent.
func (c *CompressedGobCodec) Metrics() map[string]interface{} {
	c.stats.Lock()
	defer c.stats.Unlock()

	metrics := make(map[string]interface{})

	if c.stats.totalRequests > 0 {
		// Compression ratio: share of would-be written bytes avoided by
		// compression, expressed as a percentage.
		compressionRatio := 0.0
		if c.stats.networkWriteBytes > 0 {
			compressionRatio = float64(c.stats.bytesSaved) / float64(c.stats.networkWriteBytes) * 100
		}

		avgCompressionTime := time.Duration(0)
		if c.stats.compressedRequests > 0 {
			avgCompressionTime = c.stats.compressionTime / time.Duration(c.stats.compressedRequests)
		}

		metrics["compression_ratio_percent"] = fmt.Sprintf("%.2f%%", compressionRatio)
		metrics["bytes_saved"] = c.stats.bytesSaved
		metrics["compressed_requests"] = c.stats.compressedRequests
		metrics["raw_requests"] = c.stats.rawRequests
		metrics["compression_failures"] = c.stats.compressionFailed
		metrics["avg_compression_time"] = avgCompressionTime.String()

		// Distribution of compression decisions by region.
		metrics["compression_decisions"] = map[string]uint64{
			"raw":  c.stats.decisionCount[0],
			"low":  c.stats.decisionCount[1],
			"high": c.stats.decisionCount[2],
		}
	}

	// Network metrics.
	if c.stats.networkWriteBytes > 0 {
		metrics["network_write_bytes"] = c.stats.networkWriteBytes
		metrics["network_read_bytes"] = c.stats.networkReadBytes

		// BUG FIX: guard the division — resetStats clears totalRequests
		// but not networkWriteBytes, so this branch can be entered with
		// totalRequests == 0, which previously panicked (integer divide
		// by zero on time.Duration).
		if c.stats.totalRequests > 0 {
			metrics["avg_write_time"] = (c.stats.networkWriteTime / time.Duration(c.stats.totalRequests)).String()
		}

		// Write bandwidth in MB/s.
		writeDurationSec := c.stats.networkWriteTime.Seconds()
		if writeDurationSec > 0 {
			metrics["write_bandwidth_mbps"] = fmt.Sprintf("%.2f",
				float64(c.stats.networkWriteBytes)/(1024*1024)/writeDurationSec)
		}
	}

	// Latency metrics.
	if c.stats.latencySamples > 0 {
		metrics["last_latency"] = c.stats.lastLatency.String()
		metrics["avg_latency"] = c.stats.avgLatency.String()
		metrics["max_latency"] = c.stats.maxLatency.String()
		metrics["latency_samples"] = c.stats.latencySamples
	}

	// Current policy settings.
	metrics["raw_threshold"] = c.rawThreshold
	metrics["low_comp_threshold"] = c.lowCompThreshold
	metrics["base_comp_level"] = c.compLevel

	return metrics
}

// ====================== PrintCompressionPolicy 方法实现 ======================

// PrintCompressionPolicy prints a human-readable report of the current
// compression policy, dynamic latency adjustments, and accumulated
// performance statistics to stdout.
func (c *CompressedGobCodec) PrintCompressionPolicy() {
	var sb strings.Builder

	// Current settings. NOTE(review): the threshold/level fields are read
	// without synchronization elsewhere in this file too — confirm whether
	// they need their own guard.
	sb.WriteString("=== Compression Policy Settings ===\n")
	sb.WriteString(fmt.Sprintf("  Raw Threshold:       %d bytes (data < this value won't be compressed)\n", c.rawThreshold))
	sb.WriteString(fmt.Sprintf("  Low Comp Threshold:   %d bytes (data < this value uses BestSpeed)\n", c.lowCompThreshold))
	sb.WriteString(fmt.Sprintf("  Base Compression:     %s (%d)\n", compressionLevelName(c.compLevel), c.compLevel))

	// BUG FIX: the latency section below previously read c.stats before
	// the lock was taken (the Lock only came later, before the statistics
	// section). Hold the lock for every c.stats access so the whole report
	// is one consistent snapshot. recommendedLevel does not lock itself,
	// so calling it while holding the lock is safe.
	c.stats.Lock()
	defer c.stats.Unlock()

	// Dynamic network adjustment state.
	sb.WriteString("\n=== Dynamic Network Adjustment ===\n")
	if c.stats.latencySamples == 0 {
		sb.WriteString("  No network latency data collected\n")
	} else {
		sb.WriteString(fmt.Sprintf("  Current Latency:      %s\n", c.stats.lastLatency))
		sb.WriteString(fmt.Sprintf("  Average Latency:      %s\n", c.stats.avgLatency))
		sb.WriteString(fmt.Sprintf("  Max Observed Latency: %s\n", c.stats.maxLatency))

		// Suggested level derived from observed average latency.
		recommendedLevel := c.recommendedLevel()
		sb.WriteString(fmt.Sprintf("  Recommended Level:    %s (based on avg latency)\n",
			compressionLevelName(recommendedLevel)))
	}

	// Performance statistics.
	if c.stats.totalRequests > 0 {
		sb.WriteString("\n=== Performance Statistics ===\n")
		compressionRatio := 0.0
		if c.stats.networkWriteBytes > 0 {
			compressionRatio = float64(c.stats.bytesSaved) / float64(c.stats.networkWriteBytes) * 100
		}

		// Overall totals.
		sb.WriteString(fmt.Sprintf("  Total Requests:       %d\n", c.stats.totalRequests))
		sb.WriteString(fmt.Sprintf("  Compression Ratio:    %.2f%%\n", compressionRatio))
		sb.WriteString(fmt.Sprintf("  Bytes Saved:          %s\n", humanizeBytes(c.stats.bytesSaved)))

		// Decision distribution across raw / low / high regions.
		sb.WriteString("\nDecision Distribution:\n")
		sb.WriteString(fmt.Sprintf("  Raw:    %d (%.1f%%)\n",
			c.stats.decisionCount[0],
			float64(c.stats.decisionCount[0])/float64(c.stats.totalRequests)*100))

		sb.WriteString(fmt.Sprintf("  Low:    %d (%.1f%%)\n",
			c.stats.decisionCount[1],
			float64(c.stats.decisionCount[1])/float64(c.stats.totalRequests)*100))

		sb.WriteString(fmt.Sprintf("  High:   %d (%.1f%%)\n",
			c.stats.decisionCount[2],
			float64(c.stats.decisionCount[2])/float64(c.stats.totalRequests)*100))

		// Compression timing.
		avgCompTime := time.Duration(0)
		if c.stats.compressedRequests > 0 {
			avgCompTime = c.stats.compressionTime / time.Duration(c.stats.compressedRequests)
		}

		sb.WriteString(fmt.Sprintf("\nAverage Compression Time: %s\n", avgCompTime))
		sb.WriteString(fmt.Sprintf("Compression Failures:     %d\n", c.stats.compressionFailed))
	}

	// Network throughput analysis.
	if c.stats.networkWriteTime > 0 {
		bytesPerSec := float64(c.stats.networkWriteBytes) / c.stats.networkWriteTime.Seconds()
		sb.WriteString(fmt.Sprintf("\nNetwork Throughput: %s/s\n", humanizeBytes(uint64(bytesPerSec))))
	}

	fmt.Println(sb.String())
}

// ====================== 辅助函数 ======================

// 压缩级别名称转换
func compressionLevelName(level int) string {
	switch level {
	case flate.NoCompression:
		return "NoCompression"
	case flate.BestSpeed:
		return "BestSpeed"
	case flate.BestCompression:
		return "BestCompression"
	case flate.DefaultCompression:
		return "DefaultCompression"
	default:
		return fmt.Sprintf("CustomLevel%d", level)
	}
}

// 字节数人性化显示
// humanizeBytes renders a byte count using binary (1024-based) units,
// e.g. 1536 -> "1.50 KB".
func humanizeBytes(bytes uint64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	const suffixes = "KMGTPE"
	value := float64(bytes) / unit
	idx := 0
	// Keep scaling down while another whole unit fits; "E" is the largest
	// suffix reachable from a uint64.
	for value >= unit && idx < len(suffixes)-1 {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%.2f %cB", value, suffixes[idx])
}

// 基于平均延迟的推荐压缩级别
func (c *CompressedGobCodec) recommendedLevel() int {
	if c.stats.latencySamples == 0 {
		return flate.DefaultCompression
	}

	avg := c.stats.avgLatency
	switch {
	case avg < 5*time.Millisecond:
		return flate.BestSpeed
	case avg < 20*time.Millisecond:
		return flate.DefaultCompression
	default:
		return flate.BestCompression
	}
}

// 根据当前性能数据调整阈值
func (c *CompressedGobCodec) OptimizeThresholds() {
	c.stats.Lock()
	defer c.stats.Unlock()

	// 没有足够数据时不调整
	if c.stats.totalRequests < 100 {
		return
	}

	// 计算各区域的比例
	total := float64(c.stats.totalRequests)
	rawRatio := float64(c.stats.decisionCount[0]) / total
	lowRatio := float64(c.stats.decisionCount[1]) / total
	highRatio := float64(c.stats.decisionCount[2]) / total

	// 计算各区域的平均压缩收益
	avgBenefit := map[string]float64{
		"raw":  0, // 原始区域无压缩收益
		"low":  c.avgBenefit(1),
		"high": c.avgBenefit(2),
	}

	// 高压缩区域性能分析
	highCompEfficiency := c.highCompEfficiency()

	// 决策优先级矩阵
	decisions := []struct {
		name     string
		ratio    float64
		benefit  float64
		priority float64
	}{
		{"raw", rawRatio, avgBenefit["raw"], rawRatio * 0.8},
		{"low", lowRatio, avgBenefit["low"], lowRatio * avgBenefit["low"]},
		{"high", highRatio, avgBenefit["high"], highRatio * avgBenefit["high"]},
	}

	// 按优先级排序
	sort.Slice(decisions, func(i, j int) bool {
		return decisions[i].priority > decisions[j].priority
	})

	// 优化策略：优先优化高收益区域
	for _, d := range decisions {
		switch d.name {
		case "raw":
			// 如果原始区域比例过高且压缩收益潜力大
			if d.ratio > 0.4 && avgBenefit["low"] > 0.2 {
				newThreshold := min(c.rawThreshold+128, 1024)
				fmt.Printf("[Optimize] Increasing raw threshold from %d to %d (raw ratio %.1f%%, low benefit %.1f%%)\n",
					c.rawThreshold, newThreshold, rawRatio*100, avgBenefit["low"]*100)
				c.rawThreshold = newThreshold
				c.resetStats()
				return
			}

		case "low":
			// 如果低压缩区域比例高但高压缩区域收益明显更高
			if d.ratio > 0.5 && avgBenefit["high"] > avgBenefit["low"]*1.5 {
				newThreshold := min(c.lowCompThreshold-1024, 4096)
				fmt.Printf("[Optimize] Decreasing low threshold from %d to %d (low ratio %.1f%%, high benefit %.1f%%)\n",
					c.lowCompThreshold, newThreshold, lowRatio*100, avgBenefit["high"]*100)
				c.lowCompThreshold = newThreshold
				c.resetStats()
				return
			}

		case "high":
			// 高压缩区域优化逻辑
			c.optimizeHighCompression(highRatio, highCompEfficiency)
			return
		}
	}

	// 默认：高压缩区域无显著问题则尝试提升整体压缩率
	if avgBenefit["high"] < 0.25 && highRatio < 0.3 {
		fmt.Println("[Optimize] Moderate compression increase due to underutilized high compression")
		c.compLevel = min(c.compLevel+1, flate.BestCompression)
		c.resetStats()
	}
}

// 高压缩区域专属优化
func (c *CompressedGobCodec) optimizeHighCompression(highRatio float64, efficiency float64) {
	// 情况1：高压缩比例过低的优化
	if highRatio < 0.2 {
		// 降低lowCompThreshold让更多数据进入高压缩
		newThreshold := max(c.lowCompThreshold-2048, 4096)
		fmt.Printf("[Optimize] Decreasing low threshold from %d to %d to boost high compression (current ratio %.1f%%)\n",
			c.lowCompThreshold, newThreshold, highRatio*100)
		c.lowCompThreshold = newThreshold
		c.resetStats()
		return
	}

	// 情况2：高压缩比例适中但效率不高
	if highRatio > 0.3 && efficiency < 0.6 {
		if c.compLevel < flate.BestCompression {
			// 尝试提高压缩级别
			newLevel := min(c.compLevel+1, flate.BestCompression)
			fmt.Printf("[Optimize] Increasing compression level from %d to %d (efficiency %.1f%%)\n",
				c.compLevel, newLevel, efficiency*100)
			c.compLevel = newLevel
		} else {
			// 增加原始阈值避免小数据进入高压缩
			newRawThreshold := min(c.rawThreshold+256, 1024)
			fmt.Printf("[Optimize] Increasing raw threshold from %d to %d to reduce small packets in high compression\n",
				c.rawThreshold, newRawThreshold)
			c.rawThreshold = newRawThreshold
		}
		c.resetStats()
		return
	}

	// 情况3：高压缩比例过高
	if highRatio > 0.6 {
		// 如果效率高，可以接受高比例
		if efficiency > 0.75 {
			fmt.Println("[Optimize] High compression ratio (%.1f%%) with good efficiency (%.1f%%) - no change needed",
				highRatio*100, efficiency*100)
			return
		}

		// 效率不佳则调整
		if c.compLevel > flate.DefaultCompression {
			newLevel := max(c.compLevel-1, flate.BestSpeed)
			fmt.Printf("[Optimize] Decreasing compression level from %d to %d due to low efficiency at high ratio\n",
				c.compLevel, newLevel)
			c.compLevel = newLevel
		} else {
			newHighThreshold := min(c.lowCompThreshold+2048, 16384)
			fmt.Printf("[Optimize] Increasing low threshold from %d to %d to reduce high compression load\n",
				c.lowCompThreshold, newHighThreshold)
			c.lowCompThreshold = newHighThreshold
		}
		c.resetStats()
		return
	}
}

// 计算高压缩区域的平均收益
func (c *CompressedGobCodec) avgBenefit(region int) float64 {
	if region == 0 || c.stats.totalRequests == 0 {
		return 0
	}

	// 根据决策区域筛选统计
	//var totalBytesSaved, totalSize uint64

	switch region {
	case 1: // low区域
		// 实际实现应使用真实数据
		return 0.3 // 30%收益假设
	case 2: // high区域
		return 0.5 // 50%收益假设
	}
	return 0
}

// 计算高压缩区域的综合效率 (0-1)
func (c *CompressedGobCodec) highCompEfficiency() float64 {
	c.stats.Lock()
	defer c.stats.Unlock()

	if c.stats.compressedRequests == 0 {
		return 0
	}

	// 计算三方面效率的加权平均值
	compressionRatio := float64(c.stats.bytesSaved) / float64(c.stats.networkWriteBytes)
	timeEfficiency := 1 - float64(c.stats.compressionTime)/float64(c.stats.totalProcessingTime)
	cpuEfficiency := 1 - float64(c.stats.cpuUsage)/100

	// 加权系数可根据实际场景调整
	return compressionRatio*0.6 + timeEfficiency*0.3 + cpuEfficiency*0.1
}

// 高压缩区域详细统计（扩展stats结构）
type compressionStats struct {
	// 区域详细统计
	regionStats [3]struct {
		count          uint64
		totalSize      uint64
		compressedSize uint64
		processingTime time.Duration
		cpuUsage       float64 // %
	}
}

// 重置统计计数器
func (c *CompressedGobCodec) resetStats() {
	c.stats.Lock()
	defer c.stats.Unlock()

	c.stats.totalRequests = 0
	c.stats.compressedRequests = 0
	c.stats.rawRequests = 0
	c.stats.bytesSaved = 0
	c.stats.compressionTime = 0
	c.stats.compressionFailed = 0
	c.stats.decisionCount = [3]uint64{0, 0, 0}
}

// 以JSON格式输出统计信息
func (c *CompressedGobCodec) MetricsJSON() string {
	metrics := c.Metrics()

	// 添加时间戳和其他元数据
	metrics["timestamp"] = time.Now().Format(time.RFC3339)
	metrics["component"] = "CompressedGobCodec"

	jsonData, err := json.MarshalIndent(metrics, "", "  ")
	if err != nil {
		return fmt.Sprintf(`{"error": "%v"}`, err)
	}
	return string(jsonData)
}

// 导出性能报告
func (c *CompressedGobCodec) ExportReport(filePath string) error {
	report := struct {
		Metrics    map[string]interface{} `json:"metrics"`
		PolicyInfo map[string]interface{} `json:"policy"`
	}{
		Metrics: c.Metrics(),
		PolicyInfo: map[string]interface{}{
			"raw_threshold":      c.rawThreshold,
			"low_comp_threshold": c.lowCompThreshold,
			"base_comp_level":    c.compLevel,
		},
	}

	jsonData, err := json.MarshalIndent(report, "", "  ")
	if err != nil {
		return err
	}

	return os.WriteFile(filePath, jsonData, 0644)
}

// func (c *CompressedGobCodec) AutoOptimize() {
// 	// 0. 检查是否达到优化间隔
// 	if time.Since(c.lastOptimize) < 5*time.Minute {
// 		return
// 	}
// 	c.lastOptimize = time.Now()

// 	// 1. 获取实时高压缩区域分析
// 	hcRatio := float64(c.stats.regionStats[2].count) / float64(c.stats.totalRequests)
// 	hcEfficiency := c.highCompEfficiency()

// 	// 2. 决策树
// 	switch {
// 	case hcRatio < 0.2 && hcEfficiency > 0.6:
// 		// 潜力区：增加高压缩覆盖率
// 		c.lowCompThreshold = max(c.lowCompThreshold-1024, 4096)
// 		log.Printf("AutoOptimize: Expanding high compression coverage (threshold %d)", c.lowCompThreshold)

// 	case hcRatio > 0.6 && hcEfficiency < 0.6:
// 		// 超负荷：减轻压力
// 		if c.compLevel > flate.DefaultCompression {
// 			c.compLevel--
// 		} else {
// 			c.lowCompThreshold += 1024
// 		}
// 		log.Printf("AutoOptimize: Reducing high compression load (new level %d, threshold %d)",
// 			c.compLevel, c.lowCompThreshold)

// 	case hcRatio > 0.4 && hcEfficiency < 0.7:
// 		// 调整压缩级别精度
// 		newLevel := c.calculateOptimalLevel()
// 		if abs(newLevel-c.compLevel) >= 1 {
// 			c.compLevel = newLevel
// 			log.Printf("AutoOptimize: Adjusting compression level to %d", c.compLevel)
// 		}

// 	default:
// 		// 平稳状态下的微优化
// 		c.OptimizeThresholds()
// 	}

// 	// 重置统计开始新周期
// 	c.resetStats()
// }

// // 计算最优压缩级别
// func (c *CompressedGobCodec) calculateOptimalLevel() int {
// 	// 基于延迟和CPU使用率计算
// 	latencyWeight := min(c.stats.avgLatency.Milliseconds()/5, 10) // 0-10分
// 	cpuWeight := min(float64(c.stats.cpuUsage)/10, 10)            // 0-10分

// 	// 基础级别偏移
// 	levelOffset := (latencyWeight + cpuWeight) / 5
// 	baseLevel := flate.DefaultCompression

// 	return min(baseLevel+int(levelOffset), flate.BestCompression)
// }
