package logger

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"time"
)

// PerformanceMetrics is one per-epoch measurement snapshot. Each instance is
// serialized as a single JSON line in the node's log file (see writeToFile).
type PerformanceMetrics struct {
	Timestamp   time.Time `json:"timestamp"`      // wall-clock time the epoch was logged
	NodeID      uint64    `json:"node_id"`        // node this measurement belongs to
	Epoch       uint64    `json:"epoch"`          // consensus epoch number
	TPS         float64   `json:"tps"`            // cumulative transactions/sec since logger start
	Latency     float64   `json:"latency_ms"`     // epoch latency, milliseconds
	BatchSize   int       `json:"batch_size"`     // transactions committed in this epoch
	TotalTX     uint64    `json:"total_transactions"` // running total across all epochs
	NetworkDelay float64  `json:"network_delay_ms"`
	CryptoTime  float64   `json:"crypto_time_ms"`
	ConsensusTime float64 `json:"consensus_time_ms"`
	// Per-protocol-stage delay breakdown (milliseconds).
	// NOTE(review): these fields are never populated by any code visible in
	// this file — presumably filled in by callers elsewhere; confirm.
	RBCDelay    float64   `json:"rbc_delay_ms"`
	ABADelay    float64   `json:"aba_delay_ms"`
	CommonCoinDelay float64 `json:"common_coin_delay_ms"`
	TPKEDelay   float64   `json:"tpke_delay_ms"`
	TBLSDelay   float64   `json:"tbls_delay_ms"`
}

// PerformanceLogger records per-epoch performance metrics for a single node,
// keeping them in memory and appending each one as a JSON line to a log file.
// All exported methods are safe for concurrent use via the internal lock.
type PerformanceLogger struct {
	nodeID     uint64
	logDir     string
	logFile    *os.File              // JSON-lines output; one line per epoch
	metrics    []PerformanceMetrics  // in-memory history, guarded by lock
	lock       sync.RWMutex
	startTime  time.Time             // baseline for cumulative TPS
	lastEpoch  uint64                // most recently logged epoch
	lastTX     uint64                // running transaction total
}

// NewPerformanceLogger creates a logger for the given node, creating logDir
// if needed and opening a timestamped append-only log file inside it
// (e.g. node_3_2006-01-02_15-04-05.log). Errors are wrapped with %w so
// callers can inspect the underlying os error via errors.Is/errors.As.
func NewPerformanceLogger(nodeID uint64, logDir string) (*PerformanceLogger, error) {
	// Ensure the log directory exists.
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return nil, fmt.Errorf("creating log directory %q: %w", logDir, err)
	}

	// One file per node per run, named with the start timestamp.
	timestamp := time.Now().Format("2006-01-02_15-04-05")
	logPath := filepath.Join(logDir, fmt.Sprintf("node_%d_%s.log", nodeID, timestamp))

	file, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, fmt.Errorf("creating log file %q: %w", logPath, err)
	}

	return &PerformanceLogger{
		nodeID:    nodeID,
		logDir:    logDir,
		logFile:   file,
		metrics:   make([]PerformanceMetrics, 0),
		startTime: time.Now(),
	}, nil
}

// LogEpoch records one completed epoch: it updates running totals, computes
// cumulative TPS, appends the metric to the in-memory history, and writes it
// to the log file. Every 30 recorded epochs it also prints a summary.
func (pl *PerformanceLogger) LogEpoch(epoch uint64, batchSize int, latency time.Duration, networkDelay, cryptoTime, consensusTime time.Duration) {
	pl.lock.Lock()
	defer pl.lock.Unlock()

	now := time.Now()
	totalTX := pl.lastTX + uint64(batchSize)

	// Cumulative TPS since the logger started. Guard against a zero elapsed
	// time (the first epoch can land in the same clock tick as startTime),
	// which would otherwise produce +Inf/NaN here.
	var tps float64
	if elapsed := now.Sub(pl.startTime).Seconds(); elapsed > 0 {
		tps = float64(totalTX) / elapsed
	}

	// Convert durations to milliseconds for the JSON record.
	latencyMs := float64(latency.Nanoseconds()) / 1e6

	metric := PerformanceMetrics{
		Timestamp:     now,
		NodeID:        pl.nodeID,
		Epoch:         epoch,
		TPS:           tps,
		Latency:       latencyMs,
		BatchSize:     batchSize,
		TotalTX:       totalTX,
		NetworkDelay:  float64(networkDelay.Nanoseconds()) / 1e6,
		CryptoTime:    float64(cryptoTime.Nanoseconds()) / 1e6,
		ConsensusTime: float64(consensusTime.Nanoseconds()) / 1e6,
	}

	pl.metrics = append(pl.metrics, metric)
	pl.lastEpoch = epoch
	pl.lastTX = totalTX

	// Persist the metric as one JSON line.
	pl.writeToFile(metric)

	// Print a summary every 30 recorded epochs (not every 30 seconds).
	if len(pl.metrics) > 0 && len(pl.metrics)%30 == 0 {
		pl.printSummary()
	}
}

// writeToFile appends metric to the log file as a single JSON line.
// Failures are reported on stdout rather than returned; the caller is
// expected to hold pl.lock.
func (pl *PerformanceLogger) writeToFile(metric PerformanceMetrics) {
	encoded, err := json.Marshal(metric)
	if err != nil {
		fmt.Printf("Error marshaling metrics: %v\n", err)
		return
	}

	line := append(encoded, '\n')
	if _, writeErr := pl.logFile.Write(line); writeErr != nil {
		fmt.Printf("Error writing to log file: %v\n", writeErr)
	}
}

// printSummary prints averages over (at most) the last 30 recorded epochs
// to stdout. The caller is expected to hold pl.lock.
func (pl *PerformanceLogger) printSummary() {
	total := len(pl.metrics)
	if total == 0 {
		return
	}

	// Window over the most recent 30 entries, or everything if fewer exist.
	lo := total - 30
	if lo < 0 {
		lo = 0
	}
	window := pl.metrics[lo:]

	// Accumulate sums; division happens once at the print site.
	var sumTPS, sumLatency, sumNet, sumCrypto, sumConsensus float64
	for i := range window {
		sumTPS += window[i].TPS
		sumLatency += window[i].Latency
		sumNet += window[i].NetworkDelay
		sumCrypto += window[i].CryptoTime
		sumConsensus += window[i].ConsensusTime
	}

	size := float64(len(window))
	latest := window[len(window)-1]

	fmt.Printf("\n=== Node %d Performance Summary (Last 30 epochs) ===\n", pl.nodeID)
	fmt.Printf("Timestamp: %s\n", latest.Timestamp.Format("2006-01-02 15:04:05"))
	fmt.Printf("Current Epoch: %d\n", latest.Epoch)
	fmt.Printf("Average TPS: %.2f TX/s\n", sumTPS/size)
	fmt.Printf("Average Latency: %.2f ms\n", sumLatency/size)
	fmt.Printf("Total Transactions: %d\n", latest.TotalTX)
	fmt.Printf("Network Delay: %.2f ms\n", sumNet/size)
	fmt.Printf("Crypto Time: %.2f ms\n", sumCrypto/size)
	fmt.Printf("Consensus Time: %.2f ms\n", sumConsensus/size)
	fmt.Printf("==========================================\n")
}

// GetMetrics returns a snapshot copy of all recorded metrics, so callers
// cannot observe or interfere with subsequent appends. The result is never
// nil (an empty, non-nil slice is returned when nothing has been logged).
func (pl *PerformanceLogger) GetMetrics() []PerformanceMetrics {
	pl.lock.RLock()
	defer pl.lock.RUnlock()

	snapshot := make([]PerformanceMetrics, 0, len(pl.metrics))
	return append(snapshot, pl.metrics...)
}

// GetLatestMetrics returns a copy of the most recently recorded metric,
// or nil if nothing has been logged yet.
func (pl *PerformanceLogger) GetLatestMetrics() *PerformanceMetrics {
	pl.lock.RLock()
	defer pl.lock.RUnlock()

	if len(pl.metrics) == 0 {
		return nil
	}

	// Return a pointer to a copy, not into pl.metrics: an interior pointer
	// would escape the lock and alias storage that LogEpoch keeps growing,
	// so callers could read stale or unsynchronized data.
	latest := pl.metrics[len(pl.metrics)-1]
	return &latest
}

// Close releases the underlying log file, if one was ever opened.
func (pl *PerformanceLogger) Close() error {
	if pl.logFile == nil {
		return nil
	}
	return pl.logFile.Close()
}

// 全局性能统计
// GlobalStats aggregates per-epoch metrics into a single summary
// (total TPS, mean latency, and transaction count across the given metrics).
type GlobalStats struct {
	TotalNodes    int     `json:"total_nodes"`
	TotalTPS      float64 `json:"total_tps"`
	AverageLatency float64 `json:"average_latency"`
	TotalTX       uint64  `json:"total_transactions"`
	Timestamp     time.Time `json:"timestamp"`
}

// GetGlobalStats aggregates the given metrics into a GlobalStats summary:
// TPS and transaction counts are summed, latency is averaged. An empty
// slice yields zero values (previously the latency average was 0/0 = NaN,
// which also breaks JSON encoding of the result).
func (pl *PerformanceLogger) GetGlobalStats(nodeMetrics []PerformanceMetrics) GlobalStats {
	var totalTPS, totalLatency float64
	var totalTX uint64

	for _, metric := range nodeMetrics {
		totalTPS += metric.TPS
		totalLatency += metric.Latency
		totalTX += metric.TotalTX
	}

	// Guard the division: len == 0 would produce NaN.
	avgLatency := 0.0
	if len(nodeMetrics) > 0 {
		avgLatency = totalLatency / float64(len(nodeMetrics))
	}

	return GlobalStats{
		TotalNodes:    1, // single node
		TotalTPS:      totalTPS,
		AverageLatency: avgLatency,
		TotalTX:       totalTX,
		Timestamp:     time.Now(),
	}
}
