package logger

import (
	"encoding/csv"
	"fmt"
	"os"
	"sync"
	"time"
)

// PerformanceMetrics represents performance metrics for a single epoch
// PerformanceMetrics represents performance metrics for a single epoch.
// Latencies are in seconds; component delays are in milliseconds.
type PerformanceMetrics struct {
	Timestamp     time.Time // wall-clock time the epoch was logged
	NodeID        int       // ID of the node that produced this record
	Epoch         int       // epoch number the record describes
	TotalTPS      float64   // cumulative TPS since the logger started
	EpochTPS      float64   // TPS for this epoch only
	TotalLatency  float64   // average epoch latency across all epochs so far (seconds)
	EpochLatency  float64   // latency of this epoch (seconds, taken from the ACS delay)
	RBCDelay      float64   // reliable-broadcast delay (ms)
	ABADelay      float64   // asynchronous-binary-agreement delay (ms)
	MVBADelay     float64   // multi-valued-BA delay (ms)
	ACSDelay      float64   // agreement-on-common-subset delay (ms)
	DeliveredTxs  int       // transactions delivered in this epoch
	DeliveredSize int64     // delivered transaction payload size in bytes
}

// PerformanceLogger logs performance metrics
// PerformanceLogger logs performance metrics, keeping an in-memory history
// and mirroring each epoch record to a per-node CSV file.
// All mutable fields are guarded by mutex.
type PerformanceLogger struct {
	nodeID       int                  // this node's ID, repeated in every record
	metrics      []PerformanceMetrics // one entry per logged epoch
	mutex        sync.RWMutex         // guards all fields below and metrics above
	logFile      *os.File             // backing CSV file
	csvWriter    *csv.Writer          // buffered writer over logFile
	startTime    time.Time            // logger creation time; basis for total TPS
	totalTxs     int                  // cumulative delivered transactions
	totalSize    int64                // cumulative delivered bytes
	lastLogTime  time.Time            // time of the previous LogEpochMetrics call
	batchSize    int // batch size (B) for TPS calculation
	N            int // total number of nodes
	F            int // number of faulty nodes
}

// NewPerformanceLogger creates a new performance logger
func NewPerformanceLogger(nodeID int, logDir string) (*PerformanceLogger, error) {
	// Create log directory if it doesn't exist
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return nil, err
	}

	// Create log file
	logFile, err := os.Create(fmt.Sprintf("%s/node_%d_performance.csv", logDir, nodeID))
	if err != nil {
		return nil, err
	}

	csvWriter := csv.NewWriter(logFile)
	
	// Write CSV header
	header := []string{
		"timestamp", "node_id", "epoch", "total_tps", "epoch_tps", 
		"total_latency", "epoch_latency", "rbc_delay_ms", "aba_delay_ms", 
		"mvba_delay_ms", "acs_delay_ms", "delivered_txs", "delivered_size_bytes",
	}
	csvWriter.Write(header)
	csvWriter.Flush()

	return &PerformanceLogger{
		nodeID:      nodeID,
		metrics:     make([]PerformanceMetrics, 0),
		logFile:     logFile,
		csvWriter:   csvWriter,
		startTime:   time.Now(),
		lastLogTime: time.Now(),
		batchSize:   0, // will be set via SetParameters
		N:           0,
		F:           0,
	}, nil
}

// SetParameters sets the consensus parameters for TPS calculation
func (pl *PerformanceLogger) SetParameters(batchSize, N, F int) {
	pl.mutex.Lock()
	defer pl.mutex.Unlock()
	pl.batchSize = batchSize
	pl.N = N
	pl.F = F
}

// LogEpochMetrics logs metrics for a completed epoch
func (pl *PerformanceLogger) LogEpochMetrics(epoch int, epochTxs int, epochSize int64, 
	delays map[string]time.Duration) {
	pl.mutex.Lock()
	defer pl.mutex.Unlock()

	now := time.Now()
	elapsed := now.Sub(pl.startTime).Seconds()
	
	// Update totals
	pl.totalTxs += epochTxs
	pl.totalSize += epochSize
	
	// Calculate TPS according to Dumbo-NG paper:
	// TPS = B * (N-F) / L_epoch
	// where B = batch size, N = total nodes, F = faulty nodes, L_epoch = epoch latency
	epochLatency := delays["ACS"].Seconds()
	
	// Epoch TPS: TPS for this single epoch
	var epochTPS float64
	if epochLatency > 0 && pl.batchSize > 0 && pl.N > 0 {
		// TPS = B * (N-F) / L_epoch
		epochTPS = float64(pl.batchSize * (pl.N - pl.F)) / epochLatency
	} else {
		// Fallback: use actual delivered transactions / time since last log
		timeSinceLastLog := now.Sub(pl.lastLogTime).Seconds()
		if timeSinceLastLog > 0 {
			epochTPS = float64(epochTxs) / timeSinceLastLog
		}
	}
	
	// Total TPS: average TPS across all epochs
	var totalTPS float64
	if elapsed > 0 {
		totalTPS = float64(pl.totalTxs) / elapsed
	}
	
	// Create metrics record
	metrics := PerformanceMetrics{
		Timestamp:     now,
		NodeID:        pl.nodeID,
		Epoch:         epoch,
		TotalTPS:      totalTPS,
		EpochTPS:      epochTPS,
		TotalLatency:  0, // will be calculated after storing
		EpochLatency:  epochLatency,
		RBCDelay:      float64(delays["RBC"].Milliseconds()),
		ABADelay:      float64(delays["ABA"].Milliseconds()),
		MVBADelay:     float64(delays["MVBA"].Milliseconds()),
		ACSDelay:      float64(delays["ACS"].Milliseconds()),
		DeliveredTxs:  epochTxs,
		DeliveredSize: epochSize,
	}
	
	// Store metrics
	pl.metrics = append(pl.metrics, metrics)
	
	// Calculate total latency (average of all epochs including this one)
	totalLatency := pl.calculateAverageEpochLatencyInternal()
	metrics.TotalLatency = totalLatency
	// Update the stored metrics
	pl.metrics[len(pl.metrics)-1].TotalLatency = totalLatency
	
	// Write to CSV
	record := []string{
		now.Format("2006-01-02 15:04:05.000"),
		fmt.Sprintf("%d", pl.nodeID),
		fmt.Sprintf("%d", epoch),
		fmt.Sprintf("%.2f", totalTPS),
		fmt.Sprintf("%.2f", epochTPS),
		fmt.Sprintf("%.3f", totalLatency),
		fmt.Sprintf("%.3f", epochLatency),
		fmt.Sprintf("%.2f", float64(delays["RBC"].Milliseconds())),
		fmt.Sprintf("%.2f", float64(delays["ABA"].Milliseconds())),
		fmt.Sprintf("%.2f", float64(delays["MVBA"].Milliseconds())),
		fmt.Sprintf("%.2f", float64(delays["ACS"].Milliseconds())),
		fmt.Sprintf("%d", epochTxs),
		fmt.Sprintf("%d", epochSize),
	}
	
	pl.csvWriter.Write(record)
	pl.csvWriter.Flush()
	
	pl.lastLogTime = now
	
	// Log to console
	fmt.Printf("[Node %d] Epoch %d: TPS=%.2f, EpochTPS=%.2f, Latency=%.3fs, Txs=%d\n",
		pl.nodeID, epoch, totalTPS, epochTPS, epochLatency, epochTxs)
}

// GetTotalMetrics returns total performance metrics
func (pl *PerformanceLogger) GetTotalMetrics() map[string]interface{} {
	pl.mutex.RLock()
	defer pl.mutex.RUnlock()
	
	elapsed := time.Since(pl.startTime).Seconds()
	
	return map[string]interface{}{
		"node_id":         pl.nodeID,
		"total_tps":       float64(pl.totalTxs) / elapsed,
		"total_txs":       pl.totalTxs,
		"total_size":      pl.totalSize,
		"total_epochs":    len(pl.metrics),
		"uptime_seconds":  elapsed,
		"avg_epoch_tps":   pl.calculateAverageEpochTPS(),
		"avg_epoch_latency": pl.calculateAverageEpochLatency(),
	}
}

// calculateAverageEpochTPS calculates average TPS per epoch
func (pl *PerformanceLogger) calculateAverageEpochTPS() float64 {
	if len(pl.metrics) == 0 {
		return 0
	}
	
	total := 0.0
	for _, m := range pl.metrics {
		total += m.EpochTPS
	}
	return total / float64(len(pl.metrics))
}

// calculateAverageEpochLatencyInternal calculates average latency per epoch (requires mutex held)
func (pl *PerformanceLogger) calculateAverageEpochLatencyInternal() float64 {
	if len(pl.metrics) == 0 {
		return 0
	}
	
	total := 0.0
	for _, m := range pl.metrics {
		total += m.EpochLatency
	}
	return total / float64(len(pl.metrics))
}

// calculateAverageEpochLatency calculates average latency per epoch (with mutex)
func (pl *PerformanceLogger) calculateAverageEpochLatency() float64 {
	if len(pl.metrics) == 0 {
		return 0
	}
	
	total := 0.0
	for _, m := range pl.metrics {
		total += m.EpochLatency
	}
	return total / float64(len(pl.metrics))
}

// GetComponentDelays returns average delays for each component
func (pl *PerformanceLogger) GetComponentDelays() map[string]float64 {
	pl.mutex.RLock()
	defer pl.mutex.RUnlock()
	
	if len(pl.metrics) == 0 {
		return map[string]float64{
			"RBC":  0,
			"ABA":  0,
			"MVBA": 0,
			"ACS":  0,
		}
	}
	
	rbcTotal := 0.0
	abaTotal := 0.0
	mvbaTotal := 0.0
	acsTotal := 0.0
	
	for _, m := range pl.metrics {
		rbcTotal += m.RBCDelay
		abaTotal += m.ABADelay
		mvbaTotal += m.MVBADelay
		acsTotal += m.ACSDelay
	}
	
	count := float64(len(pl.metrics))
	
	return map[string]float64{
		"RBC":  rbcTotal / count,
		"ABA":  abaTotal / count,
		"MVBA": mvbaTotal / count,
		"ACS":  acsTotal / count,
	}
}

// Close closes the logger and flushes remaining data
func (pl *PerformanceLogger) Close() error {
	pl.mutex.Lock()
	defer pl.mutex.Unlock()
	
	pl.csvWriter.Flush()
	return pl.logFile.Close()
}

// LogSummary logs a summary of all metrics
func (pl *PerformanceLogger) LogSummary() {
	pl.mutex.RLock()
	defer pl.mutex.RUnlock()
	
	totalMetrics := pl.GetTotalMetrics()
	componentDelays := pl.GetComponentDelays()
	
	fmt.Printf("\n=== Node %d Performance Summary ===\n", pl.nodeID)
	fmt.Printf("Total TPS: %.2f\n", totalMetrics["total_tps"])
	fmt.Printf("Total Transactions: %d\n", totalMetrics["total_txs"])
	fmt.Printf("Total Epochs: %d\n", totalMetrics["total_epochs"])
	fmt.Printf("Average Epoch TPS: %.2f\n", totalMetrics["avg_epoch_tps"])
	fmt.Printf("Average Epoch Latency: %.3f seconds\n", totalMetrics["avg_epoch_latency"])
	fmt.Printf("Component Delays (ms):\n")
	fmt.Printf("  RBC: %.2f\n", componentDelays["RBC"])
	fmt.Printf("  ABA: %.2f\n", componentDelays["ABA"])
	fmt.Printf("  MVBA: %.2f\n", componentDelays["MVBA"])
	fmt.Printf("  ACS: %.2f\n", componentDelays["ACS"])
	fmt.Printf("=====================================\n")
}
