package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"sync"
	"time"
)

// PerformanceMetrics 性能指标
// PerformanceMetrics is one timestamped performance sample recorded by a
// node. It is serialized verbatim into the per-node JSON export.
type PerformanceMetrics struct {
	Timestamp time.Time `json:"timestamp"`  // wall-clock time the sample was recorded
	NodeID    int       `json:"node_id"`    // ID of the node that produced the sample
	TPS       float64   `json:"tps"`        // throughput, transactions per second
	Latency   int64     `json:"latency_ms"` // latency in milliseconds
	Epoch     int       `json:"epoch"`      // protocol epoch the sample belongs to
	BatchSize int       `json:"batch_size"` // number of transactions per batch
	TxSize    int       `json:"tx_size"`    // size of a single transaction (bytes, presumably — TODO confirm with caller)
}

// SimpleLogger 简单日志记录器
// SimpleLogger records performance metrics for a single node: samples are
// appended in memory via RecordMetrics, a background goroutine flushes the
// latest sample to a log file every interval, and the full history can be
// exported as JSON. Safe for concurrent use; mu guards metrics and file writes.
type SimpleLogger struct {
	nodeID   int           // node this logger belongs to; embedded in file names and log lines
	logDir   string        // directory holding the .log and .json output files
	logFile  *os.File      // append-mode text log, one line per periodic flush
	mu       sync.RWMutex  // protects metrics (writers) and log-file output (readers)
	interval time.Duration // period of the background flush goroutine
	stopChan chan struct{} // closed by Stop to terminate the flush goroutine
	metrics  []PerformanceMetrics // full in-memory sample history, append-only
}

// NewSimpleLogger 创建简单日志记录器
func NewSimpleLogger(nodeID int, logDir string, interval time.Duration) (*SimpleLogger, error) {
	// 确保日志目录存在
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create log directory: %v", err)
	}

	// 创建日志文件
	logFile := filepath.Join(logDir, fmt.Sprintf("node_%d_performance.log", nodeID))
	file, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, fmt.Errorf("failed to open log file: %v", err)
	}

	sl := &SimpleLogger{
		nodeID:   nodeID,
		logDir:   logDir,
		logFile:  file,
		interval: interval,
		stopChan: make(chan struct{}),
		metrics:  make([]PerformanceMetrics, 0),
	}

	// 启动定期记录
	go sl.periodicLog()

	return sl, nil
}

// RecordMetrics 记录性能指标
func (sl *SimpleLogger) RecordMetrics(tps float64, latency int64, epoch int, batchSize int, txSize int) {
	sl.mu.Lock()
	defer sl.mu.Unlock()

	metric := PerformanceMetrics{
		Timestamp: time.Now(),
		NodeID:    sl.nodeID,
		TPS:       tps,
		Latency:   latency,
		Epoch:     epoch,
		BatchSize: batchSize,
		TxSize:    txSize,
	}

	sl.metrics = append(sl.metrics, metric)
}

// periodicLog 定期记录日志
func (sl *SimpleLogger) periodicLog() {
	ticker := time.NewTicker(sl.interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			sl.logCurrentMetrics()
		case <-sl.stopChan:
			return
		}
	}
}

// logCurrentMetrics 记录当前指标
func (sl *SimpleLogger) logCurrentMetrics() {
	sl.mu.RLock()
	defer sl.mu.RUnlock()

	if len(sl.metrics) == 0 {
		return
	}

	// 获取最新的指标
	latest := sl.metrics[len(sl.metrics)-1]

	// 写入日志文件
	logEntry := fmt.Sprintf("[%s] Node-%d: TPS=%.2f, Latency=%dms, Epoch=%d, BatchSize=%d, TxSize=%d\n",
		latest.Timestamp.Format("2006-01-02 15:04:05"),
		latest.NodeID,
		latest.TPS,
		latest.Latency,
		latest.Epoch,
		latest.BatchSize,
		latest.TxSize)

	sl.logFile.WriteString(logEntry)
	sl.logFile.Sync()

	// 同时输出到控制台
	fmt.Print(logEntry)
}

// ExportMetrics 导出指标到JSON文件
func (sl *SimpleLogger) ExportMetrics() error {
	sl.mu.RLock()
	defer sl.mu.RUnlock()

	jsonFile := filepath.Join(sl.logDir, fmt.Sprintf("node_%d_metrics.json", sl.nodeID))
	file, err := os.Create(jsonFile)
	if err != nil {
		return fmt.Errorf("failed to create JSON file: %v", err)
	}
	defer file.Close()

	encoder := json.NewEncoder(file)
	encoder.SetIndent("", "  ")
	return encoder.Encode(sl.metrics)
}

// Stop 停止日志记录器
func (sl *SimpleLogger) Stop() {
	close(sl.stopChan)
	sl.logFile.Close()
}

// GetMetrics 获取所有指标
func (sl *SimpleLogger) GetMetrics() []PerformanceMetrics {
	sl.mu.RLock()
	defer sl.mu.RUnlock()
	return sl.metrics
}

// main exercises the logger: it records six simulated samples at ~6 s
// spacing while the 5 s flush loop runs, then stops the logger and exports
// the history to JSON under logger/.
func main() {
	logger, err := NewSimpleLogger(0, "logger", 5*time.Second)
	if err != nil {
		log.Fatalf("Failed to create logger: %v", err)
	}

	fmt.Println("=== EbbFlow性能日志模块测试 ===")
	fmt.Println("每5秒记录一次性能指标...")

	// Simulated performance data: TPS and latency grow per iteration so
	// each periodic flush shows a distinct sample.
	for i := 0; i < 6; i++ {
		tps := 1000.0 + float64(i*100)
		latency := int64(20 + i*5)
		epoch := i + 1
		batchSize := 1000
		txSize := 250

		logger.RecordMetrics(tps, latency, epoch, batchSize, txSize)
		// Sleep slightly longer than the flush interval so every sample
		// gets its own log line.
		time.Sleep(6 * time.Second)
	}

	// Stop the flush loop, then export; don't silently drop export failures.
	logger.Stop()
	if err := logger.ExportMetrics(); err != nil {
		log.Printf("Failed to export metrics: %v", err)
	}

	fmt.Println("测试完成！日志文件保存在 logger/ 目录")
}
