package turritopsis

import (
	"encoding/csv"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"sync"
	"time"

	"ebbflow/pkg/adkr"
)




// Metrics collects wall-clock timing, per-epoch records, message counts,
// and batch-size statistics for a run. The recording methods (Begin,
// EndNow, RecordEpoch*) synchronize on mu, so a single Metrics value may
// be shared by concurrent recorders.
type Metrics struct {
    Start     time.Time `json:"start"`
    End       time.Time `json:"end"`
    DurationS float64   `json:"duration_s"`
    Epochs    int       `json:"epochs"`
    // Optional: batches per epoch (length==Epochs)
    Batches   []int     `json:"batches,omitempty"`
    // Optional: per-epoch extended records
    EpochsExt []EpochRecord `json:"epochs_ext,omitempty"`
    // Detailed phase timing, aggregated by computeAggregatedStats.
    // NOTE(review): omitempty has no effect on struct-typed fields.
    PhaseTimings PhaseTimings `json:"phase_timings,omitempty"`
    // Message statistics. Tag fixed from the garbled
    // "ebbflow/src/message_stats" (leaked path fragment) to a plain key.
    MessageStats MessageStats `json:"message_stats,omitempty"`
    // Batch size statistics
    BatchStats BatchStats `json:"batch_stats,omitempty"`
    // mu guards all fields above; unexported, so encoding/json skips it.
    mu sync.RWMutex
}

// Begin stamps Start with the current wall-clock time under the write lock.
func (m *Metrics) Begin() {
    m.mu.Lock()
    m.Start = time.Now()
    m.mu.Unlock()
}

// EndNow stamps End, derives DurationS as the elapsed seconds since Start,
// and folds the per-epoch records into the aggregated statistics.
func (m *Metrics) EndNow() {
    m.mu.Lock()
    defer m.mu.Unlock()

    now := time.Now()
    m.End = now
    m.DurationS = now.Sub(m.Start).Seconds()
    m.computeAggregatedStats()
}

// RecordEpochPhase records the start/end timestamps and derived duration
// (milliseconds) of one named phase ("acss", "mvba", "keygen", "verify")
// for the given epoch. Unknown phase names and negative epoch indices are
// ignored. EpochsExt is grown as needed so the epoch slot exists.
// Safe for concurrent use.
func (m *Metrics) RecordEpochPhase(epoch int, phase string, start, end time.Time) {
    if epoch < 0 {
        return // a negative index would panic on the slice access below
    }
    m.mu.Lock()
    defer m.mu.Unlock()

    // Grow EpochsExt so EpochsExt[epoch] is addressable.
    for len(m.EpochsExt) <= epoch {
        m.EpochsExt = append(m.EpochsExt, EpochRecord{Epoch: len(m.EpochsExt)})
    }

    pd := &m.EpochsExt[epoch].PhaseDetails
    duration := end.Sub(start).Milliseconds()

    switch phase {
    case "acss":
        pd.ACSSStart, pd.ACSSEnd, pd.ACSSDuration = start, end, duration
    case "mvba":
        pd.MVAStart, pd.MVAEnd, pd.MVADuration = start, end, duration
    case "keygen":
        pd.KeyGenStart, pd.KeyGenEnd, pd.KeyGenDuration = start, end, duration
    case "verify":
        pd.VerifyStart, pd.VerifyEnd, pd.VerifyDuration = start, end, duration
    }
}

// RecordEpochMessages stores the message statistics for the given epoch,
// growing EpochsExt as needed so the epoch slot exists. Negative epoch
// indices are ignored (they would otherwise panic on the slice access).
// Safe for concurrent use.
func (m *Metrics) RecordEpochMessages(epoch int, counts MessageCounts) {
    if epoch < 0 {
        return
    }
    m.mu.Lock()
    defer m.mu.Unlock()

    // Grow EpochsExt so EpochsExt[epoch] is addressable.
    for len(m.EpochsExt) <= epoch {
        m.EpochsExt = append(m.EpochsExt, EpochRecord{Epoch: len(m.EpochsExt)})
    }
    m.EpochsExt[epoch].MessageCounts = counts
}

// RecordBatchSize stores the batch size for the given epoch and updates the
// running batch-size distribution. EpochsExt is grown as needed so the
// epoch slot exists; negative epoch indices are ignored (they would
// otherwise panic on the slice access). Safe for concurrent use.
func (m *Metrics) RecordBatchSize(epoch int, batchSize int) {
    if epoch < 0 {
        return
    }
    m.mu.Lock()
    defer m.mu.Unlock()

    // Grow EpochsExt so EpochsExt[epoch] is addressable.
    for len(m.EpochsExt) <= epoch {
        m.EpochsExt = append(m.EpochsExt, EpochRecord{Epoch: len(m.EpochsExt)})
    }

    m.EpochsExt[epoch].Batches = batchSize

    // Lazily initialize the histogram of observed batch sizes.
    if m.BatchStats.BatchSizeDistribution == nil {
        m.BatchStats.BatchSizeDistribution = make(map[int]int)
    }
    m.BatchStats.BatchSizeDistribution[batchSize]++
}

// computeAggregatedStats folds the per-epoch records in EpochsExt into the
// aggregated PhaseTimings, MessageStats, and BatchStats summaries.
//
// Callers must already hold m.mu (EndNow does). The method resets every
// aggregate it owns before accumulating, so invoking it more than once
// (e.g. a repeated EndNow) no longer double-counts totals.
// BatchStats.BatchSizeDistribution is left untouched because it is
// maintained incrementally by RecordBatchSize.
func (m *Metrics) computeAggregatedStats() {
    if len(m.EpochsExt) == 0 {
        return
    }

    const maxInt64 = int64(^uint64(0) >> 1)
    const maxInt = int(^uint(0) >> 1)

    // Reset aggregates; mins start at the max sentinel so the first
    // observed sample always wins.
    m.PhaseTimings = PhaseTimings{
        ACSSMin:   maxInt64,
        MVAMin:    maxInt64,
        KeyGenMin: maxInt64,
        VerifyMin: maxInt64,
    }
    m.MessageStats = MessageStats{}
    m.BatchStats.MinBatchSize = maxInt
    m.BatchStats.MaxBatchSize = 0

    // Sample counts per phase: only durations > 0 contribute to averages.
    var acssN, mvaN, keyGenN, verifyN, krN int
    var totalBatches, totalTransactions int

    for i := range m.EpochsExt {
        ep := &m.EpochsExt[i]
        pd := ep.PhaseDetails

        if d := pd.ACSSDuration; d > 0 {
            acssN++
            m.PhaseTimings.ACSSTotal += d
            if d < m.PhaseTimings.ACSSMin {
                m.PhaseTimings.ACSSMin = d
            }
            if d > m.PhaseTimings.ACSSMax {
                m.PhaseTimings.ACSSMax = d
            }
        }
        if d := pd.MVADuration; d > 0 {
            mvaN++
            m.PhaseTimings.MVATotal += d
            if d < m.PhaseTimings.MVAMin {
                m.PhaseTimings.MVAMin = d
            }
            if d > m.PhaseTimings.MVAMax {
                m.PhaseTimings.MVAMax = d
            }
        }
        if d := pd.KeyGenDuration; d > 0 {
            keyGenN++
            m.PhaseTimings.KeyGenTotal += d
            if d < m.PhaseTimings.KeyGenMin {
                m.PhaseTimings.KeyGenMin = d
            }
            if d > m.PhaseTimings.KeyGenMax {
                m.PhaseTimings.KeyGenMax = d
            }
        }
        if d := pd.VerifyDuration; d > 0 {
            verifyN++
            m.PhaseTimings.VerifyTotal += d
            if d < m.PhaseTimings.VerifyMin {
                m.PhaseTimings.VerifyMin = d
            }
            if d > m.PhaseTimings.VerifyMax {
                m.PhaseTimings.VerifyMax = d
            }
        }

        // KR summary: prefer the explicitly recorded KRms; otherwise fall
        // back to the sum of the KR sub-phases (MVBA+KeyGen+Verify).
        krms := ep.KRms
        if krms <= 0 {
            krms = pd.MVADuration + pd.KeyGenDuration + pd.VerifyDuration
        }
        if krms > 0 {
            krN++
            m.PhaseTimings.KRTotal += krms
            // KRMin uses a zero sentinel rather than maxInt64.
            if m.PhaseTimings.KRMin == 0 || krms < m.PhaseTimings.KRMin {
                m.PhaseTimings.KRMin = krms
            }
            if krms > m.PhaseTimings.KRMax {
                m.PhaseTimings.KRMax = krms
            }
        }

        // Message statistics aggregation.
        mc := ep.MessageCounts
        m.MessageStats.TotalACSSMessages += mc.ACSSMessages
        m.MessageStats.TotalMVAMessages += mc.MVAMessages
        m.MessageStats.TotalKeyGenMessages += mc.KeyGenMessages
        m.MessageStats.TotalVerifyMessages += mc.VerifyMessages
        m.MessageStats.TotalBytesSent += mc.BytesSent
        m.MessageStats.TotalBytesReceived += mc.BytesReceived

        // Batch statistics aggregation (epochs without batches are skipped).
        if b := ep.Batches; b > 0 {
            totalBatches++
            totalTransactions += b
            if b < m.BatchStats.MinBatchSize {
                m.BatchStats.MinBatchSize = b
            }
            if b > m.BatchStats.MaxBatchSize {
                m.BatchStats.MaxBatchSize = b
            }
        }
    }

    // Averages over the epochs that actually reported a sample.
    if acssN > 0 {
        m.PhaseTimings.ACSSAvg = float64(m.PhaseTimings.ACSSTotal) / float64(acssN)
    }
    if mvaN > 0 {
        m.PhaseTimings.MVAAvg = float64(m.PhaseTimings.MVATotal) / float64(mvaN)
    }
    if keyGenN > 0 {
        m.PhaseTimings.KeyGenAvg = float64(m.PhaseTimings.KeyGenTotal) / float64(keyGenN)
    }
    if verifyN > 0 {
        m.PhaseTimings.VerifyAvg = float64(m.PhaseTimings.VerifyTotal) / float64(verifyN)
    }
    if krN > 0 {
        m.PhaseTimings.KRAvg = float64(m.PhaseTimings.KRTotal) / float64(krN)
    }

    // Message totals and per-epoch averages (len(EpochsExt) > 0 here).
    m.MessageStats.TotalMessages = m.MessageStats.TotalACSSMessages + m.MessageStats.TotalMVAMessages +
        m.MessageStats.TotalKeyGenMessages + m.MessageStats.TotalVerifyMessages
    n := float64(len(m.EpochsExt))
    m.MessageStats.AvgMessagesPerEpoch = float64(m.MessageStats.TotalMessages) / n
    m.MessageStats.AvgBytesPerEpoch = float64(m.MessageStats.TotalBytesSent+m.MessageStats.TotalBytesReceived) / n

    // Batch summary.
    m.BatchStats.TotalBatches = totalBatches
    m.BatchStats.TotalTransactions = totalTransactions
    if totalBatches > 0 {
        m.BatchStats.AvgBatchSize = float64(totalTransactions) / float64(totalBatches)
    } else {
        m.BatchStats.AvgBatchSize = 0
    }

    // Collapse untouched min sentinels back to 0 so "no data" reads as zero.
    if m.PhaseTimings.ACSSMin == maxInt64 {
        m.PhaseTimings.ACSSMin = 0
    }
    if m.PhaseTimings.MVAMin == maxInt64 {
        m.PhaseTimings.MVAMin = 0
    }
    if m.PhaseTimings.KeyGenMin == maxInt64 {
        m.PhaseTimings.KeyGenMin = 0
    }
    if m.PhaseTimings.VerifyMin == maxInt64 {
        m.PhaseTimings.VerifyMin = 0
    }
    if m.BatchStats.MinBatchSize == maxInt {
        m.BatchStats.MinBatchSize = 0
    }
}

// WriteJSON writes the metrics as indented JSON to path. It holds the read
// lock for the duration of the encode so concurrent recorders cannot race
// the snapshot, and it reports the file-close error (relevant for writers,
// where buffered data may fail to hit disk at close).
func (m *Metrics) WriteJSON(path string) error {
    m.mu.RLock()
    defer m.mu.RUnlock()

    f, err := os.Create(path)
    if err != nil {
        return err
    }
    enc := json.NewEncoder(f)
    enc.SetIndent("", "  ")
    if err := enc.Encode(m); err != nil {
        f.Close()
        return err
    }
    return f.Close()
}

// WriteCSV writes a one-row summary CSV (header plus a single record of the
// aggregated statistics) to path. It holds the read lock so concurrent
// recorders cannot race the snapshot, and it surfaces buffered-write errors
// via csv.Writer.Error after an explicit Flush (a deferred Flush would
// silently drop them).
func (m *Metrics) WriteCSV(path string) error {
    m.mu.RLock()
    defer m.mu.RUnlock()

    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()
    w := csv.NewWriter(f)

    // Summary header: one column per aggregated statistic.
    if err := w.Write([]string{
        "start", "end", "duration_s", "epochs", "batches_total",
        "acss_total_ms", "acss_avg_ms", "acss_min_ms", "acss_max_ms",
        "mvba_total_ms", "mvba_avg_ms", "mvba_min_ms", "mvba_max_ms",
        "keygen_total_ms", "keygen_avg_ms", "keygen_min_ms", "keygen_max_ms",
        "verify_total_ms", "verify_avg_ms", "verify_min_ms", "verify_max_ms",
        "kr_total_ms", "kr_avg_ms", "kr_min_ms", "kr_max_ms",
        "total_messages", "avg_messages_per_epoch", "total_bytes_sent", "total_bytes_received",
        "min_batch_size", "max_batch_size", "avg_batch_size", "total_transactions",
    }); err != nil {
        return err
    }

    // batches_total is the sum of the simple per-epoch Batches slice.
    total := 0
    for _, v := range m.Batches {
        total += v
    }

    rec := []string{
        m.Start.Format(time.RFC3339Nano), m.End.Format(time.RFC3339Nano),
        fmt.Sprintf("%.6f", m.DurationS), fmt.Sprintf("%d", m.Epochs), fmt.Sprintf("%d", total),
        fmt.Sprintf("%d", m.PhaseTimings.ACSSTotal), fmt.Sprintf("%.2f", m.PhaseTimings.ACSSAvg),
        fmt.Sprintf("%d", m.PhaseTimings.ACSSMin), fmt.Sprintf("%d", m.PhaseTimings.ACSSMax),
        fmt.Sprintf("%d", m.PhaseTimings.MVATotal), fmt.Sprintf("%.2f", m.PhaseTimings.MVAAvg),
        fmt.Sprintf("%d", m.PhaseTimings.MVAMin), fmt.Sprintf("%d", m.PhaseTimings.MVAMax),
        fmt.Sprintf("%d", m.PhaseTimings.KeyGenTotal), fmt.Sprintf("%.2f", m.PhaseTimings.KeyGenAvg),
        fmt.Sprintf("%d", m.PhaseTimings.KeyGenMin), fmt.Sprintf("%d", m.PhaseTimings.KeyGenMax),
        fmt.Sprintf("%d", m.PhaseTimings.VerifyTotal), fmt.Sprintf("%.2f", m.PhaseTimings.VerifyAvg),
        fmt.Sprintf("%d", m.PhaseTimings.VerifyMin), fmt.Sprintf("%d", m.PhaseTimings.VerifyMax),
        fmt.Sprintf("%d", m.PhaseTimings.KRTotal), fmt.Sprintf("%.2f", m.PhaseTimings.KRAvg),
        fmt.Sprintf("%d", m.PhaseTimings.KRMin), fmt.Sprintf("%d", m.PhaseTimings.KRMax),
        fmt.Sprintf("%d", m.MessageStats.TotalMessages), fmt.Sprintf("%.2f", m.MessageStats.AvgMessagesPerEpoch),
        fmt.Sprintf("%d", m.MessageStats.TotalBytesSent), fmt.Sprintf("%d", m.MessageStats.TotalBytesReceived),
        fmt.Sprintf("%d", m.BatchStats.MinBatchSize), fmt.Sprintf("%d", m.BatchStats.MaxBatchSize),
        fmt.Sprintf("%.2f", m.BatchStats.AvgBatchSize), fmt.Sprintf("%d", m.BatchStats.TotalTransactions),
    }
    if err := w.Write(rec); err != nil {
        return err
    }
    w.Flush()
    return w.Error()
}

// WriteDetailedCSV writes one CSV row per epoch in EpochsExt (timestamps in
// RFC3339Nano, durations in milliseconds, plus message counts and byte
// totals) to path. It holds the read lock so concurrent recorders cannot
// race the snapshot, and surfaces buffered-write errors via
// csv.Writer.Error after an explicit Flush.
func (m *Metrics) WriteDetailedCSV(path string) error {
    m.mu.RLock()
    defer m.mu.RUnlock()

    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()
    w := csv.NewWriter(f)

    // Per-epoch header.
    if err := w.Write([]string{
        "epoch", "n", "f", "l", "batches",
        "acss_start", "acss_end", "acss_duration_ms",
        "mvba_start", "mvba_end", "mvba_duration_ms",
        "keygen_start", "keygen_end", "keygen_duration_ms",
        "verify_start", "verify_end", "verify_duration_ms",
        "acss_messages", "mvba_messages", "keygen_messages", "verify_messages", "total_messages",
        "bytes_sent", "bytes_received",
    }); err != nil {
        return err
    }

    for _, epoch := range m.EpochsExt {
        rec := []string{
            fmt.Sprintf("%d", epoch.Epoch), fmt.Sprintf("%d", epoch.N), fmt.Sprintf("%d", epoch.F), fmt.Sprintf("%d", epoch.L), fmt.Sprintf("%d", epoch.Batches),
            epoch.PhaseDetails.ACSSStart.Format(time.RFC3339Nano), epoch.PhaseDetails.ACSSEnd.Format(time.RFC3339Nano), fmt.Sprintf("%d", epoch.PhaseDetails.ACSSDuration),
            epoch.PhaseDetails.MVAStart.Format(time.RFC3339Nano), epoch.PhaseDetails.MVAEnd.Format(time.RFC3339Nano), fmt.Sprintf("%d", epoch.PhaseDetails.MVADuration),
            epoch.PhaseDetails.KeyGenStart.Format(time.RFC3339Nano), epoch.PhaseDetails.KeyGenEnd.Format(time.RFC3339Nano), fmt.Sprintf("%d", epoch.PhaseDetails.KeyGenDuration),
            epoch.PhaseDetails.VerifyStart.Format(time.RFC3339Nano), epoch.PhaseDetails.VerifyEnd.Format(time.RFC3339Nano), fmt.Sprintf("%d", epoch.PhaseDetails.VerifyDuration),
            fmt.Sprintf("%d", epoch.MessageCounts.ACSSMessages), fmt.Sprintf("%d", epoch.MessageCounts.MVAMessages),
            fmt.Sprintf("%d", epoch.MessageCounts.KeyGenMessages), fmt.Sprintf("%d", epoch.MessageCounts.VerifyMessages), fmt.Sprintf("%d", epoch.MessageCounts.TotalMessages),
            fmt.Sprintf("%d", epoch.MessageCounts.BytesSent), fmt.Sprintf("%d", epoch.MessageCounts.BytesReceived),
        }
        if err := w.Write(rec); err != nil {
            return err
        }
    }

    w.Flush()
    return w.Error()
}

// EpochRecord stores per-epoch plan & durations for ACS and KR.
// N, F, and L are per-epoch plan parameters; their exact semantics are
// defined by the caller that populates them (TODO confirm — not visible
// in this file).
type EpochRecord struct {
    Epoch   int     `json:"epoch"`
    N       int     `json:"n"`
    F       int     `json:"f"`
    L       int     `json:"l"`
    Batches int     `json:"batches"`
    ACSms   int64   `json:"acs_ms"`
    KRms    int64   `json:"kr_ms"`
    // Detailed phase timings for this epoch.
    // NOTE(review): omitempty has no effect on struct-typed fields.
    PhaseDetails PhaseDetails `json:"phase_details,omitempty"`
    // Message counts for this epoch. Tag fixed from the garbled
    // "ebbflow/src/message_counts" (leaked path fragment) to a plain key.
    MessageCounts MessageCounts `json:"message_counts,omitempty"`
}

// PhaseDetails stores detailed timing for each phase of one epoch: a
// wall-clock start/end pair plus the derived duration in milliseconds.
// Metrics.RecordEpochPhase fills exactly one triple per call, keyed by
// phase name ("acss", "mvba", "keygen", "verify").
type PhaseDetails struct {
    ACSSStart    time.Time `json:"acss_start"`
    ACSSEnd      time.Time `json:"acss_end"`
    ACSSDuration int64     `json:"acss_duration_ms"`
    MVAStart     time.Time `json:"mvba_start"`
    MVAEnd       time.Time `json:"mvba_end"`
    MVADuration  int64     `json:"mvba_duration_ms"`
    KeyGenStart  time.Time `json:"keygen_start"`
    KeyGenEnd    time.Time `json:"keygen_end"`
    KeyGenDuration int64   `json:"keygen_duration_ms"`
    VerifyStart  time.Time `json:"verify_start"`
    VerifyEnd    time.Time `json:"verify_end"`
    VerifyDuration int64   `json:"verify_duration_ms"`
}

// MessageCounts stores message statistics for an epoch, recorded wholesale
// via Metrics.RecordEpochMessages.
type MessageCounts struct {
    ACSSMessages    int `json:"acss_messages"`
    MVAMessages     int `json:"mvba_messages"`
    KeyGenMessages  int `json:"keygen_messages"`
    VerifyMessages  int `json:"verify_messages"`
    // TotalMessages is stored as supplied by the caller, not derived here
    // (presumably the sum of the per-phase counts — confirm at call sites).
    TotalMessages   int `json:"total_messages"`
    BytesSent       int64 `json:"bytes_sent"`
    BytesReceived   int64 `json:"bytes_received"`
}

// PhaseTimings stores aggregated timing statistics across all epochs.
// All values are milliseconds and are populated by computeAggregatedStats;
// a Min of 0 means no epoch reported a positive duration for that phase.
type PhaseTimings struct {
    ACSSTotal    int64   `json:"acss_total_ms"`
    ACSSAvg      float64 `json:"acss_avg_ms"`
    ACSSMin      int64   `json:"acss_min_ms"`
    ACSSMax      int64   `json:"acss_max_ms"`
    MVATotal     int64   `json:"mvba_total_ms"`
    MVAAvg       float64 `json:"mvba_avg_ms"`
    MVAMin       int64   `json:"mvba_min_ms"`
    MVAMax       int64   `json:"mvba_max_ms"`
    KeyGenTotal  int64   `json:"keygen_total_ms"`
    KeyGenAvg    float64 `json:"keygen_avg_ms"`
    KeyGenMin    int64   `json:"keygen_min_ms"`
    KeyGenMax    int64   `json:"keygen_max_ms"`
    VerifyTotal  int64   `json:"verify_total_ms"`
    VerifyAvg    float64 `json:"verify_avg_ms"`
    VerifyMin    int64   `json:"verify_min_ms"`
    VerifyMax    int64   `json:"verify_max_ms"`
    // KR summary (sum of MVBA+KeyGen+Verify or EpochRecord.KRms where provided)
    KRTotal      int64   `json:"kr_total_ms"`
    KRAvg        float64 `json:"kr_avg_ms"`
    KRMin        int64   `json:"kr_min_ms"`
    KRMax        int64   `json:"kr_max_ms"`
}

// MessageStats stores aggregated message statistics, summed over every
// EpochRecord.MessageCounts by computeAggregatedStats. TotalMessages is the
// sum of the four per-phase totals; the Avg fields divide by the number of
// epoch records.
type MessageStats struct {
    TotalACSSMessages    int   `json:"total_acss_messages"`
    TotalMVAMessages     int   `json:"total_mvba_messages"`
    TotalKeyGenMessages  int   `json:"total_keygen_messages"`
    TotalVerifyMessages  int   `json:"total_verify_messages"`
    TotalMessages        int   `json:"total_messages"`
    TotalBytesSent       int64 `json:"total_bytes_sent"`
    TotalBytesReceived   int64 `json:"total_bytes_received"`
    AvgMessagesPerEpoch  float64 `json:"avg_messages_per_epoch"`
    AvgBytesPerEpoch     float64 `json:"avg_bytes_per_epoch"`
}

// BatchStats stores batch size statistics. Min/Max/Avg/Total fields are
// derived by computeAggregatedStats from epochs with Batches > 0;
// BatchSizeDistribution is a size→occurrence histogram maintained
// incrementally by RecordBatchSize.
type BatchStats struct {
    MinBatchSize     int     `json:"min_batch_size"`
    MaxBatchSize     int     `json:"max_batch_size"`
    AvgBatchSize     float64 `json:"avg_batch_size"`
    TotalBatches     int     `json:"total_batches"`
    TotalTransactions int    `json:"total_transactions"`
    BatchSizeDistribution map[int]int `json:"batch_size_distribution,omitempty"`
}



