package core

import (
	"errors"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/seafooler/dumbo/config"
	"github.com/seafooler/dumbo/conn"
	"github.com/seafooler/dumbo/logger"
)

// Node represents a Dumbo consensus node. It embeds the static
// configuration and wires together the sub-protocol instances
// (RBC, ABA, MVBA, ACS) that form the consensus pipeline, along with
// networking, transaction pooling, and performance instrumentation.
type Node struct {
	*config.Config
	logger hclog.Logger

	// Protocol instances; created in NewNode and wired together via
	// acs.Initialize.
	rbc *RBC
	aba *ABA
	mvba *MVBA
	acs *ACS
	
	// Network transport for P2P messaging; nil until StartP2PListen.
	trans *conn.NetworkTransport
	reflectedTypesMap map[uint8]reflect.Type

	// Node state, guarded by mutex (see handleStatusChange/GetStats).
	status             uint8 // Current status
	statusChangeSignal chan StatusChangeSignal
	sn                 int   // Current sequence number
	
	// Message caching
	cachedMsgs map[int][3][]interface{} // [SN][Status]Messages
	
	// Transaction handling; txPool and processedTxs are guarded by
	// txPoolMutex.
	txPool      [][]byte // Transaction pool
	txPoolMutex sync.RWMutex
	
	// Consensus results, guarded by resultsMutex.
	consensusResults [][]byte
	resultsMutex     sync.RWMutex
	
	// Statistics
	startTime        time.Time
	processedTxs     int
	consensusRounds  int
	
	// Delay monitoring
	delayMonitor *DelayMonitor
	
	// Performance logging; may be nil if initialization failed.
	perfLogger *logger.PerformanceLogger
	
	mutex sync.RWMutex
}

// NewNode creates a new Dumbo node from the given configuration.
// It initializes logging, the performance logger (best-effort: a
// failure is logged and the node runs without it), and all four
// sub-protocol instances, wiring RBC/ABA/MVBA into ACS.
func NewNode(conf *config.Config) *Node {
	n := &Node{
		Config:             conf,
		reflectedTypesMap:  ReflectedTypesMap,
		statusChangeSignal: make(chan StatusChangeSignal),
		cachedMsgs:         make(map[int][3][]interface{}),
		txPool:             make([][]byte, 0),
		consensusResults:   make([][]byte, 0),
		startTime:          time.Now(),
		delayMonitor:       NewDelayMonitor(),
	}

	// The logger must exist before the perf-logger setup below, which
	// may need to report an error.
	n.logger = hclog.New(&hclog.LoggerOptions{
		Name:   "dumbo-node",
		Output: hclog.DefaultOutput,
		Level:  hclog.Level(conf.LogLevel),
	})

	if perfLogger, err := logger.NewPerformanceLogger(conf.Id, "logs"); err != nil {
		n.logger.Error("Failed to initialize performance logger", "error", err)
	} else {
		n.perfLogger = perfLogger
		// Set consensus parameters for TPS calculation:
		// TPS = B * (N-F) / L_epoch according to the Dumbo-NG paper.
		perfLogger.SetParameters(conf.MaxPayloadSize, conf.N, conf.F)
	}

	// Build the protocol stack and hand the components to ACS.
	n.rbc = NewRBC(n)
	n.aba = NewABA(n)
	n.mvba = NewMVBA(n)
	n.acs = NewACS(n)
	n.acs.Initialize(n.rbc, n.aba, n.mvba)

	return n
}

// StartP2PListen starts the node to listen for P2P connections on the
// configured P2P port, creating the TCP transport with a 2s timeout.
func (n *Node) StartP2PListen() error {
	trans, err := conn.NewTCPTransport(":"+n.P2pPort, 2*time.Second,
		nil, n.MaxPool, n.reflectedTypesMap)
	if err != nil {
		return err
	}
	n.trans = trans
	return nil
}

// EstablishP2PConns establishes P2P connections with other nodes.
// For each peer in Id2AddrMap it warms the connection pool by fetching
// a connection and immediately returning it. Returns an error if the
// transport has not been created or any peer is unreachable.
func (n *Node) EstablishP2PConns() error {
	if n.trans == nil {
		return errors.New("network transport has not been created")
	}
	for name, addr := range n.Id2AddrMap {
		addrWithPort := addr + ":" + n.Id2PortMap[name]
		// Named "c" rather than "conn" so the local does not shadow
		// the imported conn package.
		c, err := n.trans.GetConn(addrWithPort)
		if err != nil {
			return err
		}
		n.trans.ReturnConn(c)
		n.logger.Debug("connection has been established", "sender", n.Name, "receiver", addr)
	}
	return nil
}

// HandleMsgsLoop starts a loop to handle messages from other peers.
// It multiplexes two sources: peer messages from the transport's
// channel (dispatched via handleMessage) and internal status-change
// signals (dispatched via handleStatusChange).
//
// NOTE(review): this loop runs forever with no stop/cancellation path;
// confirm that node shutdown (Close) is not expected to terminate it.
func (n *Node) HandleMsgsLoop() {
	msgCh := n.trans.MsgChan()
	n.logger.Info("Dumbo node message loop started")
	
	for {
		select {
		case msg := <-msgCh:
			n.handleMessage(msg)
		case scs := <-n.statusChangeSignal:
			n.handleStatusChange(scs)
		}
	}
}

// handleMessage processes a single incoming message by dispatching on
// its concrete type: RBC messages go to the RBC instance, ABA messages
// to the ABA instance, MVBA messages to the MVBA instance, protocol
// outputs (RBCOutputMsg, ABAFinishMsg, MVBAOutputMsg) feed into ACS,
// and PayloadMsg transactions are added to the local pool. Unknown
// types are logged as errors and dropped.
func (n *Node) handleMessage(msg interface{}) {
	switch msgAsserted := msg.(type) {
	case RBCEchoMsg:
		n.logger.Debug("Received RBC Echo", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.rbc.HandleEchoMsg(&msgAsserted)
		
	case RBCReadyMsg:
		n.logger.Debug("Received RBC Ready", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.rbc.HandleReadyMsg(&msgAsserted)
		
	case RBCOutputMsg:
		n.logger.Debug("Received RBC Output", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.acs.ProcessRBCOutput(msgAsserted.SN, msgAsserted.Data)
		
	case ABABvalMsg:
		n.logger.Debug("Received ABA Bval", "sn", msgAsserted.SN, "sender", msgAsserted.Sender, "round", msgAsserted.Round, "value", msgAsserted.Value)
		n.aba.HandleBvalMsg(&msgAsserted)
		
	case ABAAuxMsg:
		n.logger.Debug("Received ABA Aux", "sn", msgAsserted.SN, "sender", msgAsserted.Sender, "round", msgAsserted.Round, "value", msgAsserted.Value)
		n.aba.HandleAuxMsg(&msgAsserted)
		
	case ABAConfMsg:
		n.logger.Debug("Received ABA Conf", "sn", msgAsserted.SN, "sender", msgAsserted.Sender, "round", msgAsserted.Round, "value", msgAsserted.Value)
		n.aba.HandleConfMsg(&msgAsserted)
		
	case ABAFinishMsg:
		n.logger.Debug("Received ABA Finish", "sn", msgAsserted.SN, "sender", msgAsserted.Sender, "value", msgAsserted.Value)
		n.acs.ProcessABADecision(msgAsserted.SN, msgAsserted.Value)
		
	case MVBAValMsg:
		n.logger.Debug("Received MVBA Val", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.mvba.HandleValMsg(&msgAsserted)
		
	case MVBAEchoMsg:
		n.logger.Debug("Received MVBA Echo", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.mvba.HandleEchoMsg(&msgAsserted)
		
	case MVBAReadyMsg:
		n.logger.Debug("Received MVBA Ready", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.mvba.HandleReadyMsg(&msgAsserted)
		
	case MVBAOutputMsg:
		n.logger.Debug("Received MVBA Output", "sn", msgAsserted.SN, "sender", msgAsserted.Sender)
		n.acs.ProcessMVBAOutput(msgAsserted.SN, msgAsserted.Data)
		
	case PayloadMsg:
		n.logger.Debug("Received Payload", "sn", msgAsserted.SN, "sender", msgAsserted.Sender, "tx_count", len(msgAsserted.Txs))
		n.addTransactions(msgAsserted.Txs)
		
	default:
		n.logger.Error("Unknown message type", "type", reflect.TypeOf(msg))
	}
}

// handleStatusChange applies a status-change signal under the node
// mutex. Signals whose sequence number does not match the current one
// are logged but otherwise ignored.
func (n *Node) handleStatusChange(scs StatusChangeSignal) {
	n.mutex.Lock()
	defer n.mutex.Unlock()

	n.logger.Info("Status change", "from", n.status, "to", scs.Status, "sn", scs.SN)

	// Only signals for the current sequence number take effect.
	if scs.SN != n.sn {
		return
	}
	n.status = scs.Status
}

// StartConsensus starts the Dumbo consensus process: it resets the
// node to the RBC phase at sequence number 0 and launches the message
// loop, the transaction generator, and the consensus loop as
// background goroutines.
func (n *Node) StartConsensus() {
	n.logger.Info("Starting Dumbo consensus")
	
	// status and sn are accessed under n.mutex elsewhere
	// (handleStatusChange, GetStats); writing them unguarded here
	// would be a data race once the goroutines below start.
	n.mutex.Lock()
	n.status = StatusRBC
	n.sn = 0
	n.mutex.Unlock()
	
	// Start message handling loop
	go n.HandleMsgsLoop()
	
	// Start transaction generation
	go n.generateTransactions()
	
	// Start consensus rounds
	go n.consensusLoop()
}

// consensusLoop runs the main consensus loop: it repeatedly drains a
// batch of transactions from the pool, runs ACS over it, records the
// agreed outputs, logs per-epoch performance metrics, and advances the
// sequence number.
func (n *Node) consensusLoop() {
	for {
		// Get transactions from pool
		txs := n.getTransactions()
		if len(txs) == 0 {
			// Yield instead of spinning flat-out on an empty pool.
			// Gosched adds no sleep, so the "no artificial delay"
			// property is preserved while other goroutines still
			// get CPU time.
			runtime.Gosched()
			continue
		}
		
		n.logger.Info("Starting consensus round", "sn", n.sn, "tx_count", len(txs))
		
		// Start ACS with current transactions
		n.acs.StartACS(txs)
		
		// Wait for ACS to complete (yield, no artificial delay)
		for !n.acs.IsCompleted() {
			runtime.Gosched()
		}
		
		// Process results
		outputs := n.acs.GetOutputs()
		n.processConsensusResults(outputs)
		
		// Log performance metrics for this epoch
		if n.perfLogger != nil {
			// Delivered size counts the batch once per honest node:
			// we wait for N-F honest nodes to reach consensus.
			deliveredTxs := len(txs) * (n.N - n.F)
			deliveredSize := int64(deliveredTxs * 250) // each transaction is 250 bytes
			
			// Get component delays
			delays := n.delayMonitor.GetAverageDelays()
			
			// Log epoch metrics
			n.perfLogger.LogEpochMetrics(n.sn, deliveredTxs, deliveredSize, delays)
		}
		
		// Increment sequence number
		n.sn++
		n.consensusRounds++
		
		n.logger.Info("Consensus round completed", "sn", n.sn-1, "output_count", len(outputs))
	}
}

// generateTransactions generates mock transactions in an unthrottled
// loop, adding batches of 10 fixed-size transactions to the pool.
//
// NOTE(review): there is no pacing or pool-size cap, so the pool grows
// as fast as this loop runs; confirm this is intended for the
// benchmarking setup ("no artificial delay" design).
func (n *Node) generateTransactions() {
	for {
		// No artificial delay in transaction generation
		
		// Generate a batch of transactions
		batchSize := 10
		txs := make([][]byte, batchSize)
		for i := 0; i < batchSize; i++ {
			tx := make([]byte, 250)  // each transaction is 250 bytes
			tx[249] = byte(i)        // tag the last byte with the in-batch index
			txs[i] = tx
		}
		
		n.addTransactions(txs)
	}
}

// addTransactions appends txs to the pool and bumps the processed-tx
// counter, both under txPoolMutex.
func (n *Node) addTransactions(txs [][]byte) {
	n.txPoolMutex.Lock()
	n.txPool = append(n.txPool, txs...)
	n.processedTxs += len(txs)
	n.txPoolMutex.Unlock()
}

// getTransactions removes and returns up to MaxPayloadSize
// transactions from the pool, or nil when the pool is empty.
func (n *Node) getTransactions() [][]byte {
	n.txPoolMutex.Lock()
	defer n.txPoolMutex.Unlock()

	available := len(n.txPool)
	if available == 0 {
		return nil
	}

	// Clamp the batch to what is actually available.
	batch := n.MaxPayloadSize
	if batch > available {
		batch = available
	}

	// Copy out the batch so callers do not alias the pool's storage,
	// then drop the consumed prefix.
	out := make([][]byte, batch)
	copy(out, n.txPool[:batch])
	n.txPool = n.txPool[batch:]

	return out
}

// processConsensusResults appends each agreed output to the node's
// result log under resultsMutex.
func (n *Node) processConsensusResults(outputs map[int][]byte) {
	n.resultsMutex.Lock()
	for sn, data := range outputs {
		n.consensusResults = append(n.consensusResults, data)
		n.logger.Debug("Processed consensus result", "sn", sn, "data_len", len(data))
	}
	n.resultsMutex.Unlock()
}

// OnACSComplete is called when ACS completes. It only logs the agreed
// outputs; result persistence happens in processConsensusResults.
func (n *Node) OnACSComplete(outputs [][]byte) {
	n.logger.Info("ACS completed", "output_count", len(outputs))

	for idx := range outputs {
		n.logger.Debug("ACS output", "index", idx, "data_len", len(outputs[idx]))
	}
}

// SendMsg sends a single tagged message (with optional signature) to
// the peer at addrPort, using a pooled connection from the transport.
func (n *Node) SendMsg(tag byte, data interface{}, sig []byte, addrPort string) error {
	c, err := n.trans.GetConn(addrPort)
	if err != nil {
		return err
	}
	// Always hand the connection back to the pool, even when the send
	// fails — otherwise a failed send leaks the pooled connection.
	// (Best effort: ReturnConn is the only release path the transport
	// exposes here.)
	sendErr := conn.SendMsg(c, tag, data, sig)
	retErr := n.trans.ReturnConn(c)
	if sendErr != nil {
		return sendErr
	}
	return retErr
}

// PlainBroadcast broadcasts data to all peers. Each send runs in its
// own goroutine; failures are logged rather than returned, and the
// function itself always returns nil.
func (n *Node) PlainBroadcast(tag byte, data interface{}, sig []byte) error {
	for id, addr := range n.Id2AddrMap {
		target := addr + ":" + n.Id2PortMap[id]
		go func(dest string) {
			if err := n.SendMsg(tag, data, sig, dest); err != nil {
				n.logger.Error("Failed to send message", "error", err, "addr", dest)
			}
		}(target)
	}
	return nil
}

// GetStats returns a snapshot of node statistics: consensus progress,
// transaction throughput, component delay averages, and (when present)
// performance-logger metrics.
func (n *Node) GetStats() map[string]interface{} {
	// Each field group is read under the mutex that guards it
	// elsewhere in this file; reading txPool/processedTxs/results
	// under n.mutex alone would race with addTransactions,
	// getTransactions, and processConsensusResults.
	n.mutex.RLock()
	status := n.status
	sn := n.sn
	rounds := n.consensusRounds
	n.mutex.RUnlock()

	n.txPoolMutex.RLock()
	poolSize := len(n.txPool)
	processed := n.processedTxs
	n.txPoolMutex.RUnlock()

	n.resultsMutex.RLock()
	resultsCount := len(n.consensusResults)
	n.resultsMutex.RUnlock()

	elapsed := time.Since(n.startTime)

	// Gather delay statistics per component.
	averages := n.delayMonitor.GetAverageDelays()

	stats := map[string]interface{}{
		"node_id":          n.Id,
		"status":           status,
		"sequence_number":  sn,
		"consensus_rounds": rounds,
		"processed_txs":    processed,
		"tx_pool_size":     poolSize,
		"results_count":    resultsCount,
		"uptime_seconds":   elapsed.Seconds(),
		"tps":              float64(processed) / elapsed.Seconds(),
		"rbc_avg_ms":       averages["RBC"].Milliseconds(),
		"aba_avg_ms":       averages["ABA"].Milliseconds(),
		"mvba_avg_ms":      averages["MVBA"].Milliseconds(),
		"acs_avg_ms":       averages["ACS"].Milliseconds(),
	}

	// Add performance logger metrics if available
	if n.perfLogger != nil {
		perfMetrics := n.perfLogger.GetTotalMetrics()
		componentDelays := n.perfLogger.GetComponentDelays()

		stats["perf_total_tps"] = perfMetrics["total_tps"]
		stats["perf_total_size"] = perfMetrics["total_size"]
		stats["perf_avg_epoch_tps"] = perfMetrics["avg_epoch_tps"]
		stats["perf_avg_epoch_latency"] = perfMetrics["avg_epoch_latency"]
		stats["perf_rbc_delay"] = componentDelays["RBC"]
		stats["perf_aba_delay"] = componentDelays["ABA"]
		stats["perf_mvba_delay"] = componentDelays["MVBA"]
		stats["perf_acs_delay"] = componentDelays["ACS"]
	}

	return stats
}

// AddTransactions adds transactions to the node's pool (public method).
// Safe for concurrent use: locking is handled inside addTransactions.
func (n *Node) AddTransactions(txs [][]byte) {
	n.addTransactions(txs)
}

// GetPerfLogger returns the performance logger (for testing).
// May be nil when logger initialization failed in NewNode.
func (n *Node) GetPerfLogger() *logger.PerformanceLogger {
	return n.perfLogger
}

// Close closes the node, flushing the performance summary first.
// It is a no-op when no performance logger was initialized.
func (n *Node) Close() error {
	if n.perfLogger == nil {
		return nil
	}
	n.perfLogger.LogSummary()
	return n.perfLogger.Close()
}