package status_collector

import (
	"context"
	"fmt"
	"log"
	"sync"
	"time"

	"gitlab.ctyuncdn.cn/tai/infra/cloud-controller/pkg/kafka"
	"gitlab.ctyuncdn.cn/tai/infra/cloud-controller/pkg/mysql"
)

// KafkaClientInterface defines the interface for Kafka client operations
// KafkaClientInterface defines the interface for Kafka client operations
// needed by the collector; it exists as a test seam for the concrete
// kafka client.
type KafkaClientInterface interface {
	// GetConfig returns the client's Kafka configuration.
	GetConfig() *kafka.Config
	// Close releases the client's underlying resources.
	Close() error
	// HealthCheck reports whether the Kafka connection is usable.
	HealthCheck(ctx context.Context) error
}

// KafkaWriterInterface defines the interface for Kafka writer operations
// KafkaWriterInterface defines the interface for Kafka writer operations
// used to publish job status/messages; it exists as a test seam for the
// concrete kafka writer.
type KafkaWriterInterface interface {
	// WriteJobStatus publishes a job status message.
	WriteJobStatus(ctx context.Context, statusMsg *kafka.StatusMessage) error
	// WriteJobMessage publishes a job message.
	WriteJobMessage(ctx context.Context, jobMsg *kafka.JobMessage) error
	// WriteMessage publishes a raw key/value message to the given topic.
	WriteMessage(ctx context.Context, topic string, key []byte, value []byte) error
	// Errors exposes the writer's asynchronous error channel.
	Errors() <-chan error
	// Close flushes and shuts down the writer.
	Close() error
}

// Collector represents the status collector service
// Collector represents the status collector service. It periodically
// gathers job statuses from registered cluster managers, persists changed
// statuses to the database, and publishes them to Kafka.
//
// Collector contains mutexes and a WaitGroup and therefore must not be
// copied; use the *Collector returned by NewCollector.
type Collector struct {
	kafkaClient KafkaClientInterface
	kafkaWriter KafkaWriterInterface
	db          DatabaseInterface
	config      *Config

	// Cluster managers registry, keyed by cluster ID.
	clusterManagers map[string]ClusterManager
	mu              sync.RWMutex // guards clusterManagers

	// Control channels: ctx/cancel stop the background goroutines started
	// by Start; wg waits for them in Stop.
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup

	// Status tracking: last observed status per "jobID:clusterID" key,
	// used to suppress duplicate updates.
	lastStatuses map[string]*mysql.JobStatus
	statusMu     sync.RWMutex // guards lastStatuses
}

// Config holds collector configuration
// Config holds collector configuration. A nil Config passed to
// NewCollector is replaced with defaults (see NewCollector).
type Config struct {
	// CollectionInterval is how often statuses are collected from all
	// registered cluster managers.
	CollectionInterval time.Duration `json:"collection_interval"`
	// StatusTimeout bounds a single GetJobStatuses call per cluster.
	StatusTimeout time.Duration `json:"status_timeout"`
	// BatchSize is the intended batch size for processing.
	// NOTE(review): not referenced in this file — confirm it is used elsewhere.
	BatchSize int `json:"batch_size"`
	// RetryAttempts / RetryDelay configure retry behavior.
	// NOTE(review): not referenced in this file — confirm they are used elsewhere.
	RetryAttempts int           `json:"retry_attempts"`
	RetryDelay    time.Duration `json:"retry_delay"`
}

// ClusterManager represents a cluster manager interface for status collection
// ClusterManager represents a cluster manager interface for status collection.
type ClusterManager interface {
	// GetClusterID returns the unique identifier of the managed cluster.
	GetClusterID() string
	// IsHealthy reports whether the cluster can currently be queried;
	// unhealthy clusters are skipped during collection.
	IsHealthy() bool
	// GetJobStatuses returns the current statuses of the cluster's jobs.
	GetJobStatuses(ctx context.Context) ([]*mysql.JobStatus, error)
}

// DatabaseInterface defines the interface for database operations
// DatabaseInterface defines the interface for database operations.
// It mirrors a GORM-style chainable query API (Model/Where/Order return
// the interface for chaining; terminal calls return a DatabaseResult) —
// presumably backed by *gorm.DB; verify against the concrete adapter.
type DatabaseInterface interface {
	Create(value interface{}) DatabaseResult
	Model(value interface{}) DatabaseInterface
	Where(query interface{}, args ...interface{}) DatabaseInterface
	Order(value interface{}) DatabaseInterface
	First(dest interface{}) DatabaseResult
	Find(dest interface{}) DatabaseResult
	Updates(values interface{}) DatabaseResult
	Error() error
}

// DatabaseResult represents the result of a database operation
// DatabaseResult represents the result of a terminal database operation;
// Error returns nil on success.
type DatabaseResult interface {
	Error() error
}

// NewCollector creates a new status collector
// NewCollector creates a new status collector.
//
// kafkaClient, kafkaWriter and db are required and must be non-nil. A nil
// config is replaced with built-in defaults: 30s collection interval, 10s
// status timeout, batch size 100, 3 retry attempts with a 5s delay.
func NewCollector(kafkaClient KafkaClientInterface, kafkaWriter KafkaWriterInterface, db DatabaseInterface, config *Config) (*Collector, error) {
	// Reject missing dependencies up front.
	switch {
	case kafkaClient == nil:
		return nil, fmt.Errorf("kafka client cannot be nil")
	case kafkaWriter == nil:
		return nil, fmt.Errorf("kafka writer cannot be nil")
	case db == nil:
		return nil, fmt.Errorf("database cannot be nil")
	}

	if config == nil {
		// Fall back to sensible defaults when the caller supplies none.
		config = &Config{
			CollectionInterval: 30 * time.Second,
			StatusTimeout:      10 * time.Second,
			BatchSize:          100,
			RetryAttempts:      3,
			RetryDelay:         5 * time.Second,
		}
	}

	// Root context for all background goroutines; cancelled by Stop.
	ctx, cancel := context.WithCancel(context.Background())

	collector := &Collector{
		kafkaClient:     kafkaClient,
		kafkaWriter:     kafkaWriter,
		db:              db,
		config:          config,
		clusterManagers: make(map[string]ClusterManager),
		ctx:             ctx,
		cancel:          cancel,
		lastStatuses:    make(map[string]*mysql.JobStatus),
	}
	return collector, nil
}

// Start starts the status collector
// Start launches the collector's background goroutines: periodic status
// collection and status publishing. It always returns nil; the error
// return exists for interface symmetry with Stop.
func (c *Collector) Start() error {
	// Register both workers with the WaitGroup before spawning them so
	// Stop's Wait cannot race a late Add.
	c.wg.Add(2)
	go c.collectStatuses()
	go c.publishStatuses()

	log.Println("Status collector started successfully")
	return nil
}

// Stop stops the status collector
// Stop signals every background goroutine to exit, waits for them to
// finish, then closes the Kafka writer. It returns an error only if the
// writer fails to close.
func (c *Collector) Stop() error {
	log.Println("Stopping status collector...")

	// Cancel the shared context and block until both workers return.
	c.cancel()
	c.wg.Wait()

	err := c.kafkaWriter.Close()
	if err != nil {
		return fmt.Errorf("failed to close kafka writer: %w", err)
	}

	log.Println("Status collector stopped successfully")
	return nil
}

// RegisterClusterManager registers a cluster manager
// RegisterClusterManager adds (or replaces) a cluster manager in the
// registry, keyed by its cluster ID.
func (c *Collector) RegisterClusterManager(manager ClusterManager) {
	c.mu.Lock()
	defer c.mu.Unlock()

	id := manager.GetClusterID()
	c.clusterManagers[id] = manager
	log.Printf("Registered cluster manager for status collection: %s", id)
}

// UnregisterClusterManager unregisters a cluster manager
// UnregisterClusterManager removes the cluster manager with the given ID
// from the registry. Removing an unknown ID is a no-op.
func (c *Collector) UnregisterClusterManager(clusterID string) {
	c.mu.Lock()
	delete(c.clusterManagers, clusterID)
	c.mu.Unlock()

	log.Printf("Unregistered cluster manager for status collection: %s", clusterID)
}

// collectStatuses periodically collects statuses from all cluster managers
// collectStatuses is the collection worker: on every tick of the
// configured interval it runs one full collection pass, until the
// collector's context is cancelled.
func (c *Collector) collectStatuses() {
	defer c.wg.Done()

	ticker := time.NewTicker(c.config.CollectionInterval)
	defer ticker.Stop()

	for {
		select {
		case <-c.ctx.Done():
			// Stop was called; exit the worker.
			return
		case <-ticker.C:
			c.collectAllStatuses()
		}
	}
}

// collectAllStatuses collects statuses from all registered cluster managers
// collectAllStatuses fans out one goroutine per healthy cluster manager,
// gathers each cluster's job statuses, and routes every status through
// processStatusUpdate.
func (c *Collector) collectAllStatuses() {
	// Snapshot the registry so managers may (un)register concurrently
	// without holding the lock for the whole collection pass.
	c.mu.RLock()
	snapshot := make(map[string]ClusterManager, len(c.clusterManagers))
	for id, mgr := range c.clusterManagers {
		snapshot[id] = mgr
	}
	c.mu.RUnlock()

	// Buffered so no collector goroutine blocks on send.
	results := make(chan []*mysql.JobStatus, len(snapshot))
	var wg sync.WaitGroup

	for id, mgr := range snapshot {
		if !mgr.IsHealthy() {
			log.Printf("Skipping unhealthy cluster manager: %s", id)
			continue
		}

		wg.Add(1)
		go func(id string, mgr ClusterManager) {
			defer wg.Done()

			// Bound each per-cluster query by the configured timeout.
			ctx, cancel := context.WithTimeout(c.ctx, c.config.StatusTimeout)
			defer cancel()

			statuses, err := mgr.GetJobStatuses(ctx)
			if err != nil {
				log.Printf("Failed to get job statuses from cluster %s: %v", id, err)
				return
			}

			// Tag each status with its originating cluster.
			for _, s := range statuses {
				s.ClusterID = id
			}
			results <- statuses
		}(id, mgr)
	}

	// Close the results channel once every collector goroutine is done,
	// so the range below terminates.
	go func() {
		wg.Wait()
		close(results)
	}()

	for batch := range results {
		for _, s := range batch {
			c.processStatusUpdate(s)
		}
	}
}

// processStatusUpdate processes a single status update
// processStatusUpdate processes a single status update: it deduplicates
// unchanged statuses against the in-memory cache, persists changed ones,
// and queues them for Kafka publishing.
//
// Fix: the original read the cached status and wrote the new one under
// two separate statusMu.Lock() sections. Two concurrent collection
// goroutines could both observe the stale entry between those sections,
// both pass the "changed" check, and double-write/double-publish the same
// transition. The check-and-set now happens in one critical section.
func (c *Collector) processStatusUpdate(status *mysql.JobStatus) {
	// Deduplication key: one cached entry per job per cluster.
	key := fmt.Sprintf("%s:%s", status.JobID, status.ClusterID)

	c.statusMu.Lock()
	last, exists := c.lastStatuses[key]
	if exists && last.Status == status.Status && last.Message == status.Message {
		// Status unchanged since last collection; nothing to do.
		c.statusMu.Unlock()
		return
	}
	c.lastStatuses[key] = status
	c.statusMu.Unlock()

	// Persist first; only publish statuses that were successfully saved.
	if err := c.saveStatusToDatabase(status); err != nil {
		log.Printf("Failed to save status to database: %v", err)
		return
	}

	// Queue status for publishing to Kafka.
	c.queueStatusForPublishing(status)
}

// saveStatusToDatabase saves a job status to the database
// saveStatusToDatabase inserts a new job-status history record and
// mirrors the latest status onto the jobs table.
//
// Note: it mutates status.CreatedAt, stamping it with the current time
// before the insert.
func (c *Collector) saveStatusToDatabase(status *mysql.JobStatus) error {
	// Stamp the history record before inserting it.
	status.CreatedAt = time.Now()

	if err := c.db.Create(status).Error(); err != nil {
		return fmt.Errorf("failed to create job status record: %w", err)
	}

	// Keep the jobs table's denormalized status column in sync.
	fields := map[string]interface{}{
		"status":     status.Status,
		"updated_at": time.Now(),
	}
	if err := c.db.Model(&mysql.Job{}).Where("job_id = ?", status.JobID).Updates(fields).Error(); err != nil {
		return fmt.Errorf("failed to update job status: %w", err)
	}

	return nil
}

// queueStatusForPublishing queues a status for publishing to Kafka
// queueStatusForPublishing converts a job status into a Kafka status
// message and publishes it immediately. Publish failures are logged, not
// returned — publishing is best-effort after the DB write has succeeded.
func (c *Collector) queueStatusForPublishing(status *mysql.JobStatus) {
	msg := &kafka.StatusMessage{
		JobID:     status.JobID,
		ClusterID: status.ClusterID,
		Status:    status.Status,
		Message:   status.Message,
		Timestamp: time.Now(),
	}

	if err := c.publishStatusMessage(msg); err != nil {
		log.Printf("Failed to publish status message: %v", err)
	}
}

// publishStatuses handles periodic status publishing
// publishStatuses is the publishing worker. Statuses are currently
// published inline as they are collected (see queueStatusForPublishing),
// so this goroutine only parks until shutdown; it is the hook point for a
// future queue-based publisher with retries.
func (c *Collector) publishStatuses() {
	defer c.wg.Done()

	// Block until Stop cancels the collector's context.
	<-c.ctx.Done()
}

// publishStatusMessage publishes a status message to Kafka
// publishStatusMessage sends one status message to Kafka through the
// configured writer, scoped to the collector's root context so in-flight
// publishes are cancelled on Stop.
func (c *Collector) publishStatusMessage(statusMsg *kafka.StatusMessage) error {
	return c.kafkaWriter.WriteJobStatus(c.ctx, statusMsg)
}

// GetStats returns collector statistics
// GetStats returns a snapshot of collector statistics: registered cluster
// count, configured intervals, per-cluster health, and the number of
// tracked statuses.
func (c *Collector) GetStats() map[string]interface{} {
	// Snapshot registry-derived values under the registry lock.
	c.mu.RLock()
	registered := len(c.clusterManagers)
	health := make(map[string]bool, registered)
	for id, mgr := range c.clusterManagers {
		health[id] = mgr.IsHealthy()
	}
	c.mu.RUnlock()

	// Snapshot the dedup-cache size under its own lock.
	c.statusMu.RLock()
	tracked := len(c.lastStatuses)
	c.statusMu.RUnlock()

	return map[string]interface{}{
		"registered_clusters": registered,
		"collection_interval": c.config.CollectionInterval,
		"status_timeout":      c.config.StatusTimeout,
		"cluster_health":      health,
		"tracked_statuses":    tracked,
	}
}

// GetJobStatus gets the current status of a specific job
// GetJobStatus returns the most recent status record for the given job,
// read from the database (newest created_at first).
func (c *Collector) GetJobStatus(jobID string) (*mysql.JobStatus, error) {
	var latest mysql.JobStatus
	err := c.db.Where("job_id = ?", jobID).Order("created_at DESC").First(&latest).Error()
	if err != nil {
		return nil, fmt.Errorf("failed to get job status: %w", err)
	}

	return &latest, nil
}

// GetJobStatuses gets all statuses for a job
// GetJobStatuses returns the full status history for the given job,
// newest first.
func (c *Collector) GetJobStatuses(jobID string) ([]*mysql.JobStatus, error) {
	var history []*mysql.JobStatus
	err := c.db.Where("job_id = ?", jobID).Order("created_at DESC").Find(&history).Error()
	if err != nil {
		return nil, fmt.Errorf("failed to get job statuses: %w", err)
	}

	return history, nil
}
