package job_dispatcher

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"sync"
	"time"

	"gitlab.ctyuncdn.cn/tai/infra/cloud-controller/pkg/kafka"
	"gitlab.ctyuncdn.cn/tai/infra/cloud-controller/pkg/mysql"
)

// KafkaClientInterface defines the interface for Kafka client operations
// KafkaClientInterface defines the interface for Kafka client operations
// needed by the dispatcher. It abstracts the concrete kafka client so it
// can be mocked in tests.
type KafkaClientInterface interface {
	// GetConfig returns the client's Kafka configuration (used here to
	// obtain the topic to subscribe to).
	GetConfig() *kafka.Config
	// Close releases the client's underlying resources.
	Close() error
	// HealthCheck reports whether the Kafka connection is usable.
	HealthCheck(ctx context.Context) error
}

// KafkaReaderInterface defines the interface for Kafka reader operations
// KafkaReaderInterface defines the interface for Kafka reader operations.
// The dispatcher registers a handler per topic, starts/stops the consume
// loop, and drains asynchronous errors from Errors().
type KafkaReaderInterface interface {
	// RegisterHandler binds handler to messages arriving on topic.
	RegisterHandler(topic string, handler kafka.MessageHandler)
	// Start begins consuming; it is expected to honor ctx cancellation.
	Start(ctx context.Context) error
	// Stop halts the consume loop.
	Stop() error
	// Errors exposes asynchronous reader errors for monitoring.
	Errors() <-chan error
}

// DatabaseInterface defines the interface for database operations
// DatabaseInterface defines the interface for database operations.
// It is a GORM-style chainable subset: Model/Where return the interface
// for further chaining, while Create/Updates terminate a chain with a
// DatabaseResult whose Error() reports the outcome.
// NOTE(review): presumably backed by *gorm.DB — confirm the adapter.
type DatabaseInterface interface {
	Create(value interface{}) DatabaseResult
	Model(value interface{}) DatabaseInterface
	Where(query interface{}, args ...interface{}) DatabaseInterface
	Updates(values interface{}) DatabaseResult
	Error() error
}

// DatabaseResult represents the result of a database operation
// DatabaseResult represents the result of a terminal database operation
// (Create/Updates); Error returns nil on success.
type DatabaseResult interface {
	Error() error
}

// Dispatcher represents the job dispatcher service
// Dispatcher represents the job dispatcher service. It consumes job
// messages from Kafka, persists job state through the database layer,
// and routes each job to a registered, healthy cluster manager.
type Dispatcher struct {
	kafkaClient KafkaClientInterface
	kafkaReader KafkaReaderInterface
	db          DatabaseInterface
	config      *Config

	// Cluster managers registry, keyed by cluster ID; guarded by mu.
	clusterManagers map[string]ClusterManager
	mu              sync.RWMutex

	// Control channels: ctx/cancel govern the dispatcher's lifetime;
	// wg tracks background goroutines (error monitor and job workers).
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

// Config holds dispatcher configuration
// Config holds dispatcher configuration. Defaults are applied by
// NewDispatcher when a nil *Config is supplied.
type Config struct {
	// MaxConcurrentJobs caps concurrent job processing.
	// NOTE(review): not enforced anywhere in this file — confirm usage.
	MaxConcurrentJobs int           `json:"max_concurrent_jobs"`
	// JobTimeout bounds a single submission to a cluster manager.
	JobTimeout        time.Duration `json:"job_timeout"`
	// RetryAttempts and RetryDelay configure retry behavior.
	// NOTE(review): not referenced in this file — confirm they are
	// consumed by callers or cluster managers.
	RetryAttempts     int           `json:"retry_attempts"`
	RetryDelay        time.Duration `json:"retry_delay"`
}

// ClusterManager represents a cluster manager interface
// ClusterManager represents a cluster manager interface. One manager is
// registered per cluster; the dispatcher routes jobs only to managers
// that report healthy.
type ClusterManager interface {
	// SubmitJob submits job to the cluster; ctx carries the job timeout.
	SubmitJob(ctx context.Context, job *mysql.Job) error
	// GetClusterID returns the unique ID this manager is registered under.
	GetClusterID() string
	// IsHealthy reports whether the cluster can currently accept jobs.
	IsHealthy() bool
}

// NewDispatcher creates a new job dispatcher
// NewDispatcher creates a new job dispatcher.
//
// kafkaClient, kafkaReader, and db are required; a nil value for any of
// them is rejected. If config is nil, defaults are applied: 10 concurrent
// jobs, a 30-minute job timeout, 3 retry attempts with a 5-second delay.
// The returned dispatcher is not running until Start is called.
func NewDispatcher(kafkaClient KafkaClientInterface, kafkaReader KafkaReaderInterface, db DatabaseInterface, config *Config) (*Dispatcher, error) {
	// Static messages use errors.New rather than no-argument fmt.Errorf
	// (staticcheck flags the latter; there is nothing to format).
	if kafkaClient == nil {
		return nil, errors.New("kafka client cannot be nil")
	}
	if kafkaReader == nil {
		return nil, errors.New("kafka reader cannot be nil")
	}
	if db == nil {
		return nil, errors.New("database cannot be nil")
	}
	if config == nil {
		config = &Config{
			MaxConcurrentJobs: 10,
			JobTimeout:        30 * time.Minute,
			RetryAttempts:     3,
			RetryDelay:        5 * time.Second,
		}
	}

	// ctx/cancel bound the lifetime of all background goroutines; Stop
	// triggers cancel.
	ctx, cancel := context.WithCancel(context.Background())

	return &Dispatcher{
		kafkaClient:     kafkaClient,
		kafkaReader:     kafkaReader,
		db:              db,
		config:          config,
		clusterManagers: make(map[string]ClusterManager),
		ctx:             ctx,
		cancel:          cancel,
	}, nil
}

// Start starts the job dispatcher
// Start wires the message handler to the configured topic, launches the
// Kafka consume loop, and spawns the background error monitor. It
// returns an error only if the Kafka reader fails to start.
func (d *Dispatcher) Start() error {
	// Handler registration must precede Start so no message is missed.
	topic := d.kafkaClient.GetConfig().Topic
	d.kafkaReader.RegisterHandler(topic, d.handleJobMessage)

	if err := d.kafkaReader.Start(d.ctx); err != nil {
		return fmt.Errorf("failed to start kafka reader: %w", err)
	}

	// Track the monitor goroutine so Stop can wait for it.
	d.wg.Add(1)
	go d.monitorErrors()

	log.Println("Job dispatcher started successfully")
	return nil
}

// Stop stops the job dispatcher
// Stop shuts the dispatcher down in a safe order:
//
//  1. stop the Kafka reader, so no new handler invocations (and hence no
//     new wg.Add calls from handleJobMessage) can occur;
//  2. cancel the dispatcher context, unblocking monitorErrors and any
//     context-aware in-flight work;
//  3. wait for all tracked goroutines to finish.
//
// The original ordering waited on the WaitGroup while the reader was
// still delivering messages, which could race wg.Add against wg.Wait
// (documented WaitGroup misuse). The reader-stop error, if any, is
// still returned after shutdown completes.
func (d *Dispatcher) Stop() error {
	log.Println("Stopping job dispatcher...")

	stopErr := d.kafkaReader.Stop()

	d.cancel()
	d.wg.Wait()

	if stopErr != nil {
		return fmt.Errorf("failed to stop kafka reader: %w", stopErr)
	}

	log.Println("Job dispatcher stopped successfully")
	return nil
}

// RegisterClusterManager registers a cluster manager
// RegisterClusterManager adds manager to the registry under its cluster
// ID, replacing any previously registered manager for that ID.
func (d *Dispatcher) RegisterClusterManager(manager ClusterManager) {
	d.mu.Lock()
	defer d.mu.Unlock()

	clusterID := manager.GetClusterID()
	d.clusterManagers[clusterID] = manager
	log.Printf("Registered cluster manager for cluster: %s", clusterID)
}

// UnregisterClusterManager unregisters a cluster manager
// UnregisterClusterManager removes the manager registered for clusterID.
// Removing an unknown ID is a no-op.
func (d *Dispatcher) UnregisterClusterManager(clusterID string) {
	d.mu.Lock()
	delete(d.clusterManagers, clusterID)
	d.mu.Unlock()

	log.Printf("Unregistered cluster manager for cluster: %s", clusterID)
}

// handleJobMessage handles incoming job messages from Kafka
// handleJobMessage handles incoming job messages from Kafka.
//
// Deserialization and validation run synchronously; the dispatch work
// itself runs in a background goroutine so the consumer loop is not
// blocked. Errors are wrapped and returned to the reader instead of
// being both logged and returned (each error is handled at exactly one
// layer). Once shutdown has begun, new work is refused so the worker
// goroutine's wg.Add cannot race Stop's wg.Wait.
func (d *Dispatcher) handleJobMessage(ctx context.Context, msg *kafka.Message) error {
	jobMsg, err := kafka.DeserializeJobMessage(msg)
	if err != nil {
		return fmt.Errorf("deserializing job message: %w", err)
	}

	if err := kafka.ValidateJobMessage(jobMsg); err != nil {
		return fmt.Errorf("validating job message: %w", err)
	}

	// Refuse new work after Stop has canceled the dispatcher context.
	select {
	case <-d.ctx.Done():
		return d.ctx.Err()
	default:
	}

	// Process the job asynchronously; the WaitGroup lets Stop drain
	// in-flight jobs before returning.
	d.wg.Add(1)
	go func() {
		defer d.wg.Done()
		if err := d.processJob(ctx, jobMsg); err != nil {
			log.Printf("Failed to process job %s: %v", jobMsg.JobID, err)
		}
	}()

	return nil
}

// processJob processes a job message
// processJob persists the job record, selects a healthy cluster, records
// the job→cluster mapping, and submits the job to that cluster's manager.
//
// Every failure path after the job record exists marks the job "failed"
// so its database state reflects reality. (Previously a failure while
// creating the job-cluster mapping left the job stuck in "pending".)
func (d *Dispatcher) processJob(ctx context.Context, jobMsg *kafka.JobMessage) error {
	now := time.Now()

	// Create job record in database.
	job := &mysql.Job{
		JobID:      jobMsg.JobID,
		JobType:    jobMsg.JobType,
		Status:     "pending",
		Priority:   jobMsg.Priority,
		Parameters: d.serializeParameters(jobMsg.Parameters),
		CreatedAt:  now,
		UpdatedAt:  now,
	}

	if err := d.db.Create(job).Error(); err != nil {
		return fmt.Errorf("failed to create job record: %w", err)
	}

	// Honor the message's preferred cluster when it is healthy.
	clusterID, err := d.findAvailableCluster(jobMsg.ClusterID)
	if err != nil {
		d.updateJobStatus(job.JobID, "failed", fmt.Sprintf("No available cluster: %v", err))
		return fmt.Errorf("failed to find available cluster: %w", err)
	}

	// Create job-cluster mapping.
	jobCluster := &mysql.JobCluster{
		JobID:     jobMsg.JobID,
		ClusterID: clusterID,
		Status:    "pending",
		CreatedAt: now,
		UpdatedAt: now,
	}

	if err := d.db.Create(jobCluster).Error(); err != nil {
		// Bug fix: mark the job failed here too, instead of leaving it
		// "pending" forever on this path.
		d.updateJobStatus(job.JobID, "failed", fmt.Sprintf("Failed to record cluster mapping: %v", err))
		return fmt.Errorf("failed to create job-cluster mapping: %w", err)
	}

	// Submit job to cluster manager (bounded by config.JobTimeout).
	if err := d.submitJobToCluster(ctx, job, clusterID); err != nil {
		d.updateJobStatus(job.JobID, "failed", fmt.Sprintf("Failed to submit to cluster: %v", err))
		return fmt.Errorf("failed to submit job to cluster: %w", err)
	}

	d.updateJobStatus(job.JobID, "running", "Job submitted to cluster")

	log.Printf("Job %s dispatched to cluster %s", jobMsg.JobID, clusterID)
	return nil
}

// findAvailableCluster finds an available cluster for the job
// findAvailableCluster selects a cluster for a job. The preferred
// cluster wins when it is registered and healthy; otherwise the first
// healthy cluster found is returned (map iteration order is random, so
// selection among healthy clusters is effectively arbitrary). An error
// is returned when no healthy cluster exists.
func (d *Dispatcher) findAvailableCluster(preferredClusterID string) (string, error) {
	d.mu.RLock()
	defer d.mu.RUnlock()

	// Honor the caller's preference first.
	if preferredClusterID != "" {
		manager, registered := d.clusterManagers[preferredClusterID]
		if registered && manager.IsHealthy() {
			return preferredClusterID, nil
		}
	}

	// Otherwise fall back to any healthy cluster.
	for id, manager := range d.clusterManagers {
		if manager.IsHealthy() {
			return id, nil
		}
	}

	return "", fmt.Errorf("no available clusters")
}

// submitJobToCluster submits a job to a specific cluster
// submitJobToCluster submits job to the manager registered for
// clusterID, bounding the submission by the configured job timeout. It
// fails if no manager is registered for that cluster.
func (d *Dispatcher) submitJobToCluster(ctx context.Context, job *mysql.Job, clusterID string) error {
	// Hold the read lock only for the map lookup, not the submission.
	d.mu.RLock()
	manager, ok := d.clusterManagers[clusterID]
	d.mu.RUnlock()

	if !ok {
		return fmt.Errorf("cluster manager not found for cluster: %s", clusterID)
	}

	// Derive a timeout-bounded context for the submission itself.
	submitCtx, cancel := context.WithTimeout(ctx, d.config.JobTimeout)
	defer cancel()

	return manager.SubmitJob(submitCtx, job)
}

// updateJobStatus updates the job status in the database
// updateJobStatus updates the job row's status and appends a JobStatus
// history record. Failures are logged, not returned: status updates are
// best-effort and callers cannot meaningfully recover from them. If the
// row update fails, the history record is skipped.
func (d *Dispatcher) updateJobStatus(jobID, status, message string) {
	changes := map[string]interface{}{
		"status":     status,
		"updated_at": time.Now(),
	}

	result := d.db.Model(&mysql.Job{}).Where("job_id = ?", jobID).Updates(changes)
	if err := result.Error(); err != nil {
		log.Printf("Failed to update job status: %v", err)
		return
	}

	history := &mysql.JobStatus{
		JobID:     jobID,
		Status:    status,
		Message:   message,
		CreatedAt: time.Now(),
	}
	if err := d.db.Create(history).Error(); err != nil {
		log.Printf("Failed to create job status record: %v", err)
	}
}

// serializeParameters serializes job parameters to JSON
// serializeParameters renders job parameters as a JSON object string.
// A nil map — or a map that fails to marshal — yields "{}" so the
// Parameters column always holds valid JSON.
func (d *Dispatcher) serializeParameters(params map[string]interface{}) string {
	const emptyObject = "{}"

	if params == nil {
		return emptyObject
	}

	encoded, err := json.Marshal(params)
	if err != nil {
		log.Printf("Failed to serialize parameters: %v", err)
		return emptyObject
	}

	return string(encoded)
}

// monitorErrors monitors Kafka reader errors
// monitorErrors drains the Kafka reader's error channel, logging each
// error, until the dispatcher context is canceled or the channel is
// closed. Must be run as a goroutine with d.wg already incremented.
//
// The comma-ok receive is the fix: without it, a closed Errors channel
// yields nil forever and the loop spins, logging "<nil>" endlessly.
func (d *Dispatcher) monitorErrors() {
	defer d.wg.Done()

	for {
		select {
		case err, ok := <-d.kafkaReader.Errors():
			if !ok {
				// Channel closed by the reader: nothing more to report.
				return
			}
			log.Printf("Kafka reader error: %v", err)
		case <-d.ctx.Done():
			return
		}
	}
}

// GetStats returns dispatcher statistics
// GetStats returns a snapshot of dispatcher statistics: the number of
// registered clusters, a per-cluster health map, and a placeholder for
// the active goroutine count (sync.WaitGroup does not expose its
// counter).
func (d *Dispatcher) GetStats() map[string]interface{} {
	d.mu.RLock()
	defer d.mu.RUnlock()

	health := make(map[string]bool, len(d.clusterManagers))
	for id, manager := range d.clusterManagers {
		health[id] = manager.IsHealthy()
	}

	return map[string]interface{}{
		"registered_clusters": len(d.clusterManagers),
		"active_goroutines":   "N/A", // WaitGroup doesn't expose count directly
		"cluster_health":      health,
	}
}
