// main.go
package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"net/http"
	"os"
	"os/exec"
	"os/signal"
	"path/filepath"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/fsnotify/fsnotify"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"gopkg.in/yaml.v3"
)

// Config is the top-level configuration decoded from the YAML file
// given by the -config flag.
type Config struct {
	ListenAddr     string          `yaml:"listen_addr"`     // HTTP listen address; defaults to ":8080" (see loadConfig)
	MaxConcurrency int             `yaml:"max_concurrency"` // defaulted to 200; NOTE(review): not read anywhere else in this file — confirm intended use
	Processes      []ProcessConfig `yaml:"processes"`       // processes to supervise
	Logging        LogConfig       `yaml:"logging"`
	Metrics        MetricsConfig   `yaml:"metrics"`
}

// ProcessConfig describes one supervised process: how to check its
// health and how to (re)start it.
//
// NOTE(review): yaml.v3 decodes time.Duration fields as plain
// integers (nanoseconds); human-readable strings like "30s" will not
// decode — confirm config files use the expected format.
type ProcessConfig struct {
	Name          string        `yaml:"name"`           // unique label used in logs and metric labels
	StartCmd      string        `yaml:"start_cmd"`      // shell command run via /bin/sh -c to (re)start the process
	CheckCmd      string        `yaml:"check_cmd"`      // shell command; exit status 0 means healthy
	MaxRetries    int           `yaml:"max_retries"`    // restart attempts before the process is marked Failed
	CheckInterval time.Duration `yaml:"check_interval"` // delay between health checks
	RetryInterval time.Duration `yaml:"retry_interval"` // base unit for the quadratic restart backoff
	Critical      bool          `yaml:"critical"`       // NOTE(review): not referenced by visible code — confirm intended use
}

// LogConfig controls the JSON-lines log file.
//
// NOTE(review): Level, MaxSizeMB, and MaxBackups are not referenced
// by the visible code — log filtering/rotation appears unimplemented.
type LogConfig struct {
	Level      string `yaml:"level"`
	Path       string `yaml:"path"` // log file path; parent directory is created at startup
	MaxSizeMB  int    `yaml:"max_size_mb"`
	MaxBackups int    `yaml:"max_backups"`
}

// MetricsConfig controls the Prometheus metrics endpoint. When
// Enabled is false, metricsServer returns without starting the HTTP
// server at all (the /health endpoint is not served either).
type MetricsConfig struct {
	Enabled bool   `yaml:"enabled"`
	Path    string `yaml:"path"` // URL path the metrics handler is mounted on
}

// SystemState holds the shared runtime state of the supervisor.
type SystemState struct {
	config     atomic.Value  // always holds a Config value; swapped atomically on reload
	states     sync.Map      // process name (string) -> ProcessState
	logQueue   chan LogEntry // buffered; drained by logWorker, closed by cleanup
	workerPool chan struct{} // semaphore bounding concurrent monitor goroutines
	shutdown   chan struct{} // closed by handleSignals to request shutdown
	registry   *prometheus.Registry
	logFile    *os.File // opened by setupLogging; written only by logWorker
}

// ProcessState is the last observed state of one supervised process.
type ProcessState struct {
	RetryCount  int    // consecutive failures since the last successful check
	LastError   string // message from the most recent failure; "" when healthy
	Status      ProcessStatus
	LastHealthy time.Time // last time a check or restart succeeded
	LastChecked time.Time // last time any state was recorded
}

// ProcessStatus enumerates the health states of a supervised process.
// The numeric value is exported directly as the process_healer_status
// gauge, so the ordering here is part of the metrics contract.
type ProcessStatus int

const (
	StatusHealthy   ProcessStatus = iota // 0: last check passed
	StatusUnhealthy                      // 1: last check or restart failed
	StatusStarting                       // 2: restart in progress
	StatusFailed                         // 3: gave up after max_retries
)

// LogEntry is one JSON-lines record written to the log file by
// logWorker.
type LogEntry struct {
	Timestamp  time.Time `json:"timestamp"`
	Process    string    `json:"process"`    // empty for system-level entries (logInfo/logError/logWarning)
	EventType  string    `json:"event_type"` // "SYSTEM", "HEALTHY", "UNHEALTHY", "STARTING", or "FAILED"
	Status     string    `json:"status"`
	RetryCount int       `json:"retry_count,omitempty"`
	Message    string    `json:"message,omitempty"`
}

// Prometheus metric vectors, created and registered by initMetrics.
var (
	processStatusGauge *prometheus.GaugeVec
	restartCounter     *prometheus.CounterVec
	checkDuration      *prometheus.HistogramVec
)

// Command-line flags, bound and parsed in init.
var (
	configPath string
	showHelp   bool
)

// init binds and parses the command-line flags.
//
// NOTE(review): calling flag.Parse in init runs before main (and
// before any test framework flags are registered), which makes the
// package awkward to test; consider moving the Parse call into main.
func init() {
	flag.StringVar(&configPath, "config", "config.yaml", "Path to configuration file")
	flag.BoolVar(&showHelp, "help", false, "Show help information")
	flag.Parse()
}

// main wires the components together: flag handling, state
// initialization, configuration load, background services, and the
// blocking monitor loop.
func main() {
	if showHelp {
		fmt.Printf("Usage: %s [options]\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(0)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Allocate channels, worker-pool semaphore, and metrics registry.
	sysState := initializeSystem()
	// NOTE(review): this deferred cleanup is skipped on the os.Exit
	// paths inside logFatal and handleSignals, so the log file may
	// never be explicitly closed on those exits.
	defer sysState.cleanup()

	// Load the YAML config and open the log file it names.
	if err := loadConfiguration(sysState); err != nil {
		sysState.logFatal("Initialization failed: %v", err)
	}

	// Launch signal handling, the log writer, and the metrics server.
	startServices(ctx, sysState)

	// Blocks until shutdown is requested.
	sysState.startMonitor(ctx)
}

// initializeSystem builds a SystemState with its channels, the
// worker-pool semaphore, and a fresh Prometheus registry.
func initializeSystem() *SystemState {
	state := &SystemState{
		logQueue:   make(chan LogEntry, 10000),
		workerPool: make(chan struct{}, 1000),
		shutdown:   make(chan struct{}),
		registry:   prometheus.NewRegistry(),
	}
	return state
}

// loadConfiguration reads the config file and then sets up the log
// file it describes.
func loadConfiguration(sysState *SystemState) error {
	err := sysState.loadConfig(configPath)
	if err != nil {
		return fmt.Errorf("config load failed: %w", err)
	}

	cfg := sysState.config.Load().(Config)
	if err = setupLogging(sysState, cfg); err != nil {
		return fmt.Errorf("logging setup failed: %w", err)
	}
	return nil
}

// setupLogging ensures the log directory exists and opens the log
// file in append mode, storing the handle on sysState for logWorker.
func setupLogging(sysState *SystemState, cfg Config) error {
	if err := os.MkdirAll(filepath.Dir(cfg.Logging.Path), 0o755); err != nil {
		return err
	}

	f, err := os.OpenFile(cfg.Logging.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	sysState.logFile = f
	return nil
}

// startServices registers the Prometheus collectors and launches the
// long-lived background goroutines (log writer, metrics endpoint,
// signal handler). Metrics must be registered before the server that
// exposes them starts.
func startServices(_ context.Context, sysState *SystemState) {
	initMetrics(sysState.registry)

	go sysState.logWorker()
	go sysState.metricsServer()
	go sysState.handleSignals()
}

// cleanup closes the log queue (which ends logWorker's drain loop)
// and then the log file.
//
// NOTE(review): the file is closed immediately after the queue, so
// logWorker may still be draining buffered entries into a closed
// file, and any goroutine that logs after this point panics sending
// on the closed channel. Consider signalling and waiting for
// logWorker to finish before closing the file.
func (s *SystemState) cleanup() {
	close(s.logQueue)
	if s.logFile != nil {
		s.logFile.Close()
	}
}

// initMetrics creates the status gauge, restart counter, and
// check-duration histogram and registers them with the given
// registry, assigning them to the package-level metric vars.
func initMetrics(registry *prometheus.Registry) {
	factory := promauto.With(registry)

	processStatusGauge = factory.NewGaugeVec(prometheus.GaugeOpts{
		Name: "process_healer_status",
		Help: "Current process status (0=Healthy, 1=Unhealthy, 2=Starting, 3=Failed)",
	}, []string{"process"})

	restartCounter = factory.NewCounterVec(prometheus.CounterOpts{
		Name: "process_healer_restarts_total",
		Help: "Total number of process restarts",
	}, []string{"process", "status"})

	checkDuration = factory.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "process_healer_check_duration_seconds",
		Help:    "Duration of health checks",
		Buckets: []float64{0.1, 0.5, 1, 2, 5},
	}, []string{"process"})
}

// loadConfig parses the YAML file at path, fills in defaults for
// missing values, and atomically publishes the result so concurrent
// readers always see a complete Config.
func (s *SystemState) loadConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("error opening config file: %w", err)
	}
	defer f.Close()

	var cfg Config
	dec := yaml.NewDecoder(f)
	dec.KnownFields(true) // reject unknown keys so config typos surface early
	if err := dec.Decode(&cfg); err != nil {
		return fmt.Errorf("error decoding config: %w", err)
	}

	// Apply defaults for unset values.
	if cfg.MaxConcurrency <= 0 {
		cfg.MaxConcurrency = 200
	}
	if cfg.ListenAddr == "" {
		cfg.ListenAddr = ":8080"
	}

	s.config.Store(cfg)
	return nil
}

// startMonitor sets up the config-file watcher and runs the dispatch
// loop until shutdown is requested.
func (s *SystemState) startMonitor(ctx context.Context) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		s.logFatal("Failed to create watcher: %v", err)
	}
	defer watcher.Close()

	go s.watchConfigChanges(watcher)

	// NOTE(review): dispatchWorkers waits for its goroutines, and each
	// monitorProcess goroutine loops until ctx/shutdown — so the first
	// iteration of this loop blocks until shutdown, meaning the loop
	// effectively dispatches once and reloaded configs are never
	// re-dispatched. Confirm whether periodic re-dispatch was intended.
	for {
		select {
		case <-s.shutdown:
			s.logInfo("Shutting down monitor")
			return
		default:
			cfg := s.config.Load().(Config)
			s.dispatchWorkers(ctx, cfg)
			time.Sleep(1 * time.Second)
		}
	}
}

// dispatchWorkers launches one monitor goroutine per configured
// process, bounded by the workerPool semaphore, and waits for all of
// them to return. Processes that cannot acquire a pool slot are
// skipped with a warning rather than queued.
//
// NOTE(review): each monitorProcess call loops until ctx/shutdown, so
// this function blocks for the life of the program on its first call
// (see the note on startMonitor).
func (s *SystemState) dispatchWorkers(ctx context.Context, cfg Config) {
	var wg sync.WaitGroup
	for _, p := range cfg.Processes {
		wg.Add(1)
		// p is passed as an argument so each goroutine gets its own copy
		// (required for correct capture before Go 1.22 loop semantics).
		go func(p ProcessConfig) {
			defer wg.Done()
			select {
			case s.workerPool <- struct{}{}:
				defer func() { <-s.workerPool }()
				s.logInfo("Starting check for %s", p.Name)
				s.monitorProcess(ctx, p)
			default:
				s.logWarning("Worker pool full, skipping check for %s", p.Name)
			}
		}(p)
	}
	wg.Wait()
}

// monitorProcess periodically health-checks one process and triggers
// recovery when a check fails. It returns when ctx is cancelled or
// shutdown is requested.
func (s *SystemState) monitorProcess(ctx context.Context, cfg ProcessConfig) {
	// time.NewTicker panics on a non-positive duration, which an
	// unset check_interval in the YAML would produce; fall back to a
	// conservative default instead of crashing the goroutine.
	interval := cfg.CheckInterval
	if interval <= 0 {
		interval = 30 * time.Second
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if !s.checkProcessHealth(cfg) {
				s.handleUnhealthyProcess(cfg)
			}
		case <-ctx.Done():
			return
		case <-s.shutdown:
			return
		}
	}
}

// checkProcessHealth runs the configured check command (via
// /bin/sh -c) under a 5-second timeout, records the resulting state,
// and reports whether the check passed. The check duration is always
// observed in the histogram, pass or fail.
func (s *SystemState) checkProcessHealth(cfg ProcessConfig) bool {
	start := time.Now()
	defer func() {
		checkDuration.WithLabelValues(cfg.Name).Observe(time.Since(start).Seconds())
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	err := exec.CommandContext(ctx, "/bin/sh", "-c", cfg.CheckCmd).Run()
	if err != nil {
		s.recordProcessState(cfg, StatusUnhealthy, err)
		return false
	}
	s.recordProcessState(cfg, StatusHealthy, nil)
	return true
}

// handleUnhealthyProcess applies quadratic backoff and attempts a
// restart, recording the outcome. Once RetryCount reaches MaxRetries
// the process is marked Failed and no further restart is attempted.
func (s *SystemState) handleUnhealthyProcess(cfg ProcessConfig) {
	state := s.loadProcessState(cfg.Name)
	if state.RetryCount >= cfg.MaxRetries {
		s.recordProcessState(cfg, StatusFailed, errors.New("max retries exceeded"))
		return
	}

	// Quadratic backoff: retry_interval * RetryCount^2 (zero on the
	// first attempt). Wait in a select instead of time.Sleep so a
	// shutdown request is not blocked behind a long backoff.
	retryDelay := time.Duration(state.RetryCount*state.RetryCount) * cfg.RetryInterval
	if retryDelay > 0 {
		timer := time.NewTimer(retryDelay)
		defer timer.Stop()
		select {
		case <-timer.C:
		case <-s.shutdown:
			return
		}
	}

	if err := s.restartProcess(cfg); err != nil {
		s.recordProcessState(cfg, StatusUnhealthy, err)
	} else {
		s.recordProcessState(cfg, StatusHealthy, nil)
	}
}

// restartProcess runs the configured start command (via /bin/sh -c)
// under a 30-second timeout and returns any failure wrapped.
//
// NOTE(review): cmd.Run waits for the command to exit and the context
// kills it after 30s, so a start_cmd that runs its service in the
// foreground (instead of daemonizing) will be killed — confirm start
// commands detach.
func (s *SystemState) restartProcess(cfg ProcessConfig) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	s.logInfo("Restarting %s: '%s'", cfg.Name, cfg.StartCmd)
	err := exec.CommandContext(ctx, "/bin/sh", "-c", cfg.StartCmd).Run()
	if err != nil {
		return fmt.Errorf("restart failed: %w", err)
	}
	return nil
}

// loadProcessState returns the stored state for name, or a zero
// ProcessState if the process has never been recorded.
func (s *SystemState) loadProcessState(name string) ProcessState {
	val, ok := s.states.Load(name)
	if !ok {
		return ProcessState{}
	}
	return val.(ProcessState)
}

// recordProcessState persists the outcome of a check or restart,
// updates the Prometheus gauge/counter, and emits a log entry.
//
// Fix: the previous implementation built a fresh ProcessState each
// call, so RetryCount was only ever 0 or 1 and LastHealthy was wiped
// on every failure — handleUnhealthyProcess's MaxRetries check could
// never trip for MaxRetries > 1. The counter now carries over from
// the previous state, accumulating across consecutive failures and
// resetting to zero on success.
func (s *SystemState) recordProcessState(cfg ProcessConfig, status ProcessStatus, err error) {
	now := time.Now()
	prev := s.loadProcessState(cfg.Name)

	state := ProcessState{
		Status:      status,
		RetryCount:  prev.RetryCount,
		LastHealthy: prev.LastHealthy, // preserve the last success time across failures
		LastChecked: now,
	}

	if err != nil {
		state.LastError = err.Error()
		if status == StatusUnhealthy {
			state.RetryCount++
		}
	} else {
		state.RetryCount = 0
		state.LastHealthy = now
	}

	s.states.Store(cfg.Name, state)
	processStatusGauge.WithLabelValues(cfg.Name).Set(float64(status))

	eventType := "HEALTHY"
	switch status {
	case StatusUnhealthy:
		eventType = "UNHEALTHY"
		restartCounter.WithLabelValues(cfg.Name, "failed").Inc()
	case StatusFailed:
		eventType = "FAILED"
	case StatusStarting:
		eventType = "STARTING"
	}

	s.logQueue <- LogEntry{
		Timestamp:  now,
		Process:    cfg.Name,
		EventType:  eventType,
		Status:     status.String(),
		RetryCount: state.RetryCount,
		Message:    state.LastError,
	}
}

// logWorker drains the log queue and writes each entry as one JSON
// line to the log file. It exits when the queue is closed by cleanup.
// Removed the commented-out alternatives and the silently ignored
// json.Marshal error.
func (s *SystemState) logWorker() {
	for entry := range s.logQueue {
		logData, err := json.Marshal(entry)
		if err != nil {
			// LogEntry contains only marshalable field types, so this
			// should be unreachable; report rather than drop silently.
			fmt.Fprintf(os.Stderr, "Error marshaling log entry: %v\n", err)
			continue
		}
		if _, err := s.logFile.Write(append(logData, '\n')); err != nil {
			fmt.Fprintf(os.Stderr, "Error writing to log file: %v\n", err)
		}
	}
}

// handleSignals translates OS signals into actions: SIGINT/SIGTERM
// request shutdown and exit the process, SIGHUP reloads the config.
func (s *SystemState) handleSignals() {
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

	for sig := range sigChan {
		switch sig {
		case syscall.SIGINT, syscall.SIGTERM:
			s.logInfo("Received shutdown signal")
			close(s.shutdown)
			time.Sleep(1 * time.Second) // best-effort grace period for in-flight log writes
			// NOTE(review): os.Exit skips main's deferred cleanup, so
			// the log file is never explicitly flushed/closed on this
			// path; the entry queued above may be lost if logWorker has
			// not drained it within the one-second grace period.
			os.Exit(0)
		case syscall.SIGHUP:
			s.logInfo("Reloading configuration")
			if err := s.loadConfig(configPath); err != nil {
				s.logError("Config reload failed: %v", err)
			}
		}
	}
}

// metricsServer serves the Prometheus metrics endpoint and a trivial
// /health probe. It returns immediately when metrics are disabled.
//
// Uses a dedicated ServeMux instead of http.DefaultServeMux (avoids
// interference from anything else registering on the default mux) and
// an http.Server with read/write timeouts so slow clients cannot hold
// connections open indefinitely.
func (s *SystemState) metricsServer() {
	cfg := s.config.Load().(Config)
	if !cfg.Metrics.Enabled {
		return
	}

	mux := http.NewServeMux()
	mux.Handle(cfg.Metrics.Path, promhttp.HandlerFor(
		s.registry,
		promhttp.HandlerOpts{
			EnableOpenMetrics: true,
		},
	))
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	server := &http.Server{
		Addr:         cfg.ListenAddr,
		Handler:      mux,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}

	s.logInfo("Starting metrics server on %s", cfg.ListenAddr)
	if err := server.ListenAndServe(); err != nil {
		s.logError("Metrics server failed: %v", err)
	}
}

// logInfo queues an informational system-level log entry.
func (s *SystemState) logInfo(format string, args ...interface{}) {
	entry := LogEntry{
		Timestamp: time.Now(),
		EventType: "SYSTEM",
		Status:    "INFO",
		Message:   fmt.Sprintf(format, args...),
	}
	s.logQueue <- entry
}

// logError queues an error-level system log entry.
func (s *SystemState) logError(format string, args ...interface{}) {
	entry := LogEntry{
		Timestamp: time.Now(),
		EventType: "SYSTEM",
		Status:    "ERROR",
		Message:   fmt.Sprintf(format, args...),
	}
	s.logQueue <- entry
}

// logFatal queues an error entry and terminates the process with
// exit status 1.
//
// NOTE(review): os.Exit runs immediately after the entry is queued,
// so logWorker almost certainly never writes it, and main's deferred
// cleanup is skipped — the fatal message is likely lost.
func (s *SystemState) logFatal(format string, args ...interface{}) {
	s.logError(format, args...)
	os.Exit(1)
}

// logWarning queues a warning-level system log entry.
func (s *SystemState) logWarning(format string, args ...interface{}) {
	entry := LogEntry{
		Timestamp: time.Now(),
		EventType: "SYSTEM",
		Status:    "WARNING",
		Message:   fmt.Sprintf(format, args...),
	}
	s.logQueue <- entry
}

// watchConfigChanges reloads the configuration whenever the config
// file changes on disk.
//
// Fix: editors and config-management tools commonly replace the file
// atomically (write temp + rename) instead of writing in place, which
// emits Remove/Rename/Create events rather than Write. The previous
// implementation only reacted to Write and also silently lost the
// watch once the original inode disappeared. Create is now treated as
// a change, and the watch is re-established after Remove/Rename.
func (s *SystemState) watchConfigChanges(watcher *fsnotify.Watcher) {
	if err := watcher.Add(configPath); err != nil {
		s.logError("Failed to watch config: %v", err)
		return
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			if event.Op&(fsnotify.Remove|fsnotify.Rename) != 0 {
				// The watched inode is gone; re-add the path so future
				// changes are still observed (best effort — the new
				// file may not exist yet).
				if err := watcher.Add(configPath); err != nil {
					s.logError("Failed to re-watch config: %v", err)
				}
			}
			if event.Op&(fsnotify.Write|fsnotify.Create) != 0 {
				if err := s.loadConfig(configPath); err != nil {
					s.logError("Config reload error: %v", err)
				} else {
					s.logInfo("Config reloaded successfully")
				}
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			s.logError("Watcher error: %v", err)
		}
	}
}

// String returns the lowercase name of the status for log output.
// Out-of-range values yield "unknown" instead of panicking with an
// index-out-of-range error, as the previous array indexing did for
// any value outside [0, 3].
func (s ProcessStatus) String() string {
	names := [...]string{"healthy", "unhealthy", "starting", "failed"}
	if s < 0 || int(s) >= len(names) {
		return "unknown"
	}
	return names[s]
}
