package main

import (
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/signal"
	"sync"
	"sync/atomic"
	"syscall"
	"time"
)

// Config holds the command-line configuration for the load tester.
type Config struct {
	URL                string        // target URL to request (GET)
	InitialConcurrency int           // number of worker goroutines to start with
	MaxConcurrency     int           // upper bound the adaptive controller may grow to
	TargetLatency      time.Duration // desired average response time the controller aims for
	LogFile            string        // path of the file periodic stats are logged to
}

// Stats accumulates load-test results. It keeps two parallel sets of
// counters: cumulative totals since the last Reset, and "recent" counters
// that are drained (read-and-reset) by GetRecentStats each reporting tick.
// All fields are guarded by mu; do not copy a Stats value.
type Stats struct {
	mu              sync.RWMutex
	requests        int64         // total requests issued since Reset
	success         int64         // total requests with a 2xx response
	failures        int64         // total errored or non-2xx requests
	totalTime       time.Duration // sum of all request durations
	startTime       time.Time     // set by Reset; not read elsewhere in this file
	recentRequests  int64         // requests since the last GetRecentStats call
	recentSuccess   int64
	recentFailures  int64
	recentTotalTime time.Duration
}

// Add records one completed request: its outcome (success = 2xx) and how
// long it took. Both the cumulative and the "recent" counters are updated
// under the write lock, so Add is safe for concurrent use.
func (s *Stats) Add(success bool, duration time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.requests++
	s.recentRequests++
	s.totalTime += duration
	s.recentTotalTime += duration

	if success {
		s.success++
		s.recentSuccess++
		return
	}
	s.failures++
	s.recentFailures++
}

// GetStats returns the cumulative request, success, and failure counts
// since the last Reset, plus the average request duration (zero when no
// requests have been recorded). It only reads, so a read lock suffices.
func (s *Stats) GetStats() (requests, success, failures int64, avgTime time.Duration) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	var avg time.Duration
	if s.requests > 0 {
		avg = s.totalTime / time.Duration(s.requests)
	}
	return s.requests, s.success, s.failures, avg
}

// GetRecentStats returns the counts and average duration accumulated since
// the previous call, then zeroes the "recent" counters. Because it mutates
// state it must take the full write lock, unlike GetStats.
func (s *Stats) GetRecentStats() (requests, success, failures int64, avgTime time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()

	r, ok, bad := s.recentRequests, s.recentSuccess, s.recentFailures
	var avg time.Duration
	if r > 0 {
		avg = s.recentTotalTime / time.Duration(r)
	}

	// Drain the window so the next call reports a fresh interval.
	s.recentRequests, s.recentSuccess, s.recentFailures = 0, 0, 0
	s.recentTotalTime = 0

	return r, ok, bad, avg
}

// Reset zeroes every counter (cumulative and recent) and stamps startTime
// with the current time, marking the beginning of a new measurement run.
func (s *Stats) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.requests, s.success, s.failures = 0, 0, 0
	s.recentRequests, s.recentSuccess, s.recentFailures = 0, 0, 0
	s.totalTime, s.recentTotalTime = 0, 0
	s.startTime = time.Now()
}

// LoadTester drives the load test: it owns the shared HTTP client, the
// result statistics, and the current target concurrency level.
type LoadTester struct {
	config      Config
	stats       *Stats
	client      *http.Client // shared client (30s timeout) reused by all workers
	concurrency int64        // current target worker count; accessed via sync/atomic
}

// NewLoadTester builds a LoadTester from config, seeding the adaptive
// concurrency level with config.InitialConcurrency and creating a shared
// HTTP client with a 30-second per-request timeout.
func NewLoadTester(config Config) *LoadTester {
	lt := &LoadTester{
		config:      config,
		stats:       &Stats{},
		concurrency: int64(config.InitialConcurrency),
	}
	lt.client = &http.Client{Timeout: 30 * time.Second}
	return lt
}

// makeRequest issues a single GET against the configured URL and records
// the outcome in lt.stats. A 2xx status counts as success; request-build
// errors, transport errors, and any other status count as failure. The
// recorded duration always covers the full attempt, including failures.
func (lt *LoadTester) makeRequest(ctx context.Context) {
	start := time.Now()

	req, err := http.NewRequestWithContext(ctx, "GET", lt.config.URL, nil)
	if err != nil {
		lt.stats.Add(false, time.Since(start))
		return
	}
	req.Header.Set("User-Agent", "Go-load-test")

	resp, err := lt.client.Do(req)
	if err != nil {
		lt.stats.Add(false, time.Since(start))
		return
	}
	// Drain the body before closing so the transport can reuse the
	// underlying connection. Closing an unread body forces a fresh
	// TCP/TLS handshake per request, which would badly skew the test.
	_, _ = io.Copy(io.Discard, resp.Body)
	resp.Body.Close()

	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
		lt.stats.Add(true, time.Since(start))
	} else {
		lt.stats.Add(false, time.Since(start))
	}
}

// worker loops calling makeRequest until ctx is cancelled, then signals wg.
// The select's default branch means the loop never blocks; ctx is re-checked
// between consecutive requests.
//
// NOTE(review): this method is not referenced anywhere in this file — Run
// spawns its own inline worker goroutines (with per-worker stop channels)
// instead. Dead code; candidate for removal or for reuse inside Run.
func (lt *LoadTester) worker(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()

	for {
		select {
		case <-ctx.Done():
			return
		default:
			lt.makeRequest(ctx)
		}
	}
}

// adjustConcurrency nudges the target worker count toward the point where
// the observed average latency matches config.TargetLatency: shrink ~1%
// when latency is above target, grow ~10% when latency is below half the
// target. The result is clamped to [1, MaxConcurrency].
func (lt *LoadTester) adjustConcurrency(avgLatency time.Duration) {
	current := atomic.LoadInt64(&lt.concurrency)

	if avgLatency > lt.config.TargetLatency && current > 1 {
		// Latency too high: shed roughly 1% of the workers.
		newConcurrency := current * 99 / 100
		// Guarantee we actually step down (integer division can round back
		// to current for small values), but never below 1.
		if newConcurrency >= current {
			newConcurrency = current - 1
		}
		if newConcurrency < 1 {
			newConcurrency = 1
		}
		atomic.StoreInt64(&lt.concurrency, newConcurrency)
		log.Printf("Reducing concurrency to %d due to high latency: %v", newConcurrency, avgLatency)
	} else if avgLatency < lt.config.TargetLatency/2 && current < int64(lt.config.MaxConcurrency) {
		// Latency comfortably low: add roughly 10% more workers.
		newConcurrency := current * 11 / 10
		// BUG FIX: for current < 10, current*11/10 == current (integer
		// division), so the pool could never grow past 9 workers. Always
		// make at least one step of progress.
		if newConcurrency <= current {
			newConcurrency = current + 1
		}
		if newConcurrency > int64(lt.config.MaxConcurrency) {
			newConcurrency = int64(lt.config.MaxConcurrency)
		}
		atomic.StoreInt64(&lt.concurrency, newConcurrency)
		log.Printf("Increasing concurrency to %d due to low latency: %v", newConcurrency, avgLatency)
	}
}

// Run starts the load test and blocks until ctx is cancelled and every
// worker goroutine has exited. It spawns InitialConcurrency workers, each
// with its own stop channel, then a background loop that every 5 seconds
// logs statistics, lets adjustConcurrency update the target level, and
// grows or shrinks the worker pool to match it.
func (lt *LoadTester) Run(ctx context.Context) {
	lt.stats.Reset()

	var wg sync.WaitGroup
	// workerChannels maps worker index -> its stop channel. The keys stay
	// contiguous (0..n-1): workers are always added at index len(map) and
	// removed from the highest index downward. Guarded by mutex, which is
	// shared with the ticker goroutine below.
	workerChannels := make(map[int]chan struct{})
	mutex := sync.Mutex{}

	// Spawn the initial set of worker goroutines.
	for i := 0; i < lt.config.InitialConcurrency; i++ {
		wg.Add(1)
		stopChan := make(chan struct{})
		mutex.Lock()
		workerChannels[i] = stopChan
		mutex.Unlock()

		// Each worker loops until either the whole run is cancelled (ctx)
		// or this specific worker is told to stop (its stop channel).
		go func(stop chan struct{}) {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case <-stop:
					return
				default:
					lt.makeRequest(ctx)
				}
			}
		}(stopChan)
	}

	// Reporting/controller goroutine: logs stats and resizes the pool.
	// It exits via ctx.Done, so it is intentionally not tracked by wg.
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()

		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				requests, success, failures, avgTime := lt.stats.GetStats()
				// GetRecentStats also resets the recent window counters.
				recentRequests, recentSuccess, recentFailures, recentAvgTime := lt.stats.GetRecentStats()

				log.Printf("Total - Requests: %d, Success: %d, Failures: %d, Avg Time: %v",
					requests, success, failures, avgTime)
				log.Printf("Recent - Requests: %d, Success: %d, Failures: %d, Avg Time: %v",
					recentRequests, recentSuccess, recentFailures, recentAvgTime)

				// Only steer on intervals that actually saw traffic.
				if recentRequests > 0 {
					lt.adjustConcurrency(recentAvgTime)
				}

				// Reconcile the actual worker count with the target level.
				currentConcurrency := int(atomic.LoadInt64(&lt.concurrency))
				mutex.Lock()

				// Pool too small: add workers at the next free indices.
				if currentConcurrency > len(workerChannels) {
					for i := len(workerChannels); i < currentConcurrency; i++ {
						wg.Add(1)
						stopChan := make(chan struct{})
						workerChannels[i] = stopChan

						go func(stop chan struct{}) {
							defer wg.Done()
							for {
								select {
								case <-ctx.Done():
									return
								case <-stop:
									return
								default:
									lt.makeRequest(ctx)
								}
							}
						}(stopChan)
					}
				} else if currentConcurrency < len(workerChannels) {
					// Pool too large: stop workers from the top index down,
					// keeping the remaining keys contiguous.
					for i := len(workerChannels) - 1; i >= currentConcurrency; i-- {
						if ch, exists := workerChannels[i]; exists {
							close(ch)
							delete(workerChannels, i)
						}
					}
				}

				mutex.Unlock()
			}
		}
	}()

	// Block until every worker goroutine has finished.
	wg.Wait()
}

// main parses flags, starts the load test in the background, and on
// SIGINT/SIGTERM cancels it, waits for the workers to drain, and prints
// the final statistics to stdout (periodic stats go to the log file).
func main() {
	var (
		url                = flag.String("url", "", "Target URL to test")
		initialConcurrency = flag.Int("initial-concurrency", 10, "Initial concurrency level")
		maxConcurrency     = flag.Int("max-concurrency", 1000, "Maximum concurrency level")
		targetLatency      = flag.Duration("target-latency", 100*time.Millisecond, "Target latency")
		logFile            = flag.String("log", "loadtest.log", "Log file path")
	)

	flag.Parse()

	if *url == "" {
		fmt.Println("Usage: loadtest -url=<URL> [options]")
		flag.PrintDefaults()
		os.Exit(1)
	}

	// Send periodic log output to the configured file; stdout stays clean
	// for the startup banner and final results.
	file, err := os.OpenFile(*logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		log.Fatal("Failed to open log file:", err)
	}
	defer file.Close()

	log.SetOutput(file)

	config := Config{
		URL:                *url,
		InitialConcurrency: *initialConcurrency,
		MaxConcurrency:     *maxConcurrency,
		TargetLatency:      *targetLatency,
		LogFile:            *logFile,
	}

	tester := NewLoadTester(config)

	// Cancellable, unbounded-duration context; cancellation stops workers.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Stop cleanly on Ctrl+C / SIGTERM.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	// Run the load test in the background; done is closed once Run returns,
	// i.e. after all worker goroutines have exited.
	done := make(chan struct{})
	go func() {
		tester.Run(ctx)
		close(done)
	}()

	fmt.Printf("Load testing started. Logs are written to %s\n", *logFile)
	fmt.Printf("Initial concurrency: %d, Max concurrency: %d, Target latency: %v\n",
		*initialConcurrency, *maxConcurrency, *targetLatency)
	fmt.Println("Press Ctrl+C to stop")

	// Block until a termination signal arrives.
	<-sigChan
	fmt.Println("\nReceived interrupt signal, stopping...")

	// BUG FIX: previously the final stats were read immediately after
	// cancel(), while workers could still be recording results. Wait for
	// Run to finish (bounded, in case a request hangs) so the totals are
	// complete.
	cancel()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		fmt.Println("Timed out waiting for workers to stop; stats may be incomplete")
	}

	requests, success, failures, avgTime := tester.stats.GetStats()
	fmt.Printf("\nFinal Results:\n")
	fmt.Printf("Total Requests: %d\n", requests)
	fmt.Printf("Successful: %d\n", success)
	fmt.Printf("Failed: %d\n", failures)
	fmt.Printf("Average Response Time: %v\n", avgTime)

	if requests > 0 {
		successRate := float64(success) / float64(requests) * 100
		fmt.Printf("Success Rate: %.2f%%\n", successRate)
	}

	fmt.Printf("Final Concurrency Level: %d\n", atomic.LoadInt64(&tester.concurrency))
}
