package grpc_pool

import (
	"errors"
	"fmt"
	"log"
	"math"
	"sync"
	"sync/atomic"
	"time"
)

// ErrClosed is the error returned by pool operations after the grpc_pool
// has been closed via grpc_pool.Close().
var ErrClosed = errors.New("grpc_pool is closed")

// Pool describes a grpc_pool implementation.
// An ideal grpc_pool is thread-safe and easy to use.
type Pool interface {
	// Get returns a logical connection from the grpc_pool. Closing the
	// connection puts it back into the Pool. Closing it when the grpc_pool is
	// destroyed or full counts as an error. conn.Value() is guaranteed to be
	// non-nil whenever the returned conn is non-nil.
	Get() (Conn, error)

	// Close closes the grpc_pool and all its connections. After Close() the
	// grpc_pool is no longer usable. Concurrent calls to Close and Get are safe.
	Close() error

	// Status returns a human-readable snapshot of the grpc_pool's current state.
	Status() string
}

type pool struct {
	index        uint32        // atomic counter used for round-robin connection selection
	current      int32         // atomic: number of physical connections currently held
	ref          int32         // atomic: number of logical (checked-out) connections in use
	opt          Options       // pool configuration options
	conns        []*conn       // backing array of physical connections (len == MaxActive)
	address      string        // server address used when dialing new connections
	closed       int32         // atomic flag: 0 = open, 1 = closed
	closeChan    chan struct{} // closed to tell the background health-check goroutine to exit
	sync.RWMutex               // guards conns and pool state against concurrent access
}

// New builds a connection grpc_pool for the given server address.
//
// It validates the supplied options (filling in the default health-check
// interval when unset), pre-dials MaxIdle connections, starts the background
// health checker, and returns the ready-to-use pool. Any dial failure during
// warm-up closes the partially built pool and returns the error.
func New(address string, option Options) (Pool, error) {
	// Validate configuration up front; reject clearly unusable settings.
	switch {
	case address == "":
		return nil, errors.New("invalid address settings")
	case option.Dial == nil:
		return nil, errors.New("invalid dial settings")
	case option.MaxIdle <= 0 || option.MaxActive <= 0 || option.MaxIdle > option.MaxActive:
		return nil, errors.New("invalid maximum settings (MaxIdle must be <= MaxActive and > 0)")
	case option.MaxConcurrentStreams <= 0:
		return nil, errors.New("invalid MaxConcurrentStreams (must be > 0)")
	}
	// Fall back to the package default health-check interval.
	if option.CheckInterval <= 0 {
		option.CheckInterval = CheckInterval
	}

	// index, ref and closed start at their zero values.
	p := &pool{
		current:   int32(option.MaxIdle),
		opt:       option,
		conns:     make([]*conn, option.MaxActive),
		address:   address,
		closeChan: make(chan struct{}),
	}

	// Warm the pool up with MaxIdle physical connections; abort on failure.
	for i := 0; i < p.opt.MaxIdle; i++ {
		c, err := p.createConnection()
		if err != nil {
			_ = p.Close()
			return nil, fmt.Errorf("failed to create initial connection: %w", err)
		}
		p.conns[i] = c
	}

	// Spawn the background connection health checker.
	p.startHealthChecker()

	log.Printf("new grpc_pool success: %v\n", p.Status())
	return p, nil
}

// createConnection dials the pool's address and wraps the result as a
// pooled (non-one-off) connection. Centralized here for reuse and uniform
// error wrapping.
func (p *pool) createConnection() (*conn, error) {
	cc, err := p.opt.Dial(p.address)
	if err == nil {
		return p.wrapConn(cc, false), nil
	}
	return nil, fmt.Errorf("dial failed: %w", err)
}

// startHealthChecker spawns a background goroutine that periodically runs
// checkAndRenewConnections until the pool's closeChan is closed.
func (p *pool) startHealthChecker() {
	ticker := time.NewTicker(p.opt.CheckInterval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-p.closeChan:
				// Pool is closing down; stop checking.
				return
			case <-ticker.C:
				p.checkAndRenewConnections()
			}
		}
	}()
}

// checkAndRenewConnections scans the active connection slots and replaces
// any that are nil or fail the health check with freshly dialed connections.
//
// NOTE(review): `current` is sampled once up front; a concurrent shrink in
// decrRef can lower p.current while this loop runs, in which case slots at
// indices >= the new current may be re-created here — confirm this is the
// intended behavior.
func (p *pool) checkAndRenewConnections() {
	p.RLock()
	current := atomic.LoadInt32(&p.current)
	p.RUnlock()

	// Nothing to do for an empty or already-closed pool.
	if current == 0 || atomic.LoadInt32(&p.closed) == 1 {
		return
	}

	// Walk every active slot and health-check it.
	for i := int32(0); i < current; i++ {
		p.RLock()
		conn := p.conns[i]
		p.RUnlock()

		if conn == nil || !conn.IsHealthy() {
			// Slot is empty or unhealthy: dial the replacement first, so the
			// old connection is only swapped out on success.
			newConn, err := p.createConnection()
			if err != nil {
				log.Printf("failed to renew connection %d: %v", i, err)
				continue
			}

			// Swap the replacement in under the write lock.
			p.Lock()
			oldConn := p.conns[i]
			p.conns[i] = newConn
			p.Unlock()

			// Close the old connection asynchronously so a slow teardown
			// does not block the scan.
			if oldConn != nil {
				go func() {
					_ = oldConn.reset()
				}()
			}
			log.Printf("renewed unhealthy connection at index %d", i)
		}
	}
}

// incrRef atomically bumps the logical reference count and returns the new
// value. Since the counter only moves in steps of one, it reaches
// math.MaxInt32 exactly before it could wrap, at which point we panic.
func (p *pool) incrRef() int32 {
	ref := atomic.AddInt32(&p.ref, 1)
	if ref == math.MaxInt32 {
		panic(fmt.Sprintf("overflow ref: %d", ref))
	}
	return ref
}

// decrRef releases one logical connection reference. When the last
// reference is returned and the pool has grown beyond MaxIdle, the surplus
// physical connections are closed and the pool shrinks back to MaxIdle.
func (p *pool) decrRef() {
	newRef := atomic.AddInt32(&p.ref, -1)
	if newRef < 0 && atomic.LoadInt32(&p.closed) == 0 {
		log.Printf("warning: negative ref count: %d", newRef)
	}

	// Shrink when fully idle: no logical references and more physical
	// connections than MaxIdle.
	if newRef == 0 && atomic.LoadInt32(&p.current) > int32(p.opt.MaxIdle) {
		p.Lock()
		// Re-check under the write lock to avoid racing concurrent Get/decrRef.
		if atomic.LoadInt32(&p.ref) == 0 && atomic.LoadInt32(&p.current) > int32(p.opt.MaxIdle) {
			oldCurrent := atomic.LoadInt32(&p.current)
			atomic.StoreInt32(&p.current, int32(p.opt.MaxIdle))
			// Close the surplus slots inline while holding the lock.
			// BUGFIX: the previous code called p.deleteFrom here, which
			// re-acquires p.Lock and self-deadlocks (sync.RWMutex is not
			// reentrant).
			for i := p.opt.MaxIdle; i < p.opt.MaxActive; i++ {
				p.reset(i)
			}
			log.Printf("shrunk grpc_pool: %d -> %d (removed %d connections)",
				oldCurrent, p.opt.MaxIdle, oldCurrent-int32(p.opt.MaxIdle))
		}
		p.Unlock()
	}
}

// reset closes and clears the connection stored at index. It is a no-op for
// an empty slot. reset performs no locking of its own; callers are expected
// to hold the pool's write lock when mutating p.conns.
func (p *pool) reset(index int) {
	conn := p.conns[index]
	if conn == nil {
		return
	}
	// conn.reset tears down the underlying connection; the error is
	// intentionally ignored because the slot is being discarded either way.
	_ = conn.reset()
	p.conns[index] = nil
}

// deleteFrom closes and clears every connection slot in [begin, MaxActive).
//
// The caller MUST hold p's write lock. BUGFIX: the previous implementation
// acquired p.Lock() itself, but its call sites already hold the write lock
// when invoking it, which self-deadlocked because sync.RWMutex is not
// reentrant. Locking is therefore left entirely to the caller.
func (p *pool) deleteFrom(begin int) {
	for i := begin; i < p.opt.MaxActive; i++ {
		p.reset(i)
	}
}

// Get hands out a logical connection from the pool. It first tries the
// existing physical connections, attempts to expand the pool when they are
// saturated, then retries once. The logical reference count is rolled back
// on every failure path so failed calls do not leak references.
func (p *pool) Get() (Conn, error) {
	// Reject calls on a closed pool immediately.
	if atomic.LoadInt32(&p.closed) == 1 {
		return nil, ErrClosed
	}

	// Claim a logical reference up front.
	ref := p.incrRef()

	// Fast path: an existing connection can carry this stream.
	if c, err := p.getHealthyConnection(ref); err == nil {
		return c, nil
	}

	// Slow path: grow the pool, then retry once.
	if err := p.tryExpand(ref); err != nil {
		p.decrRef() // expansion failed: give the reference back
		return nil, err
	}

	c, err := p.getHealthyConnection(ref)
	if err != nil {
		p.decrRef() // retry failed: give the reference back
		return nil, err
	}
	return c, nil
}

// getHealthyConnection picks an existing physical connection that can carry
// one more stream, using round-robin over the first `current` slots.
// nextRef is the caller's freshly incremented logical reference count and
// decides whether the current connections can absorb the extra load.
func (p *pool) getHealthyConnection(nextRef int32) (Conn, error) {
	p.RLock()
	current := atomic.LoadInt32(&p.current)
	p.RUnlock()

	if current == 0 {
		return nil, ErrClosed
	}

	// Within capacity: the current connections can carry nextRef streams total.
	if nextRef <= current*int32(p.opt.MaxConcurrentStreams) {
		// Round-robin over the active slots, skipping unhealthy/full ones.
		for i := 0; i < int(current); i++ {
			next := atomic.AddUint32(&p.index, 1) % uint32(current)
			p.RLock()
			conn := p.conns[next]
			p.RUnlock()

			// NOTE(review): the streamCount load and add below are two
			// separate atomic ops, so under contention a connection may
			// briefly exceed MaxConcurrentStreams — confirm acceptable.
			if conn != nil && conn.IsHealthy() &&
				atomic.LoadUint32(&conn.streamCount) < uint32(p.opt.MaxConcurrentStreams) {
				atomic.AddUint32(&conn.streamCount, 1)
				return conn, nil
			}
		}
		return nil, errors.New("no healthy connections available in pool")
	}

	// The pool is already at its maximum number of physical connections.
	if current == int32(p.opt.MaxActive) {
		if p.opt.Reuse {
			// Reuse an existing connection even if its stream count is over
			// the limit (a single round-robin probe, no scan).
			next := atomic.AddUint32(&p.index, 1) % uint32(current)
			p.RLock()
			conn := p.conns[next]
			p.RUnlock()

			if conn != nil && conn.IsHealthy() {
				atomic.AddUint32(&conn.streamCount, 1)
				return conn, nil
			}
			return nil, errors.New("no healthy connections available for reuse")
		}

		// Reuse disabled: dial a temporary connection outside the pool
		// (wrapConn's second argument is true here, false for pooled conns
		// in createConnection — presumably marking it one-off; verify).
		cc, err := p.opt.Dial(p.address)
		if err != nil {
			return nil, fmt.Errorf("failed to create one-time connection: %w", err)
		}
		conn := p.wrapConn(cc, true)
		atomic.AddUint32(&conn.streamCount, 1)
		return conn, nil
	}

	// Below MaxActive yet over capacity: tell the caller to expand the pool.
	return nil, errors.New("need to expand pool capacity")
}

// tryExpand grows the pool when its existing connections cannot carry
// nextRef concurrent streams. The pool roughly doubles in size, capped at
// MaxActive. Expansion is all-or-nothing: if any dial fails, the freshly
// created connections are torn down and an error is returned.
func (p *pool) tryExpand(nextRef int32) error {
	p.RLock()
	current := atomic.LoadInt32(&p.current)
	needExpand := current < int32(p.opt.MaxActive) &&
		nextRef > current*int32(p.opt.MaxConcurrentStreams)
	p.RUnlock()

	if !needExpand {
		return nil
	}

	p.Lock()
	defer p.Unlock()

	// Double-check under the write lock so concurrent callers don't both expand.
	current = atomic.LoadInt32(&p.current)
	if current >= int32(p.opt.MaxActive) ||
		nextRef <= current*int32(p.opt.MaxConcurrentStreams) {
		return nil
	}

	// Grow by the current size (i.e. double), clamped to remaining capacity.
	increment := current
	if current+increment > int32(p.opt.MaxActive) {
		increment = int32(p.opt.MaxActive) - current
	}

	// Dial all new connections before touching pool state, so a partial
	// failure cannot leave the pool inconsistent.
	// NOTE(review): dialing happens while holding the write lock, blocking
	// all Get/shrink activity for the duration — confirm acceptable.
	newConns := make([]*conn, 0, increment)
	for i := int32(0); i < increment; i++ {
		conn, err := p.createConnection()
		if err != nil {
			// Tear down whatever was created in this batch.
			for _, c := range newConns {
				_ = c.reset()
			}
			return fmt.Errorf("failed to create new connections: %w", err)
		}
		newConns = append(newConns, conn)
	}

	// Commit: clear any stale slot contents, then install the new conns.
	for i, conn := range newConns {
		p.reset(int(current) + i)
		p.conns[current+int32(i)] = conn
	}

	newCurrent := current + int32(len(newConns))
	atomic.StoreInt32(&p.current, newCurrent)
	log.Printf("expanded grpc_pool: %d -> %d (added %d connections)",
		current, newCurrent, len(newConns))

	return nil
}

// Close safely shuts the pool down: it marks the pool closed, stops the
// background health checker, waits (up to 5 seconds) for borrowed
// connections to be returned, and finally releases every physical
// connection. Returns an error if the pool was already closed.
func (p *pool) Close() error {
	// Atomically flip the closed flag; only the first caller proceeds.
	if !atomic.CompareAndSwapInt32(&p.closed, 0, 1) {
		return errors.New("pool already closed")
	}

	// Signal the health-check goroutine to exit.
	close(p.closeChan)

	// Wait for all borrowed connections to come back, up to 5 seconds.
	// BUGFIX: the previous code used a bare `break` inside the select, which
	// only exits the select — after the one-shot timeout channel fired, the
	// loop could spin forever if ref never reached zero. The labeled break
	// actually leaves the loop.
	deadline := time.After(5 * time.Second)
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

wait:
	for atomic.LoadInt32(&p.ref) > 0 {
		select {
		case <-ticker.C:
			// Keep polling the reference count.
		case <-deadline:
			log.Printf("warning: pool closing timed out, some connections may still be in use")
			break wait
		}
	}

	// Release every connection slot and reset the counters.
	// BUGFIX: the previous code called p.deleteFrom(0) while holding
	// p.Lock(); deleteFrom re-acquired the same lock and deadlocked
	// (sync.RWMutex is not reentrant), so the cleanup is inlined here.
	p.Lock()
	for i := 0; i < p.opt.MaxActive; i++ {
		p.reset(i)
	}
	atomic.StoreInt32(&p.current, 0)
	atomic.StoreInt32(&p.ref, 0)
	atomic.StoreUint32(&p.index, 0)
	p.Unlock()

	log.Printf("grpc_pool closed successfully: %v", p.Status())
	return nil
}

// Status reports a human-readable snapshot of the pool's current state.
func (p *pool) Status() string {
	closed := atomic.LoadInt32(&p.closed) == 1
	current := atomic.LoadInt32(&p.current)
	ref := atomic.LoadInt32(&p.ref)
	return fmt.Sprintf(
		"address:%s, closed:%v, current:%d, ref:%d, maxIdle:%d, maxActive:%d",
		p.address, closed, current, ref, p.opt.MaxIdle, p.opt.MaxActive,
	)
}
