package algorithm

import (
	"b2c-delivery-optimization/internal/model"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

// RLOptimizer is a reinforcement-learning (Q-learning) optimizer.
// Why reinforcement learning?
// 1. It can adapt its policy dynamically as conditions change, which suits
//    dynamic decision processes such as delivery-order prioritization.
type RLOptimizer struct {
	learningRate    float64                       // learning rate (alpha) for the Q-update
	discountFactor  float64                       // discount factor (gamma) for future rewards
	explorationRate float64                       // exploration rate (epsilon) for the epsilon-greedy policy
	qTable          map[string]map[string]float64 // Q-table: state -> action -> Q-value
}

// NewRLOptimizer constructs an optimizer with the given Q-learning
// hyperparameters and an empty Q-table.
func NewRLOptimizer(learningRate, discountFactor, explorationRate float64) *RLOptimizer {
	opt := new(RLOptimizer)
	opt.learningRate = learningRate
	opt.discountFactor = discountFactor
	opt.explorationRate = explorationRate
	opt.qTable = map[string]map[string]float64{}
	return opt
}

// getState maps an order set to its string state representation.
// Currently an alias for GetInitialState.
func (r *RLOptimizer) getState(orders []*model.Order) string {
	state := r.GetInitialState(orders)
	return state
}

// getActions returns the fixed action space: raise, lower, or keep an
// order's priority.
func (r *RLOptimizer) getActions() []string {
	actions := make([]string, 0, 3)
	actions = append(actions,
		"increase_priority",
		"decrease_priority",
		"maintain_priority",
	)
	return actions
}

// getReward computes the scalar reward for taking action in state.
// This simplified scheme ignores the transition target (nextState):
// raising priority is rewarded (+1.0), lowering it is penalized (-0.5),
// and any other action is neutral (0.0).
func (r *RLOptimizer) getReward(state string, action string, nextState string) float64 {
	if action == "increase_priority" {
		return 1.0
	}
	if action == "decrease_priority" {
		return -0.5
	}
	return 0.0
}

// Train runs tabular Q-learning for the given number of episodes. Each
// episode starts from the state derived from orders and keeps selecting
// actions (epsilon-greedy), observing rewards, and updating the Q-table
// until a terminal state is reached.
func (r *RLOptimizer) Train(orders []*model.Order, episodes int) {
	for episode := 0; episode < episodes; episode++ {
		state := r.GetInitialState(orders)
		for !r.isTerminal(state) {
			action := r.selectAction(state)
			// Compute the successor state BEFORE the reward so the reward
			// function sees the actual transition; the original passed ""
			// as nextState because it evaluated the reward too early.
			nextState := r.getNextState(state, action)
			reward := r.getReward(state, action, nextState)
			r.updateQValue(state, action, reward, nextState)
			state = nextState
		}
	}
}

// selectAction picks the next action with an epsilon-greedy policy: with
// probability explorationRate a uniformly random action is chosen
// (exploration); otherwise the highest-Q action is used (exploitation).
func (r *RLOptimizer) selectAction(state string) string {
	explore := rand.Float64() < r.explorationRate
	if !explore {
		// Exploit: pick the action with the highest Q-value.
		return r.GetOptimalAction(state)
	}
	// Explore: pick uniformly at random from the action space.
	available := r.getActions()
	idx := rand.Intn(len(available))
	return available[idx]
}

// GetOptimalAction returns the greedy (highest-Q) action for the given
// state. When the average priority encoded in the state is already at the
// maximum (>= 5.0) or minimum (<= 1.0), the candidate set is restricted so
// the policy cannot push priorities out of range.
func (r *RLOptimizer) GetOptimalAction(state string) string {
	avgPriority := r.getAveragePriority(state)

	// Lazily create the state's row so later writes don't hit a nil map.
	if _, exists := r.qTable[state]; !exists {
		r.qTable[state] = make(map[string]float64)
	}

	switch {
	case avgPriority >= 5.0:
		// Already at max priority: only maintaining or lowering is allowed.
		return r.bestAction(state, []string{"maintain_priority", "decrease_priority"})
	case avgPriority <= 1.0:
		// Already at min priority: only maintaining or raising is allowed.
		return r.bestAction(state, []string{"maintain_priority", "increase_priority"})
	default:
		// "maintain_priority" first so it wins ties / an empty Q-table,
		// preserving the original default behavior.
		return r.bestAction(state, []string{"maintain_priority", "increase_priority", "decrease_priority"})
	}
}

// bestAction returns the candidate with the highest Q-value for state.
// Unlike the previous inline loops, it does not seed the maximum with the
// sentinel -1.0 (which broke the argmax when every stored Q-value was below
// -1.0), and actions missing from the Q-table score their zero value (0.0),
// so an unexplored action can still beat explored negative-Q actions.
// Ties keep the earlier candidate.
func (r *RLOptimizer) bestAction(state string, candidates []string) string {
	best := candidates[0]
	maxQ := r.qTable[state][best]
	for _, action := range candidates[1:] {
		if q := r.qTable[state][action]; q > maxQ {
			maxQ = q
			best = action
		}
	}
	return best
}

// getAveragePriority extracts the average-priority component from a state
// string of the form "orders_N_avgPriority_X.XX". States that do not carry
// a parsable value fall back to 3.0 (the middle of the priority scale).
func (r *RLOptimizer) getAveragePriority(state string) float64 {
	fields := strings.Split(state, "_")
	if len(fields) < 4 {
		return 3.0 // malformed state: assume medium priority
	}
	value, err := strconv.ParseFloat(fields[3], 64)
	if err != nil {
		return 3.0 // unparsable priority field: assume medium priority
	}
	return value
}

// updateQValue applies the standard Q-learning update:
//
//	Q(s,a) <- Q(s,a) + alpha * [r + gamma * max_a' Q(s',a') - Q(s,a)]
func (r *RLOptimizer) updateQValue(state, action string, reward float64, nextState string) {
	row, ok := r.qTable[state]
	if !ok {
		row = make(map[string]float64)
		r.qTable[state] = row
	}
	oldQ := row[action]
	target := reward + r.discountFactor*r.getMaxQValue(nextState)
	row[action] = oldQ + r.learningRate*(target-oldQ)
}

// getMaxQValue returns max_a Q(state, a), or 0.0 when the state has no
// recorded action values (an unvisited state's Q-values default to zero).
//
// The previous version seeded the maximum with -1.0, which returned a wrong
// result whenever every stored Q-value was below -1.0 (reachable via the
// repeated -0.5 reward for "decrease_priority"), and it also returned -1.0
// for an existing-but-empty row while returning 0.0 for a missing row.
// Both cases now consistently yield the true maximum or 0.0.
func (r *RLOptimizer) getMaxQValue(state string) float64 {
	actionValues, exists := r.qTable[state]
	if !exists || len(actionValues) == 0 {
		return 0.0
	}
	var maxQ float64
	first := true
	for _, q := range actionValues {
		if first || q > maxQ {
			maxQ = q
			first = false
		}
	}
	return maxQ
}

// GetInitialState encodes an order set as a state string of the form
// "orders_N_avgPriority_X.XX"; an empty order set maps to "empty".
func (r *RLOptimizer) GetInitialState(orders []*model.Order) string {
	count := len(orders)
	if count == 0 {
		return "empty"
	}
	sum := 0
	for _, o := range orders {
		sum += o.Priority
	}
	return fmt.Sprintf("orders_%d_avgPriority_%.2f", count, float64(sum)/float64(count))
}

// isTerminal reports whether the episode should stop at this state: either
// the sentinel "empty" state, or a state that has accumulated enough
// "_"-separated segments (i.e. several action suffixes have been appended
// by state transitions).
func (r *RLOptimizer) isTerminal(state string) bool {
	if state == "empty" {
		return true
	}
	// Equivalent to len(strings.Split(state, "_")) > 5: terminate once the
	// state carries five or more separators.
	return strings.Count(state, "_") >= 5
}

// executeAction derives the successor state by suffixing the chosen action
// onto the current state (simplified state-transition model).
func (r *RLOptimizer) executeAction(state string, action string) string {
	return state + "_" + action
}

// getNextState derives the successor state by suffixing the chosen action
// onto the current state (simplified state-transition model).
func (r *RLOptimizer) getNextState(currentState string, action string) string {
	return currentState + "_" + action
}
