package gomoku_qtable

import (
	"fmt"
	"math"
	"math/rand"
	"strings"
	"time"

	"bindolabs/gomoku/modules/gomoku_env"
)

const (
	Alpha    = 0.9      // learning rate
	Gamma    = 0.9      // discount factor
	Epsilon  = 0.03     // exploration rate for the epsilon-greedy policy
	Episodes = 40000000 // upper bound on training episodes
	SaveDir  = "./qtable" // directory for persisted Q-tables (not referenced in this chunk)
)

// QLearningAgent is a tabular Q-learning player for gomoku. Q-values are
// keyed first by a string-encoded board state, then by a string-encoded
// action ("x,y").
type QLearningAgent struct {
	qTable        map[string]map[string]float64 // state -> action -> Q-value
	env           *gomoku_env.GomokuEnv         // environment used for training/playing
	Count         int64                         // move count, copied from env.Count on each update
	PlayWithHuman bool                          // when true, epsilon exploration is disabled
}

// NewQLearningAgent constructs an agent bound to the given environment,
// starting from an empty Q-table with training mode (exploration) enabled.
func NewQLearningAgent(env *gomoku_env.GomokuEnv) *QLearningAgent {
	agent := &QLearningAgent{env: env}
	agent.qTable = make(map[string]map[string]float64)
	agent.PlayWithHuman = false
	return agent
}

// GetQValue returns the stored Q-value for the (state, action) pair, or 0
// when the pair has never been recorded.
func (agent *QLearningAgent) GetQValue(state, action string) float64 {
	actions, ok := agent.qTable[state]
	if !ok {
		return 0
	}
	return actions[action] // missing action yields the zero value, 0
}

// UpdateQValue applies the standard Q-learning backup
//
//	Q(s,a) <- Q(s,a) + Alpha * (reward + Gamma*nextStateValue - Q(s,a))
//
// creating the inner action map lazily on first touch.
func (agent *QLearningAgent) UpdateQValue(state, action string, reward, nextStateValue float64) {
	// Single lookup instead of the original exists-check + GetQValue + write
	// (three hashes of the same key on a hot training path).
	actions, ok := agent.qTable[state]
	if !ok {
		actions = make(map[string]float64)
		agent.qTable[state] = actions
	}
	cur := actions[action] // 0 for an unseen action
	actions[action] = cur + Alpha*(reward+Gamma*nextStateValue-cur)
}

// MinValue returns the action with the lowest Q-value for state.
// Ties keep the earliest action; actions must be non-empty.
func (agent *QLearningAgent) MinValue(state string, actions []string) string {
	best, bestVal := actions[0], math.Inf(1)
	for _, a := range actions {
		if v := agent.GetQValue(state, a); v < bestVal {
			best, bestVal = a, v
		}
	}
	return best
}

// MaxValue returns the action with the highest Q-value for state.
// Ties keep the earliest action; actions must be non-empty.
func (agent *QLearningAgent) MaxValue(state string, actions []string) string {
	best, bestVal := actions[0], math.Inf(-1)
	for _, a := range actions {
		if v := agent.GetQValue(state, a); v > bestVal {
			best, bestVal = a, v
		}
	}
	return best
}

// ChooseAction picks an action for state with an epsilon-greedy policy:
// during training (not playing a human) it explores with probability Epsilon,
// otherwise Player1 maximizes and Player2 minimizes the Q-value.
func (agent *QLearningAgent) ChooseAction(state string, actions []string) string {
	if !agent.PlayWithHuman && rand.Float64() < Epsilon {
		// explore: uniformly random action
		return actions[rand.Intn(len(actions))]
	}
	// BUG FIX: the original condition carried a `|| 1 == 1` debug override
	// that made it always true, so MinValue was unreachable dead code.
	// Restore the intended max/min split per player.
	if agent.env.Player == gomoku_env.Player1 {
		return agent.MaxValue(state, actions)
	}
	return agent.MinValue(state, actions)
}

// WinReward returns the terminal reward for a win: larger for faster games
// (fewer moves played, tracked in agent.Count), plus a flat bonus of half
// the board size.
func (agent *QLearningAgent) WinReward() float64 {
	// BUG FIX: the original `gomoku_env.BoardSize / 2` was an untyped
	// integer constant division, silently truncating when BoardSize is odd.
	return 10*float64(gomoku_env.BoardSize)/float64(agent.Count) + float64(gomoku_env.BoardSize)/2
}

// NormalReward returns the per-move (non-terminal) reward. It scales the
// board-fill ratio term (2*Count/BoardSize^2 - 0.5) by a factor that grows
// with the square root of the move count.
func (agent *QLearningAgent) NormalReward() float64 {
	moves := float64(agent.Count)
	fill := 2*moves/math.Pow(float64(gomoku_env.BoardSize), 2) - 0.5
	return fill * (2.0 + math.Sqrt(moves))
}

// Update performs one Q-learning backup for the transition
// (state, action) -> nextState.
//
// The reward is WinReward for a terminal win by either player (the signed
// variant for the minimizing player is left commented out, as in the
// original), and NormalReward otherwise.
//
// NOTE(review): stateStr is keyed with agent.env.Player, which after a call
// to env.Step is presumably the *next* mover, not the one who played
// `action` — verify against env.Step's player-switching semantics.
func (agent *QLearningAgent) Update(state, nextState gomoku_env.State, action string, done bool, winner gomoku_env.PlayerType) {
	stateStr := stateToString(state, agent.env.Player)
	nextStateStr := stateToString(nextState, -agent.env.Player)

	agent.Count = agent.env.Count
	var reward float64
	switch winner {
	case gomoku_env.Player1, gomoku_env.Player2:
		reward = agent.WinReward()
	default:
		// reward = agent.NormalReward() * float64(agent.env.Player)
		reward = agent.NormalReward()
	}

	// BUG FIX: the terminal future value must be 0, not math.Inf(-1); the
	// original propagated -Inf into every terminal Q-update via
	// Gamma*nextStateValue, destroying the learned values for winning moves.
	nextStateValue := 0.0
	if !done {
		// BUG FIX: successor actions must come from nextState, not state.
		nextActions := getPossibleActions(nextState)
		nextAction := agent.ChooseAction(nextStateStr, nextActions)
		nextStateValue = agent.GetQValue(nextStateStr, nextAction)
	}
	agent.UpdateQValue(stateStr, action, reward, nextStateValue)
}

// Train runs self-play Q-learning episodes. It first reads a checkpoint
// interval from stdin; every `episode` episodes it shows the board, saves
// the Q-table to a .gob file, and asks interactively whether to continue.
func (agent *QLearningAgent) Train() {
	episode := 0
	fmt.Printf("training episode:")
	fmt.Scanf("%d", &episode)
	batch := episode
	agent.PlayWithHuman = false
	for i := 0; i < Episodes; i++ {
		if i == batch {
			fmt.Printf("episode:%d board:\n", episode)
			agent.env.ShowBoard()

			// Save Q-table to file
			filename := fmt.Sprintf("qtable_%d_episode_%d.gob", gomoku_env.BoardSize, i+1)
			err := agent.SaveQTable(filename)
			if err != nil {
				fmt.Println("Error saving Q-table:", err)
			} else {
				fmt.Println("Q-table saved to", filename)
			}
			c := 'c'
			fmt.Printf("continue? y/n :")
			// NOTE(review): %c may consume a leftover newline from the
			// previous Scanf — confirm the interactive prompt behaves.
			fmt.Scanf("%c", &c)
			if c != 'y' {
				return
			}
			batch += episode
		}

		// Periodic progress output.
		if i%10000 == 9999 {
			agent.env.ShowBoard()
			fmt.Println(i, time.Now())
		}
		if i == Episodes-1 {
			fmt.Println("reached max episode!", i)
		}

		state := agent.env.Reset()
		done := false
		for !done {
			player := agent.env.Player
			stateStr := stateToString(state, player)

			actions := getPossibleActions(state)
			action := agent.ChooseAction(stateStr, actions)
			x, y := actionToCoordinates(action)

			nextState, gameDone, winner, err := agent.env.Step(x, y)
			// BUG FIX: check the error before consuming Step's results; the
			// original assigned done first and used nextState afterwards.
			if err != nil {
				fmt.Println("Error during action:", err)
				break
			}
			done = gameDone

			// BUG FIX: back up from the pre-move state. The original set
			// state = nextState *before* calling Update, so Update received
			// (nextState, nextState) and never observed the real transition.
			agent.Update(state, nextState, action, done, winner)
			state = nextState
		}
	}
}

// stateToString serializes a board into a compact Q-table key of the form
// "<player>_<cells>", where runs of empty cells are encoded as a counter
// starting at 10 (so each run token is at least two digits) followed by the
// next stone's value, and trailing empty cells are omitted.
func stateToString(state [gomoku_env.BoardSize][gomoku_env.BoardSize]int, player gomoku_env.PlayerType) string {
	// PERF: this runs once per move across tens of millions of training
	// episodes; the original built the key with `result +=` in the inner
	// loop, reallocating the string on every stone. strings.Builder keeps
	// it to amortized O(n).
	var b strings.Builder
	const start = 10
	counter := start
	for i := 0; i < gomoku_env.BoardSize; i++ {
		for j := 0; j < gomoku_env.BoardSize; j++ {
			if state[i][j] == 0 {
				counter++ // extend the current run of empty cells
				continue
			}
			if counter != start {
				// flush the pending empty-run length before the stone value
				fmt.Fprintf(&b, "%d%d", counter, state[i][j])
				counter = start
			} else {
				fmt.Fprintf(&b, "%d", state[i][j])
			}
		}
	}
	return fmt.Sprintf("%v_%v", player.String(), b.String())
}

// getPossibleActions lists every empty cell as an "x,y" action string, in
// row-major order. Returns nil when the board is full.
func getPossibleActions(state [gomoku_env.BoardSize][gomoku_env.BoardSize]int) []string {
	var actions []string
	for row := 0; row < gomoku_env.BoardSize; row++ {
		for col := 0; col < gomoku_env.BoardSize; col++ {
			if state[row][col] != gomoku_env.Empty {
				continue
			}
			actions = append(actions, fmt.Sprintf("%d,%d", row, col))
		}
	}
	return actions
}

// actionToCoordinates parses an "x,y" action string into board coordinates.
// Unparsed fields keep their zero values (the Sscanf error is intentionally
// ignored, matching the original).
func actionToCoordinates(action string) (x int, y int) {
	fmt.Sscanf(action, "%d,%d", &x, &y)
	return x, y
}

// max returns the larger of two float64 values. When a is NaN or the values
// are equal it returns b, mirroring the original `if a > b` comparison.
func max(x, y float64) float64 {
	result := y
	if x > y {
		result = x
	}
	return result
}

func RunTrain() {
	env := gomoku_env.NewGomokuEnv()
	agent := NewQLearningAgent(env)
	agent.Train()
}
