# -*- coding: utf-8 -*-
# Q-Table Learning
import random
import time

import numpy as np

import Config
from env_init.environment import deliveryManEnv
from playBoard.PlayBoard import *
# Load the experiment environment
#env = gym.make('FrozenLake-v0')




def Q_learning_Agent(env,player):
    """Train a tabular Q-learning agent for one player and return its best move.

    Runs ``Config.Q_num_episodes`` training episodes of noisy-greedy Q-table
    learning on ``env``, then picks the legal move with the highest learned
    Q-value from the player's initial state.

    Parameters
    ----------
    env : deliveryManEnv
        Game environment; must expose ``reset``, ``getAllLegalMove``,
        ``actionToArray``, ``arrayToAction``, ``judgeActionLegal``,
        ``playerstep`` and the per-player state/score attributes.
    player : int
        Player index, 1 or 2.

    Returns
    -------
    str
        One of "L", "R", "D", "U", "S" — the highest-valued legal move from
        the initial state (``None`` only if no legal move exists at all).

    Raises
    ------
    ValueError
        If ``player`` is not 1 or 2.
    """
    # Q-table: one row per board cell, one column per action (L, R, D, U, S).
    Q = np.zeros([Config.CHECKERBOARDSIZE * Config.CHECKERBOARDSIZE, 5])
    t = time.time()

    # Hyper-parameters: learning rate and discount factor.
    lr = 0.95
    y = .55
    num_episodes = Config.Q_num_episodes
    # Cumulative reward per episode (kept for debugging/analysis).
    rList = []

    if player == 1:
        initState = env.play1state
    elif player == 2:
        initState = env.play2state
    else:
        # BUG FIX: an invalid player previously crashed much later with
        # UnboundLocalError on initState; fail fast with a clear message.
        raise ValueError("player must be 1 or 2, got %r" % (player,))

    # Q-learning training loop.
    for i in range(num_episodes):

        # Reset the environment; each episode starts from the player's
        # initial state.
        env.reset()
        s = initState
        rAll = 0
        d = False
        j = 0
        while j < Config.Q_detect_move:
            j += 1

            # Sample a random candidate action. "S" (stay) is excluded from
            # exploration, but only when another move exists — the original
            # unconditional remove() could raise ValueError ("S" absent) or
            # leave an empty list for random.sample.
            action_space = env.getAllLegalMove(s,player)
            if "S" in action_space and len(action_space) > 1:
                action_space.remove("S")
            action = random.sample(action_space,1)[0]

            # Vectorize the action and blend it with the current Q row; the
            # noise weight 1/(i+1) decays so later episodes are greedier.
            actionArray = env.actionToArray(action)
            actionArray = Q[s,:] + actionArray * (1./(i+1))
            action = env.arrayToAction(actionArray)

            if env.judgeActionLegal(s, action,player) == False:
                # Large penalty for an illegal move; the agent stays put.
                # BUG FIX: s1 was previously left unbound here (NameError on
                # the first step of an episode) or stale from the prior step.
                r = -100
                s1 = s

            else:

                # Reward is the change in this player's total score across
                # the step (board score + final score component).
                if player == 1:
                    oldReward = env.play1score + env.final1Score
                else:
                    oldReward = env.play2score + env.final2Score

                s1, r, d, _ = env.playerstep(action,player = player)

                if player == 1:
                    newReward = env.play1score + env.final1Score
                else:
                    newReward = env.play2score + env.final2Score

                r = newReward - oldReward

            # Standard Q-learning update toward r + y * max_a' Q(s', a').
            a = np.argmax(actionArray)
            Q[s,a] = Q[s,a] + lr * (r + y*np.max(Q[s1,]) - Q[s,a])
            rAll += r
            s = s1
            if d:
                break
        rList.append(rAll)

    print("player" + str(player) + " cost time is: " + str(time.time() - t))

    # Among the legal moves from the initial state, pick the one with the
    # highest learned Q-value. float("-inf") replaces the old -999 sentinel:
    # Q-values can legitimately fall below -999 (repeated -100 penalties),
    # which would have left bestMove unbound.
    Q_value = Q[initState,:]
    legalMove = env.getAllLegalMove(initState,player)
    maxValue = float("-inf")
    bestMove = None

    for move in legalMove:
        index = ["L", "R", "D", "U", "S"].index(move)
        if Q_value[index] > maxValue:
            maxValue = Q_value[index]
            bestMove = move
    return bestMove
