import copy
import sys

import gym
import numpy as np
from CartPole_v1_Step import CartPoleStep
import torch

alpha = 5 # alpha sets the growth rate of the posterior probability — NOTE(review): appears unused in this file; confirm against other modules
gamma = 0.9  # presumably the discount factor — also appears unused here; verify

def softmax(x):
    """Numerically stable softmax along the last axis of `x`.

    The row-wise maximum is subtracted before exponentiation so large
    logits cannot overflow; the result sums to 1 along the last axis.
    """
    shifted = x - x.max(axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=-1, keepdims=True)

class treenode():
    """A node of the Monte-Carlo search tree.

    Holds the environment state, several value estimates (critic value,
    target value, simulated quality) and the branch-selection
    probabilities used during tree descent.
    """

    def __init__(self, parent, state, value, pi):
        '''
        Initialize a tree node.
        :param parent: parent tree node (None for the root)
        :param state: environment state stored in this node
        :param value: critic value of `state`
        :param pi: branch-selection probabilities, shape (1, n_actions)
        '''
        self.parent = parent
        self.children = []  # filled with all successor nodes at once
        self.children_block = np.zeros(pi.shape)  # 1 marks a blocked (terminal) branch
        self.rewards = []   # immediate reward per branch, parallel to `children`
        self.state = state
        self.value = value
        self.target_value = 0
        self.critic_value = value
        self.simulate_quality = value
        self.state_visit = 0
        self.pi = pi  # actor probabilities, used as rollout policy
        self.pi_select = copy.deepcopy(pi)  # visit-adjusted selection probabilities
        self.done = False
        # depth of the node: root has depth 0
        if parent is not None:
            self.deep = parent.deep + 1
        else:
            self.deep = 0

    def update_quality(self):
        '''
        Record one more visit and refresh the Monte-Carlo statistics.

        When the node has children, recompute the selection distribution
        (sqrt visit-count correction folded into the actor probabilities,
        then softmax-normalized) and the simulated quality as the
        pi-weighted one-step backup of the children's simulated qualities.
        Leaf nodes only have their visit counter incremented.
        '''
        self.state_visit += 1
        if self.children:
            visit_bonus = np.array(
                [(self.state_visit / (self.children[0].state_visit + 1)) ** 0.5,
                 (self.state_visit / (self.children[1].state_visit + 1)) ** 0.5])
            self.pi_select = softmax(np.reshape(visit_bonus, self.pi.shape) * self.pi)
            backups = np.array(
                [self.rewards[0] + self.children[0].simulate_quality,
                 self.rewards[1] + self.children[1].simulate_quality])
            self.simulate_quality = np.sum(np.reshape(backups, self.pi.shape) * self.pi)

class mcts_():
    """Monte-Carlo tree search guided by an actor network (selection and
    rollout probabilities) and critic networks (value estimates).

    The search runs on its own gym environment instance whose `state`
    attribute is overwritten directly to replay transitions from arbitrary
    tree nodes (CartPole-style env).
    """

    def __init__(self, last_state, state, reward, action, done, critic, target_critic, actor, conf):
        '''
        Build the search tree.
        :param last_state: previous environment state (stored for callers)
        :param state: state of the root node; must not be terminal
        :param reward: reward observed when entering `state`
        :param action: action that led to `state`
        :param done: terminal flag of the transition that led to `state`
        :param critic: critic network, state -> value
        :param target_critic: target critic network, (state, action) -> value
        :param actor: actor network, state -> action probabilities
        :param conf: config with attributes `env`, `max_deep`, `chosed_iter`
        '''
        value = critic(state).item()
        pi = actor(state).detach().numpy()
        self.static_root_treenode = treenode(None, state, value, pi)
        self.max_deep = conf.max_deep
        self.chosed_iter = conf.chosed_iter
        self.last_state = last_state
        self.reward = reward
        self.action = action
        self.done = done
        self.current_chosed_treenode = self.static_root_treenode
        self.current_expand_treenode = None
        self.current_simulate_treenode = None
        self.current_backup_treenode = self.current_expand_treenode
        self.env = gym.make(conf.env)
        self.env.reset()
        # [state dimension, number of discrete actions]
        self.dim = [self.env.state.shape[0], self.env.action_space.n]
        self.actor = actor
        self.critic = critic
        self.target_critic = target_critic
        self.rate = 0.05  # decay rate of the critic/target blending weight
        self.weights = np.array([1, 0])  # NOTE(review): appears unused in this class

    def target_value_(self, node):
        """One-step Bellman estimate of `node` under the target critic.

        For each action the env is reset to `node.state`, stepped once, and
        the target critic is queried on the (state, action) pair; the
        per-action estimates are mixed with `node.pi`. Terminal nodes get
        a target value of 0.
        """
        # BUG FIX: the original tested `~node.done`; for Python bools
        # ~False == -1 and ~True == -2 are BOTH truthy, so the terminal
        # branch below was unreachable. `not` is the correct negation.
        if not node.done:
            rewards = np.empty((1, self.dim[1]))
            values = np.empty((1, self.dim[1]))
            for i in range(self.dim[1]):
                self.env.state = node.state
                state_, reward, done, info = self.env.step(i)
                rewards[0, i] = reward
                action = np.ones((1, 1)) * i
                # the (state, action) row is tiled to a batch of 2 identical
                # rows and only row 0 is read back — NOTE(review): the tiling
                # looks redundant; kept to preserve the critic input shape.
                values[0, i] = self.target_critic(np.concatenate(
                    (np.tile(node.state.reshape((1, self.dim[0])), (2, 1)),
                     np.tile(action, (2, 1))), axis=1)).cpu()[0]
            node.target_value = np.sum((rewards + values) * node.pi)
        else:
            node.target_value = 0
        return node.target_value

    def postOrderTraverseUpdate(self, node, fresh):
        """Post-order traversal of the (binary) subtree rooted at `node`.

        fresh=True refreshes each node's critic value from the critic;
        fresh=False refreshes each node's rollout policy from the actor.
        Returns 0.
        """
        if fresh:
            node.critic_value = self.critic(node.state).detach()
        else:
            # re-sync the rollout policy with the current actor
            node.pi = np.reshape(self.actor(node.state).detach().numpy(), (1, self.dim[1]))
        if node.children:
            self.postOrderTraverseUpdate(node.children[0], fresh)
            self.postOrderTraverseUpdate(node.children[1], fresh)
        return 0

    def reinit(self):
        """Refresh every node's rollout policy after an actor update."""
        self.postOrderTraverseUpdate(self.static_root_treenode, False)

    def refresh(self, target_times):
        """Re-estimate the root value after a critic update.

        Refreshes critic values over the whole tree, then blends the root's
        critic value with the midpoint of its simulated quality and its
        one-step target value; the blend weight decays exponentially with
        the gap between the two estimates.
        :param target_times: bookkeeping counters, returned unchanged
        """
        self.postOrderTraverseUpdate(self.static_root_treenode, True)
        node = self.static_root_treenode
        target_value = self.target_value_(node)
        middle_value = 0.5 * (node.simulate_quality + target_value)
        gap = torch.abs(node.critic_value - middle_value)
        weight = torch.exp(-self.rate * gap)
        weights = [weight, 1 - weight]
        node.value = weights[0] * node.critic_value + weights[1] * middle_value
        return target_times

    def _block_in_parent(self, node):
        """Mark `node` as blocked inside its parent's children_block.

        Returns -1 (subtree blocked), or -2 when `node` is the root, i.e.
        the whole tree is exhausted (regrowing a new tree in place is not
        implemented).
        """
        if node.parent is None:
            return -2
        for i, sibling in enumerate(node.parent.children):
            if sibling == node:
                node.parent.children_block[0, i] = 1
        return -1

    def select(self):
        """Walk from current_chosed_treenode down to a node to expand.

        Returns 0 when an expandable leaf is reached, -1 when the subtree
        is terminal/blocked, -2 when the root itself is blocked.
        """
        if self.current_chosed_treenode.done:
            return -1
        if self.current_chosed_treenode.children == []:
            return 0  # unexpanded leaf: expansion happens here
        return_value = self.random_chosed()
        if return_value != -1:
            self.current_chosed_treenode = self.current_chosed_treenode.children[return_value]
            assert self.current_chosed_treenode.done == False
            return self.select()
        # every child is terminal: block this node in its parent
        return self._block_in_parent(self.current_chosed_treenode)

    def expand(self):
        """Expand the selected node and pick one child for simulation.

        On a fresh leaf, one child per action is created by stepping the
        env from the node's state; terminal children are blocked and their
        reward replaced by the -20 failure penalty. Returns 0 on success,
        -1/-2 when every child is blocked (see _block_in_parent).
        """
        if self.current_chosed_treenode.children == []:
            for i in range(self.dim[1]):
                self.env.reset()
                self.env.state = self.current_chosed_treenode.state
                state_, reward, done, info = self.env.step(i)
                state_ = np.reshape(state_, (1, self.dim[0]))
                value = self.critic(state_).item()
                pi = np.reshape(self.actor(state_).detach().numpy(), (1, self.dim[1]))
                child = treenode(self.current_chosed_treenode, state_, value, pi)
                child.done = done
                if done:
                    self.current_chosed_treenode.children_block[0, i] = 1
                    reward = -20  # failure penalty
                self.current_chosed_treenode.children.append(child)
                self.current_chosed_treenode.rewards.append(reward)
        # the original's two branches were identical after child creation;
        # merged into one selection step
        return_value = self.random_chosed()
        if return_value != -1:
            self.current_expand_treenode = self.current_chosed_treenode.children[return_value]
            assert self.current_expand_treenode.done == False
            return 0
        return self._block_in_parent(self.current_chosed_treenode)

    def simulate(self, parent_treenode, rewards):
        """Random rollout from `parent_treenode` following the actor policy.

        Accumulates rewards until termination or max depth, then bootstraps
        the tail with the critic's value of the final state.
        :param rewards: return accumulated so far (pass 0 at the top call)
        :return: accumulated rollout return
        """
        action = self.random_simulated(parent_treenode)
        state_, reward, done, info = self.env.step(action)
        state_ = np.reshape(state_, (1, self.dim[0]))
        if done:
            reward = -20  # same failure penalty as in expand()
        pi = np.reshape(self.actor(state_).detach().numpy(), (1, self.dim[1]))
        self.current_simulate_treenode = treenode(parent_treenode, state_, 0, pi)
        self.current_simulate_treenode.done = done
        if done or self.current_simulate_treenode.deep >= self.max_deep:
            # bootstrap the value beyond the rollout horizon from the critic
            v_offset = self.critic(self.current_simulate_treenode.state).detach().numpy()[0, 0]
            return rewards + reward + v_offset
        return self.simulate(self.current_simulate_treenode, rewards + reward)

    def backup(self, rewards):
        """Propagate a rollout return from current_backup_treenode up to
        the static root, updating visit counts and simulated qualities."""
        if self.current_backup_treenode.children == []:
            # leaf: exponential moving average of rollout returns
            self.current_backup_treenode.state_visit += 1
            self.current_backup_treenode.simulate_quality = (
                0.5 * rewards + 0.5 * self.current_backup_treenode.simulate_quality)
        else:
            self.current_backup_treenode.update_quality()
        if self.current_backup_treenode.parent is not None:
            self.current_backup_treenode = self.current_backup_treenode.parent
            return self.backup(rewards)
        return 0

    def control(self):
        """Run `chosed_iter` rounds of select / expand / simulate / backup."""
        for _ in range(self.chosed_iter):
            # select() must always restart from the static root
            self.current_chosed_treenode = self.static_root_treenode
            return_value = self.select()
            rewards = 0
            # BUG FIX: the original guard used `|` on the two inequalities,
            # which is always True (any value differs from -1 or from -2),
            # so blocked/terminal trees were expanded anyway. `and`
            # implements the intended guard.
            if (return_value != -1) and (return_value != -2):
                self.env.reset()
                self.env.state = self.current_chosed_treenode.state
                return_value = self.expand()
                # NOTE(review): if expand() fails on the very first
                # iteration, current_expand_treenode is still None — confirm
                # callers never construct the tree from a fully blocked root.
                self.current_backup_treenode = self.current_expand_treenode
                if return_value != -1:
                    self.env.reset()
                    self.env.state = self.current_expand_treenode.state
                    rewards = self.simulate(self.current_expand_treenode, 0)
                else:
                    self.current_simulate_treenode = self.current_expand_treenode
            else:
                self.current_simulate_treenode = self.current_chosed_treenode
                self.current_backup_treenode = self.current_chosed_treenode
            self.backup(rewards)
        return 0

    def random_chosed(self):
        """Sample an unblocked child index of current_chosed_treenode from
        its renormalized pi_select distribution; -1 when all are blocked."""
        xy = np.where(self.current_chosed_treenode.children_block == 0)[1]
        # BUG FIX: the original compared the shape *tuple* to 0
        # (`xy.shape == 0`), which is never true, so an all-blocked node
        # fell through to a division by zero instead of returning -1.
        if xy.shape[0] == 0:
            return -1
        nonblock_pi = self.current_chosed_treenode.pi_select[0, xy]
        nonblock_pi = nonblock_pi / nonblock_pi.sum()
        i = np.random.choice(a=nonblock_pi.shape[0], p=nonblock_pi)
        return xy[i]

    def random_simulated(self, parent_treenode):
        """Sample a rollout action from the parent node's actor policy."""
        return np.random.choice(a=self.dim[1], p=parent_treenode.pi[0, :])

