"""
定义：优化问题的通用状态定义方法
"""
import torch

from opt_func.FuncApi_torch import Function
from opt_func.FuncSet_torch import *  # 目前先测试该函数
import numpy as np
import pandas as pd
import copy


class Same_range_problems(object):
    """State definition for an optimization problem whose variables all
    share the same discrete value range (same bounds and level count).

    Variables are assigned one at a time, in index order; the partial
    assignment is exposed as a one-hot "board" tensor for a policy player.
    """

    def __init__(self, **kwargs):
        """
        Initialize the task board.

        :param kwargs:
            lower: lower bound of every variable (default -2)
            upper: upper bound of every variable (default 2)
            dim_num: number of variables / dimensions (default 3)
            split_num: number of discrete levels per dimension (default 5)
            func_name: name of the objective function (default 'Rosenbrock')
        """
        self.range_lower = float(kwargs.get('lower', -2))
        self.range_upper = float(kwargs.get('upper', 2))
        self.history_steps = 3  # look back over the last 3 steps of the path
        self.dim_num = int(kwargs.get('dim_num', 3))
        # Variable pool: for now just sequential indices
        # (later the code may optimize variables in a specific order).
        self.dim_rank = torch.linspace(0, self.dim_num - 1, self.dim_num)
        self.split_num = int(kwargs.get('split_num', 5))  # levels per dim, identical for now
        # SECURITY: eval() on a caller-supplied string executes arbitrary
        # code — only pass trusted names (e.g. functions from FuncSet_torch).
        self.func = eval(str(kwargs.get('func_name', 'Rosenbrock')))

        # Discrete values a move can take, rounded to 2 decimals.
        self.avaliable_move_value = np.round(np.linspace(self.range_lower,
                                                         self.range_upper,
                                                         self.split_num), 2)
        # Reverse lookup: move value -> level index.
        self.ava_value_to_index = pd.Series(range(len(self.avaliable_move_value)),
                                            index=self.avaliable_move_value)

        # Per-dimension value ranges could be made configurable here later.

    def init_problem(self):
        """Reset the episode: no variable has been assigned yet."""
        self.last_move_idx = -1  # index of the most recently assigned variable
        self.states = {}  # key: variable index, value: assigned move value

    def current_state(self):
        """Return the board as a (history_steps, dim_num, split_num) one-hot
        tensor: plane 0 is the current assignment, planes 1 and 2 are the
        assignments one and two moves ago.
        """
        square_state = torch.zeros(
            (self.history_steps, self.dim_num, self.split_num))
        if not self.states:
            return square_state
        try:
            values = list(self.states.values())
            level_index = self.ava_value_to_index[values].values
            len_state = len(self.states)
            value_indexs = list(range(len_state))
            square_state[0, value_indexs, level_index] = 1
            if len_state >= 2:
                square_state[1, value_indexs[:-1], level_index[:-1]] = 1
            if len_state > 2:
                square_state[2, value_indexs[:-2], level_index[:-2]] = 1
        except (KeyError, IndexError):
            # A move value missing from the lookup index leaves the affected
            # plane(s) all-zero instead of returning None (which would crash
            # callers that immediately call .unsqueeze() on the result).
            pass
        return square_state

    def do_move(self, move):
        """
        Assign *move* to the next variable in sequential order.

        :param move: the numeric move value chosen for the next variable
        """
        self.last_move_idx += 1  # advance to the next variable (0, 1, 2, ...)
        self.states[self.last_move_idx] = move  # record the assignment

    def get_avaliable_move(self):
        """Return (index of the next variable to assign, its legal values)."""
        next_move_idx = self.last_move_idx + 1
        return next_move_idx, self.avaliable_move_value

    def end_optimization(self):
        """Check whether every variable has been assigned.

        :return: (True, -log(objective)) once all dims are set,
                 otherwise (False, None).
        """
        if len(self.states) == self.dim_num:
            # TODO: see whether evaluation can be batched — collect states
            # first and evaluate them together.
            target = self.func(torch.tensor(
                list(self.states.values())))  # true objective value
            # Take -log of the objective to normalize the residuals.
            value = -torch.log(target)
            return True, value.item()
        else:
            return False, None


class Game():
    """Runs optimization episodes on a Same_range_problems instance."""

    def __init__(self, same_range_problem):
        self.srp = same_range_problem

    # This is the function used during training.
    def start_self_play(self, player, temp=1e-3):
        """Play one full self-play episode and collect training data.

        :param player: object exposing get_action(problem, temp, return_prob)
        :param temp: temperature for the player's move selection
        :return: (states, mcts_probs, func_targets) — one row per move
        """
        self.srp.init_problem()
        # Collect per-move tensors in Python lists and stack once at the
        # end: repeated torch.cat inside the loop is quadratic, and seeding
        # with an empty int8 tensor relied on implicit dtype promotion.
        states, mcts_probs = [], []

        while True:
            value, value_prob = player.get_action(
                self.srp, temp=temp, return_prob=1)
            mcts_probs.append(value_prob)  # kept on the player's device (GPU)
            self.srp.do_move(value)
            states.append(self.srp.current_state())
            end, fun_target = self.srp.end_optimization()
            if end:
                # TODO: consider keeping only the final sample of the episode.
                func_targets = torch.full((len(states), 1), fun_target)
                return (torch.stack(states), torch.stack(mcts_probs),
                        func_targets)

    def start_play(self, player, temp=1e-3):
        """Play one episode and return (final objective value, assignment)."""
        self.srp.init_problem()
        while True:
            value = player.get_action(self.srp, temp=temp, return_prob=0)
            self.srp.do_move(value)
            end, fun_target = self.srp.end_optimization()
            if end:
                state = list(self.srp.states.values())
                return fun_target, state


if __name__ == '__main__':
    # Smoke test: build a problem with default settings and reset its state.
    problem = Same_range_problems()
    problem.init_problem()
