# from pcb_vector_utils import compute_sum_of_euclidean_distances_between_pads
import numpy as np
import gym
from gym import spaces
from .geometry import BBox, Point, Tx, Vector
# from core.agent.observation import get_agent_observation
# from core.agent.tracker import tracker
import random as rd
# from pcbDraw import draw_board_from_board_and_graph_multi_agent
from .place import random_placement
import datetime

from .schDraw import calc_dist_of_net
from .agent_observation import get_agent_observation
class agent(gym.Env):
    """Single-part placement environment.

    Wraps one schematic part and exposes a continuous action space for
    translating it on the sheet.  Observations come from
    ``get_agent_observation``; the reward is the (scaled) decrease of the
    "dom" distance term between consecutive observations.
    """

    def __init__(self, part, parameters):
        """Initialize the environment for one part.

        Args:
            part: the schematic part this agent controls (project type;
                iterating it yields pins, each carrying a ``nets`` list).
            parameters: run configuration object providing ``seed``,
                ``max_steps``, ``bboxWidth``, ``bboxHeight``, ``n``, ``m``,
                ``p``.
        """
        self.part = part
        self.parameters = parameters

        # NOTE(review): "ortientation" is misspelled, but the key must match
        # what get_agent_observation produces, so it is kept as-is.
        obs_space = {
            "los": spaces.Box(low=0.0, high=1.0, shape=(8,),
                              dtype=np.float32),
            "ol": spaces.Box(low=0.0, high=1.0, shape=(8,),
                             dtype=np.float32),
            "dom": spaces.Box(low=0.0, high=1.0, shape=(2,),
                              dtype=np.float32),
            "euc_dist": spaces.Box(low=0.0, high=1.0, shape=(2,),
                                   dtype=np.float32),
            "position": spaces.Box(low=0.0, high=1.0, shape=(2,),
                                   dtype=np.float32),
            "ortientation": spaces.Box(low=0.0, high=1.0, shape=(1,),
                                       dtype=np.float32)
            }
        self.observation_space = spaces.Dict(obs_space)
        # Action is (dx, dy, rotation); the rotation component is currently
        # pinned to 0 by the identical low/high bounds.
        self.action_space = spaces.Box(
            low=np.array([-50, -50, 0], dtype=np.float32),
            high=np.array([50, 50, 0], dtype=np.float32))

        self.rng = np.random.default_rng(seed=self.parameters.seed)
        # The action space uses its own random number generator; seed it too
        # so sampled actions are reproducible.
        self.action_space.seed(self.parameters.seed)

        self.max_steps = self.parameters.max_steps
        self.steps_done = 0
        self.bboxHeight = self.parameters.bboxHeight
        self.bboxWidth = self.parameters.bboxWidth
        # Best-known (expert) HPWL; no optimum is available, so start at +inf.
        self.HPWLe = np.inf

        self.n = self.parameters.n
        self.m = self.parameters.m
        self.p = self.parameters.p

        self.penalty_per_remaining_step = 15

    def reset(self):
        """Reset episode counters and recompute the initial HPWL.

        NOTE(review): unlike the standard gym API, this does not return an
        observation; callers fetch it separately via get_agent_observation.
        """
        self.steps_done = 0

        self.W = []
        self.HPWL = []
        self.ol_term5 = []
        self.nets = []

        # Collect every net attached to any pin of this part.  A net shared
        # by several pins is appended once per pin -- TODO confirm whether
        # duplicates are intended here.
        for pin in self.part:
            for net in pin.nets:
                self.nets.append(net)

        # Initial HPWL: sum of per-net distances, averaged over the nets.
        self.HPWLi = 0
        for net in self.nets:
            dist, _ = calc_dist_of_net(net, self.part)
            self.HPWLi += dist
        # BUG FIX: the original divided by len(net) -- the pin count of the
        # *last* net left over from the loop -- instead of the number of
        # nets (and raised NameError when the part had no nets at all).
        if self.nets:
            self.HPWLi = self.HPWLi / len(self.nets)

        self.current_HPWL = self.HPWLe

        self.all_w = []
        self.all_hpwl = []
        self.all_weighted_cost = []

    def step(self,
             model,
             random: bool = False,
             deterministic: bool = False,
             rl_model_type: str = "TD3"):
        """Advance the placement by one step.

        Args:
            model: policy exposing ``select_action(state)`` (TD3) or
                ``select_action(state, evaluate=...)`` (SAC).
            random: when True, sample the action from the action space
                instead of querying the model.
            deterministic: query the policy deterministically (SAC only;
                for TD3 both paths currently behave identically).
            rl_model_type: "TD3" or anything else (treated as SAC).

        Returns:
            Tuple ``(state, next_state, reward, action, done)``.
        """
        self.steps_done += 1
        state = get_agent_observation(self.part, parameters=self.parameters)
        # Flatten the dict observation into one feature vector for the policy.
        _state = (list(state["los"]) + list(state["ol"]) + state["dom"]
                  + state["euc_dist"] + state["position"]
                  + state["ortientation"])

        action = None
        if random is True:
            action = self.action_space.sample()
            if rl_model_type == "TD3":
                # Plain-list copy of the sampled action (replay-buffer format).
                model_action = [action[0], action[1], action[2]]
        else:
            if rl_model_type == "TD3":
                # Action scaling is done here, outside of the policy.
                model_action = model.select_action(np.array(_state))
                action = model_action
            else:  # SAC: action scaling is done inside the policy.
                action = model.select_action(
                    np.array(_state), evaluate=deterministic)

        # Keep the part's label-bbox centre inside the sheet bounds.
        # NOTE(review): the overflow and underflow branches assign the same
        # value (800 - ctr); the "< 0" branch looks like it should differ.
        # Preserved as-is -- confirm intent before changing.
        ctr = (self.part.lbl_bbox * self.part.tx).ctr
        if ctr.x + action[0] > self.bboxWidth:
            action[0] = 800 - ctr.x
        elif ctr.x + action[0] < 0:
            action[0] = 800 - ctr.x

        if ctr.y + action[1] > self.bboxHeight:
            action[1] = 800 - ctr.y
        elif ctr.y + action[1] < 0:
            action[1] = 800 - ctr.y
        pt_temp = Point(action[0], action[1])

        # NOTE(review): the move is only applied to part "R1" -- this looks
        # like leftover debug gating; every other part never actually moves.
        if self.part.ref == "R1":
            print(pt_temp)
            print("ctr_x = " + str(ctr.x))
            # BUG FIX: this debug line printed the y-centre with an
            # "ctr_x = " label in the original.
            print("ctr_y = " + str(ctr.y))
            print("action_x = " + str(action[0]))
            print("action_y = " + str(action[1]))
            self.part.tx = self.part.tx.move(pt_temp)

        next_state = get_agent_observation(self.part,
                                           parameters=self.parameters)
        reward, done = self.get_reward(state, next_state)

        # Both model types return the same tuple shape.
        return state, next_state, reward, action, done

    def get_reward(self, state, next_state):
        """Compute the reward for the latest transition.

        The reward is the decrease of the "dom" distance term, scaled by
        1/10000.  The episode terminates after ``max_steps`` steps.

        Returns:
            Tuple ``(reward, done)``.
        """
        done = False
        # Number of (pin, net) incidences on this part, used to normalise
        # the overlap term.
        counter = sum(len(pin.nets) for pin in self.part)
        self.ol = next_state["ol"][0] / counter

        # Positive when the distance term decreased this step.
        reward = (state["dom"][0] - next_state["dom"][0]) / 10000
        if self.steps_done == self.max_steps:
            done = True
        if (self.part.ref == "R1"):
            print(str(self.part.ref) + ":original_reward~~~~~~~~~~~~~~~~" + str(reward)+ " dist~~~~~~~~~~~~~~~~" + str(next_state["dom"][0]))
        return reward, done

    def init_random(self):
        """Place the controlled part at a random initial position."""
        random_placement([self.part])

    def get_observation_space_shape(self):
        """Return the total flattened length of the dict observation space."""
        sz = 0
        for _, value in self.observation_space.items():
            sz += value.shape[0]

        return sz
