import torch
from matplotlib import pyplot as plt

from environment.electric_scheduling import PowerDayAheadSchedule
from model.DDPG.v1.ddpg_learning import DDPG
from model.SAC.v1.sac_learning import SACContinuous as V1Model
from model.SAC.v2.sac_learning import SACContinuous as V2Model

# Configure matplotlib so the Chinese labels used in ShowPolicy render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font: displays Chinese characters
plt.rcParams['axes.unicode_minus'] = False		# render minus signs correctly with this font

# Shared hyperparameters used when (re)constructing the agents below.
actor_lr = 0.01
critic_lr = 0.01
num_episodes = 10000
hidden_dim = 50
gamma = 0.9
tau = 0.005  # soft-update rate for target networks
buffer_size = 10000
minimal_size = 10
batch_size = 6
sigma = 0.01  # standard deviation of Gaussian exploration noise
device = "cuda" if torch.cuda.is_available() else "cpu"


def LoadDDPGModel(env: PowerDayAheadSchedule, pth: str) -> DDPG:
    """Build a DDPG agent and restore its fire-actor weights from a checkpoint.

    Args:
        env: scheduling environment; ``reset()`` supplies the state vector used
            to size the network input, and the station counts size the heads.
        pth: path to a torch checkpoint containing "FireActorNet" and
            "FireTargetActorNet" state dicts.

    Returns:
        A DDPG agent with fire actor and fire target-actor weights loaded.
    """
    state = env.reset()
    # Load the checkpoint once (it was previously deserialized twice) and remap
    # tensors onto the configured device so GPU-saved checkpoints also load on
    # CPU-only hosts.
    checkpoint = torch.load(pth, map_location=device)
    fire_actor_state_dict = checkpoint["FireActorNet"]
    fire_target_actor_state_dict = checkpoint["FireTargetActorNet"]
    agent = DDPG(
        state_dim=len(state),
        hidden_dim=(50, 20),
        fire_action_dim=env.fire_station_num,
        fire_action_bound=400,
        water_action_dim=env.water_station_num,
        water_action_bound=121,
        sigma=sigma,
        actor_lr=actor_lr,
        critic_lr=critic_lr,
        tau=tau,
        gamma=gamma,
        device=device
    )
    agent.fire_actor.load_state_dict(fire_actor_state_dict)
    agent.fire_target_actor.load_state_dict(fire_target_actor_state_dict)
    return agent


def LoadSACV1Model(env: PowerDayAheadSchedule, pth: str) -> V1Model:
    """Build a SAC v1 agent and restore its fire-actor weights from a checkpoint.

    Args:
        env: scheduling environment; ``reset()`` supplies the state vector used
            to size the network input.
        pth: path to a torch checkpoint containing a "FireActorNet" state dict.

    Returns:
        A SAC v1 agent with the fire actor weights loaded.
    """
    state = env.reset()
    # map_location lets a GPU-saved checkpoint load on a CPU-only host.
    fire_actor_state_dict = torch.load(pth, map_location=device)["FireActorNet"]
    agent = V1Model(
        env,
        state_dim=len(state),
        hidden_dim=256,
        action_dim=env.fire_station_num,
        action_bound=400,
        actor_lr=actor_lr,
        critic_lr=critic_lr,
        alpha_lr=0.0001,
        target_entropy=-env.fire_station_num,
        tau=tau,
        gamma=gamma,
        device=device
    )
    agent.fire_actor.load_state_dict(fire_actor_state_dict)
    return agent


def LoadSACV2Model(env: PowerDayAheadSchedule, pth: str) -> V2Model:
    """Build a SAC v2 agent and restore its joint-actor weights from a checkpoint.

    The v2 action space is joint: one entry per water station, one per fire
    station, plus two extra entries (solar and wind).

    Args:
        env: scheduling environment; ``reset()`` supplies the state vector used
            to size the network input.
        pth: path to a torch checkpoint containing an "ActorNet" state dict.

    Returns:
        A SAC v2 agent with the actor weights loaded.
    """
    state = env.reset()
    # map_location lets a GPU-saved checkpoint load on a CPU-only host.
    actor_state_dict = torch.load(pth, map_location=device)["ActorNet"]
    agent = V2Model(
        env,
        state_dim=len(state),
        hidden_dim=256,
        action_dim=env.water_station_num + env.fire_station_num + 2,
        actor_lr=actor_lr,
        critic_lr=critic_lr,
        alpha_lr=0.0001,
        target_entropy=-(env.fire_station_num + env.water_station_num),
        tau=tau,
        gamma=gamma,
        device=device
    )
    agent.actor.load_state_dict(actor_state_dict)
    return agent


def ShowPolicy(env: PowerDayAheadSchedule, agent):
    """Roll out the agent's policy over one scheduling horizon and plot it.

    For each hour the agent's actor proposes an action, the action is split
    into per-source outputs, the environment is stepped, and one stacked bar
    (fire / water / solar / wind above zero, storage below zero) is drawn.
    The demand curve is overlaid as a red line at the end.

    Args:
        env: scheduling environment to roll out.
        agent: agent whose ``actor`` maps a state tensor to a (sample, extra)
            pair — assumes the SAC v2 joint-action interface; DDPG agents
            expose ``fire_actor`` instead. TODO confirm callers only pass v2.

    Returns:
        Total renewable (solar + wind) energy produced over the rollout.
    """
    total_energy = 0
    plt.figure()
    state = env.reset()
    for i in range(env.trainDuration):
        print(f"==============Day1 Hour{i}=================")
        stateTensor = torch.tensor(state, dtype=torch.float).to(device)
        action_result, _ = agent.actor(stateTensor)
        action = action_result.cpu().detach().numpy()
        fire_action, water_action, solar, wind = SplitAction(
            action, env.fire_station_num, env.water_station_num, env.solar_limit_t[i], env.wind_limit_t[i])
        print("fire output=", fire_action)
        print("water output=", water_action)
        print("solar output=", solar)
        print("wind output=", wind)
        total_energy += solar + wind
        fire_out = sum(fire_action)
        water_out = sum(water_action)
        actionSet = {
            "fire": fire_action,
            "water": water_action,
            "wind": wind,
            "solar": solar
        }
        state, reward, done, _ = env.step(actionSet)
        if done:
            break

        # Stack the sources bottom-up; storage is drawn below the axis.
        _bottom = 0
        plt.bar(i, fire_out, bottom=0, color="goldenrod", label="火电")
        _bottom += fire_out
        plt.bar(i, water_out, bottom=_bottom, color="violet", label="水电")
        _bottom += water_out
        plt.bar(i, solar, bottom=_bottom,
                color="lightseagreen", label="太阳能")
        _bottom += solar
        plt.bar(i, wind, bottom=_bottom,
                color="cornflowerblue", label="风能")
        # NOTE(review): assumes env.p_storage_t is the scalar storage power for
        # the current step — confirm against the environment implementation.
        _bottom = -env.p_storage_t
        plt.bar(i, _bottom, color="blue", label="储能")

    plt.plot(range(env.trainDuration),
             env.requirement_t, color="red", label="负荷")
    plt.legend(labels=["负荷", "火电", "水电", "太阳能", "风能", "储能"])
    plt.show()
    return total_energy


def SplitAction(action, fireN: int, waterN: int, solar_limit: int, wind_limit: int) -> tuple[list, list, float, float]:
    """Split a flat action vector into clamped per-source outputs.

    Layout of ``action``: ``fireN`` fire-station entries, then ``waterN``
    water-station entries, then one solar entry and one wind entry.

    Args:
        action: indexable sequence of raw (possibly negative) action values.
        fireN: number of fire stations.
        waterN: number of water stations.
        solar_limit: upper cap on solar output for this timestep.
        wind_limit: upper cap on wind output for this timestep.

    Returns:
        ``(fire, water, solar, wind)`` — station lists with negative entries
        clamped to 0; solar and wind clamped to ``[0, limit]``.
    """
    # Fix vs. original: the return annotation was a tuple literal
    # `(list, list, float, float)`, which is not a valid type hint.
    fire = [max(0, action[i]) for i in range(fireN)]
    water = [max(0, action[i]) for i in range(fireN, fireN + waterN)]
    solar = min(max(0, action[fireN + waterN]), solar_limit)
    wind = min(max(0, action[fireN + waterN + 1]), wind_limit)
    return fire, water, solar, wind
