import os
import sys
import time

import numpy as np
import pandas as pd

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import torch
from envs.future_env import MyFuturesEnv
from model.patchtst import PatchTST
from configs.config_loader import Config
from agents.actor_critic_sample import ActorCritic_sample
from presentation import show

# Load the experiment configuration from YAML (env, patchtst, agent sections).
cfg = Config(yaml_path="configs/config_A.yaml")

# Prefer GPU when available, otherwise fall back to CPU.
# NOTE(review): `device` is not used below — the model is placed on
# cfg.patchtst["device"] instead; confirm this is intentional.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

config_env = cfg.env
env = MyFuturesEnv(config_env)
env.set_train()  # put the environment in training mode (train split)
# Build a throwaway class whose class attributes are the patchtst config
# entries, so PatchTST can read them via attribute access (config.d_model, ...).
patchtst_config = type("PatchTSTConfig", (object,), cfg.patchtst)
# patchtst_config.seq_len =TODO
patchtst = PatchTST(config=patchtst_config).to(cfg.patchtst["device"])
patchtst.set_pretrain()  # switch the backbone to pre-training mode

# Load training data: prices are (len, future_num); tech indicators are
# (len, future_num * tech_num), with each future's tech_num columns stored
# contiguously (assumed from the reshape below — TODO confirm column layout).
train_data1 = pd.read_csv(config_env["price_file_path"])  # (len, future_num)
train_data2 = pd.read_csv(config_env["tech_file_path"])  # (len, future_num * tech_num)
future_num = train_data1.shape[1]
tech_num = train_data2.shape[1] // future_num
len_data = train_data1.shape[0]
train_data2_reshaped = train_data2.values.reshape(len_data, future_num, tech_num)
# Stack price (channel 0) with the tech indicators (channels 1..tech_num) in a
# single vectorized concatenation instead of a per-future Python loop; cast to
# float64 to match the dtype the previous zeros-buffer implementation produced.
merged_data = np.concatenate(
    [train_data1.values[:, :, np.newaxis], train_data2_reshaped], axis=2
).astype(np.float64, copy=False)
# Move the future axis first: (future_num, len, 1 + tech_num).
merged_data = np.swapaxes(merged_data, 0, 1)

# ---- PatchTST pre-training, or restore from a saved checkpoint ----
CHECKPOINT_PATH = "./checkpoints/sample_patchtst.pth"  # single source of truth

LOAD = True  # True: restore pretrained weights; False: pre-train on merged_data.
if LOAD:
    # map_location="cpu" keeps loading device-agnostic; load_state_dict then
    # copies the tensors onto whatever device the model already lives on.
    patchtst.load_state_dict(
        torch.load(CHECKPOINT_PATH, map_location=torch.device("cpu"))
    )
    print("[Info] Successfully load model")
else:
    patchtst.fit(train_data=merged_data)

SAVE = False  # Set True to persist the (re)trained weights.
if SAVE:
    torch.save(patchtst.state_dict(), CHECKPOINT_PATH)

# Flattened feature size produced by the PatchTST head: d_model per patch
# times the patch count (seq_len - patch_len) / stride + 2 — the +2 is
# presumably for end-padding; TODO confirm against the PatchTST implementation.
head_nf = patchtst_config.d_model * int(
    (patchtst_config.seq_len - patchtst_config.patch_len) / patchtst_config.stride + 2
)
# Actor-critic agent that shares the PatchTST backbone as feature extractor.
agent = ActorCritic_sample(
    # env.get_future_count() * cfg.patchtst["window"] * (env.get_tech_num() + 1),
    # env.get_future_count() * head_nf * (env.get_tech_num() + 1),
    head_nf,  # state feature dimension fed to the actor/critic heads
    env.get_tech_num() + 1,  # channels per future: price + tech indicators
    cfg.agent["hidden_dim"],
    env.action_dim,
    cfg.agent["actor_lr"],
    cfg.agent["critic_lr"],
    cfg.agent["gamma"],
    cfg.agent["device"],
    env,
    patchtst,  # shared backbone, optionally frozen for the first epochs
    cfg.patchtst["lr"],
    cfg.agent["patchtst_freeze_epochs"],
)
patchtst.set_rl()  # switch the backbone from pre-training to RL mode
# ---- Training hyper-parameters and bookkeeping ----
num_episodes = 20  # episodes to run when training is enabled
print("start training...")
print(f"max_step: {env.get_max_step()}")
start_time = time.time()  # wall-clock reference for checkpoint logging
return_list, ratio_list = [], []
train = False       # False: skip training below and load a saved agent instead
train_num = 2000    # steps collected per update window
# -----train-----
# Train the agent for num_episodes episodes, updating once per window of
# train_num steps; otherwise restore a previously saved agent.
if train:
    agent.set_pretrain()
    for i_episode in range(int(num_episodes)):
        episode_return = 0
        # Rollout buffer for the current update window.
        transition_dict = {
            "states": [],
            "actions": [],
            "next_states": [],
            "rewards": [],
            "dones": [],
            "portfolio": [],
        }
        state = env.reset()
        done = False
        count = 0  # steps collected in the current window
        index = 0  # window index within the episode (used in plot filenames)
        while not done:
            count += 1
            action = agent.take_action(state)
            next_state, reward, done, _ = env.step(action)
            transition_dict["states"].append(state)
            transition_dict["actions"].append(action)
            transition_dict["next_states"].append(next_state)
            transition_dict["rewards"].append(reward)
            transition_dict["dones"].append(done)
            transition_dict["portfolio"].append(env.get_final_ratio())
            state = next_state
            episode_return += reward
            if count >= train_num:
                # Window full: plot it, update the agent on it, report the
                # win rate, then end this episode early (break).
                show.plot_price_and_actions(
                    prices=env.get_price_array(),
                    actions=transition_dict["actions"],
                    index=index,
                    length=train_num,
                    epoch=i_episode,
                    save_dir="./image",
                )
                agent.update(transition_dict, i_episode)
                print(
                    "win rate:",
                    show.show_win_rate(
                        prices=env.get_price_array()[: train_num + 1],
                        actions=transition_dict["actions"],
                    ),
                )
                transition_dict = {
                    "states": [],
                    "actions": [],
                    "next_states": [],
                    "rewards": [],
                    "dones": [],
                    "portfolio": [],
                }
                index += 1
                count = 0
                print("check point!", time.time() - start_time, "seconds passed")
                break
        print(
            "epoch:",
            i_episode,
            "final_ratio",
            env.get_final_ratio(),
        )
        print("-" * 15)

    agent.save_model()
else:
    agent.load_model()
# -----test-----
# Roll out the learned policy over the test split, recording every transition,
# then report portfolio value, trade actions, metrics, and win rate.
agent.set_rl()
env.set_test()

rollout = {
    key: []
    for key in ("states", "actions", "next_states", "rewards", "dones", "portfolio")
}
obs = env.reset()
finished = False
while not finished:
    act = agent.take_action(obs)
    next_obs, rew, finished, _ = env.step(act)
    rollout["states"].append(obs)
    rollout["actions"].append(act)
    rollout["next_states"].append(next_obs)
    rollout["rewards"].append(rew)
    rollout["dones"].append(finished)
    rollout["portfolio"].append(env.get_final_ratio())
    obs = next_obs

show.plot_portfolio_value(rollout["portfolio"])
show.plot_price_and_actions(
    prices=env.price_ary,
    actions=rollout["actions"],
)
print(show.calc_portfolio_metrics(rollout["portfolio"]))
res = show.show_win_rate_and_confusion_matrix(
    prices=env.get_price_array(), actions=rollout["actions"]
)

print("win rate:", res)