import os
import sys

import numpy as np
import pandas as pd

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import torch
from envs.future_env import MyFuturesEnv
from model.patchtst import PatchTST
from configs.config_loader import Config
from agents.deterministic_agent import deterministic_agent
from presentation import show

# Load the experiment configuration for the deterministic agent (variant A).
cfg = Config(yaml_path="configs/config_deterministic_A.yaml")

# NOTE(review): `device` is computed here but never used below — the model and
# the agent each read their device from cfg instead. Consider removing or
# unifying with cfg.patchtst["device"] / cfg.agent["device"].
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Build the futures environment in training mode.
config_env = cfg.env
env = MyFuturesEnv(config_env)
env.set_train()
# Wrap the patchtst config dict in a throwaway class so it supports attribute
# access (PatchTST presumably reads `config.attr`, not `config["attr"]` — TODO confirm).
patchtst_config = type("PatchTSTConfig", (object,), cfg.patchtst)
patchtst = PatchTST(config=patchtst_config).to(cfg.patchtst["device"])
patchtst.set_pretrain()

# Load price and technical-indicator data from CSV.
train_data1 = pd.read_csv(config_env["price_file_path"])  # (len, future_num)
train_data2 = pd.read_csv(config_env["tech_file_path"])  # (len, future_num * tech_num)
future_num = train_data1.shape[1]
tech_num = train_data2.shape[1] // future_num
len_data = train_data1.shape[0]
# Group the flat indicator columns per future: (len, future_num, tech_num).
train_data2_reshaped = train_data2.values.reshape(len_data, future_num, tech_num)
# Stack prices (feature channel 0) with the tech indicators (channels
# 1..tech_num) in a single vectorized concatenation instead of a per-future
# Python loop.  astype keeps the float64 dtype the original zeros-buffer
# guaranteed even if the CSVs parse as integers.
merged_data = np.concatenate(
    (train_data1.values[:, :, np.newaxis], train_data2_reshaped), axis=2
).astype(np.float64, copy=False)
# Move the future axis first: (future_num, len_data, 1 + tech_num).
merged_data = np.swapaxes(merged_data, 0, 1)
# Either restore a pretrained PatchTST checkpoint or pretrain it on the merged
# data.  The checkpoint path is defined once so load and save cannot drift.
CKPT_PATH = "./checkpoints/determinist_patchtst.pth"
LOAD = True
if LOAD:
    # map_location makes loading robust when the checkpoint was saved on a
    # different device (e.g. a GPU checkpoint restored on a CPU-only machine).
    patchtst.load_state_dict(
        torch.load(CKPT_PATH, map_location=cfg.patchtst["device"])
    )
else:
    patchtst.fit(train_data=merged_data)
# NOTE(review): when LOAD is False you probably want SAVE = True, otherwise the
# freshly fitted weights are discarded at exit.
SAVE = False
if SAVE:
    torch.save(patchtst.state_dict(), CKPT_PATH)

# Assemble the deterministic trading agent from its device, the environment,
# and the pretrained price model.
agent = deterministic_agent(cfg.agent["device"], env, patchtst)

# ----- evaluation roll-out -----
# env.set_test()  # evaluation currently runs on the training split
agent.eval()

# Trajectory buffers.  "next_states" and "rewards" stay empty on purpose: this
# roll-out only needs states/actions/dones and the running portfolio value,
# but the dict keeps the full transition schema for interface compatibility.
transition_dict = {
    key: []
    for key in ("states", "actions", "next_states", "rewards", "dones", "portfolio")
}

obs = env.reset()
done = False
while not done:
    act = agent.take_action(obs)
    next_obs, reward, done, _ = env.step(act)
    transition_dict["states"].append(obs)
    transition_dict["actions"].append(act)
    transition_dict["dones"].append(done)
    transition_dict["portfolio"].append(env.get_final_ratio())
    obs = next_obs

# ----- reporting -----
# Plot the evaluation run and print summary statistics.
actions_taken = transition_dict["actions"]
portfolio_curve = transition_dict["portfolio"]

show.plot_portfolio_value(portfolio_curve)
show.plot_price_and_actions(
    prices=env.get_price_array(),
    actions=actions_taken,
)
show.show_win_rate(prices=env.get_price_array(), actions=actions_taken)

print(show.calc_portfolio_metrics(portfolio_curve))
