import os.path

import numpy as np

from src.RL.QNet import DQN, QNetworkWithLSTM
from src.RL.FinEnv import StockTradingEnvironment
import torch
from src.EnvironmentVariables import BASE_PATH
import pandas as pd
import random
from src.Utils.MyUtil import LinePlot
from src.RL.TCNQnet import QNetWithTCN
from src.RL.EasyNet import EasyQNet

# Observation window length in trading days; note the entry points below
# override this to 1 (via `global WINDOW_SIZE`) for the EasyQNet setup.
WINDOW_SIZE = 30
# Default number of training episodes for DQNTrainer.
EPISODE = 80


def state2tensor(_state, input_col):
    """Convert an environment state into a pair of float tensors.

    :param _state: ``(window_data, position)`` tuple. ``window_data`` is
        either an object indexable by ``input_col`` with a ``.values``
        array (e.g. a DataFrame window) or an already-built tensor;
        ``position`` is a scalar holding state or an already-built tensor.
    :param input_col: feature columns selected from ``window_data``;
        ignored when the data is already a tensor.
    :return: ``(x_data, position)`` tensors; ``x_data`` has any singleton
        second dimension squeezed out so it matches the network input.
    """
    _x_data, _position = _state
    # isinstance is the idiomatic check and, unlike `type(...) is`, also
    # accepts torch.Tensor subclasses instead of misrouting them into the
    # DataFrame-conversion branch. Non-tensor inputs are converted and
    # moved to the GPU (training runs on CUDA throughout this module).
    if not isinstance(_x_data, torch.Tensor):
        _x_data = torch.FloatTensor(np.array([_x_data[input_col].values])).cuda()
    if not isinstance(_position, torch.Tensor):
        _position = torch.FloatTensor(np.array([[_position]])).cuda()
    # Drop a singleton window dimension (e.g. WINDOW_SIZE == 1).
    if _x_data.shape[1] == 1:
        _x_data = torch.squeeze(_x_data, 1)
    return _x_data, _position


class DQNTrainer:
    """Drives the DQN training loop over one or more stocks' histories.

    Each episode replays a single stock's DataFrame through a
    ``StockTradingEnvironment``; every step stores a transition in the
    agent's replay buffer and triggers one learning update.
    """

    def __init__(self, stockdata: dict, episode=EPISODE, save_name="", base_model_path=None,
                 DQN_type: type = QNetworkWithLSTM):
        """
        :param stockdata: mapping of stock code -> preprocessed price DataFrame.
        :param episode: total number of training episodes to run.
        :param save_name: filename prefix for saved checkpoints.
        :param base_model_path: optional path of pretrained weights to warm-start from.
        :param DQN_type: Q-network class handed to the DQN agent.
        """
        self.dqn = DQN(base_model_path, QNetWork=DQN_type)
        # Parallel lists: index i pairs a DataFrame with its stock code.
        self.stockdata = list(stockdata.values())
        self.stock_name = list(stockdata.keys())
        self.episode = episode
        self.save_name = save_name

    def train(self, input_col=None):
        """Run ``self.episode`` training episodes, one stock per episode.

        :param input_col: feature columns fed to the network; defaults to
            ``StockTradingEnvironment.INPUT_COL`` (which callers may have
            reassigned before training).
        """
        if input_col is None:
            input_col = StockTradingEnvironment.INPUT_COL

        for episode in range(self.episode):
            # Reshuffle the stock order at the start of every full pass.
            if episode % len(self.stockdata) == 0:
                t = list(zip(self.stockdata, self.stock_name))
                random.shuffle(t)  # shuffle data/name pairs together
                self.stockdata[:], self.stock_name[:] = zip(*t)

            data_df = self.stockdata[episode % len(self.stockdata)]
            data_id = self.stock_name[episode % len(self.stockdata)]
            kwargs4env = {
                'stock_data': data_df,
                'window_size': WINDOW_SIZE
            }
            env = StockTradingEnvironment(**kwargs4env)
            state = env.reset(**kwargs4env)

            # Wealth curves start at 1.0 and compound the per-step profits.
            total_wealth = [1]
            stock_wealth = [1]
            turn_step = 0
            reward_list = [0]
            real_profit_list = [0]
            stock_profit_list = [0]
            while True:
                turn_step += 1

                state = state2tensor(state, input_col)

                action = self.dqn.choose_action(state)
                next_state, reward, done = env.step(action)
                # env.step packs a triple: (shaped reward, realized profit,
                # keep_profit — presumably the buy-and-hold return; confirm in FinEnv).
                reward, real_profit, keep_profit = reward

                reward_list.append(reward)
                real_profit_list.append(real_profit)
                stock_profit_list.append(keep_profit)

                next_state = state2tensor(next_state, input_col)

                total_wealth.append(total_wealth[-1] * (1 + real_profit))
                stock_wealth.append(stock_wealth[-1] * (1 + keep_profit))

                self.dqn.store_transition(state,
                                          torch.tensor([action], dtype=torch.int64).cuda(),
                                          torch.FloatTensor([reward]).cuda(),
                                          next_state,
                                          torch.FloatTensor([done]).cuda())
                state = next_state
                self.dqn.learn()
                # Progress log every 200 steps (mean stats over the episode so far).
                if turn_step % 200 == 0:
                    from src.Utils.MyUtil import average_holding_time
                    print(
                        f"Episode:{episode + 1}/{self.episode}; Stock code:{data_id}; "
                        f"Turn Step:{turn_step}; "
                        f"Total wealth:{total_wealth[-1]:.4f}; "
                        f"Stock wealth:{stock_wealth[-1]:.4f}; "
                        f"Reward:{np.mean(reward_list):.4f}/{np.mean(real_profit_list) * 100:.4f}/"
                        f"{np.mean(stock_profit_list) * 100:.4f}; "
                        # f"Target Q:{np.mean(DQN.q_target_list):.4f}/{np.std(DQN.q_target_list):.4f}; "
                        f"Epsilon:{DQN.epsilon:.4f}; "
                        f"Avg Holding Time:{average_holding_time(env.position_his[-min(500, len(env.position_his)):]):.2f}day")

                if done == 1:
                    # Episode finished: plot wealth curves, positions and
                    # profit series for this stock, then leave the step loop.
                    drawing_board = LinePlot(f"{data_id}", os.path.join(BASE_PATH, 'tmp', f'{data_id}.png'))
                    drawing_board.draw(range(len(total_wealth)),
                                       stock_wealth,
                                       total_wealth,
                                       env.position_his,
                                       np.array(reward_list),
                                       np.array(real_profit_list),
                                       np.array(stock_profit_list))

                    break
            # Checkpoint once per full pass over the stock list.
            if episode % len(self.stockdata) == 0:
                self.dqn.save(os.path.join(BASE_PATH, 'Models/RL_model', f"{self.save_name}checkpoint{episode}.pt"))

        self.dqn.save(os.path.join(BASE_PATH, 'Models/RL_model', f"{self.save_name}checkpoint{self.episode}.pt"))


def fix_para4single_stock(stock_id, base_model_path: str, splitTime='2019-1-1'):
    """Fine-tune the pretrained agent on a single stock's pre-split history.

    :param stock_id: stock code; also used as the checkpoint name prefix.
    :param base_model_path: path to the pretrained weights to start from.
    :param splitTime: rows dated on/after this cutoff are excluded.
    """
    data_dir = os.path.join(BASE_PATH, 'data/preProcessedIndexData/sz50stockSignal')
    csv_path = os.path.join(data_dir, stock_id + '.csv')

    history = pd.read_csv(csv_path, parse_dates=['date'])
    history = history.sort_values('date').dropna()
    cutoff = pd.to_datetime(splitTime)
    history = history[history['date'] < cutoff]

    # Single-step observation window with the lightweight network's features.
    StockTradingEnvironment.INPUT_COL = EasyQNet.NEED_COL
    global WINDOW_SIZE
    WINDOW_SIZE = 1

    trainer = DQNTrainer({stock_id: history}, episode=10, save_name=stock_id,
                         base_model_path=base_model_path, DQN_type=EasyQNet)
    DQN.set_soft_update(0.5)
    trainer.train()


def train():
    """Train the RL agent across the SZ50 stock universe.

    Reads every per-stock signal CSV, keeps pre-2015 history, drops
    stocks with fewer than 200 usable rows, then runs the shared
    training loop with a single-step observation window.
    """
    dir_path = os.path.join(BASE_PATH, 'data/preProcessedIndexData/sz50stockSignal')
    files_name = os.listdir(dir_path)  # ['sh600010.csv']
    datasets = dict()
    for name in files_name:
        # Robustness: os.listdir may return stray non-CSV entries
        # (subdirectories, hidden files); skip them instead of crashing
        # in pd.read_csv.
        if not name.endswith('.csv'):
            continue
        df = pd.read_csv(os.path.join(dir_path, name), parse_dates=['date'])
        df = df[df['date'] < pd.to_datetime('2015-1-1')]
        df = df.sort_values('date').dropna()
        if len(df) < 200:
            continue  # too little history to train on
        datasets[name.split(".")[0]] = df
    print(f"训练股票只数{len(datasets)}")  # "number of stocks in training set"
    StockTradingEnvironment.INPUT_COL = EasyQNet.NEED_COL
    global WINDOW_SIZE
    WINDOW_SIZE = 1
    trainer = DQNTrainer(datasets, DQN_type=EasyQNet)
    trainer.train()


if __name__ == "__main__":
    fix_para4single_stock('sh600519', os.path.join(BASE_PATH, 'Models/RL_model/checkpoint80.pt'))
    # train()
