import torch
import pandas as pd
import random
import argparse
import time

# Hyper-parameters, configurable from the command line.
parser = argparse.ArgumentParser(description='超参数')
parser.add_argument('--head', type=int, default=1000, help='抓取的数目')
parser.add_argument('--rating', type=int, default=200, help='比例')
parser.add_argument('--batchsize', type=int, default=128, help='批量')
parser.add_argument('--lr', type=float, default=2e-3, help='学习率')
parser.add_argument('--max_data', type=int, default=5000, help='max_data')
parser.add_argument('--epoch', type=int, default=1000, help='epoch')
parser.add_argument('--hidden', type=int, default=128, help='hidden')
parser.add_argument('--fname', type=str, default='白云山600332.SH.xlsx', help='fname')
# BUG FIX: the default was the *string* '1000000'. argparse applies type=
# only to command-line values, never to defaults, so args.money was a str
# unless --money was passed explicitly, breaking all later arithmetic.
parser.add_argument('--money', type=int, default=1000000, help='money')
args = parser.parse_args()

# print(args.batchsize)

# Alternative data locations kept for reference:
# data_path = r'C:\Users\masaikk\Desktop\data'
# file_path = r'C:\Users\masaikk\Desktop\data_aug_2\{}'.format(args.fname)
file_path = r'/mnt/ygangao/data/{}'.format(args.fname)
# file_path = r'/home/b8313/coding/py/high-frequency-trading/test_data/立讯精密002475.SZ.xlsx'
# Load the whole spreadsheet; columns use Chinese headers (收盘价 = close, 价 = price, ...).
raw_file_data = pd.read_excel(file_path)
# file_data = raw_file_data.head(1000)
# Keep only the first args.head rows as the trading episode.
file_data = raw_file_data.head(args.head)
# Discrete action ids: 0-3 buy 100/50/20/10 lots, 4 holds, 5-8 sell 10/20/50/100 lots.
action_choose = [0, 1, 2, 3, 4, 5, 6, 7, 8]

# Initial average position cost = close price of the first row.
init_positions = file_data.iloc[0].收盘价
# NOTE(review): 1500000 is hard-coded here while cash uses args.money
# (default 1000000) — confirm the initial position value is intentional.
init_positions_number = 1500000 / init_positions
init_money = args.money  # starting cash
init_current_line = 0  # starting row index for each episode

# The state is 12-dimensional: 9 market features plus position size,
# average position cost, and cash (the extra cash dimension added for
# "question 3" in the original note).
def _build_q_network():
    """Construct one Q-network mapping a 12-dim state to 9 action values."""
    return torch.nn.Sequential(
        torch.nn.Linear(12, args.hidden),
        torch.nn.ReLU(),
        torch.nn.Linear(args.hidden, 9),
    )


# Online network (trained every step) and target network (synced lazily).
model = _build_q_network()
next_model = _build_q_network()

# Start the target network from the online network's weights.
next_model.load_state_dict(model.state_dict())


class StockEnv:
    """Single-stock trading environment stepped row-by-row through `file_data`.

    State (12-dim tuple): close, open, high, low, turnover, volume,
    current turnover, current volume, price, position size, average
    position cost, and cash. Actions 0-3 buy 100/50/20/10 lots,
    action 4 holds, actions 5-8 sell 10/20/50/100 lots.
    """

    # Lot size per buy action; a buy costs a fixed reward of -2 * lot.
    BUY_LOTS = {0: 100, 1: 50, 2: 20, 3: 10}
    # Lot size per sell action; an invalid sell is penalised -1000 * lot.
    SELL_LOTS = {5: 10, 6: 20, 7: 50, 8: 100}
    BUY_FEE = 0.00015   # commission rate charged on buys
    SELL_FEE = 0.00115  # commission + stamp-duty rate charged on sells

    def __init__(self):
        self.stock_data = file_data
        self.over_line = len(self.stock_data)          # episode length (rows)
        self.money = init_money                        # cash on hand
        self.action_choose = action_choose             # discrete action ids 0-8
        self.current_line = init_current_line          # row index into stock_data
        self.positions_number = init_positions_number  # lots currently held
        self.positions_price = init_positions          # average cost of holdings
        self.rating = args.rating                      # reward scale on realised P&L

    def step(self, action):
        """Apply `action` at the current row.

        Returns (next_state, reward, over, info) where info is always None.
        """
        row = self.stock_data.iloc[self.current_line]
        price = row['价']
        action2reward = -10  # default: hold (action 4) or unknown action

        if action in self.BUY_LOTS:
            lot = self.BUY_LOTS[action]
            # New average cost over the enlarged position.
            self.positions_price = ((self.positions_price * self.positions_number)
                                    + lot * price) / (self.positions_number + lot)
            self.positions_number = self.positions_number + lot
            self.money = self.money - lot * price - self.BUY_FEE * lot * price
            action2reward = -2 * lot  # same fixed costs as before: -200/-100/-40/-20
        elif action in self.SELL_LOTS:
            lot = self.SELL_LOTS[action]
            if self.positions_number < lot:
                # Invalid sell: heavy penalty and (BUG FIX) no cash credited.
                # The original added the sale proceeds *before* checking the
                # position, so a rejected sell still created money.
                action2reward = -1000 * lot
            else:
                self.money = self.money + lot * price - self.SELL_FEE * lot * price
                # Realised P&L vs. average cost, scaled; even a flat trade
                # costs 10 reward (赚到0元也要扣10reward).
                action2reward = self.rating * (price - self.positions_price) * lot - 10
                self.positions_number = self.positions_number - lot

        self.current_line += 1
        next_state = self.get_current_state()
        over = self.current_line == self.over_line - 1
        return next_state, action2reward, over, None

    def get_current_state(self):
        """Build the 12-dim state tuple for the current row."""
        current_stock = self.stock_data.iloc[self.current_line]
        state = (current_stock['收盘价'], current_stock['开盘价'], current_stock['最高价'],
                 current_stock['最低价'], current_stock['成交额（元）'], current_stock['成交量（手）'],
                 current_stock['当前额'], current_stock['当前量'], current_stock['价'],
                 self.positions_number, self.positions_price, self.money)
        return state

    def get_action(self, state):
        """Epsilon-greedy action (epsilon = 0.01) from the online network."""
        if random.random() < 0.01:
            return random.choice(self.action_choose)

        # Greedy action from the Q-network.
        reshape_state = torch.FloatTensor(state).reshape(1, 12)
        return model(reshape_state).argmax().item()

    def reset(self):
        """Restart the episode and return the initial state."""
        self.current_line = init_current_line
        self.positions_number = init_positions_number
        self.positions_price = init_positions
        # BUG FIX: cash must be restored too, otherwise money accumulated
        # (or lost) in one episode leaks into the next.
        self.money = init_money

        return self.get_current_state()


env = StockEnv()  # single shared environment instance used by the loops below

# Replay buffer: (state, action, reward, next_state, over) tuples,
# capped at 10000 entries by update_data().
datas = []


# 向样本池中添加N条数据,删除M条最古老的数据
# Add N new transitions to the replay buffer, then drop the oldest ones
# that exceed the buffer cap.
def update_data():
    """Play full episodes until at least args.max_data new transitions exist.

    Returns (update_count, drop_count): how many transitions were added
    and how many of the oldest ones were evicted to stay under the cap.
    """
    old_count = len(datas)

    # Keep playing until we have accumulated enough new transitions.
    while len(datas) - old_count < args.max_data:
        # Start a fresh episode.
        state = env.reset()

        # Play until the episode ends.
        over = False
        while not over:
            # Choose an action for the current state.
            action = env.get_action(state)

            # Execute it and observe the feedback.
            next_state, reward, over, _ = env.step(action)

            # Record the transition.
            datas.append((state, action, reward, next_state, over))

            # Advance to the next state.
            state = next_state

    update_count = len(datas) - old_count
    drop_count = max(len(datas) - 10000, 0)

    # Buffer cap: evict the oldest entries. PERF FIX: one slice delete is
    # O(k) overall, whereas the original `while: datas.pop(0)` loop shifted
    # the whole list on every pop (O(n) per eviction).
    del datas[:drop_count]

    return update_count, drop_count


# update_data(), len(datas)

# 获取一批数据样本
def get_sample():
    """Draw a random minibatch of args.batchsize transitions from the buffer.

    Returns tensors: state [b, 12] float, action [b, 1] long,
    reward [b, 1] float, next_state [b, 12] float, over [b, 1] long.
    """
    batch = random.sample(datas, args.batchsize)

    # Transpose the list of 5-tuples into five parallel sequences.
    states, actions, rewards, next_states, overs = zip(*batch)

    state = torch.FloatTensor(states).reshape(-1, 12)
    action = torch.LongTensor(actions).reshape(-1, 1)
    reward = torch.FloatTensor(rewards).reshape(-1, 1)
    next_state = torch.FloatTensor(next_states).reshape(-1, 12)
    over = torch.LongTensor(overs).reshape(-1, 1)

    return state, action, reward, next_state, over


def get_value(state, action):
    """Q-values of the taken actions under the online network.

    state is [b, 12], action is [b, 1]; returns a [b, 1] tensor of Q(s, a).
    """
    q_all = model(state)  # [b, 9]
    # Pick the column matching each sampled action.
    return q_all.gather(dim=1, index=action)


def get_target(reward, next_state, over):
    """Bellman target for a batch of transitions.

    The "true" score of a state has no exact solution here, so it is
    estimated from the lazily-synced target network (next_model):
    target = reward + 0.98 * max_a Q_target(next_state, a), with the
    future term zeroed where the episode has already ended.

    reward and over are [b, 1], next_state is [b, 12]; returns [b, 1].
    """
    # Best achievable score from the next state, per the target network.
    # [b, 12] -> [b, 9] -> [b, 1]; no gradients flow into next_model.
    with torch.no_grad():
        next_best = next_model(next_state).max(dim=1)[0].reshape(-1, 1)

    # Discount the future value, mask out finished episodes, add the
    # immediate reward.
    return reward + 0.98 * next_best * (1 - over)


def test_model():
    """Play one full episode with the current policy; return the reward sum.

    Higher is better. Note the policy is still epsilon-greedy (see
    StockEnv.get_action), so results vary slightly between runs.
    """
    state = env.reset()
    reward_sum = 0

    over = False
    while not over:
        state, reward, over, _ = env.step(env.get_action(state))
        reward_sum += reward

    return reward_sum


def train():
    """DQN training loop: refill the buffer, learn, periodically evaluate.

    Runs args.epoch epochs; each epoch adds fresh transitions via
    update_data() and performs 20 gradient steps, syncing the target
    network every 50 gradient steps overall. Saves the final weights
    under saves/.
    """
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    loss_fn = torch.nn.MSELoss()

    for epoch in range(args.epoch):
        # Refresh the replay buffer with new transitions.
        update_count, drop_count = update_data()
        if (epoch + 1) % 10 == 0:
            print('训练第{}epoch...'.format(epoch + 1))

        # Learn for a fixed number of steps per epoch.
        for i in range(20):
            # Sample a minibatch and compute predicted vs. target values.
            state, action, reward, next_state, over = get_sample()
            value = get_value(state, action)
            target = get_target(reward, next_state, over)

            # Gradient step on the TD error.
            loss = loss_fn(value, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Sync the target network every 50 gradient steps overall.
            # BUG FIX: the original tested (i + 1) % 50 with i in range(20),
            # which never fires, so next_model was never updated after its
            # initial copy.
            if (epoch * 20 + i + 1) % 50 == 0:
                next_model.load_state_dict(model.state_dict())

        # Periodic evaluation: average episode reward over 20 runs.
        if epoch % 50 == 0:
            test_result = sum(test_model() for _ in range(20)) / 20
            print(epoch, len(datas), update_count, drop_count, test_result)

    # Ensure the output directory exists before saving the weights.
    import os
    os.makedirs('saves', exist_ok=True)
    torch.save(model.state_dict(),
               'saves/model_{}_{}_{}_{}.pth'.format(args.hidden, int(time.time()), args.head, args.fname))


# Script entry point: training starts immediately on execution
# (no `if __name__ == "__main__":` guard in the original).
train()