'''*****************************************************************************************************
# FileName    : lstm_example.py
# FileFunction: 用LSTM预测股票行情
# Comments    :
*****************************************************************************************************'''
import tushare as ts
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# cons = ts.get_apis()
# df = ts.bar('000300', conn=cons, asset='INDEX',start_date='2010-01-01',end_date='')
# # df.dropna()
# print(df.describe())
# # df.to_csv('sh300.csv')

'''**************************************************************************************
- FunctionName: generate_df_affect_by_n_days()
- Function    : Build a supervised matrix from the series: each row holds the n
                previous days as features plus that day's value as the label
                (last column 'y'); shape is (len(series)-n) x (n+1).
- Inputs      : series, n, index
- Outputs     : DataFrame with columns c0..c(n-1) and 'y'
- Comments    : raises when len(series) <= n
**************************************************************************************'''
def generate_df_affect_by_n_days(series, n, index=False):
    """Build a supervised-learning frame from a price series.

    Each output row holds the n previous values (columns 'c0'..'c(n-1)')
    plus the current value as the label (column 'y').

    series : pandas Series of values (e.g. daily highs)
    n      : window length — number of prior days used as features
    index  : if True, reuse series.index[n:] as the result's index
    returns: DataFrame with len(series) - n rows and n + 1 columns
    raises : ValueError when the series is too short to form one window
    """
    if len(series) <= n:
        raise ValueError("The Length of series is %d, while affect by (n=%d)." % (len(series), n))
    # Hoisted out of the loop: the original called series.tolist() n+1 times,
    # re-converting the whole series on every iteration.
    values = series.tolist()
    df = pd.DataFrame()
    for i in range(n):
        df['c%d' % i] = values[i:-(n - i)]
    df['y'] = values[n:]
    if index:
        df.index = series.index[n:]
    return df

'''**************************************************************************************
- FunctionName: readData()
- Function    : Load the CSV, index it by date, and build the supervised
                training matrix for the chosen column.
- Inputs      : column, n, all_too, index, train_end
- Outputs     : training DataFrame (and, if all_too, the full column series
                plus the date-index list)
- Comments    : train_end (negative) is how many trailing rows form the test set
**************************************************************************************'''
def readData(column='high', n=30, all_too=True, index=False, train_end=-500,
             file_path="/home/rrrachel/pytorch_demo/src/sh300.csv"):
    """Load the price CSV and build the supervised training matrix.

    column    : which column to model (default: daily 'high')
    n         : window length passed to generate_df_affect_by_n_days
    all_too   : if True also return the full column series and the date index
    index     : forwarded to generate_df_affect_by_n_days
    train_end : negative offset — the last |train_end| rows are the test set
    file_path : CSV location (was hard-coded; parameterized with the original
                path as default so existing callers are unaffected)
    returns   : train DataFrame, or (train DataFrame, full series, date list)
    """
    df = pd.read_csv(file_path, index_col=0)
    # Re-index by parsed date (pandas turns the datetime list into a DatetimeIndex)
    df.index = [datetime.datetime.strptime(d, "%Y-%m-%d") for d in df.index]
    df_column = df[column].copy()
    # Only the leading rows (up to train_end, which is negative) are trained on
    df_column_train = df_column[:train_end]
    df_generate_from_df_column_train = generate_df_affect_by_n_days(df_column_train, n, index=index)
    if all_too:
        return df_generate_from_df_column_train, df_column, df.index.tolist()
    return df_generate_from_df_column_train

'''*****************************************************************************************************
- Class Name  : RNN
- Function    : single-layer LSTM followed by a linear layer, used as a regressor
- Inputs      : input_size (feature dimension of each time step)
- Outputs     : per-step prediction of shape [batch, seq_len, 1]
- Comments    : None
*****************************************************************************************************'''
class RNN(nn.Module):
    """Single-layer LSTM (hidden size 64) topped by a linear head that maps
    each time step's hidden state to one scalar prediction."""

    def __init__(self, input_size):
        super(RNN, self).__init__()
        # batch_first=True -> inputs are [batch, seq_len, input_size]
        self.rnn = nn.LSTM(
            input_size=input_size,
            hidden_size=64,
            num_layers=1,
            batch_first=True,
        )
        # Kept as a Sequential so saved checkpoints keep the same state-dict keys
        self.out = nn.Sequential(nn.Linear(64, 1))

    def forward(self, x):
        # None -> zero-initialised hidden/cell states; final states are unused
        features, _ = self.rnn(x, None)
        return self.out(features)

'''*****************************************************************************************************
- Class Name  : TrainSet(Dataset)
- Function    : wrap a 2-D tensor as a Dataset (all columns but the last are
                features, the last column is the label)
- Inputs      : data (2-D tensor)
- Outputs     : (features, label) pairs via indexing
- Comments    : None
*****************************************************************************************************'''
class TrainSet(Dataset):
    """Dataset over a 2-D tensor: every column except the last is a feature,
    the last column is the regression target."""

    def __init__(self, data):
        # split features / target once up front, as float32
        self.data = data[:, :-1].float()
        self.label = data[:, -1].float()

    def __getitem__(self, idx):
        # supports dataset[idx] -> (features, label)
        return self.data[idx], self.label[idx]

    def __len__(self):
        return self.data.size(0)


# Hyper-parameters
n = 30            # number of past days used as features
LR = 0.0001       # learning rate
EPOCH = 100
train_end = -500  # the last 500 rows are held out as the test set

# Build the dataset
df, df_all, df_index = readData('high', n=n, train_end=train_end)

df_all = np.array(df_all.tolist())
plt.plot(df_index, df_all, label='real-data')

df_numpy = np.array(df)  # training matrix: n feature columns + label column

# Standardise the data; without this the loss is hard to drive down
df_numpy_mean = df_numpy.mean()
df_numpy_std = df_numpy.std()
df_numpy = (df_numpy - df_numpy_mean) / df_numpy_std

df_tensor = torch.from_numpy(df_numpy).float()
trainset = TrainSet(df_tensor)

# DataLoader wraps the Dataset into shuffled mini-batches for training:
#   for data, label in trainloader: ...
trainloader = DataLoader(trainset, batch_size=10, shuffle=True)

# Training
# Record the loss and display it on the web with tensorboardX
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir='logs')
# To resume from an already-trained model use: rnn = torch.load('rnn.pkl')
rnn = RNN(n).to(device)  # move the model to the GPU if available
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()  # mean-squared-error loss

for step in range(EPOCH):
    for tx, ty in trainloader:
        # copy the batch to the target device
        tx = tx.to(device)
        ty = ty.to(device)
        # tx: [batch, 30] -> unsqueeze to [batch, seq_len=1, input_size=30];
        # the model output is already on `device`, no extra .to() needed
        output = rnn(torch.unsqueeze(tx, dim=1))
        loss = loss_func(torch.squeeze(output), ty)
        optimizer.zero_grad()  # drop gradients left over from the previous batch
        loss.backward()
        optimizer.step()
    print(step, loss.item())
    # BUGFIX: was `if step % 10:`, which checkpointed on 9 of every 10 epochs;
    # the intent (guard against mid-run crashes) is a save every 10th epoch.
    if step % 10 == 0:
        torch.save(rnn, 'rnn.pkl')
    writer.add_scalar('sh300_loss', loss, step)

torch.save(rnn, 'rnn.pkl')

# Plot real vs. generated curves
generate_data_train = []
generate_data_test = []

# Position of the first test sample: train_end is negative, so this equals
# len(df_all) - 500 — the row where the train/test split falls.
test_index = len(df_all) + train_end

df_all_normal = (df_all - df_numpy_mean) / df_numpy_std
df_all_normal_tensor = torch.Tensor(df_all_normal)
rnn.eval()
with torch.no_grad():  # inference only: skip building the autograd graph
    for i in range(n, len(df_all)):
        x = df_all_normal_tensor[i - n:i]
        # shape [n] -> [1, 1, n] = [batch, seq_len, input_size]
        x = torch.unsqueeze(torch.unsqueeze(x, dim=0), dim=0).to(device)
        y = rnn(x).cpu()
        # de-standardise the prediction back to price scale
        value = torch.squeeze(y).numpy() * df_numpy_std + df_numpy_mean
        if i < test_index:
            generate_data_train.append(value)
        else:
            generate_data_test.append(value)
plt.plot(df_index[n:train_end], generate_data_train, label='generate_train')
plt.plot(df_index[train_end:], generate_data_test, label='generate_test')
plt.legend()
plt.show()



