import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn
import sys
import logging   # added by dabin
np.set_printoptions(suppress=True)  # print floats in fixed-point notation (no scientific notation)

class LSTM_trendRemoval:
    """Prepare drift (trend) data for LSTM-based trend removal.

    Overlays a simulated sinusoidal "force" on a measured trend signal,
    slices the result into sliding windows, and splits the windows into
    train/test torch tensors for an LSTM regressor.
    """

    def __init__(self, file_data):
        # file_data: 2-D array of shape (1, n_samples) holding the raw trend
        # signal (see the reshape in the caller) — TODO confirm for other callers
        self.data = file_data

    def sin_wave(self, A, f, fs, phi, t):
        """Generate a sampled sine wave.

        :param A:   amplitude
        :param f:   signal frequency (Hz)
        :param fs:  sampling frequency (Hz)
        :param phi: phase, in degrees
        :param t:   duration, in seconds
        :return: 1-D ndarray with t * fs samples
        """
        Ts = 1 / fs           # sampling period
        n = t / Ts            # number of samples
        n = np.arange(n)
        # phi is given in degrees, hence the pi/180 conversion
        y = A * np.sin(2 * np.pi * f * n * Ts + phi * (np.pi / 180))
        return y

    def addSimulatedForce(self, amplitude):
        """Overlay a simulated 1.5 Hz sinusoidal force on the raw trend.

        :param amplitude: amplitude of the simulated force wave
        :return: float32 array of shape (2, 1, n_samples);
                 channel 0 is the pure force wave (target),
                 channel 1 is force + trend (network input)
        """
        dataLen = self.data.shape[1]
        data = np.zeros((2, 1, dataLen))
        # 1.5 Hz sine sampled at 100 Hz, covering the full data length
        sinwave = self.sin_wave(amplitude, 1.5, 100, 0, dataLen / 100)
        data[0][0] = sinwave.reshape(1, dataLen)
        data[1][0] = sinwave + self.data
        dataset = data.astype('float32')
        return dataset

    def preprocessData(self, dataset, look_back=100):
        """Rescale the dataset by its value range and build sliding windows.

        NOTE(review): this divides by (max - min) but does not subtract the
        minimum, so it is a pure rescaling rather than full min-max
        normalisation — presumably intentional; confirm with the author.

        :param dataset: array of shape (2, n_series, n_samples)
        :param look_back: sliding-window length
        :return: (dataX, dataY) as produced by create_dataset
        """
        max_value = np.nanmax(dataset)   # nan-safe extrema
        min_value = np.nanmin(dataset)
        scalar = max_value - min_value
        dataset = dataset / scalar
        dataX, dataY = self.create_dataset(dataset, look_back)
        return dataX, dataY

    def create_dataset(self, dataset, look_back):
        """Slice the (input, target) channels into sliding windows.

        :param dataset: array of shape (2, n_series, n_samples); channel 1 is
                        the network input (trend + force), channel 0 the
                        target (pure force)
        :param look_back: window length
        :return: dataX of shape (n_windows, look_back),
                 dataY of shape (n_windows,)
        """
        dataX, dataY = [], []
        d1, d2, d3 = dataset.shape
        for j in range(d2):
            for i in range(d3 - look_back):
                a = dataset[1][j][i:(i + look_back)]
                dataX.append(a)                              # input window
                dataY.append(dataset[0][j][i + look_back])   # target: force value right after the window
        return np.array(dataX), np.array(dataY)

    def splitDataset(self, data_X, data_Y, trainLen, testLen, verifyLen):
        """Split windows into train/test torch tensors.

        :param data_X: windows, shape (n_windows, look_back)
        :param data_Y: targets, shape (n_windows,)
        :param trainLen: fraction of windows used for training
        :param testLen:  fraction of windows used for testing
        :param verifyLen: fraction reserved for verification — currently
                          accepted but unused (no verification set is returned)
        :return: (train_x, train_y, test_x) torch tensors shaped
                 (seq, batch=1, feature)
        """
        train_size = int(len(data_X) * trainLen)
        test_size = int(len(data_X) * testLen)
        verify_size = int(len(data_X) * verifyLen)  # reserved, not returned

        train_X = data_X[:train_size]
        train_Y = data_Y[:train_size]

        test_X = data_X[train_size:train_size + test_size]
        test_Y = data_Y[train_size:train_size + test_size]

        # The RNN consumes (seq, batch, feature); there is a single series,
        # so batch is 1 and feature is the window length.
        # BUG FIX: the original referenced an undefined global `look_back`
        # (NameError at runtime); derive the window length from the data.
        look_back = data_X.shape[-1]
        train_X = train_X.reshape(-1, 1, look_back)
        train_Y = train_Y.reshape(-1, 1, 1)
        test_X = test_X.reshape(-1, 1, look_back)

        train_x = torch.from_numpy(train_X)
        train_y = torch.from_numpy(train_Y)
        test_x = torch.from_numpy(test_X)
        return train_x, train_y, test_x
class lstm_reg(nn.Module):
    """LSTM regressor: a (stacked) LSTM layer followed by a linear read-out.

    :param input_size:  feature dimension of each time step
    :param hidden_size: number of LSTM hidden units
    :param output_size: dimension of the regression output (default 1)
    :param num_layers:  number of stacked LSTM layers (default 1)
    """

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=1):
        super(lstm_reg, self).__init__()
        # batch_first=False: inputs are expected shaped (seq, batch, feature)
        self.rnn = nn.LSTM(input_size, hidden_size, num_layers,
                           batch_first=False)
        # regress each hidden vector down to output_size
        self.reg = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the LSTM and project every time step to the output size.

        :param x: tensor of shape (seq, batch, input_size)
        :return:  tensor of shape (seq, batch, output_size)
        """
        x, _ = self.rnn(x)     # discard the (h_n, c_n) hidden states
        s, b, h = x.shape
        # flatten (seq, batch) so the linear layer sees a 2-D input
        x = x.contiguous().view(s * b, h)
        x = self.reg(x)
        x = x.view(s, b, -1)   # restore (seq, batch, output)
        return x

    def trainByDabin(self, train_x, train_y, n_epochs=10):
        """Train the network with Adam + MSE loss and save its weights.

        BUG FIX: the original referenced a global `net` instead of `self`
        (NameError unless such a global exists), ignored `n_epochs` (it always
        ran 50 iterations), and unconditionally called .cuda(). The model
        itself is now trained, the epoch count is honoured, and the inputs
        follow the model's device (identical behavior for a .cuda()'d model,
        and it no longer crashes on CPU-only machines).

        :param train_x: input tensor, shape (seq, batch, feature)
        :param train_y: target tensor, shape (seq, batch, 1)
        :param n_epochs: number of training epochs
        """
        criterion = nn.MSELoss()
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-2)
        device = next(self.parameters()).device  # train wherever the model lives

        for e in range(n_epochs):
            var_x = train_x.contiguous().to(device)
            var_y = train_y.contiguous().to(device)

            out = self(var_x)
            loss = criterion(out, var_y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (e + 1) % 2 == 0:
                print('Epoch: {}, Loss:{:.5f}'.format(e + 1, loss.item()))
        torch.save(self.state_dict(), 'trendRemoval.net_params.pkl')

def save_excel(jilu, io_dirResults, name):
    """
    Save the combined record array as a CSV file.
    :param jilu: 2-D records to save (columns: trend, trend+force, force)
    :param io_dirResults: output directory prefix (must end with a separator)
    :param name: trend column name; also the base name of the output file
    :return: None
    """
    out_path = io_dirResults + f'{name}.csv'
    header = [name, f'{name}Force', 'Force']
    frame = pd.DataFrame(jilu, columns=header)
    frame.to_csv(out_path, index=True, encoding="utf_8_sig")
    # frame.to_excel(out_path, index=True, encoding="utf_8_sig")
if __name__ == '__main__':
    # NOTE(review): `global` at module top level is a no-op; harmless but unnecessary.
    global logger
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(__name__)  # added by dabin
    use_cuda = torch.cuda.is_available()
    logger.info("Is CUDA available? %s.", use_cuda)

    # load the raw trend signal and reshape it to (1, n_samples)
    df1 = pd.read_excel('data/trendTermData210108.xlsx')
    data_raw = df1['trend1']
    dataset = np.array(data_raw)
    dataset = dataset.astype('float32').reshape(1, len(dataset))

    # NOTE(review): this rebinds the class name to an instance, shadowing the
    # class for the rest of the script — consider renaming the variable.
    LSTM_trendRemoval=LSTM_trendRemoval(dataset)

    #--------------- superimpose the simulated force on the drift ------------------
    dat = pd.read_csv('data/trendTermData210108_del.csv')
    selectedTrend='trend3'
    # candidate row counts used in earlier runs: 38914, 45915, 20715
    X = dat.loc[1: 20715, [x for x in dat.columns.tolist() if x == selectedTrend]].values
    print(X.shape)
    # NOTE(review): len(X-1) equals len(X) — the "-1" is applied element-wise,
    # not to the length; presumably `(len(X)-1)/100` was intended. As written
    # the wave gets exactly len(X) samples, which is what the addition below
    # requires, so this "works by accident" — do not "fix" without re-checking
    # the shapes.
    t=len(X-1)/100
    wave1=LSTM_trendRemoval.sin_wave(100,1.4,100,0,t).reshape(-1,1)
    # wave2=LSTM_trendRemoval.sin_wave(4,1.5,100,np.pi/3.5,t)
    # wave3=LSTM_trendRemoval.sin_wave(3,0.9,100,np.pi/2.2,t)
    # wave4=LSTM_trendRemoval.sin_wave(2,1.4,100,np.pi/1.5,t)
    # wavesum=wave1+wave2+wave3+wave4
    print( wave1.shape)
    Y=wave1+X   # trend plus simulated force
    print(Y.shape)
    XY=np.append(X, Y, axis=1)   # columns: [trend, trend+force]
    print(XY.shape)
    # NOTE(review): this slice is empty when only 20715 rows are selected —
    # it only prints data for the larger row counts listed above.
    print(XY[45897:45900,:])
    XYwave1 = np.append(XY,wave1, axis=1)   # columns: [trend, trend+force, force]
    print('wancheng   np.append')
    save_excel( XYwave1, 'data/', selectedTrend)
    plt.figure(1)
    plt.plot(Y[1:10000])
    plt.show()

#     dataset=LSTM_trendRemoval.addSimulatedForce(150)
#     look_back=5
#     data_X,data_Y=LSTM_trendRemoval.preprocessData(dataset,look_back)
#     train_X, train_Y, test_X=LSTM_trendRemoval.splitDataset(data_X,data_Y,0.8,0.1,0.1)
# #    print(train_X.shape())
#     net = lstm_reg(look_back, 4)
#     net = net.cuda()
#     net.trainByDabin(train_X, train_Y,100)

    # print('第{row}行:'.format(row=sys._getframe().f_lineno), \
    #       'train_X', train_X.shape)

    # plt.figure(1)
    # plt.plot(dataset[1][0])
    # plt.show()