__author__ = 'Administrator'

from models import LSTM_Model,GRU_Model,TCN_Model,ALSTM_Model,BiLSTM,RNN_Model,CNN_LSTM_Model
import torch
from torch import nn
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from torch.utils.data import DataLoader,TensorDataset
from sklearn.metrics import precision_score,recall_score,f1_score,roc_curve,auc,classification_report,accuracy_score
from bayes_opt import BayesianOptimization
from matplotlib import pyplot as plt
from PatchTST import PatchTST
from iTransformer import iTransformer
from Nonstationary_Transformer import NS_Transformer
from Informer import Informer
from Autoformer import Autoformer
from FEDformer import FEDformer

'''
    股票数据预测，根据前面N天的M列特征数据预测N+1 天的股票涨跌情况
'''
class StockPrediction():
    """Stock movement prediction.

    Classifies day N+1's up/down/uncertain move from the previous N days of
    M feature columns (OHLCV by default), using a selectable deep model.
    """

    def __init__(self):
        # Feature columns used for training.
        self.featureNames = ['open','high','low','close','volume']#'open','high','low',

    def loadDatas(self):
        """Load the indicator CSV, build the label column and return the cleaned DataFrame.

        Side effect: stores per-class sample counts in ``self.labelConts``
        (consumed later by ``cal_classWeights``).
        """
        # CSV columns: date open high low close volume pct RSRS RSRS_Z RSRS_Revise
        # RSRS_Positive RSRS_Passivation LLT hightMoment pvm Volatility rps K D J
        # BOLLUP BOLL BOLLDN RSI ATR SAR ROC
        datas = pd.read_csv('./DataFile/000300_Indicator.csv')
        # Next-day percentage change of the close price.
        datas['pct'] = (datas['close'].shift(-1)/datas['close']-1.0)*100.0

        ## 1. Fixed-threshold labelling
        #datas['label'] = datas['pct'].apply(self.classify_fun)

        ## 2. Standard-deviation labelling, variants 1 and 2
        tmpN = 20
        #datas['label'] = datas['pct'].rolling(window=tmpN+1).apply(lambda x:self.classify_funStd(x,N=tmpN))
        #datas['label'] = datas['pct'].rolling(window=tmpN+1).apply(lambda x:self.classify_funStd2(x,N=tmpN))

        ## 3. Future-return labelling
        # window = 3
        # datas['future_return'] = (datas['close'].shift(-window)/datas['close']-1.0)*100.0
        # datas['label'] = datas['future_return'].apply(lambda x: self.classify_rateReturn(x))

        ## 4. Relative-strength labelling: today's return versus `window` days ago (active)
        window = 20
        datas['relative_Return'] = (datas['close']/datas['close'].shift(window)-1.0)*100.0
        datas['label'] = datas['relative_Return'].apply(lambda x: self.classify_relativeReturn(x))

        # Drop rows with NaNs introduced by shift()/rolling().
        datas = datas.dropna()
        #datas.to_csv('./tmp.csv',index=False)
        print(datas["label"].value_counts())  # per-class sample counts
        self.labelConts = datas["label"].value_counts().values
        return datas

    def classify_fun(self,change):
        """Fixed-threshold labelling: 1 = up, 2 = down, 0 = uncertain.

        NOTE(review): the thresholds are asymmetric (0.2 / -1.53), not the
        symmetric +-2% the original comments claimed.
        """
        if change > 0.2:      # change > 0.2% -> up
            return 1
        elif change < -1.53:  # change < -1.53% -> down
            return 2
        else:                 # anything in between -> uncertain
            return 0

    def classify_funStd(self,row,N):
        """Std-dev labelling: label the (N+1)-th value against the first N values.

        row -- rolling window of length N+1 (a pandas Series)
        N   -- number of history values used for mean/std
        Returns 1 if the new value exceeds mean+2*std, 2 if below mean-2*std, else 0.
        """
        mean = row.iloc[:N].mean()
        std = row.iloc[:N].std()

        res = 0
        if row.iloc[N] > (mean + 2*std):
            res = 1
        elif row.iloc[N] < (mean - 2*std):
            res = 2
        else:
            res = 0
        return res

    def classify_funStd2(self,row,N):
        """Std-dev labelling, variant 2: compute mean/std separately for the
        positive (up) and negative (down) history values, and compare the new
        value against its own side's band."""
        # Split history into up moves and down moves.
        positive_data = row.iloc[:N][row.iloc[:N]>0]
        negative_data = row.iloc[:N][row.iloc[:N]<=0]

        # Per-side mean and standard deviation.
        positive_mean = positive_data.mean()
        positive_std = positive_data.std()

        negative_mean = negative_data.mean()
        negative_std = negative_data.std()

        res = 0
        if row.iloc[N] > (positive_mean + positive_std):
            res = 1
        elif row.iloc[N] < (negative_mean - negative_std):
            res = 2
        else:
            res = 0
        return res

    def classify_rateReturn(self,future_return):
        """Return-rate labelling: future_return = (close[i+n]/close[i]-1)*100.

        1 if above +threshold, 2 if below -threshold, else 0.
        """
        threshold = 1.0
        if future_return > threshold:
            return 1
        elif future_return < -threshold:
            return 2
        else:
            return 0

    def classify_relativeReturn(self,past_return):
        """Relative-strength labelling: today's change versus N days ago.

        1 if above +threshold, 2 if below -threshold, else 0.
        """
        threshold = 1.0
        if past_return > threshold:
            return 1
        elif past_return < -threshold:
            return 2
        else:
            return 0

    def DataHandle(self,batchsize,window):
        """Preprocess data into train/test DataLoaders.

        batchsize -- DataLoader batch size
        window    -- sliding-window length (time steps per sample)
        """
        datas = self.loadDatas()  # load K-line data from file
        featureNames = self.featureNames

        # Chronological 80/20 split (no shuffling for time series).
        nlen = len(datas)
        ntrainlen = int(nlen*0.8)
        traindata = datas.iloc[:ntrainlen]
        testdata = datas.iloc[ntrainlen:]

        # Fit the scaler on the training set only...
        stand = StandardScaler()
        x_train = stand.fit_transform(traindata[featureNames].values).reshape(-1,len(featureNames))
        y_train = traindata['label'].values

        # ...and reuse it on the test set. (Bug fix: the original fitted a
        # second scaler on the test data, leaking test-set statistics into
        # the evaluation.)
        x_test = stand.transform(testdata[featureNames].values).reshape(-1,len(featureNames))
        y_test = testdata['label'].values

        trainx,trainy = self.datatoTimeseries(x_train,y_train,window)
        testx,testy = self.datatoTimeseries(x_test,y_test,window)

        # Wrap tensors into iterable training/validation pipelines.
        trainloader = DataLoader(TensorDataset(trainx,trainy),batch_size=batchsize,shuffle=False)
        testloader = DataLoader(TensorDataset(testx,testy),batch_size=batchsize,shuffle=False)

        return trainloader,testloader

    def datatoTimeseries(self,datax,datay,window):
        """Slide a window over the data and convert to tensors.

        datax  -- feature array, shape (T, n_features)
        datay  -- label array, shape (T,)
        window -- window length; window<=1 yields single-step samples
        Returns (x, y) float tensors; y has shape (n_samples, 1).
        """
        x_data = []
        y_data = []

        for i in range(len(datax)-window):
            if window <= 1:
                x_data.append(datax[i])
                y_data.append(datay[i])
            else:
                # Each sample is `window` consecutive rows; its label is the
                # label of the last row in the window.
                x_data.append(datax[i:i+ window])
                y_data.append(datay[i+ window-1])
        # numpy -> tensor
        x = torch.tensor(np.array(x_data),dtype=torch.float)
        y = torch.tensor(np.array(y_data),dtype = torch.float).view(-1,1)
        return x,y

    def trai_step(self,model,features,labels):
        """One training step; returns the batch loss as a float.

        NOTE(review): name keeps the original 'trai_step' spelling to avoid
        breaking external callers.
        """
        # Training mode: dropout layers active.
        model.train()
        # Reset accumulated gradients.
        model.optimizer.zero_grad()
        # Forward pass.
        predictions = model(features)

        # (NS-Transformer) clamp outputs into a sane range if needed:
        #predictions = torch.clamp(predictions,min=1e-9,max=1.0-1e-9)

        # Multi-class: cast labels to long and flatten [B,1] -> [B].
        loss = model.loss_func(predictions,labels.long().squeeze())
        # Backward pass.
        loss.backward()
        # Optional gradient clipping:
        #torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        model.optimizer.step()
        return loss.item()

    def valid_step(self,model,features,labels):
        """One validation step; returns the batch loss as a float."""
        # Eval mode: dropout layers disabled.
        model.eval()

        # Bug fix: run validation under no_grad so no autograd graph is
        # built (the original left the @torch.no_grad decorator commented out).
        with torch.no_grad():
            predictions = model(features)
            # (NS-Transformer) clamp outputs into a sane range if needed:
            #predictions = torch.clamp(predictions, min=1e-9, max=1 - 1e-9)

            # Multi-class: cast labels to long and flatten [B,1] -> [B].
            loss = model.loss_func(predictions,labels.long().squeeze())
        return loss.item()

    def plot_metric(self,dfhistory,metric):
        """Plot a training-history metric (e.g. loss) over epochs."""
        train_metrics = dfhistory[metric]
        val_metricess = dfhistory["val_"+metric]  # kept for the optional val curve below
        epochs = range(1,len(train_metrics)+1)
        plt.plot(epochs,train_metrics,"bo--")
        #plt.plot(epochs,val_metricess,"ro--")

        plt.title("Train_"+metric)
        plt.xlabel("Epochs")
        plt.ylabel(metric)
        plt.legend(["train_"+metric,"val_"+metric])
        plt.show()

    def train_model(self,model,epochs,dl_train,dl_valid):
        """Full train/validate loop.

        model    -- model with .optimizer and .loss_func already attached
        epochs   -- number of epochs
        dl_train -- training DataLoader
        dl_valid -- validation DataLoader
        Returns a DataFrame with per-epoch mean train/val loss.
        """
        dfhistory = pd.DataFrame(columns =["epoch","loss","val_loss"])

        for epoch in range(1,epochs+1):
            # 1. training loop ------------------------------------------------
            loss_sum = 0.0
            step = 0
            for features,labels in dl_train:
                loss_sum += self.trai_step(model,features,labels)
                # Bug fix: the original initialised the counter to 1 AND
                # incremented per batch, dividing by n_batches+1.
                step += 1

            # 2. validation loop ----------------------------------------------
            val_loss_sum = 0.0
            val_step = 0
            for (features,labels) in dl_valid:
                val_loss_sum += self.valid_step(model,features,labels)
                val_step += 1

            # Record epoch-level averages (guard against empty loaders).
            info = (epoch,loss_sum/max(step,1),val_loss_sum/max(val_step,1))
            dfhistory.loc[epoch-1] = info
            print(("EPOCH = %d, loss = %.3f,val_loss = %.3f")%info)
        return dfhistory

    def modelEvaluation(self,name,ytest,ypred):
        """Print and return [accuracy, precision, recall, F1] (macro-averaged)."""
        testAcc = round(accuracy_score(ytest,ypred),3)  # accuracy
        fprecision = round(precision_score(ytest,ypred,labels=None,pos_label=1,average='macro',zero_division=1),3)  # precision
        frecall = round(recall_score(ytest,ypred,labels=None,pos_label=1,average='macro',sample_weight=None),3)  # recall
        F1score = round(f1_score(ytest,ypred,labels=None,pos_label=1,average='macro',sample_weight=None),3)  # F1
        print("%s,准确率:%0.3f,精确率:%0.3f,召回率;%0.3f,F1core:%0.3f"%(name,testAcc,fprecision,frecall,F1score))
        print("评估报告",classification_report(ytest,ypred,zero_division=1))
        return [testAcc,fprecision,frecall,F1score]

    def PredictionEvaluation(self,model,testloader):
        """Run the model over a loader and return (predicted labels, true labels)
        as flat numpy arrays."""
        precisions_list = []
        labels_list = []
        # Bug fix: switch to eval mode so dropout is disabled at inference.
        model.eval()
        with torch.no_grad():
            for data in testloader:
                inputs,labels = data

                outputs = model(inputs)
                # Multi-class: argmax over class probabilities -> label ids.
                outputs = outputs.argmax(dim=1)
                precisions_list.append(outputs.numpy())
                # Bug fix: view(-1) instead of squeeze() — squeeze() on a
                # final batch of size 1 yields a 0-d array that breaks
                # np.concatenate.
                labels_list.append(labels.view(-1).numpy())
        precisions_list = np.concatenate(precisions_list)
        labels_list = np.concatenate(labels_list)
        return precisions_list,labels_list

    def FrequencyStatistics(self,predValues):
        """Frequency statistics over the predicted labels.

        1. share of up(1)/down(2) predictions among all predictions
        2. up/down count per week (time window)
        3. average weekly count (sum of weekly counts / number of weeks)
        """
        predValues = predValues.tolist()

        total_samples = len(predValues)  # total number of predictions
        up_count = predValues.count(1)   # predicted "up" count
        down_count = predValues.count(2) # predicted "down" count

        # Share of actionable (up|down) predictions.
        total_frequencies = (up_count + down_count)/total_samples*100.0

        # Per-window (e.g. weekly) counts. For daily bars time_window=5; for
        # intraday bars scale accordingly (min60: *4, min15: *16).
        UnitFrequencyList = []
        sumupdownCount = 0
        time_window = 5
        for i in range(0,len(predValues),time_window):
            windowdata = predValues[i:i+time_window]
            win_up = windowdata.count(1)
            win_down = windowdata.count(2)
            sumupdownCount += (win_up + win_down)
            UnitFrequencyList.append(win_up + win_down)

        # Bug fix: divide by the actual number of windows (the original used
        # total_samples/time_window, wrong when the last window is partial).
        num_weeks = len(UnitFrequencyList)
        unitfAvg = sumupdownCount/max(num_weeks,1)

        print("预测值(涨|跌)占总预测值的比例:%0.2f%%,总预测值个数:%d"%(total_frequencies,total_samples))
        print("平均每周的频率:%0.2f个,共%d周"%(unitfAvg,num_weeks))

        # Bar chart of weekly counts.
        plt.bar(range(len(UnitFrequencyList)),UnitFrequencyList)
        plt.title("frequency")
        plt.xlabel("week")
        plt.ylabel("frequency")

        for i,value in enumerate(UnitFrequencyList):
            plt.text(i,value,str(value))
        plt.show()

    def cal_classWeights(self):
        """Compute per-class loss weights from ``self.labelConts`` (set by
        ``loadDatas``) to counter class imbalance: total / (count * n_classes)."""
        class_counts = torch.tensor(self.labelConts, dtype=torch.float)
        total_samples = class_counts.sum()
        class_weights = total_samples / (class_counts * len(class_counts))
        print("Class Weights:", class_weights)
        return class_weights

    def mymain(self):
        """End-to-end run: load data, build model, train, evaluate, plot."""
        # Load and preprocess the data.
        trainloader,testloader = self.DataHandle(batchsize=32,window=20)

        featureNum = len(self.featureNames)

        ### LSTM
        #model = LSTM_Model(input_dim=featureNum,hidden_dim=128,num_layers=2,output_dim=3)
        ## GRU
        #model = GRU_Model(input_size=featureNum,hidden_size=128,num_layers=2,output_size=3)
        ## BiLSTM
        #model = BiLSTM(input_dim=featureNum,hidden_dim=128,num_layers=2,output_dim=3)

        ## ALSTM
        #model = ALSTM_Model(input_dim=featureNum,hidden_size=128,num_layers=2,output_dim=3)
        ## RNN
        #model = RNN_Model(input_size=featureNum,hidden_size=128,num_layers=2,output_size=3)

        # CNN+LSTM hybrid
        #model = CNN_LSTM_Model(input_size=featureNum,cv1_channels=32,cv2_channels=64,hidden_size=128,output_size=3,num_layers=2,kernel_size=5,dropout=0.1)

        # PatchTST (patch_len must be <= seq_len)
        # model = PatchTST(enc_in=featureNum,d_model=128,seq_len=20,dropout=0.1,factor=1.0,output_attention=False,
        #                  n_heads=8,d_ff=256,activation='gelu', e_layers=2,patch_len=16,stride=8)
        # model = PatchTST(enc_in=featureNum,d_model=128, seq_len=20, dropout=0.1, factor=1.0, output_attention=True,
        #                  n_heads=8, d_ff=256, activation='gelu', e_layers=2, patch_len=16, stride=8)

        # iTransformer
        #model = iTransformer(input_dim=featureNum,seq_len=20,d_model=128,e_layers=2,n_heads=8,output_attention=True,d_ff=256,activation='gelu',factor=1.0,dropout=0.1,output_dim=3)

        # Nonstationary Transformer
        # model = NS_Transformer(input_dim=featureNum,seq_len=20,d_model=128,e_layers=2,n_heads=8,output_attention=False,
        #                                   d_ff=256,activation='gelu',factor=1.0,dropout=0.1,output_dim=3)

        # Informer
        # model = Informer(input_dim=featureNum,seq_len=20,d_model=128,e_layers=3,n_heads=8,output_attention=False,
        #                                       d_ff=256,activation='gelu',factor=1.0,dropout=0.1,num_class=3)

        # Autoformer
        # model = Autoformer(input_dim=featureNum,seq_len=20,moving_avg=14,d_model=128,e_layers=3,n_heads=8,output_attention=False,
        #                                           d_ff=256,activation='gelu',factor=1.0,dropout=0.3,num_class=3)

        # FEDformer (active)
        # - version: 'Fourier' or 'Wavelets'
        # - mode_select: 'random' or 'low'
        # - modes: number of selected modes (default 32)
        model = FEDformer(input_dim=featureNum,seq_len=20,d_model=128,dropout=0.1,n_heads = 8,d_ff=256,moving_avg = 16,
                          e_layers = 3,activation='gelu',version='Fourier', mode_select='random', modes=35,num_class=3)

        # Optimizer
        model.optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
        #model.optimizer = torch.optim.SGD(model.parameters(),lr=0.001)

        # Loss: class-weighted cross entropy to counter label imbalance.
        class_weights  = self.cal_classWeights()
        model.loss_func = nn.CrossEntropyLoss(weight=class_weights)
        # Unweighted alternative:
        #model.loss_func = nn.CrossEntropyLoss()

        # Train.
        epochs = 50 #120
        dfhistory = self.train_model(model,epochs,trainloader,testloader)

        # Evaluate on the training set.
        xpred,xtest = self.PredictionEvaluation(model,trainloader)
        self.modelEvaluation("训练集",xtest,xpred)

        # Evaluate on the test set (predictions vs true labels).
        ypred,ytest=self.PredictionEvaluation(model,testloader)
        res = self.modelEvaluation('测试集',ytest,ypred)
        print("测试集评估值;",res)
        # Loss curve.
        self.plot_metric(dfhistory,'loss')
        # Prediction-frequency statistics.
        self.FrequencyStatistics(ypred)

if __name__ == "__main__":
    # Entry point: run the full training/evaluation pipeline.
    predictor = StockPrediction()
    predictor.mymain()

    # Scratch checks kept for reference:
    # tmplist = [0.499936105,0.247453047,-1.365463265,-2.063354326,0.003311183
    # ,-0.927100448,0.985676404,-0.389942964,1.101315782,-0.895546405]
    # mean = np.mean(tmplist)
    # std = np.std(tmplist)
    # print(mean,std,mean-std)

    # # class-weight sanity check
    # class_counts = torch.tensor([3674, 417, 372], dtype=torch.float)
    # total_samples = class_counts.sum()
    # class_weights = total_samples / (class_counts * len(class_counts))
    # print("Class Weights:", class_weights)
