import torch as t
import torch.nn as nn
from model import nn_net, mse_lr
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
import numpy as np
import os
import pandas as pd
from uitl.handout import handclf
from model.xgb_feature import xgb_model
from model.gbdt_feature import gbdt_model
from sklearn.preprocessing import StandardScaler

# ---- Load the raw feature tables ----
allPdata = pd.read_csv("lc_model/data/allPdata.csv", index_col=0)
allNdata = pd.read_csv("lc_model/data/allNdata.csv", index_col=0)
allTdata = pd.read_csv("lc_model/data/allTdata.csv", index_col=0)

# ---- Hand-crafted outlier rules: (column, threshold, direction) triples ----
# Each handclf call filters the frame by one rule and accumulates the
# removed row ids into the accompanying set.
POSITIVE_RULES = [
    ('c0', 0.2, 1),
    ('c20', 1, 1),
    ('FA1dataMean', 4, 1),
]
NEGATIVE_RULES = [
    ('c0', 0.2, 1),
    ('c2', 2, 1),
    ('c10', 1.1, 0),
    ('c15', -0.4, 0),
    ('c16', 2, 1),
    ('c17', 1.1, 1),
    ('c20', 1, 1),
    ('F_ai2_fft_frequency_max', 2000, 1),
    ('FA1dataVar', 10, 1),
    ('ai2_frequency_max_diff', -1000, 0),
]

handoutline1 = set()
for col, threshold, direction in POSITIVE_RULES:
    handoutline1, allPdata = handclf(col, threshold, direction, allPdata, handoutline1)

handoutline2 = set()
for col, threshold, direction in NEGATIVE_RULES:
    handoutline2, allNdata = handclf(col, threshold, direction, allNdata, handoutline2)
# print(allPdata.shape)
# print(allNdata.shape)

# ---- Label and balance the classes ----
allPdata['lable'] = 1
allNdata['lable'] = 0
# Positives are scarce: replicate them 4x to balance the classes (tunable).
# The resulting frame has the '1' rows first, followed by the '0' rows.
newallPdata = pd.concat([allPdata] * 4, axis=0)
traindata = pd.concat([newallPdata, allNdata], axis=0)
y = traindata['lable']                 # target vector
x = traindata.drop('lable', axis=1)    # feature matrix

# ---- Tree-model leaf-index (one-hot) features ----
train_gbdt_feature, test_gbdt_feature = xgb_model(x, y, allTdata)
# train_gbdt2_feature, test_gbdt2_feature = gbdt_model(x, y, allTdata)

# ---- Standardize raw features: fit on train, reuse the scaler on test ----
ss_X = StandardScaler()
x = pd.DataFrame(
    ss_X.fit_transform(x),              # standardize the training set
    index=train_gbdt_feature.index,     # keep row alignment with the leaf features
)
sstest = pd.DataFrame(
    ss_X.transform(allTdata),           # apply the same scaling to the test set
    index=test_gbdt_feature.index,
)

# Concatenate the standardized features with the xgb leaf encodings,
# effectively widening the feature set; the label rides along in train.
x_train = pd.concat([x, train_gbdt_feature, y], axis=1)
x_test = pd.concat([sstest, test_gbdt_feature], axis=1)

# ======== three-model feature variant ==========
# x_train = pd.concat([x, train_gbdt_feature, train_gbdt2_feature, y], axis=1)
# x_test = pd.concat([sstest, test_gbdt_feature, test_gbdt2_feature], axis=1)

x_train.to_csv("./data/pre_train.csv")
x_test.to_csv("./data/pre_test1.csv")

#==================================== Train the neural network ==================================================
import dataloader

os.environ['CUDA_VISIBLE_DEVICES'] = '0'                  # pin to GPU 0

writer = SummaryWriter(comment='xgb_nn')                  # tensorboard logging
model = nn_net.MLP()
# model = mse_lr.LR()

# if t.cuda.is_available():
#     model.cuda()

weight = t.Tensor([1, 2])                                 # class weights for the (disabled) CE loss
# criterion = nn.CrossEntropyLoss(weight=weight)
criterion = nn.BCELoss()
# criterion = nn.MSELoss()

# if t.cuda.is_available():
#     criterion.cuda()
optimizer = t.optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-5)
# Decay the learning rate by 10x every 2000 epochs.
exp_lr_schedule = lr_scheduler.StepLR(optimizer, step_size=2000, gamma=0.1)

t.set_num_threads(8)                      # cap CPU threads

# FIX: t.save below would crash if the checkpoint directory is missing.
os.makedirs("save/new_feature", exist_ok=True)

for epoch in range(40000):
    train_loss = []                       # per-batch losses of this epoch

    for i, (inputs, labels) in enumerate(dataloader.trainloader):
        # if t.cuda.is_available():
        #     inputs = inputs.cuda()
        #     labels = labels.cuda()

        optimizer.zero_grad()
        outputs = model(inputs)
        labels = labels.type(t.FloatTensor)      # BCELoss expects float targets
        # outputs = t.squeeze(outputs)           # only needed with the MSE model
        loss = criterion(outputs, labels)
        train_loss.append(loss.item())

        loss.backward()
        optimizer.step()

    # FIX: with PyTorch >= 1.1 the scheduler must be stepped AFTER the
    # optimizer updates of the epoch; stepping it at the top of the loop
    # (as the original did) skips the first lr value and shifts the schedule.
    exp_lr_schedule.step()

    if (epoch + 1) % 10 == 0:
        # FIX: report the mean epoch loss instead of the arbitrary last-batch
        # loss; this also avoids a NameError when the loader yields no batches.
        print("[%d %5d] loss:%.5f" % (epoch + 1, len(train_loss), np.mean(train_loss)))

    writer.add_scalar('Train_loss', np.mean(train_loss), epoch)

    if (epoch + 1) % 1000 == 0:
        # Checkpoint the full model every 1000 epochs.
        t.save(model, "save/new_feature/%d model.pkl" % (epoch + 1))