import utils
import yaml
import lightgbm as lgb
import os
from sklearn import tree
# import xgboost as xgb
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB,MultinomialNB
from sklearn.svm import SVC
import sys
pythonVersion = 3.6
# 不同版本的base名称不一样（_base,base）
if sys.version_info > (3,8):
    pythonVersion = 3.8
if pythonVersion == 3.6:
    from sklearn.datasets import base
else:
    from sklearn.datasets import _base
import time
import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import TimeSeriesSplit
# import torch 
# import torch.nn as nn
# import torch.optim as optim
# from torch.utils.data import DataLoader, TensorDataset
import numpy as np

# Load the project-wide YAML configuration once at import time.
# NOTE(review): path is hard-coded to a developer machine (and "configue" looks
# like a typo of "configure") — consider making it overridable via an env var.
yaml_file = "/home/cyw/projects/malware_detected/configue.yaml"
with open(yaml_file, 'r') as f:
    config = yaml.safe_load(f)
# Frequently used settings hoisted to module-level flags (string "true"/"false").
debugLog = config["logs"]["debugLog"]    # "true" enables verbose training logs
fastDebug = config["train"]["fastDebug"]  # "true" shrinks data/iterations for quick runs
optParams = config["train"]["optParams"]  # "true" runs grid search before xgBoost training
device = config["train"]['device']  # target device for the (currently disabled) torch models

class modelTrain():
    """
    Base class for all model trainers.

    Subclasses override ``train`` (and optionally ``trainDataSet`` /
    ``predict``) for a specific algorithm.  ``modelName`` identifies the
    algorithm in log output; the persisted model path is derived from the
    module-level ``config``.
    """

    def __init__(self, name):
        self.modelName = name  # human-readable algorithm name used in logs
        self.model = None      # lazily loaded / trained estimator

    def trainDataSet(self):
        """
        Load the EMBER-2018 training set as a Bunch with ``data`` and ``y``.

        Rows labelled -1 (unlabelled samples) are filtered out.  The data is
        not class-balanced — the first ~50k rows are all label 0 — so callers
        must not simply take a prefix of the set.
        """
        data_dir = config["dataConfig"]["ember2018Path"]
        X_train, y_train = utils.read_vectorized_features(data_dir, "train")
        # Filter unlabeled data (label -1).
        train_rows = (y_train != -1)
        # sklearn renamed sklearn.datasets.base to _base between the Python
        # 3.6-era and newer releases, hence the version switch.
        if pythonVersion == 3.6:
            dataset = base.Bunch(data=X_train[train_rows], y=y_train[train_rows])
        else:
            dataset = _base.Bunch(data=X_train[train_rows], y=y_train[train_rows])
        if debugLog == "true":
            # a: kept/dropped row counts; b: label distribution of kept rows.
            a = {}
            b = {}
            for ind, keep in enumerate(train_rows):
                if keep:
                    b[y_train[ind]] = b.get(y_train[ind], 0) + 1
                a[keep] = a.get(keep, 0) + 1
            print("训练模型所使用的row的状况{}".format(a))
            print("dataset 标签状况{}".format(b))
            print("dataset x的长度：{}".format(len(dataset.data)))
            print("dataset y的长度：{}".format(len(dataset.y)))
        return dataset

    def train(self):
        """Abstract training hook; subclasses provide the real implementation."""
        print("这是abstractModelTrain")

    def predict(self, x):
        """
        Predict labels for ``x``, lazily loading the persisted model from
        ``<savePath>/<trainModel>_model.plk`` on first use.
        """
        if self.model is None:
            # Load the previously trained model from disk.
            savePath = config["model"]["savePath"]
            modelPath = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
            self.model = joblib.load(modelPath)
        res = self.model.predict(x)
        return res
    
# -------------------------------------------------------------------------
# ---------------------------------机器学习---------------------------------
# -------------------------------------------------------------------------
class lightGbmTrain(modelTrain):
    """LightGBM trainer (the baseline model shipped with the EMBER dataset)."""

    def __init__(self):
        super(lightGbmTrain, self).__init__("lightGbm")

    def train_model(self, dataset, params=None):
        """
        Train the LightGBM model from the EMBER dataset from the vectorized features.

        :param dataset: ``lgb.Dataset`` of training features/labels
        :param params: optional LightGBM parameter overrides.  A copy is taken
            before updating: the previous signature used a mutable default
            argument (``params={}``) and mutated it in place, so state leaked
            between calls and callers' dicts were modified.
        :return: the trained booster
        """
        params = {} if params is None else dict(params)
        # Force binary classification regardless of caller-supplied params.
        params.update({"application": "binary"})
        lgbm_model = lgb.train(params, dataset)
        return lgbm_model

    def trainDataSet(self):
        """
        Load the EMBER-2018 training features into an ``lgb.Dataset``,
        dropping unlabelled rows (label -1).
        """
        data_dir = config["dataConfig"]["ember2018Path"]
        X_train, y_train = utils.read_vectorized_features(data_dir, "train")
        # Filter unlabeled data
        train_rows = (y_train != -1)
        if debugLog == "true":
            # Count how many rows are kept (True) vs dropped (False).
            a = {}
            for i in train_rows:
                a[i] = a.get(i, 0) + 1
            print("训练模型所使用的row的状况{}".format(a))
        lgbm_dataset = lgb.Dataset(X_train[train_rows], y_train[train_rows])
        return lgbm_dataset

    def train(self, dataset):
        """
        Train on the EMBER-2018 dataset and persist the booster to
        ``<savePath>/<trainModel>_model.plk``.
        """
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog == "true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))

        params = {
            "boosting": "gbdt",
            "objective": "binary",
            "learning_rate": 0.1,
            "num_iterations": 1000,
            "num_leaves": 2048,
            "max_depth": 15,
            "min_data_in_leaf": 50,
            "feature_fraction": 0.5,
            # "device" : "gpu"
        }
        if fastDebug == "true":
            # Quick-debug mode: a single boosting iteration.
            params["num_iterations"] = 1
        print("Training {} model".format(self.modelName))

        lgbm_model = self.train_model(dataset, params)
        joblib.dump(lgbm_model, os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"])))

class decisionTreeTrain(modelTrain):
    """CART decision-tree trainer for the EMBER feature vectors."""

    def __init__(self):
        super(decisionTreeTrain, self).__init__("decisionTree")

    def train(self, dataset):
        """Fit a DecisionTreeClassifier on ``dataset`` and persist it to disk."""
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog == "true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))

        print("Training {} model".format(self.modelName))
        clf = tree.DecisionTreeClassifier()
        if fastDebug == "true":
            # Quick-debug mode: train on the first 100 samples only.
            dataset.data = dataset.data[:100]
            dataset.y = dataset.y[:100]
        if debugLog == "true":
            print(dataset)
        started = time.time()
        clf.fit(dataset.data, dataset.y)
        finished = time.time()
        if debugLog == "true":
            print("训练耗时：{}".format(finished - started))

        modelFile = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
        joblib.dump(clf, modelFile)
    
class xgBoostTrain(modelTrain):
    """
    XGBoost trainer for the EMBER feature vectors.

    NOTE(review): ``xgb`` is unresolved in this file — ``import xgboost as xgb``
    is commented out at the top of the module, so every method below raises
    NameError until that import is restored.
    """
    def __init__(self):
        super(xgBoostTrain, self).__init__("xgBoost")

    def optimize_model(self):
        """
            Run a grid search to find the best xgBoost parameters
        """
        # Too slow in practice — the server lacks resources, which is why the
        # search space below is commented out (grid search runs over an empty grid).
        print("正在优化模型参数")
        data_dir = config["dataConfig"]["ember2018Path"]
        X_train, y_train = utils.read_vectorized_features(data_dir, "train")
        train_rows = (y_train != -1)
        parameters = {
            #   'max_depth': [5, 10, 15, 20, 25],
            #   'learning_rate': [0.01, 0.02, 0.05, 0.1, 0.15],
            #   'n_estimators': [500, 1000, 2000, 3000, 5000],
            #   'min_child_weight': [0, 2, 5, 10, 20],
            #   'max_delta_step': [0, 0.2, 0.6, 1, 2],
            #   'subsample': [0.6, 0.7, 0.8, 0.85, 0.95],
            #   'colsample_bytree': [0.5, 0.6, 0.7, 0.8, 0.9],
            #   'reg_alpha': [0, 0.25, 0.5, 0.75, 1],
            #   'reg_lambda': [0.2, 0.4, 0.6, 0.8, 1],
            #   'scale_pos_weight': [0.2, 0.4, 0.6, 0.8, 1]
        }
        # Baseline estimator used as the grid-search subject.
        # NOTE(review): ``silent`` is deprecated in recent xgboost releases — verify.
        xlf = xgb.XGBClassifier(
            booster = 'gbtree',
            objective='binary:logistic',
            silent=True,
            nthread=-1,
			gamma=0,
            colsample_bylevel=1,
            seed=0,
            tree_method = "gpu_hist",
			missing=1,

            max_depth=10,
			learning_rate=0.01,
			n_estimators=2000,
			min_child_weight=1,
			max_delta_step=0,
			subsample=0.85,
			colsample_bytree=0.7,
			reg_alpha=0,
			reg_lambda=1,
			scale_pos_weight=1
			)
        gsearch = GridSearchCV(xlf, param_grid=parameters, scoring='accuracy', cv=3)
        gsearch.fit(X_train[train_rows], y_train[train_rows])
        print(gsearch.best_params_)
        return gsearch.best_params_
    
    def trainDataSet(self):
        """
        Load the EMBER-2018 training features into an ``xgb.DMatrix``,
        dropping unlabelled rows (label -1).
        """
        data_dir = config["dataConfig"]["ember2018Path"]
        X_train, y_train = utils.read_vectorized_features(data_dir, "train")
        # Filter unlabeled data
        train_rows = (y_train != -1)
        # The class distribution is unbalanced: do not just take a prefix —
        # the first ~50k rows are all label 0.
        dtrain = xgb.DMatrix(X_train[train_rows], label = y_train[train_rows])
        if debugLog == "true":
            # a: kept/dropped row counts; b: label distribution of kept rows.
            a={}
            b={}
            ind=-1
            for i in train_rows:
                ind+=1
                if i == True:
                    if y_train[ind] not in b:
                        b[y_train[ind]]=0
                    b[y_train[ind]]+=1
                if i not in a:
                    a[i]=1
                else:
                    a[i]+=1
            print("训练模型所使用的row的状况{}".format(a))
            print("dataset 标签状况{}".format(b))
        return dtrain
    
    def train(self,dataset):
        """
        Train an XGBoost booster on ``dataset`` (an ``xgb.DMatrix``) and
        persist it to ``<savePath>/<trainModel>_model.plk``.  When
        ``optParams`` is enabled the grid search runs first (its result is
        printed but not applied to ``params`` below).
        """
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog=="true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))
        if optParams == "true":
            self.optimize_model()
        print("Training {} model".format(self.modelName))

        params={
            'booster':'gbtree',  # weak-learner type; gbtree (CART trees) is the default
            'objective': 'binary:logistic',   # objective: binary logistic regression, outputs probabilities
            'eval_metric': 'auc',
            'max_depth':25,  # maximum tree depth
            'learning_rate': 0.1,  # learning rate
            'lambda':10,
            'subsample':0.75,
            'colsample_bytree':0.75,
            'min_child_weight':2,
            'eta': 0.025,  # step size
            'seed':0,
            'nthread':8,
            'silent':1,
            "tree_method":"gpu_hist"  # enable GPU acceleration
        }
        if debugLog == "true":
            print("数据集展示：{}".format(dataset))
        watchlist = [(dataset,'train')]
        a = time.time()
        if fastDebug == "true":
            model = xgb.train(params,
                dataset,
                num_boost_round= 1,  # number of boosting rounds (weak learners)
                evals= watchlist)
        else:
            model = xgb.train(params,
                    dataset,
                    num_boost_round= 100,  # number of boosting rounds (weak learners)
                    evals= watchlist)
        b = time.time()
        if debugLog=="true":
            print("训练耗时：{}".format(b-a))

        joblib.dump(model,os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"])))

    def predict(self, x):
        """
        Predict on ``x`` after wrapping it in an ``xgb.DMatrix`` (boosters
        trained via ``xgb.train`` require a DMatrix, unlike the sklearn base
        class implementation).
        """
        if(self.model==None):
            # Load the previously trained model from disk.
            savePath = config["model"]["savePath"]
            modelPath = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
            self.model = joblib.load(modelPath)
        tar = xgb.DMatrix(x)
        res = self.model.predict(tar)
        return res

class KNNTrain(modelTrain):
    """k-nearest-neighbours trainer (k=2) for the EMBER feature vectors."""

    def __init__(self):
        super(KNNTrain, self).__init__("knn")

    def train(self, dataset):
        """Fit a KNeighborsClassifier on ``dataset`` and persist it to disk."""
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog == "true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))

        print("Training {} model".format(self.modelName))
        classifier = KNeighborsClassifier(n_neighbors=2)
        if debugLog == "true":
            print("数据集展示：{}".format(dataset))
        if fastDebug == "true":
            # Quick-debug mode: train on the first 100 samples only.
            dataset.data = dataset.data[:100]
            dataset.y = dataset.y[:100]
        started = time.time()
        classifier.fit(dataset.data, dataset.y)
        finished = time.time()
        if debugLog == "true":
            print("训练耗时：{}".format(finished - started))

        modelFile = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
        joblib.dump(classifier, modelFile)
    
class NBTrain(modelTrain):
    """Naive-Bayes trainer (GaussianNB) for the EMBER feature vectors."""

    def __init__(self):
        super(NBTrain, self).__init__("naiveBayse")

    def train(self, dataset):
        """Fit a GaussianNB classifier on ``dataset`` and persist it to disk."""
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog == "true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))

        print("Training {} model".format(self.modelName))
        estimator = GaussianNB()
        # MultinomialNB was also imported as an alternative but is not used here.
        if debugLog == "true":
            print("数据集展示：{}".format(dataset))
        if fastDebug == "true":
            # Quick-debug mode: train on the first 100 samples only.
            dataset.data = dataset.data[:100]
            dataset.y = dataset.y[:100]
        started = time.time()
        estimator.fit(dataset.data, dataset.y)
        finished = time.time()
        if debugLog == "true":
            print("训练耗时：{}".format(finished - started))

        modelFile = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
        joblib.dump(estimator, modelFile)
    
class rfTrain(modelTrain):
    """Random-forest trainer for the EMBER feature vectors."""

    def __init__(self):
        super(rfTrain, self).__init__("randomForest")

    def train(self, dataset):
        """
        Fit a RandomForestClassifier (100 trees, fixed random_state for
        reproducibility) on ``dataset`` and persist it to disk.
        """
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog == "true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))

        print("Training {} model".format(self.modelName))
        # n_estimators: number of trees in the forest (sklearn default is 100).
        # criterion defaults to "gini" (CART); "entropy" (ID3-style) is the alternative.
        # max_depth is left unbounded.
        forest = RandomForestClassifier(n_estimators=100, random_state=42)
        if debugLog == "true":
            print("数据集展示：{}".format(dataset))
        if fastDebug == "true":
            # Quick-debug mode: train on the first 100 samples only.
            dataset.data = dataset.data[:100]
            dataset.y = dataset.y[:100]
        started = time.time()
        forest.fit(dataset.data, dataset.y)
        finished = time.time()
        if debugLog == "true":
            print("训练耗时：{}".format(finished - started))

        modelFile = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
        joblib.dump(forest, modelFile)
    
class svmTrain(modelTrain):
    """SVM trainer (StandardScaler + SVC pipeline) for the EMBER feature vectors."""

    def __init__(self):
        super(svmTrain, self).__init__("svm")

    def train(self, dataset):
        """Fit a scaled SVC pipeline on ``dataset`` and persist it to disk."""
        datadir = config["dataConfig"]["ember2018Path"]
        savePath = config["model"]["savePath"]
        if debugLog == "true":
            X_train_path = os.path.join(datadir, "X_train.dat")
            y_train_path = os.path.join(datadir, "y_train.dat")
            print("特征的地址为：{}".format(X_train_path))
            print("标签的地址为：{}".format(y_train_path))

        print("Training {} model".format(self.modelName))
        pipeline = make_pipeline(StandardScaler(), SVC(gamma='auto'))
        if debugLog == "true":
            print("数据集展示：{}".format(dataset))
        if fastDebug == "true":
            # Quick-debug mode: a mid-range slice is taken because the head of
            # the dataset is single-class (the first ~50k rows are all label 0).
            dataset.data = dataset.data[50000:53000]
            dataset.y = dataset.y[50000:53000]
        started = time.time()
        pipeline.fit(dataset.data, dataset.y)
        finished = time.time()
        if debugLog == "true":
            print("训练耗时：{}".format(finished - started))

        modelFile = os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"]))
        joblib.dump(pipeline, modelFile)

# -------------------------------------------------------------------------
# ---------------------------------深度学习---------------------------------
# -------------------------------------------------------------------------
# python3.6的版本不能使用torch，很坏

# class SimpleDNN(nn.Module):
#     """
#         深度神经网络模型
#         参考这篇文章  Robust Intelligent Malware Detection Using Deep Learning
#     """
#     def __init__(self):
#         super(SimpleDNN, self).__init__()
#         featureDim = 2381
#         self.tarDims =[4608, 4096, 3584, 3072, 2560, 2048, 1536, 1024, 512, 128,64]
#         # self.tarDims =[2048,1024,512,256,128]
#         # self.tarDims =[1024,256,64]
#         self.fcs = []
#         self.bns = []
#         self.dropouts = []
#         preDim = featureDim
#         for tarDim in self.tarDims:
#             self.fcs.append(nn.Linear(preDim,tarDim).to(device))
#             self.bns.append(nn.BatchNorm1d(tarDim).to(device))
#             self.dropouts.append(nn.Dropout(0.01).to(device))
#             preDim = tarDim
#         # self.linear1 = nn.Linear(2381,1024)
#         # self.linear2 = nn.Linear(1024,256)
#         # self.linear3 = nn.Linear(256,64)
#         # self.bn1 = nn.BatchNorm1d(1024)
#         # self.bn2 = nn.BatchNorm1d(256)
#         # self.bn3 = nn.BatchNorm1d(64)
#         self.fcFinal = nn.Linear(64, 1)
#         self.sigmoid = nn.Sigmoid()
    
#     def forward(self, x):
#         for i in range(len(self.tarDims)):
#             x = torch.relu(self.bns[i](self.fcs[i](x)))
#             x = self.dropouts[i](x)                
#         # x = torch.relu(self.bn1(self.linear1(x)))
#         # x = torch.relu(self.bn2(self.linear2(x)))
#         # x = torch.relu(self.bn3(self.linear3(x)))
#         x = self.fcFinal(x)                        
#         x = self.sigmoid(x)
#         return x
    
#     def predict(self,x):
#         tar = torch.from_numpy(x)
#         res = self.forward(tar)
#         return res.detach().numpy()
    
# class DNNTrain(modelTrain):
#     def __init__(self):
#         super(DNNTrain, self).__init__("DNN")
#         self.model =  SimpleDNN()
#         self.model.to(device)
#         self.criterion = nn.CrossEntropyLoss()
#         self.optimizer = optim.Adam(self.model.parameters(), lr=0.00001)
    
#     def trainDataSet(self):
#         """
#             训练集数据加载
#         """
#         data_dir = config["dataConfig"]["ember2018Path"]
#         X_train, y_train = utils.read_vectorized_features(data_dir, "train")
#         print("快速实验，使用test来训练！！！！！！！！！！！")
#         train_rows = (y_train != -1)
#         xTrain = torch.from_numpy(X_train[train_rows]).to(device)
#         yTrain = torch.from_numpy(y_train[train_rows]).to(device)
#         print(xTrain[0])
#         print(xTrain.shape)
#         print(type(xTrain[0]))
#         print(yTrain)
#         print(type(yTrain[0]))
#         print(yTrain.shape)

#         trainDataset = TensorDataset(xTrain, yTrain)
#         dloader = DataLoader(trainDataset, batch_size=256, shuffle=False)
#         return dloader
    
#     def train(self,dataset):
#         datadir = config["dataConfig"]["ember2018Path"]
#         savePath = config["model"]["savePath"]
#         if debugLog=="true":
#             X_train_path = os.path.join(datadir, "X_train.dat")
#             y_train_path = os.path.join(datadir, "y_train.dat")
#             print("特征的地址为：{}".format(X_train_path))
#             print("标签的地址为：{}".format(y_train_path))
#         print("Training {} model".format(self.modelName))
#         if debugLog == "true":
#             print("数据集展示：{}".format(dataset))
#         a = time.time()
#         num_epochs = 1000
#         print("开始训练dnn模型")
#         for epoch in range(num_epochs):
#             self.model.train()
#             totalLoss = 0
#             for batch_X, batch_y in dataset:
#                 self.optimizer.zero_grad()
#                 outputs = self.model(batch_X).squeeze()
#                 # print(outputs)
#                 loss = self.criterion(outputs, batch_y)
#                 totalLoss+=loss
#                 loss.backward()
#                 self.optimizer.step()
#             print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {totalLoss:.4f}')
#             joblib.dump(self.model, os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"])))
#         b = time.time()
#         if debugLog=="true":
#             print("训练耗时：{}".format(b-a))
#         joblib.dump(self.model, os.path.join(savePath, "{}_model.plk".format(config["model"]["trainModel"])))

