import sklearn
from sklearn.model_selection import train_test_split
from eli5.sklearn import PermutationImportance
from lightgbm import LGBMClassifier
import eli5
from xpinyin import Pinyin
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
import numpy as np
import lightgbm as lgb
class basemodel:
    """Baseline classifiers over a feature DataFrame with Chinese column names.

    Column names are transliterated to pinyin so downstream tooling
    (the eli5 HTML weight report) can render them.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix. NOTE: its columns are renamed in place, so the
        caller's DataFrame is mutated.
    Y : pandas.Series
        Binary 0/1 labels; judging by the print messages, 1 appears to
        mean "zombie firm" — TODO confirm with the caller.
    """

    def __init__(self, X, Y):
        converter = Pinyin()
        pinyin_names = []
        for column in X.columns.values:
            try:
                pinyin_names.append(converter.get_pinyin(column))
            except Exception:
                # Fall back to the raw column name if transliteration fails.
                pinyin_names.append(column)
        X.columns = pinyin_names  # mutates the caller's DataFrame in place
        print(pinyin_names)
        self.names = pinyin_names
        self.X = X[self.names]
        self.Y = Y

        # Fixed 90/10 split; random_state pinned for reproducibility.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.Y, test_size=0.1, random_state=42, shuffle=True)

    def LGBM(self):
        """Train a LightGBM model, print test accuracy, and write
        permutation feature importances to ``factor_weight.html``.

        Side effects: writes ``factor_weight.html`` in the working directory.
        """
        # Scale into locals so repeated calls (or the other methods) never
        # see already-standardized data; the previous version clobbered
        # self.X_train / self.X_test with numpy arrays.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(self.X_train)
        X_test = scaler.transform(self.X_test)
        print('输入模型的信息')
        print(X_train)
        print(self.y_train)
        print('预测数据分布情况')
        print(self.y_test.value_counts())

        trainset = lgb.Dataset(X_train, label=self.y_train.values,
                               free_raw_data=False)
        validationset = lgb.Dataset(X_test, label=self.y_test)
        # Binary problem modeled as 2-class multiclass so predict() yields
        # an (n_samples, 2) score matrix.
        params = {
            'objective': 'multiclass',
            'num_class': 2,
        }
        clf = lgb.train(params, trainset, valid_sets=[validationset])

        # Take the argmax of each score row as the predicted label.
        y_pred = np.argmax(clf.predict(X_test), axis=1).tolist()
        print('预测为僵尸的有%d个' % (sum(y_pred)))
        print(y_pred)
        print(accuracy_score(self.y_test, y_pred))

        # Refit via the sklearn wrapper: PermutationImportance needs an
        # estimator with the sklearn fit/predict interface.
        model = LGBMClassifier().fit(X_train, self.y_train)
        perm = PermutationImportance(model).fit(X_test, self.y_test)
        htmlres = eli5.show_weights(perm, feature_names=self.names)
        # utf-8 explicitly: feature names / report contain non-ASCII text,
        # and the platform default encoding may not handle them.
        with open('factor_weight.html', 'w', encoding='utf-8') as f:
            f.write(htmlres.data)

    def LGBM_report(self):
        """Placeholder removed — dead commented-out experiments deleted."""

    def LogisticRegression(self):
        """Fit an L2-regularized logistic regression on standardized
        features and print train/test accuracy."""
        print('-' * 8 + 'LogisticRegression' + '-' * 8)
        print(self.y_train.value_counts())
        print(self.X_train)
        # Local scaling only — do not overwrite the shared splits.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(self.X_train)
        X_test = scaler.transform(self.X_test)

        # Inside the method body, `LogisticRegression` resolves to the
        # module-level sklearn import, not to this method.
        clf = LogisticRegression(penalty='l2')
        clf = clf.fit(X_train, self.y_train)
        predictres = clf.predict(X_test)
        trainpredict = clf.predict(X_train)
        print('训练集拟合程度')
        print(accuracy_score(self.y_train, trainpredict))
        print('有%d个僵尸' % sum(trainpredict))
        print('有%d个不是僵尸' % (len(trainpredict) - sum(trainpredict)))
        print(accuracy_score(self.y_test, predictres))

    def KNeighborsClassifier(self):
        """Fit a default-parameter KNN classifier and print test accuracy.

        NOTE(review): unlike the other methods, the features are not
        standardized here; KNN is distance-based, so this is probably an
        oversight — confirm before relying on the reported accuracy.
        """
        clf = KNeighborsClassifier()
        clf.fit(self.X_train, self.y_train)
        predictres = clf.predict(self.X_test)
        print(accuracy_score(self.y_test, predictres))