import pandas as pd
import SBFI as SBFI
import numpy as np
from numpy import loadtxt
from numpy import sort
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
import matplotlib.ticker as mtick
from random import sample
import seaborn as sns
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn import preprocessing
from sklearn import metrics
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.utils import resample
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectFromModel, RFE
from warnings import simplefilter
from Score import func_score
from sklearn.linear_model import (LinearRegression, Ridge, Lasso)
# ignore all future warnings
# from GRA import cc
from GRA_improve import gra_imporve
from GRA import gra
from CHI2test import chi2_test
from MICtest import mic_test
from pearson import pearsonr_test
# ------- Data preprocessing -----------
simplefilter(action='ignore', category=FutureWarning)

df_rating = pd.read_csv('corporate_rating.csv')

# Collapse the ten letter grades into five coarser risk buckets.
rating_dict = {'AAA':'Lowest Risk',
               'AA':'Low Risk',
               'A':'Low Risk',
               'BBB':'Medium Risk',
               'BB':'High Risk',
               'B':'High Risk',
               'CCC':'Highest Risk',
               'CC':'Highest Risk',
               'C':'Highest Risk',
               'D':'In Default'}
df_rating.Rating = df_rating.Rating.map(rating_dict)

# Drop the two extreme classes so the target keeps four levels:
# Low / Medium / High / Highest Risk.
df_rating = df_rating[df_rating['Rating']!='Lowest Risk'] # filter Lowest Risk
df_rating = df_rating[df_rating['Rating']!='In Default']  # filter In Default
df_rating.reset_index(inplace = True, drop=True) # reset index

# Integer-encode the categorical columns; fit_transform combines the
# separate fit + transform calls into one.
le = preprocessing.LabelEncoder()
df_rating.Sector = le.fit_transform(df_rating.Sector) # encode sector
df_rating.Rating = le.fit_transform(df_rating.Rating) # encode rating

# Min-max normalization of the first 26 (feature) columns to [0, 1].
# MinMaxScaler scales each column independently, so a single fit_transform
# over all feature columns is equivalent to the per-column loop, without
# re-fitting the scaler 26 times.
min_max_scaler = preprocessing.MinMaxScaler()
feature_cols = df_rating.columns[0:26]
df_rating[feature_cols] = min_max_scaler.fit_transform(df_rating[feature_cols].to_numpy())
# print(df_rating.head())

sns.heatmap(df_rating.corr())
# -------原始准确率----------

# print(XGB_model.feature_importances_)
# from xgboost import plot_importance
# from matplotlib import pyplot
# plot_importance(XGB_model)
# pyplot.show()


from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, mutual_info_classif

# Candidate feature indices: all 26 financial-ratio columns.
S = list(range(26))

# Features are the first 26 columns; column 26 is the encoded rating target.
X = df_rating.iloc[:, 0:26]
y = df_rating.iloc[:, 26]

# colnames=X.columns
# ranks = {}
# ---------- MIC ----------
# mic_test(X,y,S,df_rating)
# ---------- chi2 --------
# chi2_test(X,y,S,df_rating)
# --------- grey relational ranking -------
# gra_imporve(S,df_rating)
# gra(S,df_rating)
# Pearson correlation ranking of the features against the target.
pearsonr_test(X,y,S,df_rating)

# def ranking(ranks, names, order=1):
#     minmax = MinMaxScaler()
#     ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
#     ranks = map(lambda x: round(x,2), ranks)
#     return dict(zip(names, ranks))
#
#
# # ---------- REF -----------
# # Construct our Linear Regression model
# lr = LinearRegression(normalize=True)
# lr.fit(X,y)
# #stop the search when only the last feature is left
# # n_feature_to_select=1,递归到剩一个特征，verbose=3显示中间过程
# rfe = RFE(lr, n_features_to_select=1, verbose =3 )
# rfe.fit(X,y)
# ranks["RFE"] = ranking(list(map(float, rfe.ranking_)), colnames, order=-1)
# print(ranks["RFE"])


#  -------基于单变量特征统计检验的方法----------
#
# 此类方法通过单变量统计检验实现最佳特征选择。
#
# 单变量特征选择能够对每一个特征进行测试，衡量该特征和目标变量y之间的关系，根据得分去掉不好的特征。对于回归和分类问题可以采用卡方检验等方式对特征进行测试。
#
# 此类方法简单，易于运行，易于理解，通常对于理解数据有较好的效果
#----------- chi2检验--------------------

# i=0
# S.remove(ranks[i])
# S_new = S
# print(S_new)
# S=S_new+[ranks[i]]
# print(S)



# print("hello")
# # 互信息



# # 随机森林
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import cross_val_score
# clf = RandomForestClassifier(n_estimators = 100, max_depth = None)
# scores = []
# num_features = len(X.columns)
# for i in range(num_features):
#     col = X.columns[i]
#     score = np.mean(cross_val_score(clf, X[col].values.reshape(-1,1), y, cv=10))
#     scores.append((int(score*100), col))
# print(sorted(scores, reverse = True))
#
#
#
# # Define dictionary to store our rankings
# ranks = {}
# # Create our function which stores the feature rankings to the ranks dictionary
# def ranking(ranks, names, order=1):
#     minmax = MinMaxScaler()
#     ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
#     ranks = map(lambda x: round(x,2), ranks)
#     return dict(zip(names, ranks))


# a=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]
# a=SBFI.
# S=df_rating.iloc[:,0:26]
# def score(a):
#     df_train, df_test = train_test_split(df_rating, test_size=0.2, random_state=1234)
#     X_train, y_train = df_train.iloc[:, a], df_train.iloc[:, 26]
#     X_test, y_test = df_test.iloc[:, a], df_test.iloc[:, 26]
#     XGB_model = xgb.XGBRegressor(objective ='multi:softmax', num_class =4)
#     XGB_model.fit(X_train, y_train)
#     y_pred_XGB = XGB_model.predict(X_test)
#     Accuracy_XGB = metrics.accuracy_score(y_test, y_pred_XGB)
#     return Accuracy_XGB
#
#     KNN_model = KNeighborsClassifier(n_neighbors = 3)
#     KNN_model.fit(X_train,y_train)
#     y_pred_KNN = KNN_model.predict(X_test)
#     Accuracy_KNN = metrics.accuracy_score(y_test, y_pred_KNN)
#     print("KNN Accuracy:",Accuracy_KNN)
#
#
#     QDA_model = QuadraticDiscriminantAnalysis()
#     QDA_model.fit(X_train,y_train)
#     y_pred_QDA = QDA_model.predict(X_test)
#     Accuracy_QDA = metrics.accuracy_score(y_test, y_pred_QDA)
#     print("QDA Accuracy:",Accuracy_QDA)
#
#     LDA_model = LinearDiscriminantAnalysis()
#     LDA_model.fit(X_train,y_train)
#     y_pred_LDA = LDA_model.predict(X_test)
#     Accuracy_LDA = metrics.accuracy_score(y_test, y_pred_LDA)
#     print("LDA Accuracy:",Accuracy_LDA)
#
#     GNB_model = GaussianNB()
#     GNB_model.fit(X_train, y_train)
#     y_pred_GNB = GNB_model.predict(X_test)
#     Accuracy_GNB = metrics.accuracy_score(y_test, y_pred_GNB)
#     print("GNB Accuracy:",Accuracy_GNB)
#
#     RF_model = RandomForestClassifier(random_state=1234)
#     RF_model.fit(X_train,y_train)
#     y_pred_RF = RF_model.predict(X_test)
#     Accuracy_RF = metrics.accuracy_score(y_test, y_pred_RF)
#     print("RF Accuracy:",Accuracy_RF)
#
#     SVC_model = svm.SVC(kernel='rbf', gamma= 2, C = 5, random_state=1234)
#     SVC_model.fit(X_train, y_train)
#     y_pred_SVM = SVC_model.predict(X_test)
#     Accuracy_SVM = metrics.accuracy_score(y_test, y_pred_SVM)
#     print("SVM Accuracy:",Accuracy_SVM)
#
#     MLP_model = MLPClassifier(hidden_layer_sizes=(5,5,5), activation='logistic', solver='adam', max_iter=1500)
#     MLP_model.fit(X_train, y_train)
#     y_pred_MLP = MLP_model.predict(X_test)
#     Accuracy_MLP = metrics.accuracy_score(y_test, y_pred_MLP)
#     print("MLP Accuracy:",Accuracy_MLP)
#
#     LR_model = LogisticRegression(random_state=1234 , multi_class='multinomial', solver='newton-cg')
#     LR_model = LR_model.fit(X_train, y_train)
#     y_pred_LR = LR_model.predict(X_test)
#     Accuracy_LR = metrics.accuracy_score(y_test, y_pred_LR)
#     print("LR Accuracy:",Accuracy_LR)