from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, plot_roc_curve, roc_curve, auc, roc_auc_score
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
# import ssl
# import joblib #jbolib模块
import matplotlib.pyplot as plt
import time

# from sklearn.feature_extraction import DictVectorizer
# from sklearn.tree import DecisionTreeClassifier


'''For details see https://blog.csdn.net/weixin_39959615/article/details/111581385
   and https://blog.csdn.net/qq_39763246/article/details/119703206
'''

# A self-defined logistic-regression demo (scikit-learn based)
class Mylogic():
    """Tumour benign/malignant prediction with scikit-learn LogisticRegression.

    References:
      https://blog.csdn.net/weixin_39959615/article/details/111581385
      https://blog.csdn.net/qq_39763246/article/details/119703206
    """

    def __init__(self):
        pass

    def parse(self, data=None, target=None):
        """Train a LogisticRegression on a local Excel sheet and report metrics.

        The sheet is assumed to have an id in column 0, features in columns
        1..13 and the binary target in column 14 — TODO confirm against the
        actual file.  Prints predictions, accuracy, a classification report
        and the AUC, then plots the ROC curve.
        """
        # 1. Load the data. (The original tutorial fetched the UCI
        # breast-cancer dataset over HTTP; this version reads a local file.)
        data = pd.read_excel(r'./data/表单1.xlsx')

        # 2. Basic cleaning
        # 2.1 Missing values are marked with "?" in the raw data: replace
        # with NaN, then drop those rows.  (np.nan, not np.NaN — the
        # uppercase alias was removed in NumPy 2.0.)
        data = data.replace(to_replace="?", value=np.nan)
        data = data.dropna()

        # 2.2 Features (column 0 is an id, not a feature) and target.
        x = data.iloc[:, 1:14]
        y = data.iloc[:, 14]

        # 2.3 Train/test split with a fixed seed for reproducibility.
        x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=22, test_size=0.2)

        # 3. Feature engineering: standardization.
        # BUG FIX: the test set must be scaled with the statistics fitted on
        # the *training* set (transform), not re-fitted on itself
        # (fit_transform) — otherwise the evaluation is inconsistent.
        transfer = StandardScaler()
        x_train = transfer.fit_transform(x_train)
        x_test = transfer.transform(x_test)

        # 4. Machine learning: logistic regression.
        estimator = LogisticRegression()
        estimator.fit(x_train, y_train)
        # Persisting the model (optional):
        # joblib.dump(estimator, r'./save/logic.pkl')

        # 5. Model evaluation.
        y_predict = estimator.predict(x_test)     # predicted labels
        score = estimator.score(x_test, y_test)   # accuracy
        report = classification_report(y_test, y_predict)
        print("预测值：\n", y_predict)
        print("准确率：", score)
        print(report)
        # NOTE(review): roc_auc_score is given hard labels here, not
        # probabilities, which under-estimates AUC; kept for output
        # compatibility with the original script.
        print("AUC指标:", roc_auc_score(y_test, y_predict))
        self.plotROC(clf=estimator, x_test=x_test, y_test=y_test)

    # On evaluating logistic-regression models see
    # https://blog.csdn.net/qq_39763246/article/details/119703206 and
    # https://blog.51cto.com/u_15060460/4709502
    def plotROC(self, clf=None, x_test=None, y_test=None):
        """Plot the ROC curve of a fitted binary classifier.

        Args:
            clf: fitted estimator exposing ``predict_proba``.
            x_test: test-set features.
            y_test: true test-set labels.
        """
        # BUG FIX: sklearn.metrics.plot_roc_curve was removed in
        # scikit-learn 1.2; build the curve from roc_curve()/auc(), both of
        # which are already imported at the top of this file.
        y_score = clf.predict_proba(x_test)[:, 1]  # P(positive class)
        # NOTE(review): roc_curve assumes labels are {0,1} or {-1,1}; pass
        # pos_label explicitly if the target uses other codes.
        fpr, tpr, _ = roc_curve(y_test, y_score)

        # Create the figure; reuse the same axes if several models should
        # share one plot.
        fig, ax = plt.subplots(figsize=(12, 10))
        ax.plot(fpr, tpr, linewidth=1,
                label='LogisticRegression (AUC = {:.3f})'.format(auc(fpr, tpr)))
        ax.set_xlabel('False Positive Rate')
        ax.set_ylabel('True Positive Rate')

        # Legend font size.
        ax.legend(fontsize=12)
        # Diagonal reference line (random classifier).
        ax.plot([0, 1], [0, 1], linestyle='--', color='grey')

        # Show the ROC curve.
        plt.show()


class logic:
    """Logistic regression implemented from scratch with NumPy.

    Compares full-batch / stochastic / mini-batch gradient descent under
    three stopping criteria (iteration count, cost change, gradient norm).

    Test data: https://pan.baidu.com/s/1Enr4JcPVzBiUCfvEYiVmlQ (code: lg51)
    """

    def __init__(self):
        # Two exam scores + admission flag (0/1); the file has no header row.
        self.path = r'./data/Logireg_data.txt'
        self.pdData = pd.read_csv(self.path, header=None, names=['Exam1', 'Exam2', 'Admitted'])
        # Codes for the three stopping criteria compared below.
        self.STOP_ITER = 0   # stop after a fixed number of iterations
        self.STOP_COST = 1   # stop when the cost change drops below threshold
        self.STOP_GRAD = 2   # stop when the gradient norm drops below threshold
        self.theta = np.zeros([1, 3])  # parameters: bias + two feature weights

    def _ensure_bias_column(self):
        # Insert the all-ones bias column exactly once.  BUG FIX: run() and
        # test2() previously required test() to have been called first to do
        # this insert, and calling test() twice raised on the duplicate insert.
        if 'ones' not in self.pdData.columns:
            self.pdData.insert(0, 'ones', 1)

    def pltdot(self):
        """Scatter-plot the two exam scores, admitted vs. not admitted."""
        admitted = self.pdData[self.pdData['Admitted'] == 1]   # positive class
        rejected = self.pdData[self.pdData['Admitted'] == 0]   # negative class
        fig, ax = plt.subplots(figsize=(10, 5))
        ax.scatter(admitted['Exam1'], admitted['Exam2'], s=30, c='b', marker='o', label='Admitted')
        ax.scatter(rejected['Exam1'], rejected['Exam2'], s=30, c='r', marker='x', label='not Admitted')
        ax.legend()
        ax.set_xlabel('Exam 1 score')
        ax.set_ylabel('Exam 2 score')
        plt.show()

    # The goal is a classifier: threshold the sigmoid output to decide admission.
    def sigmoid(self, z):
        """Logistic function, maps R -> (0, 1)."""
        return 1 / (1 + np.exp(-z))

    def pltsigmoid(self):
        """Plot the sigmoid over [-10, 10)."""
        nums = np.arange(-10, 10, step=1)
        fig, ax = plt.subplots(figsize=(12, 4))
        ax.plot(nums, self.sigmoid(nums), 'r')
        plt.show()

    def model(self, X, theta):
        """Prediction function: sigmoid of the linear combination X @ theta.T."""
        return self.sigmoid(np.dot(X, theta.T))

    def test(self):
        """Sanity-check array shapes and the initial cost."""
        self._ensure_bias_column()
        orig_data = self.pdData.values
        cols = orig_data.shape[1]
        X = orig_data[:, 0:cols - 1]
        y = orig_data[:, cols - 1:cols]
        print(X[:5])
        print(X.shape, y.shape, self.theta.shape)
        print(self.cost(X, y, self.theta))

    def cost(self, X, y, theta):
        """Average negative log-likelihood (cross-entropy) loss."""
        left = np.multiply(-y, np.log(self.model(X, theta)))
        right = np.multiply(1 - y, np.log(1 - self.model(X, theta)))
        return np.sum(left - right) / (len(X))

    def gradient(self, X, y, theta):
        """Gradient of the cost w.r.t. each parameter; shape matches theta."""
        grad = np.zeros(theta.shape)
        error = (self.model(X, theta) - y).ravel()
        for j in range(len(theta.ravel())):  # one partial derivative per parameter
            term = np.multiply(error, X[:, j])
            grad[0, j] = np.sum(term) / len(X)
        return grad

    def stopCriterion(self, type, value, threshold):
        """Return True when the selected stopping rule fires.

        ``type`` shadows the builtin but is kept for caller compatibility.
        """
        if type == self.STOP_ITER: return value > threshold
        elif type == self.STOP_COST: return abs(value[-1] - value[-2]) < threshold
        elif type == self.STOP_GRAD: return np.linalg.norm(value) < threshold

    def shuffledata(self, data):
        """Shuffle rows in place and split into features X and target y."""
        np.random.shuffle(data)
        cols = data.shape[1]
        X = data[:, 0:cols - 1]
        y = data[:, cols - 1:]
        return X, y

    def descent(self, data, theta, batchSize, stopType, thresh, alpha):
        """Gradient-descent solver.

        Returns (theta, iterations, cost history, last gradient, seconds).
        """
        init_time = time.time()
        i = 0  # iteration counter
        k = 0  # batch cursor into the shuffled data
        X, y = self.shuffledata(data)
        n = data.shape[0]  # dataset size (previously hard-coded as 100)
        grad = np.zeros(theta.shape)
        costs = [self.cost(X, y, theta)]  # history starts with the initial cost

        while True:
            grad = self.gradient(X[k:k + batchSize], y[k:k + batchSize], theta)
            k += batchSize  # advance by one batch
            if k >= n:
                k = 0
                X, y = self.shuffledata(data)  # reshuffle after a full pass
            theta = theta - alpha * grad  # parameter update
            costs.append(self.cost(X, y, theta))
            i += 1

            if stopType == self.STOP_ITER:
                value = i
            elif stopType == self.STOP_COST:
                value = costs
            elif stopType == self.STOP_GRAD:
                value = grad
            if self.stopCriterion(stopType, value, thresh): break

        return theta, i - 1, costs, grad, time.time() - init_time

    def runExpe(self, data, theta, batchSize, stopType, thresh, alpha):
        """Run one descent experiment, print a summary and plot cost vs. iteration."""
        theta, n_iter, costs, grad, dur = self.descent(data, theta, batchSize, stopType, thresh, alpha)
        # Heuristic: raw exam scores exceed 2, standardized ones do not.
        name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
        name += " data - learning rate: {} - ".format(alpha)
        if batchSize == data.shape[0]:       # full batch (was hard-coded 100)
            strDescType = "Gradient"
        elif batchSize == 1:                 # one sample per step
            strDescType = "Stochastic"
        else:
            strDescType = "Mini-batch ({})".format(batchSize)
        name += strDescType + " descent - Stop: "
        if stopType == self.STOP_ITER: strStop = "{} iterations".format(thresh)
        elif stopType == self.STOP_COST: strStop = "costs change < {}".format(thresh)
        else: strStop = "gradient norm < {}".format(thresh)
        name += strStop
        print ("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
            name, theta, n_iter, costs[-1], dur))
        fig, ax = plt.subplots(figsize=(12, 4))
        ax.plot(np.arange(len(costs)), costs, 'r')
        ax.set_xlabel('Iterations')
        ax.set_ylabel('Cost')
        ax.set_title(name.upper() + ' - Error vs. Iteration')
        return theta

    def run(self):
        """Compare descent variants and stopping criteria on the raw data."""
        self._ensure_bias_column()  # BUG FIX: run() used to crash unless test() had run first
        orig_data = self.pdData.values
        # Full-batch descent under each stopping criterion.
        self.runExpe(orig_data, self.theta, 100, self.STOP_ITER, thresh=5000, alpha=0.000001)
        plt.show()
        self.runExpe(orig_data, self.theta, 100, self.STOP_GRAD, thresh=0.05, alpha=0.001)
        plt.show()
        self.runExpe(orig_data, self.theta, 100, self.STOP_COST, thresh=0.000001, alpha=0.001)
        plt.show()
        # Comparison: stochastic and mini-batch variants.
        self.runExpe(orig_data, self.theta, 1, self.STOP_ITER, thresh=5000, alpha=0.001)
        plt.show()
        self.runExpe(orig_data, self.theta, 1, self.STOP_ITER, thresh=15000, alpha=0.000002)
        plt.show()
        self.runExpe(orig_data, self.theta, 16, self.STOP_ITER, thresh=15000, alpha=0.001)
        plt.show()
        # Standardizing the data (subtract the per-column mean, divide by the
        # standard deviation) centers every feature near 0 with unit variance
        # — see test2() below.

    def predict(self, X, theta):
        """Threshold the model output at 0.5 to obtain hard 0/1 labels."""
        return [1 if x >= 0.5 else 0 for x in self.model(X, theta)]

    def test2(self):
        """Train on standardized features and report training accuracy."""
        from sklearn import preprocessing as pp

        self._ensure_bias_column()
        orig_data = self.pdData.values
        scaled_data = orig_data.copy()
        # Standardize the two exam-score columns; the bias column stays 1.
        scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])

        # BUG FIX: use the trained theta returned by runExpe — previously the
        # untrained all-zero self.theta was used for prediction.
        theta = self.runExpe(scaled_data, self.theta, 100, self.STOP_ITER, thresh=5000, alpha=0.001)

        scaled_X = scaled_data[:, :3]
        y = scaled_data[:, 3]
        predictions = self.predict(scaled_X, theta)
        correct = [1 if a == b else 0 for (a, b) in zip(predictions, y)]
        # BUG FIX: accuracy is correct/total*100.  The original used `%`
        # (modulo), which only looked right when 0 < correct < total == 100
        # and reported 0% for a perfect model.
        accuracy = 100.0 * sum(correct) / len(correct)
        print ('accuracy = {0}%'.format(accuracy))


if __name__ == '__main__':
    # Script entry point: run the scikit-learn based tumour-classification demo.
    # (Use logic().run() / logic().test2() for the from-scratch version.)
    classifier = Mylogic()
    classifier.parse()
