import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import stats
from sklearn.linear_model import BayesianRidge, LogisticRegression
from sklearn import tree
from sklearn.svm import SVR
from sklearn.model_selection import cross_validate, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
from sklearn.externals.six import StringIO
import pandas as pd
import pydotplus
from collections import Counter


class KNN(object):
    """
    k-nearest-neighbour demos on random 2-D points and on a small fruit dataset.

    Distance metrics for reference:
      * Euclidean : sqrt(sum((x_i - y_i) ** 2))
      * Manhattan : sum(abs(x_i - y_i))
      * Minkowski : (sum(abs(x_i - y_i) ** q)) ** (1 / q)
    """
    def __init__(self):
        # Configure matplotlib so Chinese text is not rendered as boxes.
        mpl.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable default font
        mpl.rcParams['axes.unicode_minus'] = False  # keep '-' readable in saved figures

        # Colour name (as stored in the dataset) -> matplotlib colour code.
        self.color_dict = {
            '红色': 'r',
            '黄色': 'y',
            '绿色': 'g'
        }

        # Fruit dataset; columns: 形状 (shape), 重量 (weight), 颜色 (colour), 水果名 (fruit name).
        self.df_fruit = pd.read_excel('./data/fruits.xlsx')

    @staticmethod
    def kk_search(x, data, k):
        """
        Return the column indices of the k points in ``data`` nearest to ``x``.

        :param x: query point, array of shape (n_dims, 1)
        :param data: candidate points, array of shape (n_dims, n_points)
        :param k: number of neighbours wanted (clamped to n_points)
        :return: array of at most k indices, nearest first
        """
        n_points = data.shape[1]
        k = min(k, n_points)
        # Euclidean distance: broadcasting subtracts x from every column and
        # the squared components are summed over axis 0 (the dimensions).
        # (Fix: the original sliced x[:, :n_points], a no-op that conflated
        # the number of points with the number of columns of x.)
        sqd = np.sqrt(np.power(data - x, 2).sum(axis=0))
        # argsort yields positions sorted by ascending distance.
        return np.argsort(sqd)[:k]

    def kk_search_main(self):
        """Generate random 2-D points, find the 10 nearest to a random query, plot them."""
        # 2 x 200 samples from the uniform distribution on [0, 1).
        data = np.random.rand(2, 200)
        x = np.random.rand(2, 1)

        neig_idx = self.kk_search(x, data, k=10)

        plt.plot(data[0], data[1], 'o', markersize=10, label='原始数据')
        plt.plot(x[0][0], x[1][0], 'o', markersize=10, label='测试标签')
        plt.plot(data[0][neig_idx], data[1][neig_idx], 'o', markersize=12, label='距离最近的k个')
        plt.legend()

        plt.show()

    def fruit_show(self):
        """
        Scatter-plot the fruit dataset (shape vs weight), coloured by fruit colour.
        :return:
        """
        plt.title('苹果、香蕉、梨的形状和重量')
        plt.xlabel('形状')
        plt.ylabel('重量')

        plt.scatter(self.df_fruit['形状'], self.df_fruit['重量'], c=[self.color_dict.get(k, '')
                                                                 for k in self.df_fruit['颜色']])
        plt.legend(('苹果', '香蕉', '梨',))

        plt.show()

    def determin_fruit_me(self, x, y, threshold):
        """
        Classify a new fruit with a plain k-NN majority vote.

        :param x: shape coordinate of the new fruit
        :param y: weight coordinate of the new fruit
        :param threshold: k, the number of nearest neighbours that vote
        :return: the winning fruit name (水果名)
        """
        # Vectorised Euclidean distance to every training fruit
        # (replaces the original row-by-row iterrows() loop).
        shape_diff = self.df_fruit['形状'].values - x
        weight_diff = self.df_fruit['重量'].values - y
        dist = np.sqrt(np.power(shape_diff, 2) + np.power(weight_diff, 2))

        # Indices of the k nearest neighbours.
        idx = np.argsort(dist)[: threshold]
        fruit_names = [self.df_fruit['水果名'].iloc[num] for num in idx]
        fruit_names_dict = dict(Counter(fruit_names))
        return max(fruit_names_dict, key=fruit_names_dict.get)

    def determin_fruit(self, x, y, threshold):
        """
        Classify a new fruit by counting neighbours within a distance cut-off.

        Unlike :meth:`determin_fruit_me`, ``threshold`` is used here as a
        radius (also applied to the squared per-axis differences), not as
        the neighbour count k.

        :param x: shape coordinate of the new fruit
        :param y: weight coordinate of the new fruit
        :param threshold: distance cut-off for a training fruit to vote
        :return: predicted fruit name, or None when no fruit is close enough
        """
        dist = []
        for index, row in self.df_fruit.iterrows():
            # squared shape difference
            xdif = np.power(row['形状'] - x, 2)
            # squared weight difference
            ydif = np.power(row['重量'] - y, 2)

            distance = np.sqrt(xdif + ydif)

            # Points outside the cut-off get a large sentinel distance so
            # they are never counted below.
            if xdif < threshold and ydif < threshold and distance < threshold:
                dist.append(distance)
            else:
                dist.append(1000)

        fruit_colors = {
            '绿色': 'pear',
            '红色': 'apple',
            '黄色': 'banana'
        }
        fruits = dict(pear='梨', apple='苹果', banana='香蕉')

        fruit_dict = dict(pear=0, apple=0, banana=0)
        for num, data in enumerate(dist):
            if data < threshold:
                fruit_color = self.df_fruit['颜色'].iloc[num]
                fruit = fruit_colors.get(fruit_color)
                # Bug fix: colours missing from fruit_colors used to map to
                # '' and raise KeyError on fruit_dict['']; skip them instead.
                if fruit is not None:
                    fruit_dict[fruit] += 1

        # Bug fix: the original took max() even when every count was zero,
        # which arbitrarily labelled the fruit '梨'; report "no match" instead.
        if not any(fruit_dict.values()):
            return None
        best_fruit = max(fruit_dict, key=fruit_dict.get)
        return fruits.get(best_fruit, '')

    def knn_predict_fruit(self):
        """
        Label four new fruits via determin_fruit_me and plot them on top of
        the training data, marked with the letters A-D.
        :return:
        """
        new_fruit = [[3.5, 6.2], [2.75, 6.2], [2.9, 7.6], [2.4, 7.2]]
        new_fruit_names = ['A', 'B', 'C', 'D']
        threshold = 3

        for coordinate in new_fruit:
            x, y = coordinate
            fruit = self.determin_fruit_me(x, y, threshold)
            print(fruit)

        # Plot training data plus the new, freshly labelled fruits.
        plt.title('苹果、香蕉、梨的形状和重量')
        plt.xlabel('形状')
        plt.ylabel('重量')

        plt.scatter(self.df_fruit['形状'], self.df_fruit['重量'], c=[self.color_dict.get(k, '')
                                                                 for k in self.df_fruit['颜色']],)

        for num, coordinate in enumerate(new_fruit):
            x, y = coordinate
            plt.scatter(x, y, marker='${}$'.format(new_fruit_names[num]))

        plt.show()


class LR(object):
    """
    Plain linear models: sensitive to outliers (clean them in preprocessing)
    and to collinearity (check feature correlations beforehand).
    """
    def __init__(self):
        # Configure matplotlib so Chinese text renders instead of boxes.
        mpl.rcParams['font.sans-serif'] = ['SimHei']
        mpl.rcParams['axes.unicode_minus'] = False

    @staticmethod
    def bayes_regression():
        """
        Bayesian ridge regression demo.

        Compared with plain linear regression the Bayesian model:
          1. is a penalised estimator, more flexible and robust;
          2. tends to be more accurate on small samples (prior-dependent);
          3. can incorporate prior information.

        Setup: for data (x_i, y_i) the regression weights w follow a normal
        prior and, given a new x, y is normally distributed; we fit the model
        and plot a (log-scale) histogram of the learned weights, marking the
        randomly chosen "relevant" feature indices.
        :return:
        """
        np.random.seed(0)
        n_sample, n_features = 200, 200

        # Standard-normal design matrix, 200 x 200.
        design = np.random.randn(n_sample, n_features)
        theta = 4

        # 8 feature indices drawn from [0, n_features) to highlight later.
        relevant_idx = np.random.randint(0, n_features, 8)
        # Normal weights with precision theta.
        weights = stats.norm.rvs(loc=0, scale=1 / np.sqrt(theta), size=n_features)

        noise_precision = 50
        noise = stats.norm.rvs(loc=0, scale=1 / np.sqrt(noise_precision), size=n_sample)

        target = np.dot(design, weights) + noise

        model = BayesianRidge(compute_score=True)
        model.fit(design, target)

        plt.hist(model.coef_, bins=n_features, log=True)
        plt.plot(model.coef_[relevant_idx], 5 * np.ones(len(relevant_idx)), 'ro', label='Relevant 特征')
        plt.xlabel('权重值')
        plt.ylabel('特征')
        plt.legend(loc='lower left')
        plt.title('权重直方图')

        plt.show()

    @staticmethod
    def line_regression():
        """
        Simple linear-regression exploration: print the correlation matrix,
        then scatter each predictor against the acceptance score.
        :return:
        """
        scores = pd.read_excel('./data/sports.xlsx', index_col=0)
        # Pairwise correlations between all columns.
        print(scores.corr())

        fig, axes = plt.subplots(1, 3)

        # figsize is only honoured on the first plot call, as in the original.
        for pane, column in enumerate(('sports', 'music', 'academic')):
            extra = {'figsize': (16, 8)} if pane == 0 else {}
            scores.plot(kind='scatter', x=column, y='acceptance', ax=axes[pane], **extra)

        plt.show()


class DecisionTree(object):
    @staticmethod
    def decision_tree():
        """
        Fit a decision tree on the toy "play tennis" dataset and export the
        fitted tree as ./data/DecisionTreeClassifier.pdf via graphviz.

        Feature columns: weather, temperature, humidity, wind.
          weather:     1 sunny, 2 overcast, 3 rain
          temperature: 1 hot, 2 mild, 3 cool
          humidity:    1 high, 2 normal
          wind:        0 weak, 1 strong
        Target y: 0 = don't play, 1 = play.
        """
        feature_names = ['weather', 'temperature', 'humidity', 'wind']
        class_names = ['no play', 'play']

        x = [[1, 1, 1, 0],
             [1, 1, 1, 1],
             [2, 1, 1, 0],
             [2, 3, 2, 1],
             [1, 2, 1, 0],
             [1, 3, 2, 0],
             [3, 2, 1, 0],
             [3, 3, 2, 0],
             [3, 3, 2, 1],
             [3, 2, 2, 0],
             [1, 2, 2, 1],
             [2, 2, 1, 1],
             [2, 1, 2, 0],
             [3, 2, 1, 0]]
        y = [0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0]

        clf = tree.DecisionTreeClassifier()
        clf.fit(x, y)

        # Bug fix: export_graphviz expects the feature *names*; the original
        # passed clf.feature_importances_ (an array of floats), so every
        # split node in the PDF was labelled with a number instead of a name.
        dot_data = tree.export_graphviz(clf, out_file=None,
                                        feature_names=feature_names,
                                        class_names=class_names,
                                        filled=True, rounded=True,
                                        special_characters=True)

        graph = pydotplus.graph_from_dot_data(dot_data)
        graph.write_pdf('./data/DecisionTreeClassifier.pdf')
        print('Visible tree plot saved as pdf.')


def logstic_regression():
    """
    Logistic regression on the Titanic dataset:
    y = 1 / (1 + exp(-x))

    Plots survivor/death age histograms per sex, then fits a logistic
    regression on (Sex, Age, Pclass) and shows true vs predicted labels
    for a small held-out test split.
    :return:
    """
    df_train = pd.read_csv('./data/titanic_train.csv')
    df_test = pd.read_csv('./data/titanic_test.csv')

    data = df_train[['Sex', 'Age', 'Pclass', 'Survived']].copy()
    data['Sex'] = data['Sex'] == 'female'
    data = data.dropna()
    data = data.astype(np.int32)

    x = data[['Sex', 'Age', 'Pclass']]
    split_train_x = x.values
    split_train_y = data[['Survived']].values

    # Bug fix: the original used chained boolean indexing
    # (data[mask1][mask2]), which relies on index alignment and triggers
    # pandas UserWarnings; combine the masks with & instead.
    survived = data['Survived'] == 1
    female = data['Sex'] == 1
    survived_male_age = data[survived & ~female]
    survived_female_age = data[survived & female]
    dead_male_age = data[~survived & ~female]
    dead_female_age = data[~survived & female]

    # Age histograms in 5-year bins.
    bins_ = np.arange(0, 121, 5)
    s = {'male': np.histogram(survived_male_age['Age'], bins=bins_)[0],
         'female': np.histogram(survived_female_age['Age'], bins=bins_)[0],
         }

    d = {'male': np.histogram(dead_male_age['Age'], bins=bins_)[0],
         'female': np.histogram(dead_female_age['Age'], bins=bins_)[0],
         }

    bins = bins_[:-1]
    plt.figure(figsize=(15, 8))
    for i, sex, color, in zip((0, 1), ('male', 'female'), ('#3345d0', '#cc3dc0')):
        # Bug fix: the original called plt.subplots(121 + i), which creates
        # a brand-new figure with a 122-row grid of axes; plt.subplot
        # selects one pane of a 1x2 layout as intended.
        plt.subplot(121 + i)
        # Survivors are stacked on top of the deaths for each age bin.
        plt.bar(bins, s[sex], bottom=d[sex], color=color, width=5, label='幸存者')
        plt.bar(bins, d[sex], color='#aaaaff', width=5, label='死亡', alpha=0.4)
        plt.xlim(0, 80)

        plt.title('{}幸存者'.format(sex))
        plt.xlabel('年龄（岁）')
        plt.legend()

    x_train, x_test, y_train, y_test = train_test_split(split_train_x, split_train_y, test_size=0.05)

    clf = LogisticRegression()
    # ravel() avoids sklearn's column-vector-y DataConversionWarning.
    clf.fit(x_train, y_train.ravel())
    y_pred = clf.predict(x_test)

    # Two rows of pixels: true labels above, predicted labels below.
    plt.imshow(np.vstack((y_test.flatten(), y_pred)), interpolation='none', cmap='bone')
    plt.xticks([])
    plt.yticks([])
    plt.title('测试集的真实和预测幸存者结果')

    plt.show()


def svm():
    """
    Support-vector-regression demo: fit RBF, linear and polynomial kernels
    to a noisy cos(x) + sin(x) curve and plot the three fits together.
    :return:
    """
    # Configure matplotlib so Chinese text renders instead of boxes.
    mpl.rcParams['font.sans-serif'] = ['SimHei']
    mpl.rcParams['axes.unicode_minus'] = False

    # 40 sample positions in [0, 5), sorted along axis 0.
    # ravel() gives a flattened view (a new array would come from flatten()).
    samples = np.sort(5 * np.random.rand(40, 1), axis=0)
    targets = (np.cos(samples) + np.sin(samples)).ravel()
    # Perturb every 5th target (8 of 40) with uniform noise.
    targets[::5] += 3 * (0.5 - np.random.rand(8))

    kernels = [
        ('g', 'SVM RBF 模型', SVR(kernel='rbf', C=1e3, gamma=0.1)),
        ('r', 'SVM Line 模型', SVR(kernel='linear', C=1e3)),
        ('b', 'SVM Poly 模型', SVR(kernel='poly',  C=1e3, degree=2)),
    ]

    plt.scatter(samples, targets, c='k', label='数据')
    for color, label, model in kernels:
        predicted = model.fit(samples, targets).predict(samples)
        plt.plot(samples, predicted, c=color, label=label)

    plt.xlabel('数据')
    plt.ylabel('目标')
    plt.title('SVM回归')
    plt.legend()

    plt.show()


def sklearn_pca():
    """
    PCA on the iris dataset: rescale two columns, standardise all features,
    project onto the first two principal components and plot the result.
    :return:
    """
    iris = load_iris()
    features = iris.get('data')

    # Rescale the first two columns in place
    # (presumably cm -> inch and a /100 unit change — TODO confirm intent).
    features[:, 0] /= 2.54
    features[:, 1] /= 100

    # Zero mean, unit variance per column.
    standardised = StandardScaler().fit_transform(features)

    projected = PCA(n_components=2).fit_transform(standardised)

    plt.scatter(projected[:, 0], projected[:, 1], s=600, alpha=0.56)
    plt.title('sklearn PCA (SVD)')
    plt.xlabel('Petal Width')
    plt.ylabel('Sepal Length')

    plt.show()


#  knn
# knn = KNN()
# knn.kk_search_main()
# 水果
# knn.fruit_show()
# knn.knn_predict_fruit()

# LR
# lr = LR()
# lr.bayes_regression()
# lr.line_regression()

# decision tree
# decision_tree = DecisionTree()
# decision_tree.decision_tree()

# 逻辑回归
# logstic_regression()

# svm
# svm()

# PCA
# sklearn_pca()

