from sklearn.model_selection import KFold
import numpy as np
import time
import matplotlib.pyplot as plt


class KNN_Classifier:
    """k-NN classifier for time series using DTW (dynamic time warping) distance.

    Lazy learner: ``train`` only stores the data; ``predict`` computes DTW
    distances to every stored training series and majority-votes over the
    k nearest neighbours for every odd k up to 49.
    """

    def get_dataset_by_index(self, x_data, y_data, train_index, test_index):
        """Split (x_data, y_data) into train/test subsets by index lists.

        Args:
            x_data: full dataset (sequence of time series)
            y_data: labels aligned with x_data
            train_index: indices selecting the training subset
            test_index: indices selecting the test subset

        Returns:
            (x_train, y_train, x_test, y_test)
        """
        x_train = [x_data[i] for i in train_index]
        y_train = [y_data[i] for i in train_index]
        x_test = [x_data[i] for i in test_index]
        y_test = [y_data[i] for i in test_index]
        return x_train, y_train, x_test, y_test

    def dtw_distance(self, ts_a, ts_b, d=lambda x, y: abs(x - y), mww=1000):
        """Compute the DTW distance between two time series.

        Args:
            ts_a: time series a (non-empty)
            ts_b: time series b (non-empty)
            d: pointwise distance function (default: absolute difference)
            mww: max warping window half-width (Sakoe-Chiba band), int,
                optional (default = 1000)

        Returns:
            DTW distance between ts_a and ts_b given the window; ``inf`` if
            no warping path fits inside the window.

        Raises:
            IndexError: if either series is empty.
        """
        ts_a, ts_b = np.array(ts_a), np.array(ts_b)
        M, N = len(ts_a), len(ts_b)
        # BUG FIX: the cost matrix was initialized with np.ones, so cells
        # outside the warping window kept the value 1 and an out-of-band
        # path could look artificially cheap for small mww. Initialize to
        # +inf so unreachable cells can never be chosen.
        cost = np.full((M, N), np.inf)
        # First row and column are plain cumulative sums.
        cost[0, 0] = d(ts_a[0], ts_b[0])
        for i in range(1, M):
            cost[i, 0] = cost[i - 1, 0] + d(ts_a[i], ts_b[0])
        for j in range(1, N):
            cost[0, j] = cost[0, j - 1] + d(ts_a[0], ts_b[j])

        # Populate the rest of the cost matrix within the window.
        for i in range(1, M):
            for j in range(max(1, i - mww), min(N, i + mww)):
                choices = cost[i - 1, j - 1], cost[i, j - 1], cost[i - 1, j]
                cost[i, j] = min(choices) + d(ts_a[i], ts_b[j])

        # DTW distance is the accumulated cost at the far corner.
        return cost[-1, -1]

    def train(self, x_train, y_train):
        """Store the training set; k-NN is lazy, so nothing is fitted here."""
        self.x_train = x_train
        self.y_train = y_train

    def select_top_k(self, sorted_index, max_k=49):
        """Majority-vote a label for every odd k in 1..max_k.

        Args:
            sorted_index: training-sample indices sorted by ascending distance
            max_k: largest (odd) k to evaluate, optional (default = 49)

        Returns:
            dict mapping each odd k to the majority label among the k
            nearest training samples. Ties break toward the label first
            encountered among the neighbours (stable sort).
        """
        res = {}
        n = len(sorted_index)  # len() instead of .shape[0]: accepts lists too
        for k in range(1, max_k + 1, 2):
            class_count = {}
            for i in range(min(k, n)):
                vote_label = self.y_train[sorted_index[i]]
                class_count[vote_label] = class_count.get(vote_label, 0) + 1
            # Pick the most-voted label for this k.
            res[k] = sorted(class_count.items(), key=lambda s: s[1], reverse=True)[0][0]
        return res

    def predict(self, x_test, y_test):
        """Classify every test series and score accuracy for every k.

        Args:
            x_test: test time series
            y_test: ground-truth labels aligned with x_test

        Returns:
            (res_acc, res): ``res_acc`` maps k -> accuracy over x_test;
            ``res`` maps k -> list of predicted labels, one per test sample
            in input order.
        """
        res = {}
        for test in x_test:
            # Distance from this test series to every stored training series.
            cost = np.array([self.dtw_distance(test, train) for train in self.x_train])
            top_k = self.select_top_k(cost.argsort())
            for k, pred_label in top_k.items():
                res.setdefault(k, []).append(pred_label)
        y_true = np.array(y_test)
        res_acc = {k: np.sum(np.array(pred) == y_true) / y_true.shape[0]
                   for k, pred in res.items()}
        return res_acc, res

    def ten_fold_cross_validation(self, x_data, y_data, x=10):
        """x-fold cross-validation (default 10).

        Args:
            x_data: full dataset
            y_data: labels aligned with x_data
            x: number of folds, optional (default = 10)

        Prints per-fold accuracies, the mean accuracy per k, and the
        accumulated predicted/true labels per k. Returns None.
        """
        kf = KFold(n_splits=x, shuffle=True)
        co = 1
        acc_sum = {}  # k -> accuracy summed over folds
        y_pred = {}   # k -> predicted labels accumulated over all folds
        y_true = {}   # k -> ground-truth labels accumulated over all folds
        for train_index, test_index in kf.split(x_data):
            print(train_index.shape, test_index.shape)
            x_train, y_train, x_test, y_test = self.get_dataset_by_index(
                x_data, y_data, train_index, test_index)
            self.train(x_train, y_train)
            print('训练集：', len(x_train), '测试集：', len(x_test), '第%d次交叉验证' % co)
            # predit_res: k -> accuracy on this fold; predict: k -> labels
            predit_res, predict = self.predict(x_test, y_test)
            print('ok')
            # BUG FIX: fold accuracies were accumulated positionally
            # (res[pos] += ...), silently relying on dict iteration order
            # matching across folds. Accumulate keyed by k instead.
            for k, acc in predit_res.items():
                acc_sum[k] = acc_sum.get(k, 0.0) + acc
            print(predit_res)
            co += 1
            for k in predict:
                if k not in y_pred:
                    y_pred[k] = []
                    y_true[k] = []
                # NOTE(review): astype(int) assumes labels are int-convertible
                # — confirm against the callers' label encoding.
                y_pred[k].extend(np.array(predict[k]).astype(int).tolist())
                y_true[k].extend(y_test)
        res = np.array([acc_sum[k] for k in acc_sum]) / x
        print(res)
        for k in y_pred:
            print('k = ', k)
            print(y_pred[k])
            print(y_true[k])
