import logging
import os
import glob
import random
import numpy as np
import torch
import math
import csv
import time
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from FCM.ClusterResult import ClusterResult
from concurrent.futures import ThreadPoolExecutor
from Kohonen.MapMinMaxApplier import mapminmax
from Kohonen.PSO import PSO
from Kohonen.fuzzycm import fcm

# Configure the root logger once at import time. The format includes the
# thread id, which is useful because files are parsed in a thread pool below.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s [%(thread)d] %(levelname)s %(message)s")
# Module-wide logger (the root logger, so it uses the basicConfig handler).
logger = logging.getLogger()


def import_excel_matrix(path):
    """Load a CSV data file into a feature matrix plus a label vector.

    Expected layout: column 0 holds the (numeric) class label of each row,
    the remaining columns hold the feature values.

    Parameters
    ----------
    path : str
        Path of the UTF-8 encoded CSV file.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        ``data_matrix`` of shape (rows, cols - 1) holding the feature
        values, and ``data_type`` of shape (rows,) holding the labels.
    """
    with open(path, encoding='UTF-8') as f:
        table = np.array(list(csv.reader(f)))
    # The very first cell may be garbled (original comment: "第一个数字乱码",
    # e.g. a BOM prepended by the exporter), so reuse the second row's label.
    # NOTE(review): this assumes rows 0 and 1 share a class — confirm against
    # the actual data files.
    table[0][0] = table[1][0]
    row, col = table.shape
    logging.getLogger(__name__).info("row %s col %s", row, col)
    # Vectorized conversion instead of the original per-cell Python loop.
    data_type = table[:, 0].astype(float)
    data_matrix = table[:, 1:].astype(float)
    return data_matrix, data_type


def intersection(nums1, nums2):
    """Return the sorted list of distinct values present in both sequences.

    Duplicates are ignored: each common value appears exactly once in the
    result, in ascending order (same contract as the original two-pointer
    merge over deduplicated sorted copies, expressed as a set operation).
    """
    return sorted(set(nums1) & set(nums2))


def KohonenCluster(Data, kind):
    """Cluster ``Data`` with a Kohonen (SOM) network and score the result.

    Parameters
    ----------
    Data : array-like, shape (n_samples, n_features)
        Raw sample matrix.
    kind : array-like, shape (n_samples,)
        Ground-truth class label per sample; labels are used as 1-based
        integer indices (1..C) below.

    Returns
    -------
    float
        Clustering accuracy: sum over clusters of the best class-overlap
        count divided by the total sample count.
    """
    logger.info("data: %s" % Data)
    # Number of clusters = number of distinct ground-truth labels.
    C = len(set(kind))
    logger.info("clusterNum: %s" % C)
    # Normalise the data.
    input, inputps = mapminmax(Data)
    n, m = input.shape
    # The whole normalised data set doubles as the test set.
    input_test = input
    testkind = kind
    # Training set: a random half of the samples.
    # NOTE(review): the slice starts at 1, so the first element of the
    # permutation is always dropped — d[:n // 2] was probably intended.
    d = torch.randperm(n)
    input_train = input[d[1: math.floor(n / 2)], :]
    trainkind = kind[d[1: math.floor(n / 2)]]
    nn, mm = input_train.shape
    # One-hot expected network output (labels are 1-based).
    output_train = np.zeros([nn, C])
    for i in range(nn):
        output_train[i, int(trainkind[i] - 1)] = 1
    # Standard deviation of the flattened (re-normalised) training data,
    # used to shape the adaptive learning rate below.
    TrainU = mapminmax(input_train)
    row = TrainU[0].reshape(nn * mm, 1)
    bzc = np.std(row)
    # --- Network construction -------------------------------------------
    # The Kohonen layer is an M x N grid; g output neurons.
    g = N = M = C
    K = M * N  # total number of Kohonen nodes
    # Grid coordinates of every Kohonen node.
    jdpx = []
    for i in range(M):
        for j in range(N):
            jdpx.append([i, j])
    # Second-layer learning-rate bounds.
    rate2max = 1
    rate2min = 0.5
    # Neighbourhood-radius bounds.
    r1max = 1.5
    r1min = 0.4
    # First-layer weights initialised from fuzzy-c-means cluster centres
    # (replaces the MATLAB `w1 = rand(Inum, K)` random init).
    center, U = fcm(TrainU[0], K)
    w1 = np.transpose(center)
    w2 = np.zeros([K, g])  # second-layer weights
    # Refine w1 with particle-swarm optimisation, one sample at a time.
    for i in range(nn):
        x = input_train[i]
        Dist, w1 = PSO(x, w1, K)
    # --- Iterative training ---------------------------------------------
    maxgen = 100
    for i in range(maxgen):
        # Adaptive learning rates and neighbourhood radius.
        rate1 = (1 / math.sqrt(2 * math.pi * bzc)) * math.exp(-i / (2 * bzc * bzc * maxgen))
        rate2 = rate2min + i / maxgen * (rate2max - rate2min)
        r = r1max - i / maxgen * (r1max - r1min)
        # Pick one random training sample.
        k = random.sample(range(nn), 1)
        x = input_train[k, :]
        y = output_train[k, :]
        # Find the winning node: DTW distance from x to every w1 column.
        # BUGFIX: the original measured against w1[:, i] (the generation
        # counter) instead of w1[:, index], so every column got the same
        # distance and the "winner" was meaningless.
        distMin = np.zeros(w1.shape[1])
        for index in range(w1.shape[1]):
            distMin[index], path = fastdtw(x, w1[:, index], dist=euclidean)
        mindist = np.min(distMin)
        index = np.where(distMin == mindist)
        if isinstance(index, tuple):
            index = np.min(index)
        # Grid coordinates of the winning node.
        d1 = math.ceil(index / C)
        d2 = index % C
        # Collect the nodes inside the neighbourhood radius.
        # NOTE(review): appending int(dist) (a distance!) instead of the
        # node index `ind` looks like a translation bug; kept as-is to
        # preserve behaviour — verify against the MATLAB original.
        nodeIndex = []
        for ind in range(len(jdpx)):
            dist, path = fastdtw([d1, d2], jdpx[ind])
            if dist <= r:
                nodeIndex.append(int(dist))
        # BUGFIX: snapshot must be a copy — `w0 = w2` aliased the array,
        # so the in-place updates below changed w0 too and the convergence
        # norm was always 0, stopping training after one generation.
        w0 = w2.copy()
        # Update the weights of the neighbourhood nodes.
        for j in range(len(nodeIndex)):
            w1[:, nodeIndex[j]] = w1[:, nodeIndex[j]] + rate1 * (x - w1[:, nodeIndex[j]])
            w2[nodeIndex[j], :] = w2[nodeIndex[j], :] + rate2 * (y - w2[nodeIndex[j], :])
        # Stop once w2 has converged.
        # NOTE(review): `i > 10` caps training at ~12 generations, making
        # maxgen=100 unreachable — confirm this is intentional.
        pauRes = np.linalg.norm(w2 - w0, ord=np.inf)
        if pauRes < 1.0e-4 or i > 10:
            break
    # --- Prediction over the full (test) set ----------------------------
    outputfore = np.zeros(n)
    for i in range(n):
        x = input_test[i, :]
        # Nearest first-layer node by DTW distance.
        distM = np.zeros(w1.shape[1])
        for index in range(w1.shape[1]):
            y = w1[:, index]
            distM[index], path = fastdtw(x, y, dist=euclidean)

        mindist = np.min(distM)
        index = np.where(distM == mindist)
        if isinstance(index, tuple):
            index = np.min(index)
        # Predicted cluster = second-layer output with the largest weight.
        a = np.max(w2[index, :])
        b = np.where(w2[index, :] == a)
        if isinstance(b, tuple):
            b = np.min(b)
        outputfore[i] = b
    # --- Accuracy: overlap of predicted vs. true partitions -------------
    # Cj[c] = indices of the samples predicted as cluster c + 1.
    # NOTE(review): outputfore holds 0-based indices, so comparing against
    # cc + 1 never matches cluster 0 — verify the intended label base.
    Cj = []
    for cc in range(C):
        v = np.where(outputfore == cc + 1)
        Cj.append(v)

    # Ki[k] = indices of the samples whose true label is k + 1.
    Ki = []
    for k in range(C):
        v = np.where(testkind == k + 1)
        Ki.append(v)
    # KC[i, j] = number of samples shared by true class i and cluster j.
    KC = np.zeros([C, C])
    for i in range(len(Ki)):
        for j in range(len(Cj)):
            KC[i, j] = int(len(intersection(Ki[i][0], Cj[j][0])))

    # Per-cluster accuracy: best class overlap divided by sample count.
    z = np.zeros(C)
    for j in range(C):
        z[j] = (max(KC[:, j])) / n
    return sum(z)


def loadDataParallel():
    """Parse every data file in a thread pool and cluster each one.

    CSV parsing runs concurrently on worker threads; the clustering itself
    runs sequentially on this thread as results arrive.

    Returns
    -------
    list
        One ``ClusterResult(file_name, accuracy)`` per data file.
    """
    workspace = os.path.dirname(__file__)
    resultList = []
    # Files to process (currently only Car.csv; glob keeps the door open
    # for wildcard patterns).
    fileLst = list(glob.glob(os.path.join(workspace, "datasource", "Car.csv")))

    # Executor.map preserves input order, so results can be zipped back to
    # their file paths; `with` shuts the pool down (wait=True) on exit.
    with ThreadPoolExecutor(max_workers=6, thread_name_prefix="test_") as threadPool:
        for data_file, (data_matrix, data_type) in zip(
                fileLst, threadPool.map(import_excel_matrix, fileLst)):
            # BUGFIX: os.path.basename instead of split("\\"), which only
            # worked with Windows path separators.
            file_name = os.path.basename(data_file)
            logger.info("data_file %s" % file_name)
            result = KohonenCluster(data_matrix, data_type)
            logger.info("data %s" % result)
            resultList.append(ClusterResult(file_name, result))
    return resultList


def getCurrentTime():
    """Return the current wall-clock time in whole milliseconds."""
    seconds = time.time()
    return int(round(seconds * 1000))


def LstTransToStr(Lst):
    """Join the string form of every element with ';' separators.

    Returns '' for an empty list. Replaces the original quadratic
    ``+=`` concatenation loop (and trailing-separator trim) with
    ``str.join``.
    """
    return ';'.join(str(item) for item in Lst)


def main():
    """Script entry point: run the clustering pipeline and log a summary."""
    try:
        # Record the start time (milliseconds) and log it human-readably.
        startMillis = getCurrentTime()
        startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(startMillis / 1000))
        logger.info("startTime: %s" % startTime)
        result_list = loadDataParallel()
        elapsed = getCurrentTime() - startMillis
        logger.info("cost time: %s ms" % elapsed)
        # Serialise the result list into one ';'-separated string.
        resStr = LstTransToStr(result_list)
        logger.info("result: %s " % resStr)
    except Exception as ex:
        print('Exception:\r\n')
        print(ex)
    finally:
        # Windows-only: keep the console window open until a key press.
        os.system("pause")


if __name__ == '__main__':
    main()