import time
from multiprocessing import Process, Pool, Queue
from abc import abstractmethod
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from dtw import dtw
# from mpl_toolkits.axes_grid1 import make_axes_locatable
import os


# Load the raw series data once at import time (no header row, comma-separated).
# NOTE(review): module-level I/O — importing this file requires data/data.csv to exist.
data = pd.read_csv('data/data.csv', sep=',', header=None)
columns = data.columns


def m_to_m(mos, m_function, deviation: int = 200, zoom_limits: int = 300, min_scope: int = 200, step: int = 80,
           zoom_step: int = 100) -> list:
    """
    Match a set of pattern curves against a set of target curves.

    The first pattern column is searched in the first target column; for each
    candidate range found there, every other pattern column must also match
    its target column within ``deviation`` rows of that range, otherwise the
    candidate is dropped.

    :param mos: DataFrame of pattern curves [m*n] (the shapes we want to find)
    :param m_function: DataFrame of target curves to search in
    :param deviation: allowed row offset around the first-column match
    :param zoom_limits: window-size slack passed through to FindWithPool
    :param min_scope: minimum comparison window inside the target series
    :param step: sliding stride; smaller is finer but slower
    :param zoom_step: stride used when growing the window size
    :return: list of matched groups; each group is the first-column range
             followed by the best per-column results
    """
    result = []
    abort_search = False
    other_series = m_function.drop(list(m_function.columns)[0], axis=1)
    process = FindWithPool(mos[mos.columns[0]], m_function[m_function.columns[0]], zoom_limits=zoom_limits,
                           min_scope=min_scope, zoom_step=zoom_step, step=step).use_process_pool()
    # Fetch each AsyncResult exactly once (the original called .get() twice per
    # item) and drop candidates whose scope tuple is empty (no match found).
    first_one_result = [r for r in (p.get() for p in process) if r["scope"]]
    for i in first_one_result:
        transverse_connecting_line = [i['scope']]
        for index, column in enumerate(other_series.columns, 1):
            # Clamp [scope - deviation, scope + deviation] to the series bounds.
            min_scope_index = i['scope'][0] - deviation if i['scope'][0] - deviation > 0 else 0
            max_scope_index = i['scope'][1] + deviation + 1 if i['scope'][1] + deviation < len(m_function) else len(m_function)
            # Search this pattern column only inside the tolerated window.
            one_of_search_in_on_function = FindWithPool(mos[mos.columns[index]],
                                                        other_series[column][min_scope_index:max_scope_index],
                                                        zoom_limits=zoom_limits,
                                                        min_scope=min_scope,
                                                        zoom_step=zoom_step, step=step).use_process_pool()
            one_of_search_in_on_function = [proce.get() for proce in one_of_search_in_on_function]
            if not one_of_search_in_on_function:
                break
            # Keep only the best (lowest-distance) result for this column.
            min_score = min(one_of_search_in_on_function, key=lambda x: x['similarity'])
            if min_score['similarity'] != np.inf:
                transverse_connecting_line.append(min_score)
            else:
                # NOTE(review): an all-inf column aborts the WHOLE search, not
                # just this candidate — behavior preserved from the original;
                # confirm whether `continue`-ing to the next candidate was
                # actually intended.
                abort_search = True
                break

        if len(transverse_connecting_line) == mos.shape[1]:
            # Every column matched within tolerance: keep the group.
            result.append(transverse_connecting_line)

        if abort_search:
            break

    return result


def use_DTW_find_range_in_another_serice(series, another_series, zoom_limits: int = 300, min_scope: int = 200,
                                         step: int = 80, zoom_step: int = 100, queue=None) -> dict:
    """
    Find the region of ``another_series`` most similar to ``series``.

    Note: both series are expected to have zero-based indices, otherwise dtw
    raises ``D1[i, j] = dist(x[i], y[j]) KeyError: 0``.

    :param series: the series to look for
    :param another_series: the series to search in
    :param zoom_limits: how much the comparison window may grow/shrink
                        relative to ``len(series)``
    :param min_scope: minimum size of the comparison window in another_series
    :param step: sliding stride; smaller is finer but slower
    :param zoom_step: stride used when growing the window size
    :param queue: optional multiprocessing queue to publish the result on
    :return: best match {'scope': (start, end), 'similarity': lowest distance
             (smaller = more similar), 'shortest_path': shortest-path matrix}
    """
    # Reset the index of series so dtw sees positions 0..n-1.
    series = series.reset_index(drop=True)

    result = {
        'scope': ()
        , 'similarity': np.inf
        # , 'shortest_path': None
    }
    # Grow the window size from max(len - zoom_limits, min_scope) up to
    # min(len + zoom_limits, len(another_series)).
    for i in range(len(series) - zoom_limits if len(series) - zoom_limits > min_scope else min_scope,
                   len(series) + zoom_limits if len(series) + zoom_limits < len(another_series) else len(
                           another_series), zoom_step):
        # Slide a window of size i over another_series, front to back.
        for j in range(another_series.index[0], another_series.index[-1], step):
            # dist is the absolute point-to-point (Euclidean) distance.
            dist, cost_matrix, acc_cost_matrix, path = dtw(series, another_series[j:j + i].reset_index(drop=True),
                                                           dist=lambda x, y: np.abs(x - y))
            # print(dist, path)
            # Keep the new optimum.
            if dist < result['similarity']:
                result['similarity'] = dist
                # NOTE(review): j already starts at another_series.index[0],
                # so adding index[0] again double-counts the offset unless the
                # index starts at 0 (as the docstring requires) — confirm.
                result['scope'] = (another_series.index[0] + j, another_series.index[0] + j + i)
                # result['shortest_path'] = path

    if queue:
        queue.put(result)

    return result


def pool_function(args):
    """Adapter for pool map-style APIs: ``args`` is a ``(positional, keyword)``
    pair that is unpacked and forwarded to
    ``use_DTW_find_range_in_another_serice``."""
    positional, keyword = args[0], args[1]
    return use_DTW_find_range_in_another_serice(*positional, **keyword)


def multiprocess_dtw(series, another_series, zoom_limits: int = 300, min_scope: int = 200, step: int = 80,
                     zoom_step: int = 100):
    """
    Multi-process search for the region of ``another_series`` most similar to
    ``series``: the target is split into one chunk per worker process, each
    chunk is searched independently, and windows straddling the chunk seams
    are scanned in the parent process.

    Note: both series are expected to have zero-based indices, otherwise dtw
    raises ``D1[i, j] = dist(x[i], y[j]) KeyError: 0``.

    :param series: the series to look for
    :param another_series: the series to search in
    :param zoom_limits: how much the comparison window may grow/shrink
    :param min_scope: minimum comparison window in another_series
    :param step: sliding stride; smaller is finer but slower
    :param zoom_step: stride used when growing the window size
    :return: list of per-chunk best matches plus the best seam match, each
             {'scope': (start, end), 'similarity': lowest distance}
    """
    result = []
    # Leave one core for the parent, but guard against single-core machines
    # where os.cpu_count() - 1 would be 0 (division by zero just below).
    use_cpu_count = max(1, os.cpu_count() - 1)
    # Chunk length for each worker; the division remainder goes to the last chunk.
    another_series_count = len(another_series) // use_cpu_count
    # A Queue carries each child's result back to the parent.
    queue = Queue()
    process = []
    for i in range(use_cpu_count):
        start = i * another_series_count
        end = (i + 1) * another_series_count if i < use_cpu_count - 1 else len(another_series)
        p = Process(target=use_DTW_find_range_in_another_serice,
                    args=(series.reset_index(drop=True), another_series[start:end]),
                    kwargs={"zoom_limits": zoom_limits, "min_scope": min_scope, "zoom_step": zoom_step, "step": step,
                            "queue": queue}
                    )
        p.start()
        process.append(p)

    # Reset the index of series for dtw.
    series = series.reset_index(drop=True)
    # Scan windows straddling the chunk boundaries the split cut through.
    # There are use_cpu_count - 1 seams, at multiples of the chunk length.
    gap_result = {
        'scope': (), 'similarity': np.inf, 'shortest_path': None
    }
    # BUGFIX: the original iterated range(1, another_series_count), i.e. once
    # per row of a chunk instead of once per seam.
    for j in range(1, use_cpu_count):
        # k is the window size tried around this seam.
        for k in range(2 * zoom_step, len(series) + zoom_limits if len(
                series) + zoom_limits < another_series_count else another_series_count, zoom_step):
            cut_point = j * another_series_count
            for m in range(cut_point - k + step, cut_point + step, step):
                # Slide the fixed-size window across the seam (one side only).
                if cut_point + k - step >= len(another_series):
                    # Do not run past the end of the series.
                    break
                dist, _, acc_cost_matrix, path = dtw(series, another_series[m:m + k].reset_index(drop=True),
                                                     dist=lambda x, y: np.abs(x - y))
                if dist < gap_result['similarity']:
                    gap_result['similarity'] = dist
                    # Range expressed in the original series' indices.
                    gap_result['scope'] = (m, m + k)
                    # gap_result['shortest_path'] = path

    # Drain the queue BEFORE joining: a child blocks until its queued result
    # is consumed, so joining first could deadlock.
    for i in process:
        result.append(queue.get())
    for i in process:
        i.join()

    result.append(gap_result)
    return result


class FindWithPool:
    """Search for ``series`` inside ``another_series`` with a process pool:
    each pool task compares one window size (``zoom``) over the whole target
    series and returns the best match for that size."""

    def __init__(self, series, another_series, cpu_count=None, zoom_limits: int = 300, min_scope: int = 200,
                 step: int = 80, zoom_step: int = 100, find_series_progressBar=None, *args, **kwargs):
        """
        :param series: the series to look for
        :param another_series: the series to search in
        :param cpu_count: number of worker processes (defaults to os.cpu_count())
        :param zoom_limits: how much the window may grow beyond len(series)
        :param min_scope: minimum comparison window in another_series
        :param step: sliding stride; smaller is finer but slower
        :param zoom_step: stride used when growing the window size
        :param find_series_progressBar: optional progress-bar hook
        """
        self.find_series_progressBar = find_series_progressBar
        # Number of pool tasks that have completed so far.
        self.successful_process = 0

        # Reset the index so the dtw algorithm sees positions 0..n-1.
        self.series = series.reset_index(drop=True)
        self.another_series = another_series
        self.cpu_count = cpu_count if cpu_count else os.cpu_count()
        self.zoom_limits = zoom_limits
        self.min_scope = min_scope
        self.step = step
        self.zoom_step = zoom_step

        # Smallest window size to try (never below 1).
        self.min = len(self.series) - self.min_scope if (len(
            self.series) - self.min_scope) > 1 else 1
        # Largest window size to try (capped by the target's length).
        self.max = len(self.series) + self.zoom_limits if len(self.series) + self.zoom_limits < len(
            self.another_series) else len(self.another_series)

        # Total number of pool tasks to be submitted (one per window size).
        self.count_process = (self.max - self.min) // zoom_step

    @staticmethod
    def is_similar(result: dict, current_result):
        """
        Decide whether two windows are similar; override this (e.g. with a
        trained model) to keep different results.  Mutates ``result`` in place
        when ``current_result`` has a lower distance.
        :param result: the accumulated best result (currently a single dict)
        :param current_result: the result of the window just compared
        """
        if current_result['similarity'] < result['similarity']:
            result['similarity'] = current_result['similarity']
            result['scope'] = current_result['scope']

    @staticmethod
    def dtw(series, another_series, step, zoom, is_similar):
        """
        Handle one window size only: slide a window of ``zoom`` rows over
        ``another_series`` and return the best match for ``series``.
        :param series: the series to look for
        :param another_series: where to search; keeps its original index/rows
        :param step: sliding stride
        :param zoom: window size inside another_series
        :param is_similar: callback deciding which result to keep
        :return: best match {'scope': (start, end), 'similarity': lowest
                 distance (smaller = more similar)}
        """
        # 'scope' stays () and 'similarity' stays inf when no window fits;
        # callers filter on the empty tuple.
        result = {
            'scope': (),
            'similarity': np.inf
            # , 'shortest_path': None
        }
        another_series_start_zero = another_series.reset_index(drop=True)
        max_value = len(another_series)
        # NOTE(review): when zoom >= max_value the range below is empty and an
        # empty scope is returned — small inputs produce no result here.
        for j in range(0, max_value - zoom, step):
            # j is a zero-based position into the reset copy; slicing the
            # original (non-zero-based) index would yield empty slices.
            # Guard against overrun: zoom is not necessarily a multiple of step.
            if j + zoom >= max_value:
                break
            # Compare front to back.
            dist, cost_matrix, acc_cost_matrix, path = dtw(series, another_series_start_zero[j:j + zoom].reset_index(drop=True),
                                                           dist=lambda x, y: np.abs(x - y))
            # Report the window in the ORIGINAL index labels; end is inclusive.
            is_similar(result, {'scope': (another_series.index[j], another_series.index[j] + zoom-1),
                                     'similarity': dist})

        return result

    @abstractmethod
    def on_process_status_change(self):
        # Hook invoked after each pool task completes; override to drive a
        # progress bar.  NOTE(review): @abstractmethod has no effect here
        # because the class does not use ABCMeta — it is only a marker.
        pass

    def call_back(self):
        # Build the completion callback handed to Pool.apply_async (closure
        # over self so the counter survives across tasks).
        def run(res):
            """
            :param res: result returned by the finished task (unused for now)
            """
            self.successful_process += 1
            self.on_process_status_change()
            # print(self.successful_process)

        return run

    def use_process_pool(self):
        """Submit one task per window size and return the list of AsyncResult
        objects; call .get() on each to obtain the match dict."""
        process = []
        pool = Pool(processes=self.cpu_count)

        for zoom in range(self.min, self.max, self.zoom_step):
            result = pool.apply_async(FindWithPool.dtw, (self.series, self.another_series, self.step, zoom, FindWithPool.is_similar),
                                      callback=self.call_back())
            process.append(result)
        pool.close()
        pool.join()

        return process


class MultiToMulti:
    """Match a multi-column pattern frame against a multi-column target frame:
    the first columns are matched with a process pool, then every candidate
    range is validated against the remaining columns within ``deviation``
    rows."""

    def __init__(self, multi_series, another_multi_series, cpu_count=None, deviation: int = 200,
                 zoom_limits: int = 300, min_scope: int = 200, step: int = 80, zoom_step: int = 100):
        """
        :param multi_series: DataFrame of pattern curves
        :param another_multi_series: DataFrame of target curves to search in
        :param cpu_count: number of worker processes (defaults to os.cpu_count())
        :param deviation: allowed row offset around the first-column match
        :param zoom_limits: window-size slack (see FindWithPool)
        :param min_scope: minimum comparison window in the target series
        :param step: sliding stride; smaller is finer but slower
        :param zoom_step: stride used when growing the window size
        """
        self.multi_series = multi_series
        self.another_multi_series = another_multi_series
        self.cpu_count = cpu_count if cpu_count else os.cpu_count()
        self.deviation = deviation
        self.zoom_limits = zoom_limits
        self.min_scope = min_scope
        self.step = step
        self.zoom_step = zoom_step

        # Match the first pattern column against the first target column.
        process = FindWithPool(self.multi_series[self.multi_series.columns[0]],
                               self.another_multi_series[self.another_multi_series.columns[0]],
                               zoom_limits=self.zoom_limits,
                               min_scope=self.min_scope,
                               zoom_step=self.zoom_step,
                               step=step).use_process_pool()
        # Fetch each AsyncResult exactly once (the original called .get()
        # twice per item) and drop results with an empty scope tuple.
        self.first_one_result = [r for r in (p.get() for p in process) if r["scope"]]

        self.find_series_progressBar = len(self.first_one_result)
        # Number of validation tasks completed so far.
        self.successful_process = 0

    @abstractmethod
    def on_one_process_end(self, result):
        # Hook invoked after each validation task completes; override to
        # drive a progress bar.  NOTE(review): @abstractmethod is only a
        # marker here (the class does not use ABCMeta).
        pass

    @staticmethod
    def validate_one_result(multi_series, another_multi_series, deviation, zoom_limits, min_scope, zoom_step, step, group):
        """
        Validate one candidate group: every remaining column must match its
        target column within ``deviation`` rows of ``group``.
        :param group: (start, end) range matched in the first column
        :return: list of per-column matched ranges, or [] when any column fails
        """
        validata_result = []
        for index, column in enumerate(another_multi_series.columns, 1):
            # Clamp [group - deviation, group + deviation] to the series bounds.
            # (Renamed from min/max: do not shadow the builtins.)
            lower = group[0] - deviation if group[0] - deviation > 0 else 0
            upper = group[1] + deviation + 1 if group[1] + deviation < len(another_multi_series) else len(another_multi_series)
            # Is this column's pattern present inside the tolerated window?
            one_of_search_in_on_function = use_DTW_find_range_in_another_serice(
                multi_series[multi_series.columns[index]],
                another_multi_series[column][lower:upper],
                zoom_limits=zoom_limits,
                min_scope=min_scope,
                zoom_step=zoom_step, step=step)
            if one_of_search_in_on_function["scope"]:
                validata_result.append(one_of_search_in_on_function['scope'])
            else:
                # A single miss invalidates the whole group.
                return []
        return validata_result

    def call_back(self):
        # Build the completion callback handed to Pool.apply_async.
        def run(res):
            """
            :param res: result returned by the finished validation task
            """
            self.successful_process += 1
            self.on_one_process_end(res)
        return run

    def use_process_pool(self):
        """Validate every first-column candidate in parallel.
        :return: list of validated groups (each a list of per-column ranges)
        """
        process = []
        pool = Pool(processes=self.cpu_count)

        for group in self.first_one_result:
            # BUGFIX: drop the first (already matched) COLUMN.  The original
            # passed self.another_multi_series[:][1:], which slices off the
            # first ROW instead, so the column index in validate_one_result
            # ran out of range on the last column.
            result = pool.apply_async(MultiToMulti.validate_one_result,
                                      (self.multi_series, self.another_multi_series.iloc[:, 1:], self.deviation,
                                       self.zoom_limits, self.min_scope, self.zoom_step, self.step, group["scope"]),
                                      callback=self.call_back())
            process.append(result)
        pool.close()
        pool.join()

        exit_group_result = []
        for task in process:
            # Fetch each AsyncResult once; keep only non-empty groups.
            group_result = task.get()
            if group_result:
                exit_group_result.append(list(group_result))
        return exit_group_result


if __name__ == '__main__':
    # Demo run: match pattern columns 30..49 of rows 420..439 against target
    # columns 50..69 of rows 370 onwards.
    a = MultiToMulti(data[420:440][data.columns[30:50]], data[370:][data.columns[50:70]], deviation=50, zoom_limits=10, min_scope=10, zoom_step=5, step=50)
    print(a.use_process_pool())

    # res = []
    # a = FindWithPool(data[430:440][data.columns[30]], data[350:][data.columns[30]], zoom_limits=1, min_scope=0, zoom_step=2, step=2)
    # process = a.use_process_pool()
    # for i in process:
    #     print(i.get())

    # This class works fine:
    # a = FindWithPool(data[400:440][data.columns[30]].reset_index(drop=True), data[:][data.columns[30]], step=2,
    #                  min_scope=10, zoom_step=2, zoom_limits=20
    #             ).use_process_pool()
    # for i in a:
    #     print(i.get())

    # Fix for small inputs returning an empty array:
    # a = FindWithPool.dtw(data[430:440][data.columns[30]].reset_index(drop=True), data[350:][data.columns[50]], 2, 10,
    #                        FindWithPool.is_similar)
    # print(a)
    # plt.plot(data[430:440][data.columns[30]])
    # plt.plot(data[412:421][data.columns[50]])
    # plt.show()

    # # Workaround for the Series error when a slice does not start at 0:
    # data1 = data[430:440][data.columns[30]].reset_index(drop=True)
    # data2 = data[350:][data.columns[50]].reset_index(drop=True)
    #
    # dist, _, _, _ = dtw(data1, data2[5:15].reset_index(drop=True), dist=lambda x, y: np.abs(x - y))
    # print(dist)

    # a = FindWithPool(data[40:50][0].reset_index(drop=True), data[:][0], min_scope=10, zoom_limits=20, zoom_step=1, step=1)
    # res = a.use_process_pool()
    # for i in res:
    #     print(i.get())
