# -*- coding: utf-8 -*-
import argparse
import faiss
import sys

from tqdm import tqdm, trange

sys.path.append("..")
import ray
import networkx as nx
import numpy as np
import pandas as pd
from modules.tn import TemporalNetwork, seconds_to_hms
from utils.tools import make_valid_ranges, timestamp2int
from ray.data import DataContext


def build_relation_graph(all_chains):
    """
    Cluster match chains that follow each other closely in time.

    Starting from each unvisited chain, later chains whose start lies
    strictly within 3 units after the current chain's end — on both the
    query axis and the reference axis — are greedily linked, and the scan
    continues depth-first from each newly linked chain.  The connected
    groups that result are returned as clusters.

    :param all_chains: list of dicts, each holding a "ranges" entry of the
        form [q_start, r_start, q_end, r_end] (array-like of 4 numbers).
    :return: list of clusters ordered by the smallest chain index they
        contain; each cluster is a list of the input dicts.
    """
    n = len(all_chains)

    # Disjoint-set forest over chain indices (replaces the networkx graph).
    parent = list(range(n))

    def _find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    def _union(a, b):
        ra, rb = _find(a), _find(b)
        if ra != rb:
            parent[rb] = ra

    visited = set()  # chains already linked into some cluster

    def _grow_cluster(root):
        # Iterative depth-first scan replacing the original recursion, so a
        # long run of consecutive chains cannot exhaust the Python stack.
        # Each frame is [current_node, next_candidate_index]; candidates are
        # only scanned forward, matching the original traversal order.
        stack = [[root, root + 1]]
        while stack:
            frame = stack[-1]
            cur, j = frame
            if j >= n:
                stack.pop()
                continue
            frame[1] = j + 1
            if j in visited:
                continue
            src = all_chains[cur]["ranges"]
            snk = all_chains[j]["ranges"]
            # Link when j starts within (0, 3) after cur ends on both axes.
            if 0 < snk[0] - src[2] < 3 and 0 < snk[1] - src[3] < 3:
                _union(cur, j)
                visited.add(j)
                stack.append([j, j + 1])

    for i in range(n):
        if i not in visited:
            _grow_cluster(i)

    # Group indices by their disjoint-set root; because indices are visited
    # in ascending order, dict insertion order yields clusters sorted by
    # their smallest member index.
    groups = {}
    for idx in range(n):
        groups.setdefault(_find(idx), []).append(idx)

    return [[all_chains[idx] for idx in members] for members in groups.values()]


def merge_matches(matches):
    """
    Fuse overlapping / near-adjacent match ranges into a minimal set.

    The raw matches are clustered with build_relation_graph, merged within
    each cluster, and the per-cluster results are merged once more
    globally.

    :param matches: sequence of ranges [q_start, r_start, q_end, r_end].
    :return: np.ndarray of merged ranges, shape (k, 4); an empty (0, 4)
        array when no matches are given (the original crashed on
        np.concatenate of an empty list).
    """

    def _merge_intervals(intervals, miss_interval=2):
        # Collapse consecutive rows whose start/end gap is below the
        # tolerance on BOTH the query and reference axes.  Works in place
        # on an ndarray copy of the input.
        if isinstance(intervals, list):
            intervals = np.array(intervals)

        if len(intervals) == 1:
            return intervals

        row = 0
        while row < intervals.shape[0] - 1:
            cur = intervals[row]
            nxt = intervals[row + 1]
            close_q = abs(nxt[0] - cur[2]) < miss_interval
            close_r = abs(nxt[1] - cur[3]) < miss_interval
            if close_q and close_r:
                # Merged row spans the union of both ranges.
                intervals[row] = np.array([
                    min(cur[0], nxt[0]),
                    min(cur[1], nxt[1]),
                    max(cur[2], nxt[2]),
                    max(cur[3], nxt[3]),
                ])
                intervals = np.delete(intervals, row + 1, axis=0)
            else:
                row += 1

        return intervals

    if len(matches) == 0:
        # Nothing to merge; keep the (k, 4) contract for callers.
        return np.empty((0, 4), dtype=int)

    format_matches = [{"ranges": matches[i]} for i in range(len(matches))]
    format_matches = sorted(format_matches, key=lambda x: x["ranges"][0])

    clusters = build_relation_graph(format_matches)
    merged = []
    for cluster in clusters:
        ranges = sorted([info['ranges'] for info in cluster], key=lambda x: x[0])
        merged.append(_merge_intervals(ranges))

    return _merge_intervals(np.concatenate(merged))


class TNMacher:
    """
    Temporal-network based matcher.

    Indexes reference embedding windows with a faiss inner-product index
    and locates repetitions of a query segment via TemporalNetwork
    alignment.

    NOTE(review): the class name keeps its original spelling ("Macher")
    so existing callers are unaffected.
    """

    def __init__(self, tn_top_k=60, max_path=10, min_sim=0.65,
                 tn_max_step=5, min_length=5, max_iou=0.3, version=None, faiss_index=None):
        """
        :param tn_top_k / max_path / min_sim / tn_max_step / min_length /
            max_iou: hyper-parameters forwarded to TemporalNetwork.
        :param version: optional mapping of range_id -> [emb_start, emb_end)
            row span in the faiss index.  Defaults to a fresh dict — the
            original used a mutable default argument, so all instances
            silently shared one dict.
        :param faiss_index: optional pre-built faiss index.
        """
        self._tn_top_k = tn_top_k
        self._max_path = max_path
        self._min_sim = min_sim
        self._tn_max_step = tn_max_step
        self._min_length = min_length
        self._max_iou = max_iou
        self.version = {} if version is None else version
        self.faiss_index = faiss_index

    def set_emb(self, emb):
        """Attach the (num_frames, dim) embedding matrix used for both query and reference."""
        self.emb = emb

    def insert_emb(self, ranges_r):
        """
        Build a faiss inner-product index over the reference ranges.

        :param ranges_r: iterable of (start, end) frame ranges into the
            reference embedding matrix.
        """
        if not hasattr(self, 'emb_r'):
            self.emb_r = self.emb

        embs = []
        emb_id = 0
        for range_id in range(len(ranges_r)):
            start, end = ranges_r[range_id]
            duration = end - start
            embs.append(self.emb_r[start: end])
            emb_start = emb_id
            emb_id += duration
            # Half-open row span [emb_start, emb_id) for this range.
            self.version[range_id] = [emb_start, emb_id]
        train_set = np.concatenate(embs)
        self.faiss_index = faiss.IndexFlatIP(train_set.shape[1])
        # L2-normalized vectors + inner product == cosine similarity.
        faiss.normalize_L2(train_set)
        self.faiss_index.add(train_set)

    def match_emb(self, range_q, ranges_r):
        """
        Find occurrences of the query range across the indexed reference.

        :param range_q: (start, end) frame range of the query.
        :param ranges_r: the reference ranges the index was built from.
        :return: (ranges, sims) as produced by select().
        """
        if not hasattr(self, 'emb_q'):
            self.emb_q = self.emb

        if not hasattr(self, 'emb_r'):
            self.emb_r = self.emb
        # Copy before normalizing: faiss.normalize_L2 works in place, and
        # the original normalized a VIEW of self.emb_q, silently corrupting
        # the stored embedding rows for later queries.
        feature = self.emb_q[range_q[0]:range_q[1]].copy()
        faiss.normalize_L2(feature)

        # Search nearest indexed rows for every query frame.
        D, I = self.faiss_index.search(feature, len(ranges_r))
        # Vote per reference range, then align the best-voted candidates.
        votes = self.vote(D, I)
        ranges, sims = self.select(self.version, feature, votes, range_q, ranges_r)
        return ranges, sims

    @staticmethod
    def get_version_id(idx, version_info):
        """
        Map a faiss row index back to the range id whose span contains it.

        Spans are half-open [emb_start, emb_end) as stored by insert_emb,
        so the upper bound is exclusive — the original closed comparison
        (``<= v[1]``) mis-attributed boundary rows to the preceding range.

        :param idx: faiss row index.
        :param version_info: range_id -> [emb_start, emb_end) mapping.
        :return: the matching range id, or None when idx is out of range.
        """
        for k, v in version_info.items():
            if v[0] <= idx < v[1]:
                return k

        return None

    def select(self, version, query_path, votes, range_q, ranges_r):
        """
        Align the query against the best-voted reference ranges.

        :param version: range_id -> [emb_start, emb_end) span mapping.
        :param query_path: normalized query embedding window.
        :param votes: (emb_id, score) pairs from vote(), best first.
        :param range_q: (start, end) of the query in absolute frames.
        :param ranges_r: list of reference (start, end) ranges.
        :return: (ranges, scores) — match ranges [q_start, r_start, q_end,
            r_end] in absolute frame coordinates, and their similarities.
        """
        preds = {}
        ranges = []
        scores = []
        # Accumulate per-row vote scores onto their owning reference range.
        for emb_id, score in votes:
            vid = self.get_version_id(emb_id, version)
            if vid not in preds:
                preds[vid] = score
            else:
                preds[vid] += score

        final_preds = sorted(preds.items(), key=lambda item: item[1], reverse=True)

        contiguous_miss_count = 0

        for vid, score in tqdm(final_preds):
            # Skip the reference range that is the query itself.
            # NOTE(review): this equality only fires when range_q and the
            # entries of ranges_r have comparable types (tuple vs list never
            # compares equal) — confirm against make_valid_ranges' output.
            if range_q == ranges_r[vid]:
                continue
            start, end = ranges_r[vid]

            # Give up after too many consecutive ranges without a match.
            if contiguous_miss_count > 10:
                break

            tn = TemporalNetwork(tn_top_k=self._tn_top_k, max_path=self._max_path,
                                 min_sim=self._min_sim, tn_max_step=self._tn_max_step,
                                 min_length=self._min_length, max_iou=self._max_iou)
            # Presumably the shorter sequence must go first into the
            # TemporalNetwork; coordinates are un-swapped below when
            # reversed — TODO confirm against TemporalNetwork's contract.
            reverse = False
            if self.emb_r[start:end].shape[0] < query_path.shape[0]:
                reverse = True
            if reverse:
                matches, sims, sim_map = tn(self.emb_r[start:end], query_path)
            else:
                matches, sims, sim_map = tn(query_path, self.emb_r[start:end])
            if matches.any():
                contiguous_miss_count = 0
                for match, sim in zip(matches, sims):
                    if reverse:
                        # Un-swap query/reference columns.
                        match = match[[1, 0, 3, 2]]

                    # Convert from window-relative to absolute frames.
                    match[[1, 3]] += start
                    match[[0, 2]] += range_q[0]
                    ranges.append(match)
                    scores.append(sim)

            else:
                contiguous_miss_count += 1
        return ranges, scores

    @staticmethod
    def vote(D, I):
        """
        Sum similarity scores per faiss row index.

        :param D: faiss distance (similarity) matrix.
        :param I: faiss index matrix, same shape as D.
        :return: list of (row_index, total_score) sorted best-first.
        """
        votes = {}
        D = D.flatten()
        I = I.flatten()
        for score, ind in zip(D, I):
            if ind not in votes:
                votes[ind] = score
            else:
                votes[ind] += score

        votes = sorted(votes.items(), key=lambda x: x[1], reverse=True)
        return votes


def get_pair_items(ranges_q, ranges_r):
    """
    Pair every query range with the full reference range list.

    :param ranges_q: sequence of query (start, end) ranges.
    :param ranges_r: sequence of reference (start, end) ranges, attached
        whole to every item.
    :return: list of dicts with keys "range_id", "range_q", "ranges_r".
    """
    return [
        {"range_id": qid, "range_q": q_range, "ranges_r": ranges_r}
        for qid, q_range in enumerate(ranges_q)
    ]


def find_closest_time_id(times, target_time):
    """
    Locate the interval containing the target's start time and clip the
    target to it.

    :param times: sequence of (start, end) intervals, assumed ordered.
    :param target_time: (start, end) pair to place.
    :return: a (start, end) tuple, or None when no interval contains the
        target's start.  When the containing interval ends less than 5
        units after the target starts, the end is clipped against the
        *next* interval's end instead of the current one.

    NOTE(review): the last interval is never examined as a container
    (there is no following interval to spill into) — confirm intended.
    """
    target_start, target_end = target_time[0], target_time[1]
    for current, following in zip(times, times[1:]):
        if not (current[0] <= target_start <= current[1]):
            continue
        if current[1] - target_start < 5:
            return target_start, min(following[1], target_end)
        return target_start, min(target_end, current[1])


def cal_sim(emb1, emb2, start1, start2, step=5):
    """
    Mean cosine similarity between two aligned embedding windows.

    :param emb1: (frames, dim) embedding matrix for the first window.
    :param emb2: embedding matrix for the second window; None means
        compare emb1 against itself.
    :param start1: start frame of the first window.
    :param start2: start frame of the second window.
    :param step: window length in frames.
    :return: mean of the frame-wise cosine similarities.
    """
    if emb2 is None:
        emb2 = emb1

    window_a = emb1[start1:start1 + step]
    window_b = emb2[start2:start2 + step]
    # Row-normalize, then the diagonal of the cross-product holds the
    # frame-aligned cosine similarities.
    window_a = window_a / np.linalg.norm(window_a, axis=1, keepdims=True)
    window_b = window_b / np.linalg.norm(window_b, axis=1, keepdims=True)
    aligned_sims = np.diagonal(window_a @ window_b.T)
    return aligned_sims.mean()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--silent_path", type=str, default="data/silence_times.txt")
    parser.add_argument("--label_path", type=str, default="data/扬州新闻0310.csv")
    parser.add_argument("--emb_path", type=str,
                        default="/data/auto-match-dataset/yznews/287-2024-03-10-00-00-00.npy")
    parser.add_argument("--tn_top_k", type=int, default=120)
    parser.add_argument("--max_path", type=int, default=10)
    # BUG FIX: was type=int with a float default — "--min_sim 0.6" on the
    # command line would crash (int("0.6") raises ValueError).
    parser.add_argument("--min_sim", type=float, default=0.6)
    parser.add_argument("--tn_max_step", type=int, default=3)
    parser.add_argument("--min_length", type=int, default=5)
    parser.add_argument("--max_iou", type=float, default=1e-5)
    args = parser.parse_args()

    # Silence ray.data progress bars.
    DataContext.get_current().enable_progress_bars = False

    silent_timestamp = make_valid_ranges(args.silent_path)
    label_df = pd.read_csv(args.label_path)
    group_df = label_df.groupby('广告名称')  # NOTE(review): unused — kept for parity

    emb = np.load(args.emb_path)
    matcher = TNMacher(args.tn_top_k, args.max_path, args.min_sim, args.tn_max_step, args.min_length, args.max_iou)
    matcher.set_emb(emb)
    matcher.insert_emb(silent_timestamp)
    for idx, row in label_df.iterrows():
        # NOTE(review): hard-coded debug filters — only one ad name and one
        # start time are processed; remove for a full run.
        if "欧派铂尼思集团 120" not in row["广告名称"]:
            continue
        print(row['广告名称'])
        start = timestamp2int(row['开始时间'])
        if "12:43:46" not in row['开始时间']:
            continue
        end = timestamp2int(row['结束时间'])
        # Snap the labeled ad window to the silence-interval grid.
        time_range = find_closest_time_id(silent_timestamp, (start, end))
        print(seconds_to_hms(time_range[0]), seconds_to_hms(time_range[1]), time_range[1] - time_range[0])
        duration = end - start

        ranges, sims = matcher.match_emb(time_range, silent_timestamp)

        ranges = merge_matches(ranges)
        count = 0
        for range_ in ranges:
            # Re-score: cosine similarity between the query-side window
            # (starting at range_[0]) and the reference-side window
            # (starting at range_[1]) over the full ad duration.
            sim = cal_sim(emb, emb, range_[0], range_[1], step=duration)

            if sim > 0.67:
                # Extend the reference end to span the whole ad.
                range_[3] = range_[1] + duration
                print([seconds_to_hms(m) for m in range_], sim,
                      range_[2] - range_[0],
                      range_[3] - range_[1])
                count += 1

        print("predict count", count)
