import json
import os.path
import tempfile
import time

import numpy as np

from typing import Dict, Any

import ray
from utils.logger import get_logger
from utils.timer import Timer
from utils.timestamp_process import seconds_to_hms
from utils.tools import parse_s3url, read_metadata
# from core import Predictor
from .tn import TemporalNetwork
from addict import Dict as ADict

logger = get_logger()


class BacktrackingPredictor:
    """Locate occurrences of query clips inside a reference embedding sequence.

    For each task batch this predictor loads the reference metadata/embedding,
    coarse-matches every query clip against the reference with a
    ``TemporalNetwork`` (parallelised over embedding slices via Ray Data),
    refines the coarse matches by sliding the full query over the reference,
    resolves overlaps between queries, and writes the merged results back into
    the task's metadata JSON file.
    """

    def __init__(self, cfg: Dict[str, Any]):
        # Wrap the raw config so nested keys are attribute-accessible.
        self.cfg = ADict(cfg)
        self.tn_params = self.cfg.model.params.tn_params
        self.usr_params = self.cfg.model.params.usr_params
        # Temporal-network (coarse matching) parameters.
        self.tn_top_k = self.tn_params.tn_top_k
        self.max_path = self.tn_params.max_path
        self.min_sim = self.tn_params.min_sim
        self.tn_max_step = self.tn_params.tn_max_step
        self.max_iou = float(self.tn_params.max_iou)
        self.min_length = self.tn_params.min_length
        # Alignment-refinement parameters: fake_step caps the query prefix used
        # for coarse matching; min_score is the acceptance threshold.
        self.fake_step = self.usr_params.fake_step
        self.min_score = self.usr_params.min_score
        self.timer = Timer()

    def __call__(self, batch: Dict[str, Any]) -> Dict:
        """Run backtracking for one task described by ``batch``.

        Expects ``batch`` to carry ``task_id``, ``meta_path`` and
        ``query_info`` (a list of query dicts with ``noid``, ``name``,
        ``start``, ``duration``, ``meta_path``).  On any processing error the
        metadata is marked ``backtracking_progress = "failed"`` but is still
        written back.  Returns the input ``batch`` unchanged so the call can
        be chained in a Ray Data pipeline.
        """
        task_id = batch['task_id']
        try:
            with open(batch['meta_path'], 'r') as f:
                ref_meta = json.load(f)
        except FileNotFoundError:
            logger.error(f"could not find meta file for {task_id}")
            return batch
        try:
            ref_meta["backtracking_progress"] = "processing"
            logger.info(f"start backtracking for task {task_id}")
            # Reference embedding: one row per time step.
            r = np.load(ref_meta["embedding_saved_path"])

            # Fetch or initialise the versioned result containers.
            versions = ref_meta.setdefault("versions", {})
            backtracking_results = versions.setdefault("backtracking_results", {})
            versions["changed_info"] = {}
            changed_info = versions["changed_info"]
            changed_info["deleted_set"] = []
            changed_info["updated_set"] = []

            # Fetch or extend query_info with this batch's queries.
            query_info = ref_meta.setdefault("query_info", [])
            query_info.extend(batch["query_info"])

            deleted_set = versions.setdefault("deleted_set", [])

            for query in batch["query_info"]:
                noid = query["noid"]
                name = query["name"]
                query_start = query["start"]
                with open(query['meta_path'], 'r') as f:
                    query_meta = json.load(f)
                query_emb = np.load(query_meta["embedding_saved_path"])
                # Coarse pass uses only a short query prefix (at most
                # fake_step frames) to keep the temporal network cheap.
                q = query_emb[
                    query_start:min(query_emb.shape[0], query_start + min(query["duration"], self.fake_step))]

                # Split the reference into chunks and match each chunk in
                # parallel with Ray Data workers.
                slice_points = self.get_slice_points(r.shape[0], q.shape[0])
                paired_points = list(zip(slice_points, slice_points[1:]))
                logger.info(paired_points)
                items = self.get_ray_items(q, r, paired_points)
                ds = ray.data.from_items(items).map(self.match_func, concurrency=6, num_cpus=1)
                rows = ds.take_all()
                # Restore chunk order before flattening the candidate ranges.
                rows = sorted(rows, key=lambda x: x["id"])
                ranges_chunks = np.array([interval for row in rows for interval in row["ranges"]])

                # Refinement pass uses the full-length query.
                q = query_emb[query_start:query_start + query["duration"]]
                outputs, sims = self.align_ranges(ranges_chunks, q, r, self.min_score, self.fake_step)
                self.merge_backtracking_results(backtracking_results, changed_info,
                                                noid, outputs, sims, name)

            # Queries whose results were entirely superseded are dropped.
            deleted_set.extend(changed_info["deleted_set"])
            query_info = [qi for qi in query_info if qi["noid"] not in deleted_set]
            logger.info(f"pre results: {backtracking_results}")
            backtracking_results = {k: v for k, v in backtracking_results.items() if k not in deleted_set}
            logger.info(f"current results: {backtracking_results}")
            ref_meta["query_info"] = query_info
            ref_meta["versions"]["backtracking_results"] = backtracking_results
            ref_meta["backtracking_progress"] = "completed"
            logger.info(f"backtracking for task {task_id} completed")
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; any processing error marks the task failed but
            # the metadata is written back below regardless.
            import traceback
            error_info = traceback.format_exc()
            logger.error(f"error occurs in backtracking: {error_info}")
            ref_meta["backtracking_progress"] = "failed"
        logger.info(ref_meta)
        ref_meta['update_status'] = 'backtracking'
        with open(batch['meta_path'], "w", encoding="utf-8") as f:
            json_data = json.dumps(ref_meta, indent=4, ensure_ascii=True)
            f.write(json_data)
        return batch

    def get_ray_items(self, q_emb, emb, paired_points):
        """Build the Ray Data work items: one per reference chunk.

        Each item carries the query embedding, the chunk of the reference
        embedding, the chunk's absolute start/end offsets and a shared
        ``TemporalNetwork`` instance used as the matching callable.
        """
        tn = TemporalNetwork(tn_top_k=self.tn_top_k, max_path=self.max_path, min_sim=self.min_sim,
                             tn_max_step=self.tn_max_step,
                             max_iou=self.max_iou, min_length=self.min_length)

        items = []
        for idx, (start_point, end_point) in enumerate(paired_points):
            items.append({"id": idx, "start": start_point, "end": end_point,
                          "q_emb": q_emb, "r_emb": emb[start_point:end_point], "func": tn})
        return items

    @staticmethod
    def match_func(row: Dict[str, Any]):
        """Ray map function: run the temporal network on one reference chunk.

        Shifts every match's reference indices (positions 1 and 3 — used as
        reference start/end downstream) from chunk-local to absolute
        coordinates, and stores them under ``row["ranges"]``.
        """
        tn = row["func"]
        q_emb = row["q_emb"]
        r_emb = row["r_emb"]
        start = row["start"]
        matches, sims = tn(q_emb, r_emb)

        ranges = []
        if matches.any():
            for match, sim in zip(matches, sims):
                match[[1, 3]] += start  # chunk-local -> absolute reference frame
                ranges.append(match)
        row["ranges"] = ranges
        return row

    @staticmethod
    def align_ranges(sorted_ranges, q, emb, min_score, fake_step):
        """Refine coarse match ranges by sliding the full query over them.

        For each candidate range, the full query ``q`` is slid over a window
        of the reference ``emb`` around the range; the start offset with the
        highest mean diagonal cosine similarity wins.  Ranges whose best
        similarity exceeds ``min_score`` are returned as ``[start, end]``
        pairs together with their similarity scores.
        """
        duration = q.shape[0]
        if duration < 20:
            # Short queries are prone to false positives: demand a near-exact match.
            min_score = 0.995
        # Row-normalize the query once; normalization is idempotent, so
        # hoisting it out of the loops preserves the original results while
        # avoiding redundant work per candidate offset.
        q = q / np.linalg.norm(q, axis=1, keepdims=True)
        alignments = []
        sims = []
        cache = set()  # start offsets already claimed by an earlier range
        for range_ in sorted_ranges:
            sub_start = max(0, range_[1] - fake_step)
            sub_end = min(emb.shape[0], range_[3] + duration)
            steps = max(sub_end - duration, 0)
            max_sim = 0
            confirmed_id = 0
            for i in range(sub_start, steps):
                window = emb[i:i + duration]
                window = window / np.linalg.norm(window, axis=1, keepdims=True)
                sim_map = np.dot(q, window.T)
                # Mean of the diagonal == average frame-wise cosine similarity
                # of query frame t against reference frame i+t.
                average_diagonal = np.mean(np.diag(sim_map))
                if average_diagonal > max_sim:
                    if i in cache:
                        continue
                    max_sim = average_diagonal
                    confirmed_id = i
                    cache.add(confirmed_id)
                    if max_sim > 0.999:
                        break  # effectively perfect; no better offset exists

            if max_sim > min_score:
                range_[1] = confirmed_id
                range_[3] = confirmed_id + duration
                alignments.append(range_[[1, 3]].tolist())
                sims.append(float(max_sim))

        return alignments, sims

    @staticmethod
    def filter_overlap(ranges, sims):
        """Filter overlapping ranges within a single query's results.

        Adjacent ranges that intersect keep only the higher-similarity one.
        Mutates and returns ``ranges`` and ``sims``.
        """

        def has_intersection(interval1, interval2):
            a1, b1 = interval1
            a2, b2 = interval2
            # Two closed intervals are disjoint iff one ends before the other starts.
            return not (b1 < a2 or b2 < a1)

        pop_idx = []
        for i in range(1, len(ranges)):
            if has_intersection(ranges[i], ranges[i - 1]):
                if sims[i] > sims[i - 1]:
                    pop_idx.append(i - 1)
                else:
                    pop_idx.append(i)

        # Pop from the back so earlier indices stay valid.
        for idx in sorted(pop_idx, reverse=True):
            logger.info(f"single node remove {ranges[idx], sims[idx]}")
            ranges.pop(idx)
            sims.pop(idx)

        return ranges, sims

    @staticmethod
    def filter_overlap_results(results, change_info):
        """Resolve overlaps BETWEEN different queries' results, in place.

        For every pair of queries, intersecting intervals are compared: a
        fully-contained interval loses; otherwise the lower-similarity one
        loses (ties and near-identical boundaries are kept).  Removed keys and
        modified keys are recorded in ``change_info``.
        """

        def has_intersection(interval1, interval2):
            a1, b1 = interval1
            a2, b2 = interval2
            # Two closed intervals are disjoint iff one ends before the other starts.
            return not (b1 < a2 or b2 < a1)

        def remove_items(results, key, remove_ids, change_info):
            # De-duplicate before popping: the same index may be marked once
            # per conflicting partner interval, and popping the same position
            # twice would delete the wrong entry or raise IndexError.
            for idx in sorted(set(remove_ids), reverse=True):
                logger.info(f'remove {results[key]["result"][idx]}')
                results[key]["result"].pop(idx)
                results[key]["sims"].pop(idx)
            if not results[key]["result"]:
                # Every interval of this query was superseded: drop the query.
                change_info["deleted_set"].append(key)
                change_info["updated_set"] = [item for item in change_info["updated_set"] if item != key]

        keys = list(results.keys())

        for i in range(len(keys)):
            for j in range(i + 1, len(keys)):

                key1 = keys[i]
                key2 = keys[j]
                ranges1 = results[key1]["result"]
                ranges2 = results[key2]["result"]
                if not ranges1 or not ranges2:
                    continue
                sims1 = results[key1]["sims"]
                sims2 = results[key2]["sims"]

                len1, len2 = len(ranges1), len(ranges2)
                # to_remove[m][n] == (drop_m, drop_n) for each interval pair.
                to_remove = [[(0, 0) for _ in range(len2)] for _ in range(len1)]

                for m in range(len1):
                    for n in range(len2):
                        interval1 = ranges1[m]
                        interval2 = ranges2[n]

                        if has_intersection(interval1, interval2):
                            if interval1[0] >= interval2[0] and interval1[1] <= interval2[1]:
                                # interval1 fully contained in interval2 -> drop interval1.
                                to_remove[m][n] = (1, 0)
                            elif interval1[0] <= interval2[0] and interval1[1] >= interval2[1]:
                                # interval2 fully contained in interval1 -> drop interval2.
                                to_remove[m][n] = (0, 1)
                            # Partial overlap: drop the lower-similarity side, but only
                            # when boundaries differ by more than 1 frame on both ends
                            # and the loser is not a perfect (sim >= 1.0) match.
                            elif sims1[m] < sims2[n] and sims1[m] < 1.0 and abs(
                                    interval1[0] - interval2[0]) > 1 and abs(interval1[1] - interval2[1]) > 1:
                                to_remove[m][n] = (1, 0)
                            elif sims1[m] >= sims2[n] and sims2[n] < 1.0 and abs(
                                    interval1[0] - interval2[0]) > 1 and abs(interval1[1] - interval2[1]) > 1:
                                to_remove[m][n] = (0, 1)

                remove_id1 = []
                remove_id2 = []
                # Collect per-side removal indices from the decision matrix.
                for m in range(len1):
                    for n in range(len2):
                        a, b = to_remove[m][n]
                        if a == 1:
                            remove_id1.append(m)
                        if b == 1:
                            remove_id2.append(n)

                if remove_id1:
                    change_info["updated_set"].append(key1)
                if remove_id2:
                    change_info["updated_set"].append(key2)

                remove_items(results, key1, remove_id1, change_info)
                remove_items(results, key2, remove_id2, change_info)

    def merge_backtracking_results(self, backtracking_results, change_info, noid, outputs, sims, name):
        """Insert one query's (overlap-filtered) results and reconcile with
        previously stored queries when any exist."""
        outputs, sims = self.filter_overlap(outputs, sims)
        had_previous = bool(backtracking_results)
        backtracking_results[f"{noid}"] = {"name": name,
                                           "result": outputs,
                                           "sims": sims,
                                           "msg": "success"}
        if had_previous:
            # Cross-query overlap resolution is only meaningful with >1 query.
            self.filter_overlap_results(backtracking_results, change_info)

    @staticmethod
    def get_slice_points(shape, target):
        """Split ``[0, shape]`` into chunks sized inversely to ``target``.

        Shorter queries (``target``) get more, smaller reference chunks so the
        parallel coarse matching stays balanced.  Returns a sorted, unique
        numpy array of cut points that always includes 0 and ``shape``.
        """

        def get_band(value):
            if value < 900:
                return 12
            elif value < 1800:
                return 4
            elif value < 2700:
                return 2
            else:  # covers the 2700-3600+ range
                return 1

        band = get_band(target)
        # Integer chunk size; may be 0 for tiny references, in which case
        # np.unique collapses everything to [0, shape].
        step_size = shape // band

        slice_points = [i * step_size for i in range(1, band)]
        slice_points += [0, shape]
        return np.unique(slice_points)