# -*- coding: utf-8 -*-
# ==========================
# @Time    : 2023/12/6
# @Author  : zhoutengwei
# @File    : storage.py
# ==========================
import json
import traceback
from typing import Any, Dict

import addict
import numpy as np
import ray.data
from fastapi import FastAPI
from fastapi.openapi.docs import (
    get_redoc_html,
    get_swagger_ui_html,
    get_swagger_ui_oauth2_redirect_html,
)
from fastapi.staticfiles import StaticFiles
from ray import serve

from modules.tn import TemporalNetwork, seconds_to_hms
from modules.payload import SearchModel
from utils.timer import Timer
from utils.logger import get_logger
# from storage import Boto3Storage

app = FastAPI()
# Swagger/ReDoc JS+CSS are served locally (see /docs and /redoc handlers below)
# from a hard-coded absolute path — assumes the container layout
# /work/server/search-server; TODO confirm against the deployment image.
app.mount("/static", StaticFiles(directory="/work/server/search-server/static"), name="static")
logger = get_logger()


# Configure log output to a file
# logger.add("log/app.log", rotation="50 MB")

@app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
    """Serve the Swagger UI page from the locally mounted static assets
    instead of the default CDN bundle."""
    page_title = app.title + " - Swagger UI"
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title=page_title,
        oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
        swagger_js_url="/static/swagger-ui-bundle.js",
        swagger_css_url="/static/swagger-ui.css",
    )


@app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
    """OAuth2 redirect helper page required by the self-hosted Swagger UI."""
    return get_swagger_ui_oauth2_redirect_html()


@app.get("/redoc", include_in_schema=False)
async def redoc_html():
    """Serve the ReDoc page from the locally mounted static bundle."""
    page_title = app.title + " - ReDoc"
    return get_redoc_html(
        openapi_url=app.openapi_url,
        title=page_title,
        redoc_js_url="/static/redoc.standalone.js",
    )


@serve.deployment
@serve.ingress(app)
class SearchDeployment:
    """Audio back-tracking search service.

    Given a query window inside an embedding matrix (presumably one row per
    second of audio — seconds_to_hms is used on row indices; TODO confirm),
    finds other positions in the same matrix whose embeddings match the
    window, refines the hits, and appends them to the file's metadata JSON
    under "backtracking_results".
    """

    def __init__(self):
        self.timer = Timer()

    def reconfigure(self, config: Dict[str, Any]):
        """Ray Serve user_config hook: copy tuning parameters onto the instance.

        Expected layout: config["model"]["params"]["tn_params" | "usr_params"].
        Called by Serve on deploy and whenever user_config changes.
        """
        cfg = addict.Dict(config)
        self.tn_params = cfg.model.params.tn_params
        self.usr_params = cfg.model.params.usr_params
        # Temporal-network search parameters.
        self.tn_top_k = self.tn_params.tn_top_k
        self.max_path = self.tn_params.max_path
        self.min_sim = self.tn_params.min_sim
        self.tn_max_step = self.tn_params.tn_max_step
        self.max_iou = float(self.tn_params.max_iou)
        self.min_length = self.tn_params.min_length
        # Refinement parameters used by predict/align_ranges.
        self.fake_step = self.usr_params.fake_step
        self.min_score = self.usr_params.min_score
        logger.info("reconfigure deployment")

    # NOTE(review): the route summary text looks copied from an upload/feature-
    # extraction endpoint; this handler performs similarity search — confirm.
    @app.post("/predict", summary="上传音频并提取特征")
    def predict(self,
                request: SearchModel) -> Dict:
        """Run a back-tracking search and persist results into the metadata file.

        Request fields used: noid, name, meta_path, start, duration (start and
        duration are row indices into the embedding matrix).
        Returns a status dict; "msg" is "success", "meta file not found", or a
        traceback string on failure (errors are reported, never raised).
        """
        noid = request.noid
        name = request.name
        meta_path = request.meta_path
        try:
            with open(meta_path, "r") as f:
                metadata = json.load(f)
        except FileNotFoundError:
            return {"noid": noid, "name": name, "meta_path": None, "msg": "meta file not found"}
        emb_path = metadata["embedding_saved_path"]
        start = request.start
        duration = request.duration
        # Record the query for audit. A stored JSON null must be replaced, so
        # normalize explicitly rather than using setdefault().
        query_info = {"start": start, "duration": duration,
                      "noid": noid, "name": name, "meta_path": meta_path}
        if metadata.get("query_info") is None:
            metadata["query_info"] = []
        metadata["query_info"].append(query_info)
        backtracking_results = metadata.get("backtracking_results", None)
        try:
            emb = np.load(emb_path)
            # Coarse query: clipped to fake_step frames and to the matrix end.
            query_emb = emb[start:min(emb.shape[0], start + min(duration, self.fake_step))]
            slice_points = self.get_slice_points(emb.shape[0], query_emb.shape[0])
            paired_points = list(zip(slice_points, slice_points[1:]))
            items = self.get_ray_items(query_emb, emb, paired_points)
            ds = ray.data.from_items(items).map(self.match_func, concurrency=6, num_cpus=1)
            # Restore chunk order explicitly; relies on get_ray_items assigning
            # sequential ids (was broken: every item carried id 0).
            rows = sorted(ds.take_all(), key=lambda x: x["id"])
            ranges_chunks = np.array([interval for r in rows for interval in r["ranges"]])

            # Fine query: the full requested window (not clipped to fake_step).
            q = emb[start:start + duration]
            outputs, sims = self.align_ranges(ranges_chunks, q, emb, self.min_score, self.fake_step)
            backtracking_results = self.merge_backtracking_results(backtracking_results, noid,
                                                                   outputs, sims, name)
            metadata["backtracking_results"] = backtracking_results
            # ensure_ascii=True kept for backward compatibility with existing
            # metadata files (non-ASCII names are stored escaped).
            with open(meta_path, "w", encoding="utf-8") as f:
                json.dump(metadata, f, indent=4, ensure_ascii=True)
            return {"noid": noid, "name": name, "meta_path": meta_path, "msg": "success"}
        except Exception:  # was a bare except: it also swallowed SystemExit/KeyboardInterrupt
            error_info = traceback.format_exc()
            logger.error(f"error occurs in audio search: {error_info}")
            return {"noid": noid, "name": name, "meta_path": meta_path, "msg": error_info}

    def get_ray_items(self, q_emb, emb, paired_points):
        """Build per-chunk work items for ray.data.

        Bug fix: each item now carries its own sequential id (previously a
        constant 0, which made predict's sort-by-id a no-op and left the chunk
        order dependent on Ray's task completion order).
        All items share one TemporalNetwork instance configured from
        reconfigure()'s parameters.
        """
        tn = TemporalNetwork(tn_top_k=self.tn_top_k, max_path=self.max_path, min_sim=self.min_sim,
                             tn_max_step=self.tn_max_step,
                             max_iou=self.max_iou, min_length=self.min_length)

        items = []
        for idx, (start_point, end_point) in enumerate(paired_points):
            items.append({"id": idx, "start": start_point, "end": end_point,
                          "q_emb": q_emb, "r_emb": emb[start_point:end_point], "func": tn})
        return items

    @staticmethod
    def match_func(row: Dict[str, Any]):
        """ray.data map function: run the temporal network on one chunk.

        Shifts each match's reference-side columns (indices 1 and 3) by the
        chunk's start offset so that ranges become absolute positions in the
        full embedding matrix.
        """
        tn = row["func"]
        q_emb = row["q_emb"]
        r_emb = row["r_emb"]
        start = row["start"]
        matches, sims = tn(q_emb, r_emb)

        ranges = []
        # matches is assumed to be a numpy array of integer index rows
        # (any() / fancy indexing below require that) — TODO confirm in tn.
        if matches.any():
            for match, sim in zip(matches, sims):
                match[[1, 3]] += start  # make reference indices absolute
                ranges.append(match)
        row["ranges"] = ranges
        return row

    @staticmethod
    def get_slice_points(shape, target):
        """Split [0, shape) into roughly equal chunks for parallel matching.

        The chunk count depends on the query length `target` (shorter queries
        get more, smaller chunks). Returns a sorted, de-duplicated array of
        boundary points that always includes 0 and `shape`.
        """
        def get_band(value):
            if value < 900:
                return 12
            elif value < 1800:
                return 4
            elif value < 2700:
                return 2
            else:  # 2700 and above (covers the 2700-3600 range and longer)
                return 1

        band = get_band(target)
        # Size of each chunk; 0 when shape < band, in which case np.unique
        # collapses the interior points and we fall back to a single chunk.
        step_size = shape // band

        # Interior boundaries plus the two ends.
        slice_points = [i * step_size for i in range(1, band)]
        slice_points += [0, shape]
        return np.unique(slice_points)

    @staticmethod
    def align_ranges(sorted_ranges, q, emb, min_score, fake_step):
        """Refine coarse match ranges by sliding the query window over emb.

        For each coarse range, scans candidate start offsets around it and
        keeps the offset with the highest mean row-wise cosine similarity
        between the query and the candidate window; offsets already claimed
        by an earlier range are skipped.

        Returns (alignments, sims): [start, end] frame pairs and their scores.
        """
        duration = q.shape[0]
        alignments = []
        sims = []
        # Offsets already claimed. Was a list — O(n) membership per check.
        used_starts = set()
        # Normalize the query rows once; the original re-normalized q on every
        # inner iteration, which is redundant after the first pass.
        q = q / np.linalg.norm(q, axis=1, keepdims=True)
        for range_ in sorted_ranges:
            sub_start = max(0, range_[1] - fake_step)
            sub_end = min(emb.shape[0], range_[3] + duration)
            steps = max(sub_end - duration, 0)
            max_sim = 0
            confirmed_id = 0
            for i in range(sub_start, steps):
                r = emb[i:i + duration]
                r = r / np.linalg.norm(r, axis=1, keepdims=True)
                # Row-wise dot products == diagonal of q @ r.T, without
                # materializing the full similarity matrix.
                diagonal = np.einsum("ij,ij->i", q[:r.shape[0]], r)
                average_diagonal = np.mean(diagonal)
                if average_diagonal > max_sim:
                    if i in used_starts:
                        continue
                    max_sim = average_diagonal
                    confirmed_id = i
                    # NOTE(review): intermediate (non-final) candidates are
                    # also recorded, blocking them for later ranges — this
                    # preserves the original behavior; confirm it is intended.
                    used_starts.add(confirmed_id)

            if max_sim > min_score:
                range_[1] = confirmed_id
                range_[3] = confirmed_id + duration
                # Was print(); route through the module logger instead.
                logger.info([seconds_to_hms(range_[1]), seconds_to_hms(range_[3])])
                alignments.append(range_[[1, 3]].tolist())
                sims.append(float(max_sim))

        return alignments, sims

    @staticmethod
    def filter_overlap(ranges, sims):
        """Drop the lower-similarity member of each overlapping adjacent pair.

        Mutates and returns the given lists.
        """

        def has_intersection(interval1, interval2):
            a1, b1 = interval1
            a2, b2 = interval2
            # Closed intervals intersect unless one ends before the other starts.
            return not (b1 < a2 or b2 < a1)

        # A set is required here: the old list could collect the same index
        # twice, and popping it twice removed an unrelated element.
        pop_idx = set()
        for i in range(1, len(ranges)):
            if has_intersection(ranges[i], ranges[i - 1]):
                if sims[i] > sims[i - 1]:
                    pop_idx.add(i - 1)
                else:
                    pop_idx.add(i)

        # Pop from the back so earlier indices stay valid.
        for idx in sorted(pop_idx, reverse=True):
            ranges.pop(idx)
            sims.pop(idx)

        return ranges, sims

    @staticmethod
    def filter_overlap_results(results):
        """Resolve overlaps between ranges belonging to different result entries.

        For every pair of entries, when two ranges intersect keep the
        containing interval (if one contains the other) or the one with the
        higher similarity; the loser is removed from its entry in place.
        """

        def has_intersection(interval1, interval2):
            a1, b1 = interval1
            a2, b2 = interval2
            # Closed intervals intersect unless one ends before the other starts.
            return not (b1 < a2 or b2 < a1)

        for i in range(len(results)):
            for j in range(i + 1, len(results)):
                ranges1 = results[i]["result"]
                ranges2 = results[j]["result"]
                if not ranges1 or not ranges2:
                    continue
                sims1 = results[i]["sims"]
                sims2 = results[j]["sims"]

                # Sets instead of lists: the old code could append the same
                # index repeatedly, and popping it twice deleted an unrelated
                # element. The intermediate to_remove matrix is also gone —
                # the decisions are accumulated directly.
                remove_id1 = set()
                remove_id2 = set()
                for m, interval1 in enumerate(ranges1):
                    for n, interval2 in enumerate(ranges2):
                        if not has_intersection(interval1, interval2):
                            continue
                        if interval1[0] >= interval2[0] and interval1[1] <= interval2[1]:
                            remove_id1.add(m)  # interval1 contained in interval2
                        elif interval1[0] <= interval2[0] and interval1[1] >= interval2[1]:
                            remove_id2.add(n)  # interval2 contained in interval1
                        elif sims1[m] < sims2[n]:
                            remove_id1.add(m)  # partial overlap: keep higher sim
                        else:
                            remove_id2.add(n)

                for idx in sorted(remove_id1, reverse=True):
                    results[i]["result"].pop(idx)
                    results[i]["sims"].pop(idx)

                for idx in sorted(remove_id2, reverse=True):
                    results[j]["result"].pop(idx)
                    results[j]["sims"].pop(idx)
        return results

    @staticmethod
    def parse_s3url(url: str):
        """Split "s3://bucket/key/..." into (bucket, key).

        Assumes the "s3://" prefix is present; a URL without a "/" after the
        bucket raises ValueError from the unpacking.
        """
        bucket_name, object_name = url[len("s3://"):].split('/', 1)
        return bucket_name, object_name

    def merge_backtracking_results(self, backtracking_results, noid, outputs, sims, name):
        """Append this query's de-overlapped hits to the accumulated results,
        then reconcile overlaps across all stored entries."""
        outputs, sims = self.filter_overlap(outputs, sims)
        entry = {"noid": noid, "name": name, "result": outputs, "sims": sims, "msg": "success"}
        if backtracking_results is None:
            backtracking_results = [entry]
        else:
            backtracking_results.append(entry)
            backtracking_results = self.filter_overlap_results(backtracking_results)
        logger.info(backtracking_results)
        return backtracking_results


# Rebinds the module-level name `app` (previously the FastAPI instance) to the
# Ray Serve deployment graph node — presumably the target of `serve run
# <module>:app`; verify against the deployment config.
app = SearchDeployment.bind()
