from transformers import AutoTokenizer, AutoModel, AutoProcessor
from transformers import CLIPImageProcessor

from typing import Dict, Any
import torch
import cv2
import time
import numpy as np

from core.utils.infer_engine import OrtInferSession
from core.utils.transform import (
    custom_NMSBoxes,
    ResizePad,
)
from core.utils.load_image import LoadImage


# nomic-ai/nomic-embed-text-v1.5
class TextEmbedding:
    """Wraps a HuggingFace text encoder and exposes mean-pooled sentence embeddings."""

    def __init__(self, text_embedding_model_path):
        self.init_text_model(text_embedding_model_path)

    def init_text_model(self, text_embedding_model_path):
        """Load the tokenizer and encoder from a local checkpoint directory."""
        print('init text embedding model...')
        load_kwargs = dict(from_tf=False, local_files_only=True, trust_remote_code=True)
        self.text_tokenizer = AutoTokenizer.from_pretrained(text_embedding_model_path, **load_kwargs)
        self.text_model = AutoModel.from_pretrained(text_embedding_model_path, **load_kwargs).eval()

    def get_text_embedding(self, text):
        """Return a 1-D numpy vector: the token-mean of the encoder's last hidden state."""
        encoded = self.text_tokenizer(text, return_tensors='pt', padding=True, truncation=True)
        with torch.no_grad():
            outputs = self.text_model(**encoded)
        pooled = outputs.last_hidden_state.mean(dim=1)
        return pooled[0].detach().numpy()


# nomic-ai/nomic-embed-vision-v1.5
class ImageEmbedding:
    """Wraps a HuggingFace vision encoder and exposes mean-pooled image embeddings."""

    def __init__(self, image_embedding_model_path):
        self.init_image_model(image_embedding_model_path)

    def init_image_model(self, image_embedding_model_path):
        """Load the vision model and its CLIP image processor from a local checkpoint."""
        print('init image embedding model...')
        self.image_model = AutoModel.from_pretrained(
            image_embedding_model_path,
            from_tf=False, local_files_only=True, trust_remote_code=True,
        )
        self.image_processor = CLIPImageProcessor.from_pretrained(
            image_embedding_model_path,
            use_fast=True, from_tf=False, local_files_only=True, trust_remote_code=True,
        )
        self.image_model.eval()

    def get_image_embedding(self, image):
        """Return a 1-D numpy vector: the patch-mean of the encoder's last hidden state."""
        inputs = self.image_processor(images=image, return_tensors='pt')
        with torch.no_grad():
            outputs = self.image_model(**inputs)
        hidden = outputs.last_hidden_state
        return hidden.mean(dim=1).squeeze().cpu().numpy()


class Det:
    """ONNX object detector (single-confidence YOLO-style head) with letterbox preprocessing."""

    def __init__(self, config: Dict[str, Any]):
        self.model = OrtInferSession(config)
        self.img_loader = LoadImage()
        # Network input size used by the letterbox resize (square).
        self.resize_shape = [928, 928]

    def __call__(self, img, **kwargs):
        """Detect objects in *img*.

        Args:
            img: anything ``LoadImage`` accepts (path / bytes / ndarray).
            score: optional keyword — confidence threshold, default 0.4.

        Returns:
            (result, elapsed): ``result`` is a list of
            ``[confidence, np.array([xmin, ymin, xmax, ymax])]`` in
            original-image coordinates; ``elapsed`` is wall-clock seconds.
        """
        start = time.time()
        score = kwargs.get("score", 0.4)
        img = self.img_loader(img)
        ori_h, ori_w = img.shape[:-1]
        img, new_w, new_h, left, top = self.img_preprocess(img, self.resize_shape)
        pre = self.model([img])
        result = self.img_postprocess(
            pre, ori_w / new_w, ori_h / new_h, left, top, score
        )
        return result, time.time() - start

    def img_preprocess(self, img, resize_shape=(928, 928)):
        """Letterbox-resize and normalize *img* into an NCHW float32 batch of one.

        The default is a tuple rather than a list to avoid the shared
        mutable-default pitfall; indexing behaves identically.
        """
        # ResizePad is square, so only the first dimension is used.
        im, new_w, new_h, left, top = ResizePad(img, resize_shape[0])
        im = im / 255.0
        # HWC -> CHW, then add a batch dimension; the model expects float32.
        im = im.transpose((2, 0, 1)).copy()
        im = im[None, :].astype("float32")
        return im, new_w, new_h, left, top

    def img_postprocess(self, predict_maps, x_factor, y_factor, left, top, score):
        """Convert raw model output into NMS-filtered boxes.

        Args:
            predict_maps: raw session outputs; element 0 squeezes/transposes to
                (N, 5) rows of cx, cy, w, h, confidence — inferred from the
                indexing below, confirm against the exported model.
            x_factor / y_factor: scale from resized back to original pixels.
            left / top: letterbox padding offsets removed before scaling.
            score: confidence threshold.

        Returns:
            List of ``[confidence, np.array([xmin, ymin, xmax, ymax])]``.
        """
        # Transpose and squeeze so each row is one candidate detection.
        outputs = np.transpose(np.squeeze(predict_maps[0]))
        boxes = []
        scores = []
        for row in outputs:
            confidence = row[4]
            if confidence < score:
                continue
            x, y, w, h = row[0], row[1], row[2], row[3]
            # Undo letterbox padding and rescale to original-image pixels,
            # clamping the top-left corner into the image.
            xmin = max(int((x - w / 2 - left) * x_factor), 0)
            ymin = max(int((y - h / 2 - top) * y_factor), 0)
            # NOTE(review): xmax/ymax are offset from the *clamped* mins, so a
            # box clipped at the left/top edge keeps its full width/height
            # instead of being cropped — confirm this is intended.
            xmax = xmin + int(w * x_factor)
            ymax = ymin + int(h * y_factor)
            boxes.append([xmin, ymin, xmax, ymax])
            scores.append(confidence)
        # Non-maximum suppression drops overlapping candidates.
        indices = custom_NMSBoxes(boxes, scores)
        return [[scores[i], np.array(boxes[i])] for i in indices]


class TableDetector:
    def __init__(
        self,
        use_cuda=False,
        use_dml=False,
        model_path=None,
    ):
        self.img_loader = LoadImage()
        obj_det_config = {
            "model_path": model_path,
            "use_cuda": use_cuda,
            "use_dml": use_dml,
        }

        self.obj_detector = Det(obj_det_config)

    def __call__(
        self,
        img,
        det_accuracy=0.8,
    ):
        img = self.img_loader(img)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        h, w = img.shape[:-1]
        obj_det_res, pred_label = self.init_default_output(h, w)
        result = []
        obj_det_res, obj_det_elapse = self.obj_detector(img, score=det_accuracy)
        for i in range(len(obj_det_res)):
            det_res = obj_det_res[i]
            score, box = det_res
            xmin, ymin, xmax, ymax = box
            lb, lt, rb, rt = self.get_box_points(box)
            lb1, lt1, rb1, rt1 = self.get_real_rotated_points(
                lb, lt, pred_label, rb, rt
            )
            result.append(
                {
                    "box": [int(xmin), int(ymin), int(xmax), int(ymax)],
                    "lb": [int(lb1[0]), int(lb1[1])],
                    "lt": [int(lt1[0]), int(lt1[1])],
                    "rt": [int(rt1[0]), int(rt1[1])],
                    "rb": [int(rb1[0]), int(rb1[1])],
                }
            )
        return result

    def init_default_output(self, h, w):
        img_box = np.array([0, 0, w, h])
        # 初始化默认值
        obj_det_res, edge_box, pred_label = (
            [[1.0, img_box]],
            img_box.reshape([-1, 2]),
            0,
        )
        return obj_det_res, pred_label

    def add_pre_info_for_cls(self, cls_img, edge_box, xmin_cls, ymin_cls):
        """
        Args:
            cls_img:
            edge_box:
            xmin_cls:
            ymin_cls:

        Returns: 带边缘划线的图片，给方向分类提供先验信息

        """
        cls_box = edge_box.copy()
        cls_box[:, 0] = cls_box[:, 0] - xmin_cls
        cls_box[:, 1] = cls_box[:, 1] - ymin_cls
        # 画框增加先验信息，辅助方向label识别
        cv2.polylines(
            cls_img,
            [np.array(cls_box).astype(np.int32).reshape((-1, 1, 2))],
            True,
            color=(255, 0, 255),
            thickness=5,
        )

    def adjust_edge_points_axis(self, edge_box, lb, lt, rb, rt, xmin_edge, ymin_edge):
        edge_box[:, 0] += xmin_edge
        edge_box[:, 1] += ymin_edge
        lt, lb, rt, rb = (
            lt + [xmin_edge, ymin_edge],
            lb + [xmin_edge, ymin_edge],
            rt + [xmin_edge, ymin_edge],
            rb + [xmin_edge, ymin_edge],
        )
        return lb, lt, rb, rt

    def get_box_points(self, img_box):
        x1, y1, x2, y2 = img_box
        lt = np.array([x1, y1])  # 左上角
        rt = np.array([x2, y1])  # 右上角
        rb = np.array([x2, y2])  # 右下角
        lb = np.array([x1, y2])  # 左下角
        return lb, lt, rb, rt

    def get_real_rotated_points(self, lb, lt, pred_label, rb, rt):
        if pred_label == 0:
            lt1 = lt
            rt1 = rt
            rb1 = rb
            lb1 = lb
        elif pred_label == 1:
            lt1 = rt
            rt1 = rb
            rb1 = lb
            lb1 = lt
        elif pred_label == 2:
            lt1 = rb
            rt1 = lb
            rb1 = lt
            lb1 = rt
        elif pred_label == 3:
            lt1 = lb
            rt1 = lt
            rb1 = rt
            lb1 = rb
        else:
            lt1 = lt
            rt1 = rt
            rb1 = rb
            lb1 = lb
        return lb1, lt1, rb1, rt1

    def pad_box_points(self, h, w, xmax, xmin, ymax, ymin, pad):
        ymin_edge = max(ymin - pad, 0)
        xmin_edge = max(xmin - pad, 0)
        ymax_edge = min(ymax + pad, h)
        xmax_edge = min(xmax + pad, w)
        return xmin_edge, ymin_edge, xmax_edge, ymax_edge
