import base64
import math
import os
from typing import List

import numpy as np
import paddle
from ocr_process.infer.infer_engine import OrtInferSession
from paddlenlp.datasets import load_dataset
from paddlenlp.taskflow.utils import dbc2sbc, get_id_and_prob
from paddlenlp.transformers import ErnieLayoutTokenizer
from paddlenlp.utils.ie_utils import map_offset, pad_image_data
from paddlenlp.utils.log import logger
from paddlenlp.utils.tools import get_bool_ids_greater_than, get_span

from taskinfo.img_parser import ImgParser


class Uie:
    """UIE-X document information-extraction pipeline.

    OCRs an image (detection + recognition via ImgParser), then runs
    prompt-based span extraction with an exported UIE-X ONNX model.
    """

    def __init__(self, uie_model_path: str, det_model_path: str, rec_model_path: str, schema_list: list[str] = None, *, use_cuda=False,
                 use_openvino=False):
        """
        Args:
            uie_model_path: path to the exported UIE-X ONNX model.
            det_model_path: path to the OCR text-detection model.
            rec_model_path: path to the OCR text-recognition model.
            schema_list: extraction field names used as prompts; may also be
                set later via `set_schema_list`.
            use_cuda: run inference on GPU (keyword-only).
            use_openvino: use the OpenVINO execution provider (keyword-only).
        """
        # Thread budget of roughly half the cores.
        # NOTE(review): not referenced elsewhere in this file — confirm it is
        # consumed by the inference sessions.
        self._num_threads = math.ceil(os.cpu_count() / 2)
        # Must stay in sync with the tokenizer's max_length below.
        self._max_seq_len = 512
        self._batch_size = 32
        # Minimum start/end probability for a decoded span to be kept.
        self._position_prob = 0.5
        self._summary_token_num = 4  # [CLS] prompt [SEP] [SEP] text [SEP] for UIE-X
        self._img_parser = ImgParser(det_model_path, rec_model_path, use_cuda=use_cuda, use_openvino=use_openvino)
        self._schema_list = schema_list
        self._tokenizer = ErnieLayoutTokenizer.from_pretrained('uie-x-base',
                                                               do_tokenize_postprocess=True,
                                                               tokenizer_class="ErnieLayoutTokenizer",
                                                               do_lower_case=True, max_length=512
                                                               )
        # paddlenlp.transformers.auto.tokenizer.FAST_TOKENIZER_MAPPING_NAMES
        # self._tokenizer = AutoTokenizer.from_pretrained(r"C:\Users\liyh1\.paddlenlp\taskflow\information_extraction\uie-x-base", use_fast=True)

        config = {
            "use_cuda": use_cuda,
            "use_openvino": use_openvino,
            "model_path": uie_model_path
        }
        self.engine = OrtInferSession(config)

    def set_schema_list(self, schema_list: list[str]) -> None:
        """Replace the extraction schema (field names used as prompts by
        `_multi_stage_predict`)."""
        self._schema_list = schema_list

    @staticmethod
    def _auto_splitter(input_texts: List[str], max_text_len: int, bbox_list):
        """
        Split the raw texts automatically for model inference.
        Args:
            input_texts (List[str]): input raw texts.
            max_text_len (int): cutting length.
            bbox_list (List[List[float]]): bbox for document input.
        return:
            short_input_texts (List[str]): the short input texts for model inference.
            short_bbox_list (List[List[float]] or None): the short bbox list.
            input_mapping (dict): mapping between raw text and short input texts.
        """
        input_mapping = {}
        short_input_texts = []
        short_bbox_list = []

        cnt_org = 0
        cnt_short = 0

        for idx, text in enumerate(input_texts):
            text_len = len(text)
            if text_len <= max_text_len:
                short_input_texts.append(text)
                short_bbox_list.append(bbox_list[idx])
                input_mapping.setdefault(cnt_org, []).append(cnt_short)
                cnt_short += 1
            else:
                temp_text_list = [text[i: i + max_text_len] for i in range(0, text_len, max_text_len)]
                short_input_texts.extend(temp_text_list)

                # Assuming each bbox corresponds to the entire text
                short_bbox_list.extend([bbox_list[idx] for _ in range(len(temp_text_list))])

                short_idx = cnt_short
                cnt_short += math.ceil(text_len / max_text_len)
                temp_text_id = [short_idx + i for i in range(cnt_short - short_idx)]
                input_mapping.setdefault(cnt_org, []).extend(temp_text_id)
            cnt_org += 1
        return short_input_texts, short_bbox_list, input_mapping

    def _preprocess(self, img_name):
        """
        Transform the raw text to the model inputs, two steps involved:
           1) Transform the raw text to token ids.
           2) Generate the other model inputs from the raw text and token ids.
        """
        inputs_list = self._check_img(img_name)
        outputs = {"text": inputs_list}
        return outputs

    def _check_img(self, inputs):
        input_list = []
        data: dict = self._img_parser.parse(inputs)
        input_list.append(data)
        return input_list

    def _single_stage_predict(self, inputs):
        """
        Run one extraction pass for a batch of (text, prompt) examples.

        Steps:
          1) split over-long texts into chunks that fit the model window;
          2) tokenize each chunk with its prompt, per-character bboxes and
             page image into fixed-length model tensors;
          3) run the ONNX engine and decode start/end probabilities into
             character spans;
          4) re-join chunk results back onto the original texts.

        Args:
            inputs (list[dict]): dicts with keys "text", "prompt",
                "bbox" (one box per character) and base64-encoded "image".

        Returns:
            list[list[dict]]: per input, extracted spans with
                text/start/end/probability.
        """
        input_texts = [d["text"] for d in inputs]
        prompts = [d["prompt"] for d in inputs]

        # Max predict length must exclude the LONGEST prompt plus the summary
        # tokens ([CLS] prompt [SEP] [SEP] text [SEP] for UIE-X).
        # Fix: `len(max(prompts))` measured the lexicographically-greatest
        # prompt, not the longest one, so the chunk budget could be wrong
        # whenever prompts have different lengths.
        max_predict_len = self._max_seq_len - max(len(p) for p in prompts) - self._summary_token_num

        bbox_list = [d["bbox"] for d in inputs]
        short_input_texts, short_bbox_list, input_mapping = self._auto_splitter(
            input_texts, max_predict_len, bbox_list=bbox_list
        )

        # Replicate each prompt and image once per chunk of its source text.
        short_texts_prompts = []
        for k, v in input_mapping.items():
            short_texts_prompts.extend([prompts[k] for _ in range(len(v))])

        image_list = []
        for k, v in input_mapping.items():
            image_list.extend([inputs[k]["image"] for _ in range(len(v))])
        short_inputs = [
            {
                "text": short_input_texts[i],
                "prompt": short_texts_prompts[i],
                "bbox": short_bbox_list[i],
                "image": image_list[i],
            }
            for i in range(len(short_input_texts))
        ]

        def doc_reader(inputs, pad_id=1, c_sep_id=2):
            """Yield one tuple of padded model tensors per short example:
            (input_ids, token_type_ids, position_ids, attention_mask,
            bbox, image, offset_mapping)."""

            def _process_bbox(tokens, bbox_lines, offset_mapping, offset_bias):
                # Project each character-level bbox onto its token index;
                # tokens with no source character keep [0, 0, 0, 0].
                bbox_list = [[0, 0, 0, 0] for x in range(len(tokens))]

                for index, bbox in enumerate(bbox_lines):
                    index_token = map_offset(index + offset_bias, offset_mapping)
                    if 0 <= index_token < len(bbox_list):
                        bbox_list[index_token] = bbox
                return bbox_list

            def _encode_doc(tokenizer, offset_mapping, last_offset, prompt, this_text_line, inputs_ids, q_sep_index, max_length):
                # First text line: encode the (prompt, line) pair and re-bias
                # the pair offsets past the prompt; subsequent lines: encode
                # the line alone and splice its offsets after `last_offset`.
                if len(offset_mapping) == 0:
                    content_encoded_inputs = tokenizer(
                        text=[prompt],
                        text_pair=[this_text_line],
                        max_length=max_length,
                        return_dict=False,
                        return_offsets_mapping=True,
                    )

                    content_encoded_inputs = content_encoded_inputs[0]
                    inputs_ids = content_encoded_inputs["input_ids"][:-1]
                    sub_offset_mapping = [list(x) for x in content_encoded_inputs["offset_mapping"]]
                    # Index of the [SEP] (token id 2) closing the prompt segment.
                    q_sep_index = content_encoded_inputs["input_ids"].index(2, 1)

                    bias = 0
                    for i in range(len(sub_offset_mapping)):
                        if i == 0:
                            continue
                        mapping = sub_offset_mapping[i]
                        if mapping[0] == 0 and mapping[1] == 0 and bias == 0:
                            # First special-token gap: everything after it
                            # belongs to text_pair, shifted past the prompt.
                            bias = sub_offset_mapping[i - 1][-1] + 1
                        if mapping[0] == 0 and mapping[1] == 0:
                            continue
                        if mapping == sub_offset_mapping[i - 1]:
                            continue
                        sub_offset_mapping[i][0] += bias
                        sub_offset_mapping[i][1] += bias

                    offset_mapping = sub_offset_mapping[:-1]
                    last_offset = offset_mapping[-1][-1]
                else:
                    content_encoded_inputs = tokenizer(
                        text=this_text_line, max_seq_len=max_length, return_dict=False, return_offsets_mapping=True
                    )
                    inputs_ids += content_encoded_inputs["input_ids"][1:-1]
                    sub_offset_mapping = [list(x) for x in content_encoded_inputs["offset_mapping"]]
                    for i, sub_list in enumerate(sub_offset_mapping[1:-1]):
                        if i == 0:
                            org_offset = sub_list[1]
                        else:
                            # Non-adjacent source offsets (e.g. a gap between
                            # tokens) advance the running offset by one.
                            if sub_list[0] != org_offset and sub_offset_mapping[1:-1][i - 1] != sub_list:
                                last_offset += 1
                            org_offset = sub_list[1]
                        offset_mapping += [[last_offset, sub_list[1] - sub_list[0] + last_offset]]
                        last_offset = offset_mapping[-1][-1]
                return offset_mapping, last_offset, q_sep_index, inputs_ids

            for example in inputs:
                content = example["text"]
                prompt = example["prompt"]
                # NOTE(review): bbox and image are required by this reader; the
                # upstream taskflow's pure-text branch was removed here, so a
                # missing "bbox" would fail in the zip below — confirm callers
                # always supply document inputs.
                bbox_lines = example.get("bbox", None)
                image_buff_string = example.get("image", None)
                inputs_ids = []
                prev_bbox = [-1, -1, -1, -1]
                this_text_line = ""
                q_sep_index = -1
                offset_mapping = []
                last_offset = 0
                # Group consecutive characters sharing one bbox into a "line"
                # and encode the document line by line.
                for char_index, (char, bbox) in enumerate(zip(content, bbox_lines)):
                    if char_index == 0:
                        prev_bbox = bbox
                        this_text_line = char
                        continue

                    if all([bbox[x] == prev_bbox[x] for x in range(4)]):
                        this_text_line += char
                    else:
                        offset_mapping, last_offset, q_sep_index, inputs_ids = _encode_doc(
                            self._tokenizer,
                            offset_mapping,
                            last_offset,
                            prompt,
                            this_text_line,
                            inputs_ids,
                            q_sep_index,
                            self._max_seq_len,
                        )
                        this_text_line = char
                    prev_bbox = bbox
                # Flush the trailing line.
                if len(this_text_line) > 0:
                    offset_mapping, last_offset, q_sep_index, inputs_ids = _encode_doc(
                        self._tokenizer,
                        offset_mapping,
                        last_offset,
                        prompt,
                        this_text_line,
                        inputs_ids,
                        q_sep_index,
                        self._max_seq_len,
                    )
                # Truncate to the window and close with the final [SEP].
                if len(inputs_ids) > self._max_seq_len:
                    inputs_ids = inputs_ids[: (self._max_seq_len - 1)] + [c_sep_id]
                    offset_mapping = offset_mapping[: (self._max_seq_len - 1)] + [[0, 0]]
                else:
                    inputs_ids += [c_sep_id]
                    offset_mapping += [[0, 0]]

                if len(offset_mapping) > 1:
                    offset_bias = offset_mapping[q_sep_index - 1][-1] + 1
                else:
                    offset_bias = 0

                seq_len = len(inputs_ids)
                inputs_ids += [pad_id] * (self._max_seq_len - seq_len)
                # token_type 1 marks the prompt segment, 0 the text segment.
                token_type_ids = [1] * (q_sep_index + 1) + [0] * (seq_len - q_sep_index - 1)
                token_type_ids += [pad_id] * (self._max_seq_len - seq_len)

                bbox_list = _process_bbox(inputs_ids, bbox_lines, offset_mapping, offset_bias)

                offset_mapping += [[0, 0]] * (self._max_seq_len - seq_len)

                # Reindex the text offsets so they are text-relative again.
                text_start_idx = offset_mapping[1:].index([0, 0]) + self._summary_token_num - 1
                for idx in range(text_start_idx, self._max_seq_len):
                    offset_mapping[idx][0] -= offset_bias
                    offset_mapping[idx][1] -= offset_bias

                position_ids = list(range(seq_len))

                position_ids = position_ids + [0] * (self._max_seq_len - seq_len)
                attention_mask = [1] * seq_len + [0] * (self._max_seq_len - seq_len)

                image_data = base64.b64decode(image_buff_string.encode("utf8"))
                padded_image = pad_image_data(image_data)

                input_list = [inputs_ids, token_type_ids, position_ids, attention_mask, bbox_list]
                return_list = [np.array(x, dtype="int64") for x in input_list]
                return_list.append(np.array(padded_image, dtype="float32"))
                return_list.append(np.array(offset_mapping, dtype="int64"))
                assert len(inputs_ids) == self._max_seq_len
                assert len(token_type_ids) == self._max_seq_len
                assert len(position_ids) == self._max_seq_len
                assert len(attention_mask) == self._max_seq_len
                assert len(bbox_list) == self._max_seq_len
                yield tuple(return_list)

        infer_ds = load_dataset(doc_reader, inputs=short_inputs, lazy=False)
        batch_sampler = paddle.io.BatchSampler(dataset=infer_ds, batch_size=self._batch_size, shuffle=False)

        infer_data_loader = paddle.io.DataLoader(
            dataset=infer_ds, batch_sampler=batch_sampler, return_list=True
        )

        sentence_ids = []
        probs = []
        for batch in infer_data_loader:
            input_ids, token_type_ids, pos_ids, att_mask, bbox, image, offset_maps = batch
            result = self.engine(input_ids.numpy(), token_type_ids.numpy(), pos_ids.numpy(), att_mask.numpy(), bbox.numpy(), image.numpy())
            start_prob = result[0].tolist()
            end_prob = result[1].tolist()

            # Keep token positions whose start/end probability clears the threshold.
            start_ids_list = get_bool_ids_greater_than(start_prob, limit=self._position_prob, return_prob=True)
            end_ids_list = get_bool_ids_greater_than(end_prob, limit=self._position_prob, return_prob=True)
            for start_ids, end_ids, offset_map in zip(start_ids_list, end_ids_list, offset_maps.tolist()):
                span_set = get_span(start_ids, end_ids, with_prob=True)
                sentence_id, prob = get_id_and_prob(span_set, offset_map)
                sentence_ids.append(sentence_id)
                probs.append(prob)
        results = self._convert_ids_to_results(short_inputs, sentence_ids, probs)
        results = self._auto_joiner(results, short_input_texts, input_mapping)
        return results

    @staticmethod
    def _auto_joiner(short_results, short_inputs, input_mapping):
        concat_results = []
        for k, vs in input_mapping.items():
            offset = 0
            single_results = []
            for v in vs:
                if v == 0:
                    single_results = short_results[v]
                    offset += len(short_inputs[v])
                else:
                    for i in range(len(short_results[v])):
                        if "start" not in short_results[v][i] or "end" not in short_results[v][i]:
                            continue
                        short_results[v][i]["start"] += offset
                        short_results[v][i]["end"] += offset
                    offset += len(short_inputs[v])
                    single_results.extend(short_results[v])
            concat_results.append(single_results)
        return concat_results

    def _run_model(self, inputs):
        """Convert the OCR payload into model inputs, run the multi-stage
        schema prediction, and attach the results under "result"."""
        parsed_docs = self._parse_inputs(inputs["text"])
        inputs["result"] = self._multi_stage_predict(parsed_docs)
        return inputs

    def __call__(self, *args, **kwargs):
        """End-to-end pipeline: OCR preprocess -> UIE inference -> postprocess,
        with a timing/log marker around every stage."""
        logger.info("before_preprocess")
        model_inputs = self._preprocess(*args)
        logger.info("after_preprocess")
        model_outputs = self._run_model(model_inputs)
        logger.info("after_run_model")
        final_results = self._postprocess(model_outputs)
        logger.info("after_postprocess")
        return final_results

    def _parse_inputs(self, inputs):
        _inputs = []
        for d in inputs:
            text = ""
            bbox = []
            img_w, img_h = d["img_w"], d["img_h"]
            offset_x, offset_y = d["offset_x"], d["offset_x"]
            for segment in d["layout"]:
                org_box = segment[0]  # bbox before expand to A4 size
                box = [
                    org_box[0] + offset_x,
                    org_box[1] + offset_y,
                    org_box[2] + offset_x,
                    org_box[3] + offset_y,
                ]
                box = self._img_parser._normalize_box(box, [img_w, img_h], [1000, 1000])
                text += segment[1]
                bbox.extend([box] * len(segment[1]))
            _inputs.append({"text": text, "bbox": bbox, "image": d["image"], "layout": d["layout"]})
        return _inputs

    def _multi_stage_predict(self, data):
        """
        Traversal the schema list in batches and do multi-stage prediction.

        Args:
            data (list): a list of dictionaries, each containing 'text', 'bbox', and 'image' keys.

        Returns:
            list: a list of dictionaries, where each dictionary contains predictions
                  for the corresponding input in `data`. The list's length
                  equals to the length of `data`.
        """
        results = [{} for _ in range(len(data))]
        # Number of schema fields pushed through the engine together.
        batch_size = 4
        for i in range(0, len(self._schema_list), batch_size):
            node_names = self._schema_list[i:i + batch_size]
            examples = []
            input_map = {}
            # Examples are built document-major, schema-minor: example index
            # k corresponds to data item input_map[k] and schema
            # node_names[k % len(node_names)].
            for idx, one_data in enumerate(data):
                for node_name in node_names:
                    example = {
                        "text": one_data["text"],
                        "bbox": one_data["bbox"],
                        "image": one_data["image"],
                        "prompt": dbc2sbc(node_name),
                    }
                    examples.append(example)
                    input_map[len(examples) - 1] = idx

            result_list = self._single_stage_predict(examples)
            for k, v in input_map.items():
                if len(result_list[k]) > 0:
                    # Fix: was `k % batch_size`, which mis-addresses (or
                    # IndexErrors on) the final batch when the schema list
                    # length is not a multiple of batch_size.
                    results[v].setdefault(node_names[k % len(node_names)], []).extend(result_list[k])
        results = self._add_bbox_info(results, data)
        return results

    @staticmethod
    def _add_bbox_info(results, data):

        def _add_bounding_boxes(result, char_boxes):
            # 遍历每个识别结果
            for value_set in result.values():
                for value in value_set:
                    bounding_boxes = []
                    current_box = None
                    for index in range(value["start"], value["end"]):
                        character_box = char_boxes[index][1]

                        # 如果是第一个字符，则初始化边界框
                        if index == value["start"]:
                            current_box = character_box
                            continue

                        _, current_top, current_right, current_bottom = character_box

                        # 如果当前字符与前一个字符在同一行，则扩展边界框
                        if current_top == current_box[1] and current_bottom == current_box[3]:
                            current_box[2] = current_right
                        else:
                            # 添加之前的边界框，并开始新的边界框
                            if current_box:
                                bounding_boxes.append(current_box)
                            current_box = character_box

                    # 添加最后一个字符的边界框
                    if current_box:
                        bounding_boxes.append(current_box)

                    # 将边界框转换为整数格式
                    bounding_boxes = [[int(coord) for coord in box] for box in bounding_boxes]
                    value["bbox"] = bounding_boxes

            return result

        new_results = []
        for result, one_data in zip(results, data):
            if "layout" in one_data.keys():
                layout = one_data["layout"]
                char_boxes = []
                for segment in layout:
                    sbox = segment[0]
                    text_len = len(segment[1])
                    if text_len == 0:
                        continue
                    char_w = (sbox[2] - sbox[0]) * 1.0 / text_len
                    for i in range(text_len):
                        cbox = [sbox[0] + i * char_w, sbox[1], sbox[0] + (i + 1) * char_w, sbox[3]]
                        char_boxes.append((segment[1][i], cbox))
                result = _add_bounding_boxes(result, char_boxes)
            new_results.append(result)
        return new_results

    @staticmethod
    def _convert_ids_to_results(examples, sentence_ids, probs):
        """
        Convert ids to raw text in a single stage.
        """
        results = []
        for example, sentence_id, prob in zip(examples, sentence_ids, probs):
            if len(sentence_id) == 0:
                results.append([])
                continue
            result_list = []
            text = example["text"]
            prompt = example["prompt"]
            for i in range(len(sentence_id)):
                start, end = sentence_id[i]
                if start < 0 and end >= 0:
                    continue
                if end < 0:
                    start += len(prompt) + 1
                    end += len(prompt) + 1
                    result = {"text": prompt[start:end], "probability": prob[i]}
                    result_list.append(result)
                else:
                    result = {"text": text[start:end], "start": start, "end": end, "probability": prob[i]}
                    result_list.append(result)
            results.append(result_list)
        return results

    @staticmethod
    def _postprocess(inputs):
        """
        Return the final extraction results that _run_model attached to the
        pipeline payload under the "result" key.
        """
        return inputs["result"]
