import math
import os
from typing import List

import numpy as np
import paddle
from ocr_process.infer.onnx import Engine
from ocr_process.utils.env import ONNX_HOME
from paddlenlp.datasets import load_dataset
from paddlenlp.taskflow.utils import dbc2sbc, get_id_and_prob
from paddlenlp.transformers import ErnieTokenizer
from paddlenlp.utils.log import logger
from paddlenlp.utils.tools import get_bool_ids_greater_than, get_span


class UieText:
    """Universal Information Extraction (UIE) over plain text via an ONNX model.

    Tokenizes (prompt, text) pairs with an Ernie tokenizer, runs start/end
    span prediction through an ONNX inference engine, and maps the predicted
    spans back to the raw input text. Inputs longer than the model's sequence
    limit are split into chunks and the per-chunk results are re-joined with
    corrected character offsets.
    """

    def __init__(self, schema_list: List[str] = None, **kwargs):
        """
        Args:
            schema_list (List[str], optional): extraction prompts, one per
                field/entity to extract.
            **kwargs: optional settings. Recognized keys: ``onnx_home``
                (model/vocab directory, defaults to ``ONNX_HOME``),
                ``batch_size`` (default 32), ``position_prob`` (default 0.5).
                All kwargs are also forwarded to the ONNX ``Engine``.
        """
        self.kwargs = kwargs
        # Use roughly half of the available CPU cores for inference threads.
        self._num_threads = math.ceil(os.cpu_count() / 2)
        self._max_seq_len = 512
        self._batch_size = kwargs.get("batch_size", 32)
        # Minimum probability for a token position to count as a span boundary.
        self._position_prob = kwargs.get("position_prob", 0.5)
        # [CLS] prompt [SEP] text [SEP] -> three special tokens per sequence.
        self._summary_token_num = 3
        onnx_home = self.kwargs.get("onnx_home", ONNX_HOME)
        self._schema_list = schema_list
        self._tokenizer = ErnieTokenizer(
            vocab_file=os.path.join(onnx_home, "vocab.txt").replace("\\", "/"),
            tokenizer='ErnieTokenizer',
            unk_token="[UNK]",
            sep_token="[SEP]",
            cls_token="[CLS]",
            pad_token="[PAD]",
            mask_token="[MASK]",
            model_max_length=512,
            do_lower_case=True
        )
        model_path = os.path.join(onnx_home, "static", "inference.onnx").replace("\\", "/")
        self.engine = Engine(model_path, **self.kwargs)

    @staticmethod
    def _auto_splitter(input_texts: List[str], max_text_len: int):
        """
        Split the raw texts automatically for model inference.

        Args:
            input_texts (List[str]): input raw texts.
            max_text_len (int): cutting length.
        Returns:
            short_input_texts (List[str]): the short input texts for model inference.
            input_mapping (dict): maps each raw-text index to the indices of
                its chunks inside ``short_input_texts``.
        """
        input_mapping = {}
        short_input_texts = []
        cnt_short = 0
        for cnt_org, text in enumerate(input_texts):
            text_len = len(text)
            if text_len <= max_text_len:
                # Short enough: keep the text as a single chunk.
                short_input_texts.append(text)
                input_mapping.setdefault(cnt_org, []).append(cnt_short)
                cnt_short += 1
            else:
                # Hard cut into fixed-size, non-overlapping windows.
                temp_text_list = [text[i: i + max_text_len] for i in range(0, text_len, max_text_len)]
                short_input_texts.extend(temp_text_list)
                short_idx = cnt_short
                cnt_short += math.ceil(text_len / max_text_len)
                input_mapping.setdefault(cnt_org, []).extend(range(short_idx, cnt_short))
        return short_input_texts, input_mapping

    def _preprocess(self, inputs):
        """
        Transform the raw text to the model inputs, two steps involved:
           1) Transform the raw text to token ids.
           2) Generate the other model inputs from the raw text and token ids.
        """
        inputs = self._check_input_text(inputs)
        return {"text": inputs}

    def _check_input_text(self, inputs):
        """Normalize the first positional argument into a list of strings.

        Accepts a single str (wrapped into a list) or a list of str.

        Raises:
            TypeError: if the input is not a str or a list of str.
        """
        inputs = inputs[0]
        if isinstance(inputs, (dict, str)):
            inputs = [inputs]
        if not isinstance(inputs, list):
            raise TypeError("Invalid input format!")
        input_list = []
        for example in inputs:
            if not isinstance(example, str):
                # BUGFIX: message previously claimed "list of dict" although
                # only str elements are accepted.
                raise TypeError(
                    "Invalid inputs, the input should be list of str, but type of {} found!".format(
                        type(example)
                    )
                )
            input_list.append(example)
        return input_list

    def _single_stage_predict(self, inputs):
        """Run span prediction for a batch of ``{"text", "prompt"}`` examples.

        Long texts are chunked so that prompt + text + special tokens fit in
        ``self._max_seq_len``; chunk results are re-joined afterwards.
        """
        input_texts = [d["text"] for d in inputs]
        prompts = [d["prompt"] for d in inputs]

        # Max predict length must exclude the longest prompt and the special
        # (summary) tokens. BUGFIX: ``len(max(prompts))`` measured the
        # lexicographically greatest prompt, not the longest one.
        max_predict_len = self._max_seq_len - max(len(p) for p in prompts) - self._summary_token_num

        short_input_texts, input_mapping = self._auto_splitter(
            input_texts, max_predict_len
        )

        # Repeat each prompt once per chunk of its original text.
        short_texts_prompts = []
        for k, v in input_mapping.items():
            short_texts_prompts.extend([prompts[k]] * len(v))

        short_inputs = [
            {"text": t, "prompt": p} for t, p in zip(short_input_texts, short_texts_prompts)
        ]

        def text_reader(inputs):
            # Tokenize one example at a time, yielding int64 numpy arrays so
            # the DataLoader can stack them into batches.
            for example in inputs:
                encoded_inputs = self._tokenizer(
                    text=[example["prompt"]],
                    text_pair=[example["text"]],
                    truncation=True,
                    max_seq_len=self._max_seq_len,
                    pad_to_max_seq_len=True,
                    return_attention_mask=True,
                    return_position_ids=True,
                    return_offsets_mapping=True,
                )
                tokenized_output = [
                    encoded_inputs["input_ids"][0],
                    encoded_inputs["token_type_ids"][0],
                    encoded_inputs["position_ids"][0],
                    encoded_inputs["attention_mask"][0],
                    encoded_inputs["offset_mapping"][0],
                ]
                yield tuple(np.array(x, dtype="int64") for x in tokenized_output)

        infer_ds = load_dataset(text_reader, lazy=False, inputs=short_inputs)
        batch_sampler = paddle.io.BatchSampler(dataset=infer_ds, batch_size=self._batch_size, shuffle=False)

        infer_data_loader = paddle.io.DataLoader(
            dataset=infer_ds, batch_sampler=batch_sampler, return_list=True
        )

        sentence_ids = []
        probs = []
        for batch in infer_data_loader:
            input_ids, token_type_ids, pos_ids, att_mask, offset_maps = batch

            result = self._onnx_predict(input_ids.numpy(), token_type_ids.numpy(), pos_ids.numpy(), att_mask.numpy())

            start_prob = result[0].tolist()
            end_prob = result[1].tolist()

            # Keep only positions whose probability exceeds the threshold.
            start_ids_list = get_bool_ids_greater_than(start_prob, limit=self._position_prob, return_prob=True)
            end_ids_list = get_bool_ids_greater_than(end_prob, limit=self._position_prob, return_prob=True)
            for start_ids, end_ids, offset_map in zip(start_ids_list, end_ids_list, offset_maps.tolist()):
                # Pair start/end candidates into spans, then map token spans
                # back to character offsets via the offset mapping.
                span_set = get_span(start_ids, end_ids, with_prob=True)
                sentence_id, prob = get_id_and_prob(span_set, offset_map)
                sentence_ids.append(sentence_id)
                probs.append(prob)
        results = self._convert_ids_to_results(short_inputs, sentence_ids, probs)
        results = self._auto_joiner(results, short_input_texts, input_mapping)
        return results

    def _onnx_predict(self, input_ids_data, token_type_ids_data, pos_ids_data, att_mask_data):
        """Feed one batch to the ONNX engine.

        Returns whatever ``Engine.infer`` yields — per the caller, index 0 is
        the start probabilities and index 1 the end probabilities.
        """
        # Fetch the input names once instead of four separate calls.
        names = self.engine.get_input_names()
        return self.engine.infer({
            names[0]: input_ids_data,
            names[1]: token_type_ids_data,
            names[2]: pos_ids_data,
            names[3]: att_mask_data})

    @staticmethod
    def _auto_joiner(short_results, short_inputs, input_mapping):
        """Merge per-chunk results back into per-raw-text results.

        Later chunks' start/end offsets are shifted by the accumulated length
        of the preceding chunks so that spans index into the raw text.
        """
        concat_results = []
        for k, vs in input_mapping.items():
            offset = 0
            single_results = []
            for v in vs:
                if v == 0:
                    # Very first chunk overall: reuse its result list as-is
                    # (offset would be 0, so no shifting is needed).
                    single_results = short_results[v]
                    offset += len(short_inputs[v])
                else:
                    for i in range(len(short_results[v])):
                        # Prompt-derived results carry no start/end; skip them.
                        if "start" not in short_results[v][i] or "end" not in short_results[v][i]:
                            continue
                        short_results[v][i]["start"] += offset
                        short_results[v][i]["end"] += offset
                    offset += len(short_inputs[v])
                    single_results.extend(short_results[v])
            concat_results.append(single_results)
        return concat_results

    def _run_model(self, inputs):
        """Parse raw texts, run multi-stage prediction, attach results."""
        raw_inputs = inputs["text"]
        _inputs = self._parse_inputs(raw_inputs)
        inputs["result"] = self._multi_stage_predict(_inputs)
        return inputs

    def __call__(self, *args, **kwargs):
        """Full pipeline: preprocess -> run model -> postprocess."""
        logger.info("before_preprocess")
        inputs = self._preprocess(*args)
        logger.info("after_preprocess")
        outputs = self._run_model(inputs)
        logger.info("after_run_model")
        results = self._postprocess(outputs)
        logger.info("after_postprocess")
        return results

    def _parse_inputs(self, inputs):
        """Wrap each raw text into the record shape the predictor expects.

        This is a text-only pipeline, so ``bbox`` and ``image`` are None.
        """
        return [{"text": d, "bbox": None, "image": None} for d in inputs]

    def _multi_stage_predict(self, data):
        """
        Traversal the schema tree and do multi-stage prediction.

        Args:
            data (list): a list of dictionaries, each containing 'text', 'bbox', and 'image' keys.

        Returns:
            list: a list of dictionaries, where each dictionary contains predictions
                  for the corresponding input in `data`. The list's length
                  equals to the length of `data`.
        """
        results = [{} for _ in range(len(data))]
        batch_size = 1
        for i in range(0, len(self._schema_list), batch_size):
            node_names = self._schema_list[i:i + batch_size]
            examples = []
            input_map = {}
            # Examples are built data-major: for each input, one example per
            # schema node in this slice.
            for idx, one_data in enumerate(data):
                for node_name in node_names:
                    example = {
                        "text": one_data["text"],
                        "bbox": one_data["bbox"],
                        "image": one_data["image"],
                        # Normalize full-width characters to half-width.
                        "prompt": dbc2sbc(node_name),
                    }
                    examples.append(example)
                    input_map[len(examples) - 1] = idx

            result_list = self._single_stage_predict(examples)
            for k, v in input_map.items():
                if len(result_list[k]) > 0:
                    # NOTE(review): ``k % batch_size`` recovers the schema node
                    # only because batch_size == 1 here; it would break for a
                    # final slice shorter than a larger batch_size.
                    results[v].setdefault(node_names[k % batch_size], []).extend(result_list[k])
        results = self._add_bbox_info(results, data)
        return results

    @staticmethod
    def _add_bbox_info(results, data):
        """Attach per-span bounding boxes to results when layout info exists.

        Each layout segment is ``(box, text)``; per-character boxes are
        estimated by dividing the segment box evenly across its characters.
        """

        def _add_bounding_boxes(result, char_boxes):
            # Walk every extracted value and build its list of line boxes.
            for value_set in result.values():
                for value in value_set:
                    # BUGFIX: prompt-derived results carry no start/end keys
                    # (see _convert_ids_to_results); skip them instead of
                    # raising KeyError, consistent with _auto_joiner.
                    if "start" not in value or "end" not in value:
                        continue
                    bounding_boxes = []
                    current_box = None
                    for index in range(value["start"], value["end"]):
                        character_box = char_boxes[index][1]

                        # First character: seed the running box.
                        if index == value["start"]:
                            current_box = character_box
                            continue

                        _, current_top, current_right, current_bottom = character_box

                        # Same line as the running box: extend it rightwards.
                        if current_top == current_box[1] and current_bottom == current_box[3]:
                            current_box[2] = current_right
                        else:
                            # Line break: flush the running box, start a new one.
                            if current_box:
                                bounding_boxes.append(current_box)
                            current_box = character_box

                    # Flush the final running box.
                    if current_box:
                        bounding_boxes.append(current_box)

                    # Truncate coordinates to ints for the output.
                    bounding_boxes = [[int(coord) for coord in box] for box in bounding_boxes]
                    value["bbox"] = bounding_boxes

            return result

        new_results = []
        for result, one_data in zip(results, data):
            if "layout" in one_data:
                layout = one_data["layout"]
                char_boxes = []
                for segment in layout:
                    sbox = segment[0]
                    text_len = len(segment[1])
                    if text_len == 0:
                        continue
                    # Assume uniform character width within a segment.
                    char_w = (sbox[2] - sbox[0]) * 1.0 / text_len
                    for i in range(text_len):
                        cbox = [sbox[0] + i * char_w, sbox[1], sbox[0] + (i + 1) * char_w, sbox[3]]
                        char_boxes.append((segment[1][i], cbox))
                result = _add_bounding_boxes(result, char_boxes)
            new_results.append(result)
        return new_results

    @staticmethod
    def _convert_ids_to_results(examples, sentence_ids, probs):
        """
        Convert ids to raw text in a single stage.

        ``sentence_id`` entries are (start, end) character offsets; negative
        offsets address characters of the prompt rather than the text.
        """
        results = []
        for example, sentence_id, prob in zip(examples, sentence_ids, probs):
            if len(sentence_id) == 0:
                results.append([])
                continue
            result_list = []
            text = example["text"]
            prompt = example["prompt"]
            for i in range(len(sentence_id)):
                start, end = sentence_id[i]
                # A span straddling prompt and text is invalid — drop it.
                if start < 0 and end >= 0:
                    continue
                if end < 0:
                    # Both offsets negative: the span lies inside the prompt;
                    # note no start/end keys are emitted for these results.
                    start += len(prompt) + 1
                    end += len(prompt) + 1
                    result = {"text": prompt[start:end], "probability": prob[i]}
                    result_list.append(result)
                else:
                    result = {"text": text[start:end], "start": start, "end": end, "probability": prob[i]}
                    result_list.append(result)
            results.append(result_list)
        return results

    @staticmethod
    def _postprocess(inputs):
        """
        This function will convert the model output to raw text.
        """
        return inputs["result"]
