# cython:language_level=3
import io
import json
import logging
import os
import shutil
import time
from typing import Optional, Awaitable

import fitz
import nanoid
import torch
import tornado.web
from PIL import Image
from transformers import AutoTokenizer
from transformers import TextStreamer

from GOT.model import GOTQwenForCausalLM
from GOT.model.plug.blip_process import BlipImageEvalProcessor
from GOT.utils.conversation import conv_templates, SeparatorStyle
from GOT.utils.utils import KeywordsStoppingCriteria
from GOT.utils.utils import disable_torch_init

# Module-level logger with its own stream handler; propagation is disabled so
# records are not duplicated by the root logger.
LOG = logging.getLogger("asr.web.main")
CHANNEL = logging.StreamHandler()
FORMATTER = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
CHANNEL.setFormatter(FORMATTER)
LOG.addHandler(CHANNEL)
LOG.propagate = False

# API version tag reported in JSON responses.
version = "got-ocr-api-v1"
# Fixed scratch paths shared by every request (single-request use assumed:
# concurrent uploads would overwrite each other's files).
PDF_FILE = "file.pdf"
IMAGE_DIR = "images"
HTML_DIR = "pdf_html"
# Local directory holding the GOT-OCR2.0 model weights.
MODEL_NAME = "./stepfun-ai--GOT-OCR2_0"

# Special marker tokens the GOT tokenizer uses to delimit image embeddings
# inside the text prompt.
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = '<imgpad>'
DEFAULT_IM_START_TOKEN = '<img>'
DEFAULT_IM_END_TOKEN = '</img>'


def load_image(image_file):
    """Open *image_file* (a path or file-like object) and return it as an RGB PIL image."""
    rgb_image = Image.open(image_file).convert('RGB')
    return rgb_image


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Pick the (cols, rows) grid from *target_ratios* whose aspect ratio best matches *aspect_ratio*.

    Ties on ratio distance are broken in favour of the later candidate when the
    source image area exceeds half the pixel budget of that candidate grid.
    """
    source_area = width * height
    best = (1, 1)
    best_diff = float('inf')
    for cols, rows in target_ratios:
        diff = abs(aspect_ratio - cols / rows)
        if diff < best_diff:
            best_diff = diff
            best = (cols, rows)
        elif diff == best_diff and source_area > 0.5 * image_size * image_size * cols * rows:
            best = (cols, rows)
    return best


def dynamic_preprocess(image, min_num=1, max_num=6, image_size=1024, use_thumbnail=True):
    """Split *image* into a grid of image_size x image_size tiles.

    Chooses the (cols, rows) grid with a total tile count in
    [min_num, max_num] whose aspect ratio is closest to the source image,
    resizes the image to fill that grid exactly, crops out each tile, and —
    when more than one tile was produced — appends an image_size-square
    thumbnail of the original as the final element.
    """
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Candidate grids (cols, rows) whose tile count lies in [min_num, max_num],
    # ordered by total tile count.
    candidates = sorted(
        {(cols, rows)
         for total in range(min_num, max_num + 1)
         for cols in range(1, total + 1)
         for rows in range(1, total + 1)
         if min_num <= cols * rows <= max_num},
        key=lambda grid: grid[0] * grid[1])

    grid_cols, grid_rows = find_closest_aspect_ratio(
        aspect_ratio, candidates, orig_width, orig_height, image_size)

    target_width = image_size * grid_cols
    target_height = image_size * grid_rows
    blocks = grid_cols * grid_rows

    # Resize to fill the chosen grid, then crop each cell left-to-right,
    # top-to-bottom.
    resized = image.resize((target_width, target_height))
    per_row = target_width // image_size  # == grid_cols
    tiles = []
    for idx in range(blocks):
        x0 = (idx % per_row) * image_size
        y0 = (idx // per_row) * image_size
        tiles.append(resized.crop((x0, y0, x0 + image_size, y0 + image_size)))
    assert len(tiles) == blocks
    if use_thumbnail and len(tiles) != 1:
        tiles.append(image.resize((image_size, image_size)))
    return tiles


def get_models(app):
    """Return (tokenizer, model), lazily loading and caching them on *app*.

    The first call loads the GOT-OCR model onto CUDA in bfloat16 and stores
    both objects on the tornado application; later calls reuse the cache.
    """
    if app.model is not None:
        return app.tokenizer, app.model

    disable_torch_init()
    model_path = os.path.expanduser(MODEL_NAME)
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = GOTQwenForCausalLM.from_pretrained(
        model_path, low_cpu_mem_usage=True, device_map='cuda',
        use_safetensors=True, pad_token_id=151643).eval()
    model.to(device='cuda', dtype=torch.bfloat16)
    app.model = model
    app.tokenizer = tokenizer
    return app.tokenizer, app.model


def ocr_image(app, img, html2, force=False):
    """Run GOT-OCR on *img* and render the result into a standalone HTML page.

    app:   tornado application carrying the cached model/tokenizer.
    img:   RGB PIL image to recognize.
    html2: optional path used both as a cache (read when it already exists)
           and as the output location for the rendered HTML.
    force: when True, ignore an existing html2 cache and re-run OCR.

    Returns the rendered HTML text.
    """
    # Serve the cached rendering when present and not forced.
    if not force and html2 is not None and os.path.exists(html2):
        # fix: close the cache file (was a leaked open(...).read()).
        with open(html2, encoding='utf8') as cached:
            return cached.read()

    tokenizer, model = get_models(app)
    image_processor_high = BlipImageEvalProcessor(image_size=1024)
    image_token_len = 256  # number of <imgpad> placeholders per image patch

    # Tile the page into up to 6 1024x1024 patches (+ thumbnail) and
    # preprocess each into a tensor.
    sub_images = dynamic_preprocess(img)
    patch_count = len(sub_images)
    image_list = torch.stack(
        [image_processor_high(patch.copy()) for patch in sub_images])

    # Build the multimodal prompt: image placeholder span, then the OCR query.
    qs = 'OCR with format upon the patch reference: '
    qs = (DEFAULT_IM_START_TOKEN
          + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len * patch_count
          + DEFAULT_IM_END_TOKEN + '\n' + qs)
    conv = conv_templates["mpt"].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    inputs = tokenizer([prompt])
    input_ids = torch.as_tensor(inputs.input_ids).cuda()

    stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
    stopping_criteria = KeywordsStoppingCriteria([stop_str], tokenizer, input_ids)
    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    with torch.autocast("cuda", dtype=torch.bfloat16):
        output_ids = model.generate(
            input_ids,
            images=[(image_list.half().cuda(), image_list.half().cuda())],
            do_sample=False,
            num_beams=1,
            no_repeat_ngram_size=20,
            streamer=streamer,
            max_new_tokens=4096,
            stopping_criteria=[stopping_criteria]
        )
    # Decode only the newly generated tokens, then strip the stop marker.
    outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()
    if outputs.endswith(stop_str):
        outputs = outputs[:-len(stop_str)]
    outputs = outputs.strip()

    html_path = "./render_tools/" + "/content-mmd-to-html.html"

    # If LaTeX \left / \right are unbalanced the renderer would fail, so strip
    # the sizing commands down to their bare delimiters.
    if outputs.count('\\right') != outputs.count('\\left'):
        for sized, bare in (('\\left(', '('), ('\\right)', ')'),
                            ('\\left[', '['), ('\\right]', ']'),
                            ('\\left{', '{'), ('\\right}', '}'),
                            ('\\left|', '|'), ('\\right|', '|'),
                            ('\\left.', '.'), ('\\right.', '.')):
            outputs = outputs.replace(sized, bare)

    outputs = outputs.replace('"', '``').replace('$', '')

    # Re-encode the OCR text as a chain of JS string literals
    # ("line\n" + "line\n" + ...) for splicing into the HTML template.
    gt = ''
    for out in outputs.split('\n'):
        gt += '"' + out.replace('\\', '\\\\') + r'\n' + '"' + '+' + '\n'
    gt = gt[:-2]  # drop the trailing '+\n'

    # fix: read the template as utf8 explicitly (the output is written as
    # utf8; the read previously depended on the locale default encoding).
    with open(html_path, 'r', encoding='utf8') as web_f:
        lines = web_f.read()
        lines = lines.split("const text =")
        new_web = lines[0] + 'const text =' + gt + lines[1]
    if html2 is not None:
        with open(html2, 'w', encoding="utf8") as web_f_new:
            web_f_new.write(new_web)
    return new_web


class BaseHandler(tornado.web.RequestHandler):
    """Common request handler: fully permissive CORS headers on every route."""

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        # Streamed request bodies are not used; this abstract override is a no-op.
        pass

    def prepare(self):
        # Echo the requesting origin and requested headers back, so browsers
        # accept cross-origin calls from any site (fully open CORS policy).
        access_headers = self.request.headers.get(
            'Access-Control-Request-Headers',
            'x-requested-with'
        )
        origin_headers = self.request.headers.get('Origin', "*")
        self.set_header("Access-Control-Allow-Origin", origin_headers)
        self.set_header("Access-Control-Allow-Headers", access_headers)
        self.set_header(
            "Access-Control-Allow-Methods",
            "GET,OPTIONS,PUT,POST,DELETE"
        )

    async def options(self, *args, **kwargs):
        # CORS preflight: headers were already set in prepare(); empty 200 reply.
        pass


class HelloHandler(BaseHandler):
    """Health-check endpoint: GET / answers with a plain liveness string."""

    def data_received(self, chunk):
        # No streamed-body handling needed.
        pass

    # @tornado.gen.coroutine
    def get(self):
        # Liveness probe response.
        self.write("I'm alive!")


class PDFHandler(BaseHandler):
    """OCR endpoint: POST a PDF (or a single PNG) under the 'data' multipart field.

    For PNG uploads the rendered OCR HTML is returned directly; for PDFs each
    page is rasterized, OCR'd into per-page HTML files under HTML_DIR, and a
    JSON status object is returned.
    """

    def data_received(self, chunk):
        # Streamed-body uploads are not supported; required override is a no-op.
        pass

    def get(self):
        # GET is not a supported operation; report code 2 to the client.
        self.write(json.dumps({"code": 2}, ensure_ascii=False))

    def post(self):
        """Handle an upload.

        Expects multipart fields: 'data' (the file), 'file_name', and
        optionally 'file_type' ("png" for single images) and 'force' ("y" to
        bypass cached per-page HTML).
        """
        files = self.request.files.get('data', None)
        req_id = nanoid.generate('1234567890abcdef', size=6)
        result = {"req_id": req_id, "code": 1, "version": version}
        if files is not None and len(files):
            args = self.request.body_arguments
            file_name = str(args["file_name"][0], encoding="utf-8")
            file_type = "pdf"
            if "file_type" in args and str(args["file_type"][0], encoding="utf-8") == "png":
                file_type = "png"
            force = "force" in args and str(args["force"][0], encoding="utf-8") == "y"
            file_data = files[0].get("body")

            if file_type == "png":  # single-image request: return OCR HTML directly
                html_text = ocr_image(self.application, load_image(io.BytesIO(file_data)), None, force)
                self.write(html_text)
                return

            with open(PDF_FILE, mode="wb") as f:
                LOG.debug("save to pdf file")
                f.write(file_data)

            # NOTE(review): PDF_FILE/IMAGE_DIR are fixed shared paths, so
            # concurrent requests would clobber each other — single-request
            # use assumed.
            pdf_doc = fitz.open(PDF_FILE)
            try:
                zoom = 1.33333333  # upscale pages for better OCR quality
                # NOTE(review): preRotate/pageCount/getPixmap/writePNG are the
                # legacy PyMuPDF names — kept as-is for the pinned version.
                mat = fitz.Matrix(zoom, zoom).preRotate(0)
                os.makedirs(IMAGE_DIR, exist_ok=True)
                os.makedirs(os.path.join(HTML_DIR, file_name), exist_ok=True)
                images_list = []
                html_paths = []
                for pg in range(pdf_doc.pageCount):
                    pix = pdf_doc[pg].getPixmap(matrix=mat, alpha=False)
                    image_path = os.path.join(IMAGE_DIR, "{}.png".format(str(pg).zfill(6)))
                    pix.writePNG(image_path)
                    images_list.append(image_path)
                    html_paths.append(os.path.join(HTML_DIR, file_name, "{}.html".format(str(pg).zfill(6))))
            finally:
                pdf_doc.close()  # fix: the document handle was never released

            # OCR each rasterized page into its per-page HTML file.
            for image_file, html_path in zip(images_list, html_paths):
                ocr_image(self.application, load_image(image_file), html_path, force)
            result["code"] = 0
            result["page_num"] = len(images_list)
            os.remove(PDF_FILE)
            # fix: replace os.system("rm -rf ...") shell call with stdlib removal.
            shutil.rmtree(IMAGE_DIR, ignore_errors=True)

        if result["code"] > 0:
            # fix: message said "audio process" — copy/paste leftover from an ASR service.
            LOG.error("pdf process has error, code = %d", result["code"])
        LOG.debug("all done, return json: %s", json.dumps(result, ensure_ascii=False))
        self.write(json.dumps(result, ensure_ascii=False))


def mm_main():
    """Build the tornado application and serve it forever on port 8181."""
    start = time.time()
    LOG.setLevel(logging.DEBUG)
    app = tornado.web.Application([
        (r"/", HelloHandler),
        (r"/pdf", PDFHandler),
    ])
    # Model and tokenizer are loaded lazily on the first OCR request.
    app.model = None
    app.tokenizer = None
    app.listen(port=8181)
    LOG.info("http server start at port 8181! " + str(time.time() - start))
    tornado.ioloop.IOLoop.instance().start()


# Script entry point: start the HTTP server when run directly.
if __name__ == '__main__':
    mm_main()
