import argparse
import json
import os
import shutil

import fitz
import torch
from PIL import Image
from transformers import AutoTokenizer
from transformers import TextStreamer

from GOT.model import GOTQwenForCausalLM
from GOT.model.plug.blip_process import BlipImageEvalProcessor
from GOT.utils.conversation import conv_templates, SeparatorStyle
from GOT.utils.utils import KeywordsStoppingCriteria
from GOT.utils.utils import disable_torch_init


# Command-line options for the batch OCR run.
parser = argparse.ArgumentParser()
# Root directory scanned recursively for *.pdf files.
parser.add_argument("--src", type=str, default="教师用书")
# JSON file whose keys are already-processed document names; matches are skipped.
parser.add_argument("--info", type=str, default="teacher_book.json")
# "y" re-runs OCR even when a cached HTML page already exists.
parser.add_argument("--force", type=str, default="n", help="y or n, whether to force ocr")
args = parser.parse_args()

PDF_FILE = "file.pdf"  # NOTE(review): not referenced in this chunk -- confirm before removing
IMAGE_DIR = "images"  # scratch directory for rendered page PNGs (wiped after each PDF)
HTML_DIR = "pdf_html"  # per-PDF output directory for rendered HTML pages
MODEL_NAME = "./stepfun-ai--GOT-OCR2_0"  # local GOT-OCR2 checkpoint path

# Special tokens the GOT model uses to delimit image patch embeddings in the prompt.
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = '<imgpad>'
DEFAULT_IM_START_TOKEN = '<img>'
DEFAULT_IM_END_TOKEN = '</img>'


class ModelUtil:
    """Lazy holder for the OCR tokenizer/model pair.

    Both attributes start as ``None`` and are populated on first use
    (see ``get_models``), so the heavy checkpoint load happens at most once.
    """

    def __init__(self):
        # Nothing is loaded at construction time.
        self.tokenizer = self.model = None


def load_image(image_file):
    """Open *image_file* from disk and return it as an RGB PIL image.

    Fix: the original left the file handle opened by ``Image.open`` dangling;
    using the image as a context manager closes it once the pixel data has
    been materialized by ``convert``.
    """
    with Image.open(image_file) as img:
        return img.convert('RGB')


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Choose the (cols, rows) grid from *target_ratios* best matching *aspect_ratio*.

    Ties on aspect-ratio distance are broken in favor of the larger grid, but
    only when the source image area exceeds half the area that grid would cover
    at *image_size* per tile (so small images are not over-tiled).
    """
    chosen = (1, 1)
    smallest_diff = float('inf')
    source_area = width * height
    for grid in target_ratios:
        diff = abs(aspect_ratio - grid[0] / grid[1])
        if diff < smallest_diff:
            smallest_diff, chosen = diff, grid
        elif diff == smallest_diff and source_area > 0.5 * image_size * image_size * grid[0] * grid[1]:
            chosen = grid
    return chosen


def dynamic_preprocess(image, min_num=1, max_num=6, image_size=1024, use_thumbnail=True):
    """Tile *image* into ``image_size`` x ``image_size`` crops along a chosen grid.

    A (cols, rows) grid whose tile count lies in [min_num, max_num] is selected
    to best match the image's aspect ratio, the image is resized to fill that
    grid exactly, and each cell is cropped out. When more than one tile results
    and *use_thumbnail* is set, a full-image thumbnail is appended as well.
    """
    src_w, src_h = image.size
    src_ratio = src_w / src_h

    # Every distinct (cols, rows) grid with a tile count in [min_num, max_num],
    # ordered by tile count.
    candidate_grids = {
        (cols, rows)
        for total in range(min_num, max_num + 1)
        for cols in range(1, total + 1)
        for rows in range(1, total + 1)
        if min_num <= cols * rows <= max_num
    }
    candidate_grids = sorted(candidate_grids, key=lambda g: g[0] * g[1])

    grid = find_closest_aspect_ratio(src_ratio, candidate_grids, src_w, src_h, image_size)

    new_w = image_size * grid[0]
    new_h = image_size * grid[1]
    tile_count = grid[0] * grid[1]

    scaled = image.resize((new_w, new_h))
    cols = new_w // image_size
    tiles = []
    for idx in range(tile_count):
        # Row-major walk over the grid cells.
        left = (idx % cols) * image_size
        top = (idx // cols) * image_size
        tiles.append(scaled.crop((left, top, left + image_size, top + image_size)))
    assert len(tiles) == tile_count
    if use_thumbnail and len(tiles) != 1:
        tiles.append(image.resize((image_size, image_size)))
    return tiles


def get_models(app):
    """Return the (tokenizer, model) pair, loading it into *app* on first call.

    The checkpoint is read once; later calls reuse the cached objects held on
    the ModelUtil instance. The model is placed on CUDA in bfloat16 eval mode.
    """
    if app.model is None:
        disable_torch_init()
        checkpoint = os.path.expanduser(MODEL_NAME)
        tok = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
        net = GOTQwenForCausalLM.from_pretrained(
            checkpoint,
            low_cpu_mem_usage=True,
            device_map='cuda',
            use_safetensors=True,
            pad_token_id=151643,
        ).eval()
        net.to(device='cuda', dtype=torch.bfloat16)
        app.model = net
        app.tokenizer = tok
    return app.tokenizer, app.model


def ocr_image(app, img, html2, force=False):
    """Run GOT-OCR on one page image and render the result into an HTML document.

    Args:
        app:   ModelUtil cache; tokenizer/model are loaded lazily via get_models.
        img:   RGB PIL image of a single page.
        html2: optional path to a cached/output HTML file. If it exists and
               *force* is falsy, its contents are returned unchanged; otherwise
               the freshly rendered HTML is written there.
        force: re-run OCR even when a cached *html2* file exists.

    Returns:
        The rendered HTML document as a string.
    """
    # Serve the cached render when available (fix: close the file handle).
    if not force and html2 is not None and os.path.exists(html2):
        with open(html2, encoding='utf8') as cached:
            return cached.read()

    tokenizer, model = get_models(app)
    image_processor_high = BlipImageEvalProcessor(image_size=1024)
    image_token_len = 256  # patch tokens reserved per sub-image in the prompt
    qs = 'OCR with format upon the patch reference: '

    # Tile the page into 1024x1024 crops (plus a thumbnail) and preprocess each.
    sub_images = dynamic_preprocess(img)
    ll = len(sub_images)
    image_list = torch.stack([image_processor_high(p.copy()) for p in sub_images])

    # Prepend one <imgpad> run per sub-image, wrapped in <img>...</img> markers.
    qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_PATCH_TOKEN * image_token_len * ll + DEFAULT_IM_END_TOKEN + '\n' + qs
    conv = conv_templates["mpt"].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    inputs = tokenizer([prompt])
    input_ids = torch.as_tensor(inputs.input_ids).cuda()

    stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
    stopping_criteria = KeywordsStoppingCriteria([stop_str], tokenizer, input_ids)
    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    with torch.autocast("cuda", dtype=torch.bfloat16):
        output_ids = model.generate(
            input_ids,
            images=[(image_list.half().cuda(), image_list.half().cuda())],
            do_sample=False,
            num_beams=1,
            no_repeat_ngram_size=20,
            streamer=streamer,
            max_new_tokens=4096,
            stopping_criteria=[stopping_criteria]
        )
    outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()

    if outputs.endswith(stop_str):
        outputs = outputs[:-len(stop_str)]
    outputs = outputs.strip()

    html_path = "./render_tools/" + "/content-mmd-to-html.html"

    # If \left / \right counts disagree the LaTeX delimiters are unbalanced and
    # would break rendering, so strip all of them down to their plain forms.
    if outputs.count('\\right') != outputs.count('\left'):
        for opener, closer in (('(', ')'), ('[', ']'), ('{', '}'), ('|', '|'), ('.', '.')):
            outputs = outputs.replace('\left' + opener, opener).replace('\\right' + closer, closer)

    outputs = outputs.replace('"', '``').replace('$', '')

    # Re-emit the text as the JavaScript string-concatenation expression the
    # template expects after `const text =` (fix: join instead of quadratic +=;
    # the [:-1] drops the final dangling '+', matching the old [:-2] trim).
    parts = ['"' + line.replace('\\', '\\\\') + r'\n' + '"' + '+'
             for line in outputs.split('\n')]
    gt = '\n'.join(parts)[:-1]

    # Fix: read the template with a context manager so the handle is closed.
    with open(html_path, 'r') as web_f:
        pieces = web_f.read().split("const text =")
    new_web = pieces[0] + 'const text =' + gt + pieces[1]

    if html2 is not None:
        with open(html2, 'w', encoding="utf8") as web_f_new:
            web_f_new.write(new_web)
    return new_web


if __name__ == '__main__':
    # Fix: args.force is a string ("y"/"n"); any non-empty string -- including
    # the default "n" -- is truthy, so passing it straight through meant the
    # HTML cache in ocr_image was never used. Only an explicit "y" forces.
    force_ocr = args.force.strip().lower() == "y"

    # Map of already-processed document names; matches are skipped below.
    info_json = {}
    if os.path.exists(args.info):
        with open(args.info, 'r', encoding='utf8') as fin:
            info_json = json.load(fin)

    app = ModelUtil()

    # Collect every PDF under --src, building a flat unique name per file.
    wait_list = []
    for dir_name, _, filenames in os.walk(args.src):
        # Flatten the directory path into a filename prefix (handles / and \).
        pre = "-".join(dir_name.split("/")) if "/" in dir_name else "-".join(dir_name.split("\\"))
        for filename in filenames:
            filepath = os.path.join(dir_name, filename)
            if not filepath.endswith(".pdf"):
                continue
            filename = filename.replace(".pdf", "")
            filename = pre + "-" + filename
            filename = filename.replace(" ", "_")
            if filename in info_json:
                # NOTE(review): info_json is never updated by this script;
                # presumably another tool maintains it -- confirm.
                print(filename, "-->", "skipped")
                continue
            wait_list.append((filepath, filename))

    num = 0
    for pdf_path, pdf_name in wait_list:
        num += 1
        print("{}/{}".format(num, len(wait_list)))
        pdfDoc = fitz.open(pdf_path)
        # Render at 4/3 zoom (PDF native 72 dpi -> ~96 dpi), no rotation.
        zoom = 1.33333333
        mat = fitz.Matrix(zoom, zoom).preRotate(0)
        os.makedirs(IMAGE_DIR, exist_ok=True)
        os.makedirs(os.path.join(HTML_DIR, pdf_name), exist_ok=True)

        # Rasterize every page to a PNG and record its target HTML path.
        images_list = []
        html_paths = []
        for pg in range(pdfDoc.pageCount):
            page = pdfDoc[pg]
            pix = page.getPixmap(matrix=mat, alpha=False)
            image_path = os.path.join(IMAGE_DIR, "{}.png".format(str(pg).zfill(6)))
            pix.writePNG(image_path)
            images_list.append(image_path)
            html_paths.append(os.path.join(HTML_DIR, pdf_name, "{}.html".format(str(pg).zfill(6))))
        # Fix: release the document handle once rasterization is done.
        pdfDoc.close()

        for page_num, (image_file, html_path) in enumerate(zip(images_list, html_paths), start=1):
            print("pdf num: {}/{}, page num: {}/{}".format(num, len(wait_list), page_num, len(images_list)))
            ocr_image(app, load_image(image_file), html_path, force=force_ocr)

        # Fix: portable cleanup instead of shelling out to `rm -rf`.
        shutil.rmtree(IMAGE_DIR, ignore_errors=True)
