import os
import io
import cv2
import json
import yaml
import argparse
import numpy as np
import random
from contextlib import contextmanager
import datetime

from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from ultralytics import YOLO
from unimernet.common.config import Config
from unimernet import tasks
from unimernet.processors import load_processor

from modules.s3_utils import get_s3_cfg_by_bucket, get_s3_client, \
    download_s3_asset, read_s3_object_content, write_s3_object_content
from modules.extract_pdf import load_pdf_fitz
from modules.layoutlmv3.model_init import Layoutlmv3_Predictor
from modules.self_modify import ModifiedPaddleOCR
from modules.post_process import get_croped_image, latex_rm_whitespace
from modules.faiss_model.faiss_run import Faiss_Index

import logging
# Globally silence everything at WARNING level and below — the model
# libraries used here (ultralytics / paddle / etc.) are very chatty.
logging.disable(logging.WARNING)

class PDFList(Dataset):
    """Dataset over the lines of a JSONL index file stored on S3.

    The object is downloaded once at construction time and its raw lines
    (as bytes, including the trailing newline) are kept in memory; each
    dataset item is one JSONL line.
    """

    def __init__(self, jsonl_file):
        # jsonl_file is an s3:// URI; resolve credentials from its bucket.
        s3_load_cfg = get_s3_cfg_by_bucket(jsonl_file)
        s3_load_client = get_s3_client("", s3_load_cfg)
        content = read_s3_object_content(s3_load_client, jsonl_file)
        # Skip blank/whitespace-only lines. The previous `if not line`
        # check never fired: iterating a BytesIO yields b"\n" for empty
        # lines, which is truthy, so blank lines leaked through and would
        # crash json.loads downstream.
        all_lines = []
        for line in io.BytesIO(content):
            if not line.strip():
                continue
            all_lines.append(line)
        self.all_lines = all_lines

    def __iter__(self):
        return iter(self.all_lines)

    def __len__(self):
        return len(self.all_lines)

    def __getitem__(self, idx):
        return self.all_lines[idx]
    
def mfd_model_init(weight):
    """Load the math-formula-detection YOLO model from the *weight* file."""
    return YOLO(weight)

def mfr_model_init(weight_dir, device="cpu",
                   cfg_path="/root/project/ray-pipeline/modules/UniMERNet/configs/demo.yaml"):
    """Build the UniMERNet math-formula-recognition (MFR) model.

    Args:
        weight_dir: directory holding ``pytorch_model.bin`` plus the model
            and tokenizer configuration.
        device: torch device string the model is moved to.
        cfg_path: UniMERNet config file. Previously hard-coded; now a
            parameter with the same default, so existing callers are
            unaffected.

    Returns:
        ``(model, vis_processor)`` — the model on *device* and the eval
        image processor for formula crops.
    """
    args = argparse.Namespace(cfg_path=cfg_path, options=None)
    cfg = Config(args)
    # Point the loaded config at the local checkpoint / tokenizer assets.
    cfg.config.model.pretrained = os.path.join(weight_dir, "pytorch_model.bin")
    cfg.config.model.model_config.model_name = weight_dir
    cfg.config.model.tokenizer_config.path = weight_dir
    task = tasks.setup_task(cfg)
    model = task.build_model(cfg)
    model = model.to(device)
    vis_processor = load_processor("formula_image_eval", cfg.config.datasets.formula_rec_eval.vis_processor.eval)
    return model, vis_processor

def layout_model_init(weight, num_gpus=1):
    """Construct the LayoutLMv3 layout predictor from *weight*."""
    return Layoutlmv3_Predictor(weight, num_gpus=num_gpus)


class MathDataset(Dataset):
    """Dataset over formula crops.

    Items may be file paths (opened lazily via PIL) or image objects that
    are used as-is. An optional *transform* is applied per item.
    """

    def __init__(self, image_paths, transform=None):
        self.image_paths = image_paths
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        item = self.image_paths[idx]
        # A string entry is a path on disk; anything else is assumed to
        # already be an image object.
        raw_image = Image.open(item) if isinstance(item, str) else item
        if self.transform is None:
            return raw_image
        return self.transform(raw_image)


@contextmanager
def log2file(logfile: str):
    """Yield an append-mode handle to *logfile*, closed on exit."""
    with open(logfile, "a") as fd:
        yield fd


class BatchDocInfer:
    """Per-PDF document inference pipeline.

    Reads a JSONL index of PDFs from S3, runs layout detection, an optional
    formula detect+recognize stage (MFD + MFR) and optional OCR on every
    page, then writes the augmented records back to S3 as JSONL.
    """

    def __init__(self, apply_formula: bool, apply_ocr: bool):
        # The flags decide which optional models _load_model_and_conf loads.
        self.apply_formula = apply_formula
        self.apply_ocr = apply_ocr
        self._load_model_and_conf()

    def _load_model_and_conf(self):
        """Read global_args.yaml and instantiate every model the flags require."""
        print("current dir: ", os.getcwd())
        with open("/root/project/ray-pipeline/resources/global_args.yaml") as f:
            global_args = yaml.load(f, Loader=yaml.FullLoader)
        device = global_args["model_args"]["device"]
        self.layout_model = layout_model_init(global_args["model_args"]["layout_weight"], num_gpus=1)
        if self.apply_formula:
            self.mfd_model = mfd_model_init(global_args["model_args"]["mfd_weight"])
            self.mfr_model, self.mfr_vis_processors = mfr_model_init(global_args["model_args"]["mfr_weight"], device=device)
            self.mfr_transform = transforms.Compose([self.mfr_vis_processors, ])
        if self.apply_ocr:
            self.ocr_model = ModifiedPaddleOCR(show_log=True, gpu_id=0)
        # FAISS retrieval index used for sampled per-page quality checks.
        self.faiss_model = Faiss_Index(global_args["model_args"]["faiss_img_list"], global_args["model_args"]["faiss_img_features"], global_args["model_args"]["img_ap_ar"])
        self.global_args = global_args

    def __call__(self, source_file, target_file):
        """Process *source_file* (s3:// JSONL index) into *target_file*."""
        base_name = os.path.basename(source_file)[0:-6]  # drop the ".jsonl" suffix
        # One log file per input shard and local rank.
        self.log_file = f"/root/project/ray-pipeline/logs/{base_name}-{int(os.getenv('LOCAL_RANK', '0'))}"
        self.source_file = source_file
        self.target_file = target_file
        self.batch_infer()

    def batch_infer(self):
        """Run infer() over every record and upload the merged JSONL to S3."""
        results = []
        pdf_list = PDFList(jsonl_file=self.source_file)
        for line in pdf_list:
            results.append(self.infer(line))

        # Join once instead of quadratic string concatenation.
        save_content = "".join(
            json.dumps(output, ensure_ascii=False) + "\n" for output in results
        )

        s3_save_cfg = get_s3_cfg_by_bucket(self.target_file)
        s3_save_client = get_s3_client('', s3_save_cfg)
        write_s3_object_content(s3_save_client, self.target_file, save_content.encode('utf-8'))

        print("save jsonl done.")

    def infer(self, record_line):
        """Run the full pipeline on a single JSONL record.

        Returns the record dict, augmented with "doc_layout_result" on
        success. Best-effort: download/parse/inference failures are logged
        and the record is returned unchanged (or partially filled).
        """
        img_size = self.global_args["model_args"]["img_size"]
        conf_thres = self.global_args["model_args"]["conf_thres"]
        iou_thres = self.global_args["model_args"]["iou_thres"]
        dpi = self.global_args["model_args"]["pdf_dpi"]

        with log2file(self.log_file) as log_f:
            print2f = log_f.write
            pdf_info = json.loads(record_line)
            # Skip records that were already processed.
            if pdf_info.get("doc_layout_result", False):
                return pdf_info

            pdf_s3_cfg = get_s3_cfg_by_bucket(pdf_info["path"])
            print("=> processing s3 pdf:", pdf_info["path"])
            temp_path = download_s3_asset(pdf_info["path"], pdf_s3_cfg)
            try:
                img_list = load_pdf_fitz(temp_path, dpi=dpi)
            except Exception:
                # Was a bare `except:` — that would also swallow
                # KeyboardInterrupt/SystemExit.
                img_list = None
                print2f(f"!! unexpected pdf file: {temp_path}")
            if img_list is None:
                return pdf_info

            # Sample roughly 20 pages per PDF for the FAISS quality check.
            if len(img_list) > 20:
                sample_ratio = 20 / len(img_list)
            else:
                sample_ratio = 1

            try:
                # Layout detection + formula detection, page by page.
                doc_layout_result = []
                latex_filling_list = []
                mf_image_list = []
                for idx, image in enumerate(img_list):
                    img_H, img_W = image.shape[0], image.shape[1]
                    layout_res = self.layout_model(image, ignore_catids=[])

                    # Sampled per-page retrieval spot check.
                    if random.random() < sample_ratio:
                        check_img = self.faiss_model.trans_img(image)
                        D, I = self.faiss_model.index.search(check_img, 10)
                        ap_list = self.faiss_model.get_retrival_ap_list(I, D)
                        search_judge, cannot_find = self.faiss_model.low_ap_percentage(ap_list)
                        score_judge = self.faiss_model.score_judge(layout_res["layout_dets"])  # score from layout detections only
                        final_judge = score_judge and search_judge
                        layout_res["judge"] = {"final_judge": final_judge, "search_judge": search_judge, "score_judge": score_judge, "cannot_find": cannot_find, "search_list": ap_list}
                    else:
                        layout_res["judge"] = {}

                    if self.apply_formula:
                        # Math formula detection (YOLO). Crops are queued so
                        # recognition can run batched over the whole PDF below.
                        mfd_res = self.mfd_model.predict(image, imgsz=img_size, conf=conf_thres, iou=iou_thres, verbose=True)[0]
                        for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), mfd_res.boxes.conf.cpu(), mfd_res.boxes.cls.cpu()):
                            xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy]
                            new_item = {
                                "category_id": 13 + int(cla.item()),  # formula category ids start at 13 (see OCR masking below)
                                "poly": [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax],
                                "score": round(float(conf.item()), 2),
                                "latex": "",
                            }
                            layout_res["layout_dets"].append(new_item)
                            latex_filling_list.append(new_item)
                            bbox_img = get_croped_image(Image.fromarray(image), [xmin, ymin, xmax, ymax])
                            mf_image_list.append(bbox_img)

                    layout_res["page_info"] = dict(
                        page_no = idx,
                        height = img_H,
                        width = img_W
                    )
                    doc_layout_result.append(layout_res)

                # Formula recognition. MFR is slow, so all crops from this
                # PDF are pushed through the model in batches at once.
                if self.apply_formula:
                    dataset = MathDataset(mf_image_list, transform=self.mfr_transform)
                    dataloader = DataLoader(dataset, batch_size=16, num_workers=32)
                    mfr_res = []
                    for imgs in dataloader:
                        imgs = imgs.to(self.mfr_model.device)
                        output = self.mfr_model.generate({"image": imgs})
                        mfr_res.extend(output["pred_str"])
                    # latex_filling_list holds references into
                    # doc_layout_result, so this updates the pages in place.
                    for res, latex in zip(latex_filling_list, mfr_res):
                        res["latex"] = latex_rm_whitespace(latex)

                # OCR
                if self.apply_ocr:
                    for idx, image in enumerate(img_list):
                        pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
                        single_page_res = doc_layout_result[idx]["layout_dets"]
                        single_page_mfdetrec_res = []
                        if self.apply_formula:
                            # Collect formula boxes so OCR can mask them out.
                            for res in single_page_res:
                                if int(res["category_id"]) in [13, 14]:
                                    xmin, ymin = int(res["poly"][0]), int(res["poly"][1])
                                    xmax, ymax = int(res["poly"][4]), int(res["poly"][5])
                                    single_page_mfdetrec_res.append({
                                        "bbox": [xmin, ymin, xmax, ymax],
                                    })
                        # Iterate a snapshot: OCR results are appended to the
                        # same list object below, and mutating a list while
                        # iterating it is fragile. Appended items (category 15)
                        # never match the filter, so output is unchanged.
                        for res in list(single_page_res):
                            if int(res["category_id"]) in [0, 1, 2, 4, 6, 7]:  # categories that require OCR
                                xmin, ymin = int(res["poly"][0]), int(res["poly"][1])
                                xmax, ymax = int(res["poly"][4]), int(res["poly"][5])
                                crop_box = [xmin, ymin, xmax, ymax]
                                # Paste the crop onto a white page-sized canvas
                                # so OCR sees it at its original position.
                                cropped_img = Image.new("RGB", pil_img.size, "white")
                                cropped_img.paste(pil_img.crop(crop_box), crop_box)
                                cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR)
                                ocr_res = self.ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0]
                                if ocr_res:
                                    # Renamed from `res` to stop shadowing the
                                    # outer loop variable.
                                    for ocr_det in ocr_res:
                                        p1, p2, p3, p4 = ocr_det[0]
                                        text, score = ocr_det[1]
                                        doc_layout_result[idx]["layout_dets"].append({
                                            "category_id": 15,
                                            "poly": p1 + p2 + p3 + p4,
                                            "score": round(score, 2),
                                            "text": text,
                                        })
                pdf_info["doc_layout_result"] = doc_layout_result
            except Exception as e:
                # Best-effort: log and return whatever we have so far.
                print2f(f"!!got exception: {e}")
            return pdf_info


def infer_func(src):
    """Run full inference (formula + OCR) on *src*, writing the result
    JSONL to the fixed output bucket under the same basename."""
    target = "s3://llm-pdf-text-1/pdf_gpu_output/the-eye-pdf/" + os.path.basename(src)
    BatchDocInfer(True, True)(src, target)

if __name__ == "__main__":
    # CLI entry point. Note: the actual inference call is currently
    # disabled (commented out below).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input",
        type=str,
        default="s3://llm-pdf-text/books/processing/test/index/part-6662d7694c33-000034.jsonl",
    )
    parser.add_argument(
        "--output",
        type=str,
        default="s3://llm-pdf-text/books/processing/test/output/part-6662d7694c33-000034.jsonl",
    )
    for flag in ("--formula", "--ocr", "--vis", "--render"):
        parser.add_argument(flag, action="store_true")
    args = parser.parse_args()

    # infer = BatchDocInfer(args.formula, args.ocr)
    # infer(args.input, "/tmp/temp.jsonl")
