import asyncio
import json
import os
import time
import fitz
import cv2
import numpy as np
import re
from PIL import Image
from fastapi import Depends
from redis.asyncio import Redis
from core.dependencies import get_redis

from core.dependencies import AsyncSessionLocal
from core.dependencies import get_db
from sqlalchemy.ext.asyncio import AsyncSession

from src.modules.knowbase.repository.entity.vectors import TextVector, ImageVector
from src.modules.ai_models.utils.extract_img import extract_table_img
from src.modules.ai_models.utils.text_split import TextSplit
from src.modules.ai_models.model_loaders.model_loader import (
    get_text_embedding_model,
    get_image_embedding_model,
    get_table_detect_model,
)

# Module-level singletons: the splitter and the three models are loaded once
# at import time and shared by every task this worker processes.
text_split_obj = TextSplit()
text_embedding_model = get_text_embedding_model()
image_embedding_model = get_image_embedding_model()
table_det_model = get_table_detect_model()

async def process_pdf_file(task_id:str, file_id:str, file_path:str, out_dir:str, file_name:str, knowbase_id:str, session:AsyncSession):
    """Run the full PDF ingestion pipeline for one file.

    Executes the three extraction stages sequentially — page text, embedded
    images, detected tables — and finally marks the task as complete in Redis
    under ``task:{task_id}`` for progress polling.
    """
    redis = await get_redis()

    # Stage 1: page text -> chunks -> text embeddings.
    await extract_text_embedding_from_pdf_file(
        task_id=task_id,
        file_id=file_id,
        file_path=file_path,
        file_name=file_name,
        knowbase_id=knowbase_id,
        session=session,
    )

    # Stage 2: embedded images -> files + captions -> image embeddings.
    await extract_images_from_pdf_file(
        task_id=task_id,
        file_id=file_id,
        file_path=file_path,
        out_dir=out_dir,
        file_name=file_name,
        knowbase_id=knowbase_id,
        session=session,
    )

    # Stage 3: detected tables -> cropped PNGs -> image embeddings.
    await extract_tables_from_pdf_file(
        task_id=task_id,
        file_id=file_id,
        file_path=file_path,
        out_dir=out_dir,
        file_name=file_name,
        knowbase_id=knowbase_id,
        session=session,
    )

    # Signal overall completion to whoever polls the task hash.
    await redis.hset(f"task:{task_id}", mapping={
        "type": "complete",
        "percentage": "100",
        "count": 0,
        "message": "文件解析完成",
    })


async def extract_text_embedding_from_pdf_file(
    task_id: str,
    file_id: str,
    file_path: str,
    file_name: str,
    knowbase_id: str,
    session:AsyncSession
):
    """Extract page text from a PDF, embed each chunk and persist TextVector rows.

    Progress is reported to Redis under ``task:{task_id}``: 50% once the raw
    text has been pulled from the PDF, 100% once the vectors are committed.

    Args:
        task_id: progress-hash key suffix in Redis.
        file_id / file_name / knowbase_id: ownership metadata stored per row.
        file_path: PDF to read.
        session: async DB session used for the bulk insert.
    """
    redis = await get_redis()

    # --- Text extraction: one raw string per page. ---
    # try/finally so the document handle is released even if a page fails.
    pdf_doc = fitz.open(file_path)
    try:
        texts = [page.get_text() for page in pdf_doc]
    finally:
        pdf_doc.close()

    await redis.hset(f"task:{task_id}", mapping={
        "type": "text",
        "percentage": 50,
        "count": len(texts),
        "message": "文字提取完成"
    })

    # --- Chunking + per-chunk metadata (file/knowbase ownership). ---
    texts = text_split_obj.split_texts(texts)
    texts = text_split_obj.add_meta_data(texts, file_path, knowbase_id, file_id)

    # --- Embedding. An empty PDF simply yields an empty list; the old
    # `if len(texts) != 0` guard left texts_embedding unbound in that case. ---
    texts_embedding = [
        text_embedding_model.get_text_embedding(text.page_content) for text in texts
    ]

    # --- Build and store vector rows. ---
    objects = [
        TextVector(
            # NUL characters break text storage in common backends; strip them.
            text_content=text.page_content.replace("\00", ""),
            text_vector=np.array(texts_embedding[idx]),
            file_id=file_id,
            file_name=file_name,
            know_base_id=knowbase_id,
        )
        for idx, text in enumerate(texts)
    ]

    session.add_all(objects)
    await session.commit()
    for embedding in objects:
        await session.refresh(embedding)
    # Brief yield to the event loop before the final progress update
    # (kept from the original implementation).
    await asyncio.sleep(0.001)

    await redis.hset(f"task:{task_id}", mapping={
        "type": "text",
        "percentage": 100,
        "count": len(texts),
        "message": "文字提取并解析完成"
    })


async def extract_images_from_pdf_file(task_id:str,
                                       file_id:str,
                                       file_path:str,
                                       out_dir:str,
                                       file_name:str,
                                       knowbase_id:str,
                                       session:AsyncSession):
    """Extract embedded images from a PDF, save each with a best-effort
    caption, then embed them and persist ImageVector rows.

    A caption is the 'Fig x.x.x <title>' line found in the page text; the
    figure number is expanded to "Figure x.x.x" to locate a descriptive
    paragraph via extract_image_descForX52FixGuider. Metadata is written to
    ``images_metadata.json`` and re-read by the embedding step. Progress goes
    to Redis under ``task:{task_id}`` at 50% (saved) and 100% (embedded).
    """
    redis = await get_redis()

    pdf_doc = fitz.open(file_path)
    images_dir = os.path.join(out_dir, "images")
    # Metadata file consumed by extract_images_embedding_from_json_file.
    metadata_path = os.path.join(out_dir, "images_metadata.json")
    os.makedirs(images_dir, exist_ok=True)

    image_metadata_list = []
    # try/finally so the document handle is released even on failure.
    try:
        for p_num in range(len(pdf_doc)):
            page = pdf_doc[p_num]
            imgs = page.get_images(full=True)
            page_text = page.get_text()
            for img_idx, img in enumerate(imgs):
                xref = img[0]
                base_img = pdf_doc.extract_image(xref)
                img_bytes = base_img["image"]
                img_ext = base_img["ext"]

                img_filename = f"page_{p_num + 1}_image_{img_idx + 1}.{img_ext}"
                img_path = os.path.join(images_dir, img_filename)
                with open(img_path, "wb") as img_file:
                    img_file.write(img_bytes)
                print("Saved: {}".format(img_path))

                title = extract_image_title(page_text)
                title_num = None
                image_desc = None
                title_arr = title.split()
                # BUGFIX: must have at least two tokens ("Fig" + number)
                # before indexing title_arr[1]; the old `len(title_arr) > 0`
                # check raised IndexError on a one-token title.
                if len(title_arr) > 1:
                    # "Fig" -> "Figure x.x.x", the form used by the headings.
                    title_num = title_arr[0] + "ure " + title_arr[1]
                if title_num is not None:
                    image_desc = extract_image_descForX52FixGuider(
                        p_num, pdf_doc, title_num
                    )
                if image_desc is not None:
                    full_desc = f"{title}. {image_desc}".strip()
                else:
                    full_desc = f"{title}".strip()

                image_metadata_list.append(
                    {
                        "filename": img_filename,
                        "image_path": img_path,
                        "description": full_desc,
                        "page": p_num + 1,
                        "index": img_idx + 1,
                    }
                )
    finally:
        pdf_doc.close()

    with open(metadata_path, "w", encoding="utf-8") as f:
        json.dump(image_metadata_list, f, ensure_ascii=False, indent=2)

    await redis.hset(f"task:{task_id}", mapping={
        "type": "image",
        "percentage": 50,
        "count": len(image_metadata_list),
        "message": "图片提取完成"
    })
    await extract_images_embedding_from_json_file(
        metadata_path, "image", knowbase_id, file_id, file_name, session
    )
    await redis.hset(f"task:{task_id}", mapping={
        "type": "image",
        "percentage": 100,
        "count": len(image_metadata_list),
        "message": "图片提取并解析完成"
    })

async def extract_tables_from_pdf_file(task_id:str,
                                       file_id:str,
                                       file_path:str,
                                       out_dir:str,
                                       file_name:str,
                                       knowbase_id:str,
                                       session:AsyncSession):
    """Detect tables on each PDF page, crop them to PNG files, then embed
    them and persist ImageVector rows.

    Pages are rasterized at 120 dpi, run through the table-detection model,
    and each detected quadrilateral is perspective-cropped via
    extract_table_img. Metadata goes to ``tables_metadata.json`` and is
    re-read by the embedding step. Progress goes to Redis under
    ``task:{task_id}`` at 50% (cropped) and 100% (embedded).
    """
    redis = await get_redis()

    pdf_doc = fitz.open(file_path)
    tables_dir = os.path.join(out_dir, "tables")
    # Metadata file consumed by extract_images_embedding_from_json_file.
    metadata_path = os.path.join(out_dir, "tables_metadata.json")
    os.makedirs(tables_dir, exist_ok=True)

    table_metadata_list = []
    # try/finally so the document handle is released even on failure.
    try:
        for p_num in range(len(pdf_doc)):
            page = pdf_doc[p_num]
            # Rasterize the page and convert RGB -> BGR for OpenCV/the model.
            pix = page.get_pixmap(dpi=120, alpha=False)
            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
            img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
            result = table_det_model(img_bgr)
            page_text = page.get_text()
            for table_num, res in enumerate(result):
                table_file_name = f"page_{p_num + 1}_table_{table_num + 1}.png"
                img_path = os.path.join(tables_dir, table_file_name)
                lt, rt, rb, lb = res["lt"], res["rt"], res["rb"], res["lb"]
                wrapped_img = extract_table_img(img_bgr, lt, rt, rb, lb)
                # cv2.imwrite cannot handle non-ASCII paths, so encode to PNG
                # in memory and write the bytes ourselves.
                success, encoded_img = cv2.imencode(".png", wrapped_img)
                if success:
                    with open(img_path, "wb") as f:
                        f.write(encoded_img.tobytes())
                    print("Saved: {}".format(img_path))
                else:
                    print("Saved失败: {}".format(img_path))

                # BUGFIX: extract_table_title returns a list, and the old
                # f"{title}" stored its Python repr (e.g. "['Tab 1.2.3 ...']").
                # "".join yields the intended text and is also correct if the
                # helper is changed to return a plain string.
                full_description = "".join(extract_table_title(page_text)).strip()

                table_metadata_list.append(
                    {
                        "filename": table_file_name,
                        "image_path": img_path,
                        "description": full_description,
                        "page": p_num + 1,
                        "index": table_num + 1,
                    }
                )
    finally:
        pdf_doc.close()

    with open(metadata_path, "w", encoding="utf-8") as f:
        json.dump(table_metadata_list, f, ensure_ascii=False, indent=2)
    await redis.hset(f"task:{task_id}", mapping={
        "type": "table",
        "percentage": 50,
        "count": len(table_metadata_list),
        "message": "表格提取完成"
    })
    await extract_images_embedding_from_json_file(
        metadata_path, "table", knowbase_id, file_id, file_name, session
    )
    await redis.hset(f"task:{task_id}", mapping={
        "type": "table",
        "percentage": 100,
        "count": len(table_metadata_list),
        "message": "表格提取并解析完成"
    })


async def extract_images_embedding_from_json_file(
    metadata_path: str,
    file_type: str,
    knowbase_id: str,
    file_id: str,
    file_name: str,
    session:AsyncSession
):
    """Load image/table metadata from JSON, embed each image and its text
    description, and persist ImageVector rows.

    Best-effort semantics: images that fail to open or embed are skipped with
    a log line, and database errors are logged (not raised) so the caller's
    pipeline keeps running.

    Args:
        metadata_path: JSON file produced by the image/table extraction step
            (list of dicts with "image_path" and "description" keys).
        file_type: "image" or "table" — used only for log context.
        knowbase_id / file_id / file_name: ownership metadata stored per row.
        session: async DB session used for the bulk insert.
    """
    # (Removed dead code: an unused Redis handle, an unused `flag` label and
    # unused timing variables from the original implementation.)
    with open(metadata_path, "r", encoding="utf-8") as f:
        metadata_list = json.load(f)

    descriptions = []
    image_paths = []
    image_embeddings = []

    # Skip unreadable/corrupt images instead of aborting the whole batch.
    for item in metadata_list:
        img_path = item["image_path"]
        description = item["description"]
        try:
            image = Image.open(img_path)
            image_embedding = image_embedding_model.get_image_embedding(image)
            image_embeddings.append(image_embedding)
            descriptions.append(description)
            image_paths.append(img_path)
        except Exception as e:
            print(f"Failed to process image {img_path}:{e}")

    objects = [
        ImageVector(
            image_path=image_paths[idx],
            image_vector=image_embeddings[idx],
            image_desc=descriptions[idx],
            # The description is embedded too so it can be searched as text.
            desc_vector=text_embedding_model.get_text_embedding(descriptions[idx]),
            file_id=file_id,
            file_name=file_name,
            know_base_id=knowbase_id,
        )
        for idx in range(len(image_embeddings))
    ]

    try:
        session.add_all(objects)
        await session.commit()
        for embedding in objects:
            await session.refresh(embedding)
    except Exception as e:
        # NOTE(review): deliberately swallowed so the pipeline continues, but
        # at least say which batch failed instead of a bare message.
        print(f"Failed to store {file_type} vectors: {e}")


def extract_image_title(page_text):
    """Return all figure captions found in *page_text*, concatenated.

    A caption is a full line of the form ``Fig x.x.x <title>`` or
    ``Fig x.x.x.x <title>`` (three or four dotted number parts). Matches are
    joined without a separator; the empty string means no caption was found.
    """
    caption_re = re.compile(r"(Fig\s+\d+(?:\.\d+){2,3}\s+.+?)\n")
    return "".join(caption_re.findall(page_text))


def extract_image_descForX52FixGuider(page_num, pdf_doc, title_num):
    """Scan the pages around *page_num* for the text that belongs to the
    heading *title_num* (e.g. "Figure 1.2.3") in an X52 fix-guide PDF.

    Heuristics (document-specific): 12pt spans are headings, 10pt spans are
    body text; spans whose top y is above 74 or below 808 are header/footer
    and are skipped. Accumulation restarts at every 12pt heading until one
    containing *title_num* is seen; the next non-bullet 12pt span then
    terminates and returns the accumulated text.

    Returns:
        The accumulated description once the next heading is reached;
        "" if *title_num* was never encountered; None (implicitly) if the
        scanned pages end while still accumulating — callers check for None.
    """
    text = ""
    # BUGFIX: the original iterated range(page_num - 1, page_num + 2)
    # unguarded, so on the first page index -1 wrapped to the LAST page and
    # on the last page pdf_doc[page_num + 1] raised IndexError. Clamp to the
    # valid page range instead.
    first = max(0, page_num - 1)
    last = min(len(pdf_doc), page_num + 2)
    for page_no in range(first, last):
        page = pdf_doc[page_no]
        text_dict = page.get_text("dict")
        for block in text_dict["blocks"]:
            if "lines" not in block:  # image blocks have no "lines" key
                continue
            for line in block["lines"]:
                lead = line["spans"][0]  # size/position gates use the line's first span
                for span in line["spans"]:
                    # Only 10pt body text and 12pt headings are relevant.
                    if lead["size"] != 10 and lead["size"] != 12:
                        continue
                    # Header band ends at y=74, footer band starts at y=808.
                    if lead["bbox"][1] < 74 or lead["bbox"][1] > 808:
                        continue
                    if lead["size"] == 10:
                        text = text + span["text"]
                    # NOTE: the two 12pt checks below are intentionally
                    # sequential (not elif): setting `text` in the first may
                    # satisfy the second within the same span.
                    if lead["size"] == 12 and title_num not in text:
                        # A heading before ours: restart accumulation
                        # (bullets do not restart).
                        if span["text"] != "•":
                            text = span["text"]
                    if lead["size"] == 12 and title_num in text:
                        if span["text"] == "•":
                            # Bulleted 12pt lines continue the description.
                            text = text + span["text"]
                        else:
                            # Next real heading: the description is complete.
                            return text

    if title_num not in text:
        return ""


def extract_table_title(page_text):
    """Return all table captions found in *page_text*, concatenated.

    A caption is a full line of the form ``Tab x.x.x <title>`` or
    ``Tab x.x.x.x <title>``. FIX: previously this returned the raw list from
    re.findall, and the caller's f-string stored its repr (e.g.
    "['Tab 1.2.3 ...']"); now it joins the matches into a plain string, for
    consistency with extract_image_title. "" means no caption was found.
    """
    title_pattern = r"(Tab\s+\d+(?:\.\d+){2,3}\s+.+?)\n"
    titles = re.findall(title_pattern, page_text)
    return "".join(titles)


def extract_related_paragraphs(page, img_bbox):
    """Collect the text of all spans located near *img_bbox* on *page*.

    Proximity is decided by is_near on each span's bounding box; the matching
    span texts are joined with single spaces.
    """
    collected = []
    for block in page.get_text("dict")["blocks"]:
        # Image blocks carry no "lines" key; skip them.
        for line in block.get("lines", []):
            for span in line["spans"]:
                span_box = (
                    span["bbox"][0],
                    span["bbox"][1],
                    span["bbox"][2],
                    span["bbox"][3],
                )
                if is_near(img_bbox, span_box):
                    collected.append(span["text"])
    return " ".join(collected)


def is_near(bbox1, bbox2, threshold=50):
    """Return True when the two boxes' top-left corners are within
    *threshold* of each other on the x-axis OR the y-axis.

    Boxes are (x0, y0, x1, y1); only the top-left corner is compared.
    """
    left_a, top_a = bbox1[0], bbox1[1]
    left_b, top_b = bbox2[0], bbox2[1]
    horizontally_close = abs(left_a - left_b) < threshold
    vertically_close = abs(top_a - top_b) < threshold
    return horizontally_close or vertically_close


def extract_caption(page, img_bbox):
    """Concatenate the text of all spans positioned below *img_bbox* —
    a heuristic for finding an image's caption on *page*.

    Membership is decided by is_below on each span's bounding box; matching
    span texts are joined with single spaces and trailing space stripped.
    """
    pieces = []
    for block in page.get_text("dict")["blocks"]:
        if "lines" not in block:  # image blocks carry no text lines
            continue
        for line in block["lines"]:
            for span in line["spans"]:
                span_box = (
                    span["bbox"][0],
                    span["bbox"][1],
                    span["bbox"][2],
                    span["bbox"][3],
                )
                if is_below(img_bbox, span_box):
                    pieces.append(span["text"])
    return " ".join(pieces).strip()


def is_below(bbox1, bbox2, threshold=50):
    """Return True when *bbox2* starts below *bbox1* and the two boxes'
    left edges are within *threshold* of each other.

    Boxes are (x0, y0, x1, y1); "below" means bbox1's bottom y is strictly
    less than bbox2's top y (PDF y grows downward here).
    """
    left_a, bottom_a = bbox1[0], bbox1[3]
    left_b, top_b = bbox2[0], bbox2[1]
    return bottom_a < top_b and abs(left_a - left_b) < threshold
