from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import cv2
import numpy as np
import requests
from pdf2image import convert_from_bytes
from io import BytesIO
from PIL import Image
from ollama import Client
from typing import List, Union
from enum import Enum
from typing_extensions import Annotated, Literal, Union
from pydantic import BaseModel
from fastapi import FastAPI, File, UploadFile, Form, Request
import base64
import io
import json
from PIL import Image

class ClassificationType(str, Enum):
    """Strategy used to match an OCR'd page against a document template."""
    ByTitle = "ByTitle"  # match when the template's title string appears in the page OCR text

class TemplateClassfication(BaseModel):
    """How a template is recognised in a scanned page.

    NOTE(review): class name keeps the original 'Classfication' spelling for
    compatibility with existing callers/payloads.
    """
    classification_type: ClassificationType  = ClassificationType.ByTitle
    classification_value: str = ''  # e.g. the title text searched for when type is ByTitle

class InfoType(str, Enum):
    """Kind of information field to extract from a document."""
    Keyword = 'Keyword'  # field located by a key phrase passed to the LLM prompt

class InformationExtractionField(BaseModel):
    """One key/value field to extract from a document page."""
    info_id: int = 0  # field identifier, referenced by VerificationItem.info_id
    info_name: str = ''  # human-readable field name
    info_type: InfoType= InfoType.Keyword
    info_box: List= []  # bounding box (pydantic copies mutable defaults per instance)
    info_key: str= ''  # key phrase used in the LLM extraction prompt
    ocr_info_value: Union[str, None] = None  # value as extracted from OCR text
    final_info_value: Union[str, None] = None  # value after post-processing/review

class DocumentTemplate(BaseModel):
    """Template describing one page layout and the fields to extract from it."""
    template_id: int= 0
    page_id: int = 0
    template_name: str = ''
    template_type: str= ''
    template_document_base64: str= ''  # optional reference image of the template page
    template_classfication: TemplateClassfication  # NOTE: field name keeps the original typo
    info_fields: List[InformationExtractionField] = []  # fields to extract from matching pages


class VerificationItem(BaseModel):
    """Pointer to one extracted field: a (template, page, field) triple."""
    template_id: int = 0
    page_id: int = 0
    info_id: int = 0

class VerificationMode(str, Enum):
    """Who performs the verification."""
    AutoAI = "AutoAI"  # checked automatically by this service
    Manual = "Manual"  # left to a human reviewer

class VerificationType(str, Enum):
    """Rule applied to the fields referenced by a DocumentVerification.

    NOTE(review): only Exists, Equal and AtLeastOne are currently handled in
    process_document_verify; the others evaluate to False there.
    """
    Exists = "Exists"
    NotExists = "NotExists"
    Equal = "Equal"
    NotEqual = "NotEqual"
    AtLeastOne = "AtLeastOne"
    All = "All"

class DocumentVerification(BaseModel):
    """One verification rule evaluated over extracted document fields."""
    verification_id: int = 0
    verification_name: str = ""
    verification_type: VerificationType = VerificationType.Exists
    verification_items: List[VerificationItem]  # fields this rule applies to (required)
    verification_mode: VerificationMode = VerificationMode.AutoAI
    verification_result: Union[bool, None] = None  # None until evaluated


class DocumentOcrFullTextItem(BaseModel):
    """One recognised text line with its bounding box (8 ints: 4 x/y corner pairs)."""
    ocr_text: str = ""
    ocr_box: List[int] = []


class DocumentResultItem(BaseModel):
    """OCR + extraction result for a single page of one input document."""
    doucment_filename: str = ""  # NOTE: keeps original 'doucment' typo for API compatibility
    template_id: int = 0  # filled in by get_template_id once the page is classified
    page_id: int = 0
    document_ocr_full_text: List[DocumentOcrFullTextItem] = []
    info_fields: List[InformationExtractionField] = []  # filled in by get_info_by_ollama

class DocumentFile(BaseModel):
    """An uploaded document: filename, extension and base64-encoded content."""
    document_filename: str = ''
    document_fileext: str= ''  # e.g. 'pdf' or 'jpg'; 'pdf'/'.pdf' triggers page rasterisation
    document_base64: str= ''


class Model(BaseModel):
    """Container of templates, rules and files.

    NOTE(review): appears to be an unused duplicate of InputData — verify
    against external callers before removing.
    """
    document_templates: List[DocumentTemplate]
    document_verifications: List[DocumentVerification]
    document_files: List[DocumentFile]

class InputData(BaseModel):
    """Request body of /api/v1/ocr/document_verify."""
    document_templates: List[DocumentTemplate]
    document_verifications: List[DocumentVerification]
    document_files: List[DocumentFile]

class OutputData(BaseModel):
    """Response body of /api/v1/ocr/document_verify."""
    document_results: List[DocumentResultItem]

# Local Ollama server used for LLM-based key-information extraction.
client = Client(host='http://localhost:11434')

def get_kie_by_ollama(content):
    """Send a prompt to the local Ollama model and return its raw answer with
    any ```json / ``` code fences stripped.

    NOTE(review): this function is shadowed by a later definition of the same
    name in this file, so this version is effectively dead code here.
    """
    response = client.chat(model='qwen2.5:0.5b', messages=[
    {
        'role': 'user',
        'content': content,
    },
    ])
    s = response['message']['content'].replace('```json', '').replace('```', '')
    return s
def sorted_boxes(dt_boxes):
    """Sort detected text boxes into reading order: top-to-bottom, left-to-right.

    Boxes whose top-left y coordinates differ by less than 10 px are treated
    as lying on the same text line and are ordered by x instead.

    Args:
        dt_boxes (ndarray): detected text boxes with shape [n, 4, 2].
    Returns:
        list of [4, 2] boxes in reading order.
    """
    num_boxes = dt_boxes.shape[0]
    # Primary sort by top-left corner, y first then x.
    # (Fix: the local result no longer shadows the function name.)
    _boxes = sorted(dt_boxes, key=lambda box: (box[0][1], box[0][0]))

    # Bubble a box leftwards while it sits on the same visual line as its
    # predecessor (|dy| < 10 px) but starts further left.
    for i in range(num_boxes - 1):
        for j in range(i, -1, -1):
            same_line = abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10
            if same_line and _boxes[j + 1][0][0] < _boxes[j][0][0]:
                _boxes[j], _boxes[j + 1] = _boxes[j + 1], _boxes[j]
            else:
                break
    return _boxes

def get_rotate_crop_image(img, points):
    """Perspective-crop a quadrilateral text region to an axis-aligned image.

    Args:
        img: source image as an HxWxC array.
        points: 4x2 float32 array of corner points (top-left, top-right,
            bottom-right, bottom-left order is assumed — TODO confirm with the
            detector's polygon order).
    Returns:
        The warped crop; rotated 90 degrees when height/width >= 1.5, so
        vertical text lines become horizontal before recognition.
    """
    assert len(points) == 4, "shape of points must be 4*2"
    # Target size: the longer of each pair of opposite edges.
    img_crop_width = int(
        max(
            np.linalg.norm(points[0] - points[1]), np.linalg.norm(points[2] - points[3])
        )
    )
    img_crop_height = int(
        max(
            np.linalg.norm(points[0] - points[3]), np.linalg.norm(points[1] - points[2])
        )
    )
    # Destination rectangle for the perspective transform.
    pts_std = np.float32(
        [
            [0, 0],
            [img_crop_width, 0],
            [img_crop_width, img_crop_height],
            [0, img_crop_height],
        ]
    )
    M = cv2.getPerspectiveTransform(points, pts_std)
    dst_img = cv2.warpPerspective(
        img,
        M,
        (img_crop_width, img_crop_height),
        borderMode=cv2.BORDER_REPLICATE,
        flags=cv2.INTER_CUBIC,
    )
    dst_img_height, dst_img_width = dst_img.shape[0:2]
    # Tall, narrow crops are assumed to be vertical text; rotate upright.
    if dst_img_height * 1.0 / dst_img_width >= 1.5:
        dst_img = np.rot90(dst_img)
    return dst_img

def pdf_to_images_in_memory(pdf_data):
    """Render every page of a PDF (given as raw bytes) to PNG bytes, fully in memory.

    Returns:
        list of bytes objects, one PNG per page, in page order.
    """
    rendered_pages = convert_from_bytes(pdf_data)

    images_data = []
    for i, page in enumerate(rendered_pages):
        buffer = BytesIO()
        page.save(buffer, format="PNG")
        images_data.append(buffer.getvalue())
        print(f"Page {i+1} converted to image in memory")

    return images_data

# ModelScope OCR pipelines, created once at import time:
# line-level text detection plus per-line text recognition.
ocr_detection = pipeline(Tasks.ocr_detection, model='damo/cv_resnet18_ocr-detection-db-line-level_damo')
ocr_recognition = pipeline(Tasks.ocr_recognition, model='damo/cv_convnextTiny_ocr-recognition-document_damo')

def process_ocr_one_page(image_path_or_url):
    """Run OCR (detection + recognition) on one page image.

    Accepts a local path, an http(s) URL, raw encoded image bytes (PNG/JPEG),
    an OpenCV BGR ndarray, or a PIL image.

    Returns:
        list of dicts {'ocr_text': str, 'ocr_box': [x1, y1, ..., x4, y4]}
        in reading order (top-to-bottom, left-to-right).
    Raises:
        ValueError: if the input type is not supported.
    """
    if isinstance(image_path_or_url, str):
        if image_path_or_url.startswith(('http://', 'https://')):
            # BUGFIX: requests has no 'urlopen'; fetch with requests.get and
            # decode the response body in memory.
            payload = requests.get(image_path_or_url).content
            image = cv2.imdecode(np.frombuffer(payload, np.uint8), cv2.IMREAD_COLOR)
        else:
            # Local file path.
            image = cv2.imread(image_path_or_url)
    elif isinstance(image_path_or_url, np.ndarray):
        image = image_path_or_url
    elif isinstance(image_path_or_url, bytes):
        # BUGFIX: cv2.imdecode needs a uint8 ndarray, not a raw bytes object.
        image = cv2.imdecode(np.frombuffer(image_path_or_url, np.uint8), cv2.IMREAD_COLOR)
    elif isinstance(image_path_or_url, Image.Image):
        image = np.array(image_path_or_url)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    else:
        # BUGFIX: the original printed a message and fell through to a
        # NameError; fail fast with a clear error instead.
        raise ValueError(f"Invalid input type: {type(image_path_or_url)!r}")

    det_result = ocr_detection(image)
    # Each detected polygon is 8 numbers -> one 4x2 corner box.
    boxes = [np.array(res).reshape(4, 2).astype(np.float32).tolist()
             for res in det_result['polygons']]

    det_boxes = sorted_boxes(np.array(boxes))

    result = []
    for box in det_boxes:
        # Perspective-crop the region, then recognise its text.
        img = get_rotate_crop_image(image, np.array(box).reshape(4, 2).astype(np.float32))
        ocr_res = ocr_recognition(img)
        result.append({'ocr_text': ocr_res['text'][0],
                       'ocr_box': box.astype(np.int32).reshape(8).tolist()})

    return result


def process_ocr(doc_file: DocumentFile):
    """OCR every page of one uploaded document.

    PDF files are rasterised page by page; any other extension is decoded as a
    single image.  Returns one DocumentResultItem per page with the full OCR
    text filled in (template id and info fields are set by later pipeline steps).
    """
    print(doc_file.document_filename)
    print(doc_file.document_fileext)
    file_data = base64.b64decode(doc_file.document_base64)
    filename = doc_file.document_filename
    fileext = doc_file.document_fileext

    if fileext.lower() in ('pdf', '.pdf'):
        pages = convert_from_bytes(file_data)
    else:
        pages = [Image.open(io.BytesIO(file_data))]

    document_results: List[DocumentResultItem] = []
    for page_id, page_image in enumerate(pages):
        # Recognise all text lines on this page.
        ocr_items = []
        for ocr_result in process_ocr_one_page(page_image):
            print(ocr_result)
            ocr_items.append(DocumentOcrFullTextItem(**ocr_result))

        result_item = DocumentResultItem()
        result_item.doucment_filename = filename
        result_item.page_id = page_id
        result_item.document_ocr_full_text = ocr_items
        document_results.append(result_item)

    return document_results


def get_template_id(document_results : List[DocumentResultItem], document_templates: List[DocumentTemplate]):
    """Classify each OCR'd page by assigning a matching template's id.

    A ByTitle template matches when its classification_value occurs as a
    substring in any OCR text line of the page.  If several templates match,
    the last one in document_templates wins.  Mutates document_results in
    place and returns the same list.
    """
    for result in document_results:
        for template in document_templates:
            classification = template.template_classfication
            if classification.classification_type != ClassificationType.ByTitle:
                continue
            title = classification.classification_value
            if any(title in line.ocr_text for line in result.document_ocr_full_text):
                result.template_id = template.template_id

    return document_results


def get_kie_by_ollama(document_result: DocumentResultItem, document_template:DocumentTemplate):
    """Extract the template's keyword fields from a page's OCR text via Ollama.

    Builds a prompt from the page's concatenated OCR text and the template's
    Keyword field keys, asks the local LLM for a JSON answer, and copies the
    extracted values into the template's info_fields.

    NOTE: this intentionally replaces the earlier single-argument helper of
    the same name defined above.

    Returns:
        The template's info_fields list, with ocr_info_value and
        final_info_value filled in where the model returned a matching key.
    """
    all_text = ''.join(item.ocr_text for item in document_result.document_ocr_full_text)

    # Comma-separated list of keys to extract (no trailing comma in the prompt).
    keyword_text = ','.join(
        field.info_key
        for field in document_template.info_fields
        if field.info_type == InfoType.Keyword
    )
    content = f'''
    你是一个文档信息提取者, 从待处理文本中提取出关键信息，
    ---
    {all_text}
    ---
    提取出上文中的[{keyword_text}], 用json格式输出:
    '''
    print(content)
    response = client.chat(model='qwen2.5:0.5b', messages=[
        {
            'role': 'user',
            'content': content,
        },
    ])
    print('ollama response ', response)
    raw = response['message']['content'].replace('```json', '').replace('```', '')
    # BUGFIX: json_dict was unbound when parsing failed (the bare
    # 'except: pass' left it undefined and the code below crashed with
    # NameError).  Fall back to an empty dict and extract nothing.
    try:
        json_dict = json.loads(raw)
    except json.JSONDecodeError:
        json_dict = {}

    print('json dict', json_dict)
    result_info_fields = document_template.info_fields
    for result_info_field in result_info_fields:
        if isinstance(json_dict, list):
            # Model answered with [{'key': ..., 'value': ...}, ...].
            for item in json_dict:
                if result_info_field.info_key == item.get('key'):
                    # BUGFIX: the original referenced an undefined name 'value'.
                    value = item.get('value')
                    result_info_field.ocr_info_value = value
                    result_info_field.final_info_value = value
        elif isinstance(json_dict, dict):
            # Model answered with {'key': 'value', ...}.
            for key, value in json_dict.items():
                if result_info_field.info_key == key:
                    result_info_field.ocr_info_value = value
                    result_info_field.final_info_value = value

    return result_info_fields

def get_info_by_ollama(document_results : List[DocumentResultItem], document_templates: List[DocumentTemplate]):
    """Fill in extracted info fields for every classified page using Ollama.

    BUGFIX: the original ran extraction against *every* template and let the
    last one overwrite info_fields regardless of the page's classification;
    now only the template whose id matches the page's template_id (assigned
    by get_template_id) is used, which also avoids redundant LLM calls.

    Mutates document_results in place and returns the same list.
    """
    for document_result in document_results:
        for document_template in document_templates:
            if document_template.template_id != document_result.template_id:
                continue
            document_result.info_fields = get_kie_by_ollama(document_result, document_template)

    return document_results

def process_document_verify(document_verifications: "List[DocumentVerification]", document_results: "List[DocumentResultItem]"):
    """Evaluate each verification rule against the extracted document fields.

    For every rule, collect the info fields referenced by its
    (template_id, page_id, info_id) items, then set verification_result:

      - Exists / AtLeastOne: True iff at least one referenced field was found.
      - Equal: True iff at least one field was found and every
        final_info_value agrees.  (BUGFIX: the original unconditionally
        overwrote the result with True after the comparison loop, so Equal
        always passed whenever any field was found.)
      - Any other type (NotExists, NotEqual, All, ...): currently False.

    Mutates document_verifications in place; returns None.
    """
    for verification in document_verifications:
        # Gather every extracted field this rule refers to.
        matched_fields: "List[InformationExtractionField]" = []
        for item in verification.verification_items:
            for result in document_results:
                if result.template_id == item.template_id and result.page_id == item.page_id:
                    for field in result.info_fields:
                        if field.info_id == item.info_id:
                            matched_fields.append(field)

        vtype = verification.verification_type
        if vtype == "Exists" or vtype == "AtLeastOne":
            verification.verification_result = len(matched_fields) > 0
        elif vtype == "Equal":
            if not matched_fields:
                verification.verification_result = False
            else:
                first_value = matched_fields[0].final_info_value
                verification.verification_result = all(
                    f.final_info_value == first_value for f in matched_fields
                )
        else:
            # NotExists / NotEqual / All are not implemented yet.
            verification.verification_result = False

def base64_2_pilimage(image_base64: str):
    """Decode a base64 string (optionally a data-URL) into a PIL image.

    BUGFIX: removed the debug print that dumped the entire base64 payload
    (potentially a whole document) to stdout.
    """
    # Strip any data-URL prefix such as 'data:image/png;base64,'.
    marker = image_base64.find('base64,')
    if marker > 0:
        image_base64 = image_base64[marker + 7:]
    img_data = base64.b64decode(image_base64)
    return Image.open(io.BytesIO(img_data))

def base64_2_cv2(image_base64: str):
    """Decode a base64 string (optionally a data-URL) into an OpenCV BGR image."""
    # Strip any data-URL prefix such as 'data:image/png;base64,'.
    marker = image_base64.find('base64,')
    if marker > 0:
        image_base64 = image_base64[marker + 7:]
    image_data = base64.b64decode(image_base64)
    # BUGFIX: np.fromstring is deprecated for binary data (and removed in
    # newer NumPy); np.frombuffer is the supported replacement.
    img_array = np.frombuffer(image_data, np.uint8)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    return img

# FastAPI application exposing the OCR and document-verification endpoints.
app = FastAPI()

@app.post("/process_ocr")
async def classification_id_bussiness(req: Request=None, imageFile: UploadFile=File(...)):
    """OCR endpoint.

    Accepts either a JSON body with an 'imageBase64' field or a multipart
    file upload ('imageFile').  Returns the recognised lines; for uploads it
    also returns a concatenated '[x1, y1, x3, y3], text' summary per line.
    """
    if 'application/json' in req.headers['Content-Type']:
        data = await req.json()
        # base64_2_cv2 already returns an OpenCV BGR ndarray.
        image = base64_2_cv2(data['imageBase64'])
        result = process_ocr_one_page(image)
        return {"result": result}
    else:
        image_bytes = await imageFile.read()
        image_np = np.frombuffer(image_bytes, np.uint8)
        image = cv2.imdecode(image_np, cv2.IMREAD_COLOR)

        result = process_ocr_one_page(image)
        all_text = ''
        for item in result:
            # ocr_box is a FLAT list [x1, y1, x2, y2, x3, y3, x4, y4].
            # BUGFIX: the original indexed box[0][0] etc., which raises
            # TypeError on a flat int list; use the top-left (x1, y1) and
            # bottom-right (x3, y3) corners directly.
            box = item['ocr_box']
            s = f'[{box[0]}, {box[1]}, {box[4]}, {box[5]}], {item["ocr_text"]}'
            all_text += s + '\n'

        return {"result": result, 'text': all_text}

@app.post("/api/v1/ocr/document_verify")
def document_verify(input_data: InputData):
    """Full pipeline endpoint: OCR every uploaded file, classify each page
    against the templates, extract fields via the LLM, then evaluate the
    verification rules over all results.

    BUGFIX: results are now accumulated across all files — previously only
    the last file's results were returned, verification saw one file at a
    time, and an empty file list crashed with NameError.
    """
    all_results: List[DocumentResultItem] = []
    for document_file in input_data.document_files:
        doc_results = process_ocr(document_file)
        get_template_id(doc_results, input_data.document_templates)
        doc_results = get_info_by_ollama(doc_results, input_data.document_templates)
        all_results.extend(doc_results)

    # Verify once over the combined results so cross-file rules can match.
    process_document_verify(input_data.document_verifications, all_results)
    output = OutputData(document_results=all_results)
    return output.json(ensure_ascii=False)

def _run_demo():
    """Smoke-test the full pipeline with a sample contract image and one rule."""
    doc_template = DocumentTemplate(
        template_id = 0,
        page_id = 0,
        template_name = '最高额个人经营借款合同',
        template_type= '最高额个人经营借款合同',
        template_document_base64 = '',
        template_classfication = TemplateClassfication(classification_type='ByTitle', classification_value='最高额个人经营借款合同'),
        info_fields = [
            InformationExtractionField(info_id=0, info_key='合同编号', info_type=InfoType.Keyword, ocr_info_value='', final_info_value=''),
            InformationExtractionField(info_id=1, info_key='贷款人', info_type=InfoType.Keyword, ocr_info_value='', final_info_value=''),
            InformationExtractionField(info_id=2, info_key='借款人', info_type=InfoType.Keyword, ocr_info_value='', final_info_value=''),]
    )

    # Read the sample image inside a context manager so the handle is closed.
    with open('images/01.jpg', 'rb') as fh:
        sample_b64 = base64.b64encode(fh.read()).decode('utf-8')

    doc_files = [
        DocumentFile(
            document_filename = '01.jpg',
            document_fileext = 'jpg',
            document_base64 = sample_b64
        )
    ]

    doc_verifications = [
        DocumentVerification(
            verification_id = 0,
            verification_name = '合同编号存在',
            verification_type = 'Exists',
            verification_items = [
                VerificationItem(
                    template_id = 0,
                    page_id = 0,
                    info_id = 0
                )
            ]
        )
    ]

    input_data = InputData(
        document_files = doc_files,
        document_templates = [doc_template],
        document_verifications = doc_verifications
    )

    out = document_verify(input_data)
    print(out)


if __name__ == "__main__":
    # BUGFIX: the demo previously ran at import time (and required
    # images/01.jpg to exist), which broke importing this module from an
    # ASGI server such as uvicorn.  Run it only when executed directly.
    _run_demo()