# bring in our LLAMA_CLOUD_API_KEY
from dotenv import load_dotenv
from langchain.schema import Document
import shutil
import os
from openai import OpenAI
from fastapi import FastAPI, HTTPException, Body, Query, UploadFile, File, Form
from PIL import Image


# bring in deps
from llama_cloud_services import LlamaParse
from llama_index.core import SimpleDirectoryReader
import io
import base64
import json
import requests


load_dotenv()
# NOTE(security): LlamaParse API keys are hard-coded and checked into source.
# They should be revoked and loaded from the environment (.env) instead —
# llama_parse() below draws randomly from this pool to spread rate limits.
apiKeyList=[
    "llx-FMkLafvUjUhQmPmjfnjA3QyAUjNbTqwHE9Mbd5cBRDfYJ8Gt",
    "llx-Y0glRZMbb97x7WASItzG2gbC3TuV1Rk1NRiQxG2lrVh6GIYX",
    "llx-KpZrTUwVnIcTCrYhuiS1gaQvVVeqeWsnk5UNLdRMxUSEatQf",
    "llx-joh2jhVujApkCdX09ff4DH2rkpKme7cs5F7FKmdz2GlNCM61",
    "llx-b6Q7fqV1DuTWdawPTQ0ZnBaQxgDo4PoYYGOJOTSvWwvFJWBq",
    "llx-EClvD4VnabGteVRUSvurotj6fRtDcrk8Z2qnjzARZEulweJo"
]

def copy_file_with_new_extension(source_file, new_extension):
    """Copy *source_file* to a sibling file with *new_extension* appended.

    The extension is appended to the full original name, so
    ``doc.docx`` + ``.pdf`` becomes ``doc.docx.pdf`` in the same directory.

    Args:
        source_file: path of the file to copy.
        new_extension: suffix (including the dot) to append to the copy.

    Returns:
        The path of the newly created copy.

    Raises:
        FileNotFoundError: if *source_file* does not exist.
    """
    if not os.path.exists(source_file):
        raise FileNotFoundError(f"源文件不存在: {source_file}")

    directory, base_name = os.path.split(source_file)
    destination = os.path.join(directory, f"{base_name}{new_extension}")

    shutil.copy(source_file, destination)
    return destination



async def llama_parse(filename):
    """Parse an uploaded document into markdown via the LlamaParse cloud API.

    Args:
        filename: path to the uploaded file.  A ``.pdf``-suffixed copy is
            created next to it and that copy is what gets parsed.
            # assumes the upload is PDF content saved without an extension — TODO confirm

    Returns:
        A list of langchain ``Document`` objects, one per parsed llama-index
        document, each with ``metadata["source"]`` set to *filename*.
    """
    # set up parser
    copy_file_with_new_extension(filename, '.pdf')
    # Pick an API key at random from the module-level pool (crude way to
    # spread requests across keys / rate limits).
    import random
    apiKey = random.choice(apiKeyList)
    parser = LlamaParse(
        api_key=apiKey,  # the randomly selected API key
        result_type="markdown",  # "markdown" and "text" are available
        language='ch_sim',  # simplified-Chinese OCR
        parse_mode="parse_page_with_lvm",
        vendor_multimodal_model_name="gemini-2.0-flash-001",

    )
    # use SimpleDirectoryReader to parse our file
    file_extractor = {".pdf": parser}
    # print(filename)
    # filename=filename.replace('\\','/')
    # filename=filename+'.pdf'
    documents =await SimpleDirectoryReader(input_files=[filename+'.pdf'], file_extractor=file_extractor).aload_data()
    print(filename)
    for doc in documents:
        print(doc.get_content())
    # Re-wrap the llama-index documents as langchain Documents for downstream use.
    documents_output = [Document(page_content=doc.get_content(),metadata={"source": filename}) for doc in documents]
    return documents_output
async def llama_recall_by_jobId(job_id):
    """Fetch a previously submitted LlamaParse result by its job id.

    Args:
        job_id: the LlamaParse job identifier returned when parsing started.

    Raises:
        NotImplementedError: always.  This endpoint has not been implemented;
            the original stub silently returned ``None``, which made callers
            fail later in confusing ways — fail loudly instead.
    """
    raise NotImplementedError(
        f"llama_recall_by_jobId is not implemented yet (job_id={job_id})"
    )



async def qwen_parser(file):
    """Convert one page image to markdown using Qwen2.5-VL via DashScope.

    Args:
        file: object with an async ``read()`` returning raw image bytes
            (e.g. a FastAPI ``UploadFile``).

    Returns:
        The markdown string produced by the model.
    """
    # Read the upload and downscale it (cap width at 1600px, keep aspect).
    image_data = await file.read()
    image = Image.open(io.BytesIO(image_data))
    image.thumbnail((1600, image.height), Image.Resampling.LANCZOS)

    # Encode the *resized* PNG.  The original code base64-encoded the raw
    # upload bytes instead of the buffer, so the thumbnail step was dead code
    # and the full-size image was sent to the API.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")

    # Prompt for the vision model (text content only; images are dropped).
    prompt = '''
        Analyze this PDF page content and convert it to markdown format. 
        Include all text content, maintain headings, lists, and table structures. 
        Igonore the images but only put the displacement in the content.
        Translate the formulas into LaTeX format.
        
        '''
    messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            # buffer holds PNG data, so declare image/png
                            # (it was mislabelled image/jpeg before).
                            "url": f"data:image/png;base64,{base64_image}"
                        },
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]
    # NOTE(security): hard-coded DashScope API key — move to an environment
    # variable.  Key申请地址：https://bailian.console.aliyun.com/?spm=a2c4g.11186623.0.0.1d777980RJmctT&apiKey=1#/api-key
    client = OpenAI(
            api_key="sk-1434d82cbd3c4c45aefacbe35aa3d6f2",
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    completion = client.chat.completions.create(
            model="qwen2.5-vl-32b-instruct",
            messages=messages,
        )
    markdown = completion.choices[0].message.content

    return markdown
def extract_markdown_json(markdown, image=None):
    """Strip the embedded ```json image-listing block from model output.

    The vision prompt asks the model to append a fenced ```json block listing
    extracted figures as ``{"name", "description", "coordinates"}`` objects.
    This removes that block (plus stray fence markers) from the markdown and,
    when *image* is supplied, crops each listed region out of the page image.

    Args:
        markdown: raw model output, possibly containing a ```json block.
        image: optional PIL image of the source page.  When given, each
            entry's coordinates are cropped and saved to ``output/<name>.png``.
            (The original code referenced an undefined ``image`` variable and
            raised NameError whenever a json block was present; it also left
            ``json_str`` unbound — another NameError — when no block existed.)

    Returns:
        The markdown with the json payload and fence markers removed.
    """
    json_str = ''
    json_start = markdown.find('```json')
    json_end = markdown.find('```', json_start + 3)
    if json_start != -1 and json_end != -1:
        json_str = markdown[json_start + 7:json_end]
        try:
            json_obj = json.loads(json_str)
        except json.JSONDecodeError:
            json_obj = []  # malformed payload: skip cropping, still strip it
        if image is not None:
            os.makedirs('output', exist_ok=True)
            for item in json_obj:
                # Crop the region the model reported for this figure.
                x1, y1, x2, y2 = item['coordinates']
                cropped_image = image.crop((x1, y1, x2, y2))
                cropped_image.save(os.path.join('output', item['name'] + '.png'))
    # Remove the json payload and any leftover markers from the markdown.
    if json_str:
        markdown = markdown.replace(json_str, '')
    markdown = markdown.replace('```json', '')
    markdown = markdown.replace('```markdown', '')
    markdown = markdown.replace('```', '')
    markdown = markdown.replace('### JSON Format for Images', '')

    return markdown
async def qwen_parser_multi(file,pageNum):
    """Convert one PDF page image to markdown + figure metadata via Qwen2.5-VL.

    Unlike :func:`qwen_parser`, the prompt also asks for a json listing of
    figures (name/description/coordinates), which is stripped from the
    returned markdown by :func:`extract_markdown_json`.

    Args:
        file: object with an async ``read()`` returning raw image bytes
            (e.g. a FastAPI ``UploadFile``).
        pageNum: page number embedded in the prompt and in figure names.

    Returns:
        The cleaned markdown string.
    """
    # Read the upload and downscale it (cap width at 1600px, keep aspect).
    image_data = await file.read()
    image = Image.open(io.BytesIO(image_data))
    image.thumbnail((1600, image.height), Image.Resampling.LANCZOS)

    # Encode the *resized* PNG.  The original encoded the raw upload bytes,
    # which made the thumbnail a no-op and inflated the request payload —
    # and the figure coordinates the model returns refer to the sent image.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")

    prompt = f'''
        currentPageNum: page-{pageNum}
        Analyze this PDF page content and convert it to markdown format. 
        Include all text content, maintain headings, lists, and table structures. 
        Translate the formulas into LaTeX format.
        Extract the name, description and coordinates of images to json format.
        for example,the json content of single image is:
        左括号 "name": "page-{pageNum}-image1", #do not change the page number
        "description": "展示扫地杆设置于起步立柱最下方圆盘处的示意图，标注了扫地杆离地高度为500mm",
        "coordinates": [254, 107, 729, 426] #only the coordinates of the image,please do not show the text around the image 右括号
        NOTE:the link of every image in content should be the same as the name in json so that I can find the image in content.
        '''
    messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            # buffer holds PNG data, so declare image/png
                            # (it was mislabelled image/jpeg before).
                            "url": f"data:image/png;base64,{base64_image}"
                        },
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]
    # NOTE(security): hard-coded DashScope API key — move to an environment
    # variable.  Key申请地址：https://bailian.console.aliyun.com/?spm=a2c4g.11186623.0.0.1d777980RJmctT&apiKey=1#/api-key
    client = OpenAI(
            api_key="sk-1434d82cbd3c4c45aefacbe35aa3d6f2",
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    completion = client.chat.completions.create(
            model="qwen2.5-vl-32b-instruct",
            messages=messages,
        )
    markdown = completion.choices[0].message.content
    print('currentPageNum:', pageNum)
    print(markdown)
    # Strip the embedded figure-listing json block from the markdown.
    markdown = extract_markdown_json(markdown)
    return markdown

async def slicon_qwen_parser(file):
    """Convert one page image to markdown using Qwen2.5-VL-72B via SiliconFlow.

    Args:
        file: object with an async ``read()`` returning raw image bytes
            (e.g. a FastAPI ``UploadFile``).

    Returns:
        The markdown string from the model response.

    Raises:
        requests.HTTPError: if the SiliconFlow API returns an error status.
    """
    # Read the upload and downscale it (cap width at 1600px, keep aspect).
    image_data = await file.read()
    image = Image.open(io.BytesIO(image_data))
    image.thumbnail((1600, image.height), Image.Resampling.LANCZOS)

    # Encode the *resized* PNG.  The original base64-encoded the raw upload
    # bytes, so the thumbnail was dead code and the full-size image was sent.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    image_url = base64.b64encode(buffer.getvalue()).decode("utf-8")

    text_prompt =  '''
        Analyze this PDF page content and convert it to markdown format. 
        Include all text content, maintain headings, lists, and table structures. 
        Igonore the images but only put the displacement in the content.
        Translate the formulas into LaTeX format.
        '''
    # Build the chat-completions request body.
    # NOTE(review): max_tokens=512 may truncate dense pages — confirm intended.
    url = "https://api.siliconflow.cn/v1/chat/completions"
    payload = {
        "model": "Qwen/Qwen2.5-VL-72B-Instruct",
        "stream": False,
        "max_tokens": 512,
        "enable_thinking": True,
        "thinking_budget": 512,
        "min_p": 0.05,
        "temperature": 0.7,
        "top_p": 0.7,
        "top_k": 50,
        "frequency_penalty": 0.5,
        "n": 1,
        "stop": [],
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "image_url": {
                            "detail": "auto",
                            "url": f"data:image/png;base64,{image_url}"
                            },
                        "type": "image_url"
                    },
                    {
                        "text": text_prompt,
                        "type": "text"
                    }
                ]
            }
        ]
    }
    # NOTE(security): hard-coded SiliconFlow API key — move to an env variable.
    headers = {
        "Authorization": "Bearer sk-edsqqcdsyjftrvxhxmhorylyieeeqrrwfujubikaxpvphqsq",
        "Content-Type": "application/json"
    }

    # Timeout prevents an unresponsive API from hanging the request forever;
    # raise_for_status surfaces API errors instead of a confusing KeyError.
    response = requests.post(url, json=payload, headers=headers, timeout=300)
    response.raise_for_status()

    print(response.text)
    return response.json()['choices'][0]['message']['content']


