# @FileName  : api.py
# @Time      : 2025/2/28 11:25
# @Author    : LuZhaoHui
# @Software  : PyCharm

from file import *  # NOTE(review): project-local wildcard import kept first so later imports keep shadowing precedence

import ast
import base64
import io

import cv2
import numpy as np
import requests
from openai import OpenAI
from PIL import Image


class modelApi():
    """Client for a chat-completion style model service.

    Supports two transports -- raw HTTP via ``requests`` and the OpenAI
    SDK -- for both plain-text and vision (image + text) inference, and
    handles image downscaling / base64 encoding for vision payloads.
    """

    def __init__(self, urlReq, urlOpen, key):
        """Store endpoints, credentials and default request settings.

        :param urlReq: URL used for raw HTTP POST requests.
        :param urlOpen: base URL handed to the OpenAI SDK client.
        :param key: API key; a falsy value sends no Authorization header.
        """
        self.baseUrlRequest = urlReq
        self.baseUrlOpenAI = urlOpen
        self.apiKey = key
        self.isvLLM = False  # backend is a vLLM server (different payload shape)
        self.reqType = 'openai'  # transport selector: 'openai' SDK or raw requests
        self.reqStream = False  # ask the server for a streamed reply
        self.systemPrompt = '获取信息'
        self.isText = True  # current model is text-only (no vision input)
        self.imagePage = 'A4'  # page label for the size limit below
        self.imageSize = [2481, 3508]  # max [width, height] before downscaling
        self.curModel = 'Qwen/Qwen2.5-7B-Instruct'

    def setImagePage(self, image_page):
        """Set the image size limit from a literal string such as "[2481, 3508]".

        Fix: uses ast.literal_eval instead of eval so the incoming string
        can only contain Python literals, never executable code.
        """
        self.imagePage = image_page
        self.imageSize = list(ast.literal_eval(image_page))

    def setIsvLLM(self, isvLLM):
        # Toggle the vLLM-specific request format.
        self.isvLLM = isvLLM

    def setReqType(self, req_type):
        # Select the transport ('openai' SDK or raw HTTP).
        self.reqType = req_type

    def setReqStream(self, stream):
        # Enable/disable streamed responses.
        self.reqStream = stream

    def setSystemPrompt(self, prompt):
        # Replace the system prompt sent with every request.
        self.systemPrompt = prompt

    def setModel(self, model, is_text):
        # Switch the active model and record whether it is text-only.
        self.curModel = model
        self.isText = is_text

    def get_user_info(self):
        """Query the SiliconFlow account-info endpoint and return its JSON.

        NOTE(review): no raise_for_status here -- a non-JSON error page
        would make .json() raise; kept as-is to preserve behavior.
        """
        url = "https://api.siliconflow.cn/v1/user/info"
        headers = {"Authorization": f"Bearer {self.apiKey}"}
        response = requests.request("GET", url, headers=headers)
        print(response.text)
        return response.json()

    def getReqHead(self):
        """Build HTTP headers; the Authorization header is omitted when
        no API key is configured."""
        headers = {"Content-Type": "application/json"}
        if self.apiKey:
            headers["Authorization"] = f"Bearer {self.apiKey}"
        return headers

    def getReqText(self, reqText):
        """Build the raw-HTTP request body for a plain text query.

        :param reqText: user prompt text.
        :return: dict payload for the chat-completions endpoint.
        """
        return {
            # Model name currently selected via setModel().
            "model": self.curModel,
            "messages": [
                {
                    "role": "user",
                    "content": reqText
                }
            ],
            "stream": self.reqStream,
            "max_tokens": 1024,
            "stop": ["null"],
            "temperature": 0.7,
            "top_p": 0.7,
            "top_k": 50,
            "frequency_penalty": 0,  # frequency penalty
            "n": 1,
            "response_format": {"type": "text"},
        }

    def getVision(self, imagePath, queryText, systemText):
        """Build the raw-HTTP request body for a vision (image + text) query.

        :param imagePath: path to the image file; it is resized/encoded here.
        :param queryText: user question about the image.
        :param systemText: system prompt for the request.
        :return: dict payload for the chat-completions endpoint.
        """
        base64_image = self.resize_and_encode_image(imagePath)
        messages = [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": systemText
                    }
                ]
            }, {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": queryText
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}",
                        }
                    }
                ]
            }
        ]
        return {
            "model": self.curModel,
            "messages": messages,
            "stream": False,
            "max_tokens": 1024,
            "temperature": 0.7,
            "top_p": 0.7,
            "top_k": 50,
            "presence_penalty": 0,
            "frequency_penalty": 0,
            "n": 1,
            "response_format": {"type": "text"},
        }

    def getVision_vLLM(self, imagePath, queryText):
        """Build the vLLM-style request body for a vision query.

        Unlike getVision(), the vLLM payload uses a top-level "prompt"
        list and "image_url" is a bare data-URL string, not a dict.
        (Fix: removed a leftover debug print of the payload length.)
        """
        base64_image = self.resize_and_encode_image(imagePath)
        prompt = [
            {
                "type": "text",
                "text": queryText
            },
            {
                "type": "image_url",
                "image_url": f"data:image/jpeg;base64,{base64_image}",
            }
        ]
        return {
            "prompt": prompt,
            "stream": False,
            "max_tokens": 8192,
            "temperature": 0.01,
            "top_p": 0.9,
            "presence_penalty": 0.5,
            "frequency_penalty": 0.5,
        }

    def resize_and_encode_image(self, image_path):
        """Read an image file, shrink it to fit self.imageSize, and return
        a base64-encoded JPEG string.

        Aspect ratio is preserved; images already within the limit are only
        re-encoded to JPEG, not resized.

        :param image_path: path to the image file on disk.
        :return: base64-encoded JPEG bytes as a str.
        :raises ValueError: if the file cannot be decoded as an image.
        """
        with open(image_path, 'rb') as fh:
            image_data = fh.read()
        # Decode via OpenCV (handles any format imdecode supports).
        image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
        if image is None:
            # Fix: previously fell through to an opaque AttributeError on .shape.
            raise ValueError(f"Unable to decode image file: {image_path}")
        original_height, original_width, _ = image.shape
        aspect_ratio = float(original_width) / float(original_height)
        new_width = 0
        new_height = 0
        if original_height > self.imageSize[1] and original_width > self.imageSize[0]:
            # Both dimensions exceed the limit: fit width first, then clamp height.
            new_width = self.imageSize[0]
            new_height = int(new_width / aspect_ratio)
            if new_height > self.imageSize[1]:
                new_height = self.imageSize[1]
                new_width = int(new_height * aspect_ratio)
        elif original_height > self.imageSize[1]:
            new_height = self.imageSize[1]
            new_width = int(new_height * aspect_ratio)
        elif original_width > self.imageSize[0]:
            new_width = self.imageSize[0]
            new_height = int(new_width / aspect_ratio)
        if new_width > 0 and new_height > 0:
            # INTER_CUBIC chosen for best quality when shrinking.
            resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
            _, encoded_image = cv2.imencode('.jpg', resized_image)
        else:
            # Within limits: just transcode to JPEG.
            _, encoded_image = cv2.imencode('.jpg', image)
        return base64.b64encode(encoded_image.tobytes()).decode('utf-8')

    def _post_chat(self, payload):
        """POST *payload* to baseUrlRequest.

        :return: parsed JSON dict on success, or an error string on any
            requests-level failure (connection, timeout, HTTP status).
        """
        try:
            response = requests.post(self.baseUrlRequest, json=payload, headers=self.getReqHead())
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return f"请求失败: {e}"

    def reasonImageRequest_vLLM(self, imageName, reqText):
        """Vision inference against a vLLM server via raw HTTP.

        :return: reply text, or an error string on transport failure.
        """
        result = self._post_chat(self.getVision_vLLM(imageName, reqText))
        if isinstance(result, str):
            return result  # transport-level error message
        if result:
            return result["choices"][0]["message"]["content"]
        return result

    def reasonTextRequest(self, reqText):
        """Text inference via raw HTTP; prints and returns the reply text.

        :return: reply text, or an error string on transport failure.
        """
        result = self._post_chat(self.getReqText(reqText))
        if isinstance(result, str):
            return result  # transport-level error message
        if result:
            generated_text = result["choices"][0]["message"]["content"]
            print("生成的回复：\n", generated_text)
            return generated_text
        return result

    def _openai_chat(self, messages):
        """Send *messages* through the OpenAI SDK with the shared settings.

        :return: reply text, or an error string on any SDK failure.
        """
        client = OpenAI(
            api_key=self.apiKey,
            base_url=self.baseUrlOpenAI
        )
        try:
            response = client.chat.completions.create(
                model=self.curModel,
                messages=messages,
                stream=self.reqStream,
                max_tokens=1024,
                presence_penalty=0,
                frequency_penalty=0,
                temperature=0.7,
                top_p=0.7,
                n=1,
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"请求失败: {e}"

    def reasonTextOpenAI(self, reqText):
        """Text inference via the OpenAI SDK.

        :return: reply text, or an error string on failure.
        """
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": self.systemPrompt}]
            },
            {
                "role": "user",
                "content": [{"type": "text", "text": reqText}]
            }
        ]
        return self._openai_chat(messages)

    def reasonImageRequest(self, imageName, reqText):
        """Vision inference via raw HTTP using the current system prompt.

        :return: reply text, or an error string on transport failure.
        """
        result = self._post_chat(self.getVision(imageName, reqText, self.systemPrompt))
        if isinstance(result, str):
            return result  # transport-level error message
        if result:
            return result["choices"][0]["message"]["content"]
        return result

    def reasonImageOpenAI(self, imagePath, reqText):
        """Vision inference via the OpenAI SDK.

        :return: reply text, or an error string on failure.
        """
        base64_str = self.resize_and_encode_image(imagePath)
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": self.systemPrompt}]
            },
            {
                "role": "user",
                "content": [
                    # Image before the question -- order kept from the original payload.
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{base64_str}"}
                    },
                    {"type": "text", "text": reqText}
                ]
            }
        ]
        return self._openai_chat(messages)

    def getResultDict(self, result):
        """Normalize an API response into a flat result dict.

        :param result: parsed JSON dict from the API, or any non-dict value
            (typically an error string).
        :return: dict with keys 'id', 'object', 'created', 'model',
            'content', plus 'tokens' when the response carries usage info;
            for non-dict input only 'content' is filled with that value.
        """
        result_dict = {
            'id': '',
            'object': '',
            'created': 0,
            'model': '',
            'content': '',
        }
        if isinstance(result, dict):  # fix: was `type(result) == dict`
            result_dict['id'] = result['id']
            result_dict['object'] = result['object']
            result_dict['created'] = result['created']
            result_dict['model'] = result['model']
            result_dict['content'] = result['choices'][0]['message']['content']
            if 'usage' in result:
                result_dict['tokens'] = result['usage']['total_tokens']
        else:
            # Error strings (e.g. "请求失败: ...") pass straight through.
            result_dict['content'] = result
        return result_dict
