"""Send a local image to the ModelScope inference API (OpenAI-compatible
chat-completions endpoint) and print the model's response.

Review notes on the original script:
- removed unused imports (``openai``, ``modelscope``, ``PIL``) and a
  duplicate ``import base64``, plus dead commented-out code;
- the API key was hard-coded in source (a leaked credential) — it is now
  read from the ``MODELSCOPE_API_KEY`` environment variable;
- the original POSTed a non-standard ``{"input": {"image": ...}}`` body to
  the bare ``/v1`` base URL; that URL is ModelScope's OpenAI-compatible
  service, so the request now targets ``/v1/chat/completions`` with a
  vision-style message.  NOTE(review): confirm the exact payload shape
  against the current ModelScope API-Inference documentation.
"""
import base64
import os

# Base URL of ModelScope's OpenAI-compatible inference service.
API_BASE = "https://api-inference.modelscope.cn/v1"

# Never commit credentials: read the key from the environment instead of
# hard-coding it in source.
API_KEY = os.environ.get("MODELSCOPE_API_KEY", "")

# Vision-language model to query.
MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"

# Path of the image to send.
IMAGE_PATH = "f:/Image/ocrTest/id0.jpg"


def encode_image(path):
    """Return the Base64 encoding (as UTF-8 text) of the file at *path*."""
    with open(path, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode("utf-8")


def build_payload(img_base64, model=MODEL):
    """Build an OpenAI-compatible chat-completions request body carrying
    the image as a data URL.

    Add a ``{"type": "text", "text": ...}`` entry to the content list to
    ask the model a question about the image.
    """
    return {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        # Assumes a JPEG input — adjust the MIME type for
                        # other formats.
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{img_base64}"
                        },
                    }
                ],
            }
        ],
    }


def main():
    """Encode IMAGE_PATH, POST it to the API, and print the result."""
    # Imported here so the module can be imported (and its helpers tested)
    # on machines without the third-party ``requests`` package.
    import requests

    payload = build_payload(encode_image(IMAGE_PATH))
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_KEY}",
    }
    # ``json=payload`` lets requests serialize the body itself; ``timeout``
    # keeps the call from hanging forever on a dead connection.
    response = requests.post(
        f"{API_BASE}/chat/completions",
        headers=headers,
        json=payload,
        timeout=60,
    )

    if response.status_code == 200:
        result = response.json()
        print("推理结果:", result)
    else:
        print(f"请求失败，状态码: {response.status_code}, 错误信息: {response.text}")


if __name__ == "__main__":
    main()
