# @FileName  : test0.py
# @Time      : 2025/3/14 09:07
# @Author    : LuZhaoHui
# @Software  : PyCharm


# 设置 API 密钥和端点
# apiUrl = "https://api.siliconflow.cn/v1/chat/completions"
# apiUrl2 = "https://api.siliconflow.cn/v1"
# apiKey = "sk-kitgmqyorzmxyipdujbjzwxorwzlfkuuxzsgbkgmaftpudiw"
# apiTextModel = "Qwen/Qwen2.5-7B-Instruct"
# apiVisionModel = "Pro/Qwen/Qwen2-VL-7B-Instruct"
#

# apiVisionModel = "Qwen/Qwen2-VL-72B-Instruct"


# apiVisionModel = "deepseek-ai/deepseek-vl2"

#
# def getHead():
#     return {
#         "Authorization": "Bearer %s" % apiKey,
#         "Content-Type": "application/json"
#     }
#
#
# def getText(userText):
#     # 构造文本请求体
#     return {
#         # 模型名称
#         "model": apiTextModel,
#         "messages": [
#             {
#                 "role": "user",
#                 "content": userText
#             }
#         ],
#         "stream": False,
#         "max_tokens": 512,
#         "stop": ["null"],
#         "temperature": 0.7,
#         "top_p": 0.7,
#         "top_k": 50,
#         "frequency_penalty": 0.5,
#         "n": 1,
#         "response_format": {"type": "text"},
#     }
#

# def getBase64(imagePath):
#     # 读取二进制数据
#     try:
#         with Image.open(imagePath) as img:
#             byte_arr = io.BytesIO()
#             img.save(byte_arr, format='webp')
#             byte_arr = byte_arr.getvalue()
#             base64_str = base64.b64encode(byte_arr).decode('utf-8')
#             return base64_str
#     except IOError:
#         print(f"Error: Unable to open or convert the image {imagePath}")
#         return None
#
#
# def resize_and_encode_image(image_path):
#     # 从 FileStorage 对象读取二进制数据
#     with open(image_path, 'rb') as file:  # use a context manager so the file handle is closed (original opened without ever closing)
#         image_data = file.read()
#     # 将二进制数据转换为 numpy 数组
#     image = np.frombuffer(image_data, np.uint8)
#     # 解码图像
#     image = cv2.imdecode(image, cv2.IMREAD_COLOR)
#     # # 获取原始尺寸
#     # original_height, original_width, _ = image.shape
#     # # 计算缩减一半后的尺寸
#     # new_width = original_width // 2
#     # new_height = original_height // 2
#     # # 调整图像尺寸
#     # resized_image = cv2.resize(image, (new_width, new_height))
#     # 将调整后的图像编码为 JPEG 格式
#     _, encoded_image = cv2.imencode('.jpg', image)
#     # 将编码后的图像转换为字节串
#     byte_data = encoded_image.tobytes()
#     # 进行 base64 编码
#     base64_image = base64.b64encode(byte_data).decode('utf-8')
#     # print(base64_image)
#     return base64_image

#
# def getVision(imagePath, queryText, systemText='图片信息解析'):
#     # 构造请求体
#     print(imagePath)
#     base64_image = resize_and_encode_image(imagePath)
#     messages = [
#         {
#             "role": "system",
#             "content": [
#                 {
#                     "type": "text",
#                     "text": systemText
#                 }
#             ]
#         }, {
#             "role": "user",
#             "content": [
#                 {
#                     "type": "text",
#                     "text": queryText
#                 },
#                 {
#                     "type": "image_url",
#                     "image_url": {
#                         "url": f"data:image/jpeg;base64,{base64_image}",
#                     }
#                 }
#             ]
#         }
#     ]
#     return {
#         "model": apiVisionModel,
#         "messages": messages,
#         "stream": False,
#         "max_tokens": 1024,
#         "temperature": 0.7,
#         "top_p": 0.7,
#         "top_k": 50,
#         "presence_penalty": 0,
#         "frequency_penalty": 0,
#     }
#
#
# def test1(reqText):
#     result = None
#     try:
#         response = requests.post(apiUrl, json=getText(reqText), headers=getHead())  # fixed: was apiUrl1, an undefined name — the endpoint defined above is apiUrl (test2 below already uses it)
#         # response = requests.post(apiUrl, headers=getHead(), json=getTextDeepV3(reqText))
#         # 检查HTTP错误
#         response.raise_for_status()
#         result = response.json()
#     except requests.exceptions.RequestException as e:
#         print(f"请求失败: {e}")
#         return None
#     # 输出结果
#     if result:
#         # 假设响应格式为 {"choices": [{"message": {"content": "..."}}]}
#         generated_text = result["choices"][0]["message"]["content"]
#         print("生成的回复：\n", generated_text)
#         # saveLog(result, log='api.txt')
#
#
# def test2(imagePath, reqText):
#     try:
#         head = getHead()
#         data = getVision(imagePath, reqText)
#         response = requests.post(apiUrl, json=data, headers=head)
#         # 检查HTTP错误
#         response.raise_for_status()
#         result = response.json()
#     except requests.exceptions.RequestException as e:
#         print(f"请求失败: {e}")
#         result = None
#
#     if result:
#         # 假设响应格式为 {"choices": [{"message": {"content": "..."}}]}
#         generated_text = result["choices"][0]["message"]["content"]
#         print("生成的回复：\n", generated_text)
#         # saveLog(result, log='api.txt')
#

#

# def reasonOpenAI(imagePath, reqText):
#     client = OpenAI(
#         api_key=apiKey,
#         base_url=apiUrl2
#     )
#
#     data = [
#         {
#             "role": "system",
#             "content":
#                 [
#                     {
#                         "type": "text",
#                         "text": "图片信息解析"
#                     }
#                 ]
#         },
#         {
#             "role": "user",
#             "content": [
#                 {
#                     "type": "image_url",
#                     "image_url": {
#                         "url": f"data:image/jpeg;base64,{getBase64(imagePath)}",
#                         "detail": "low"
#                     }
#                 },
#                 {
#                     "type": "text",
#                     "text": reqText
#                 }
#             ]
#         }
#     ]
#
#     response = client.chat.completions.create(
#         model=apiVisionModel,
#         messages=data,
#         stream=False
#     )
#
#     result = response.choices[0].message.content
#     print("生成的回复：\n", result)

# for chunk in response:
#     chunk_message = chunk.choices[0].delta.content
#     print(chunk_message, end='', flush=True)


# if __name__ == '__main__':
#     print('siliconFlow test')
# initVar()
#     userPrompt = "你好,我是小白"
# userPrompt = "你好,我是小白,想知道如何学习siliconFlow的API调用"
#     test1(userPrompt)
# reqText = "需要你告诉我以下信息：姓名、别名、性别、出生日期、居民身份证编号、民族、婚姻状况、文化程度、籍贯"
# text = "获取图片的信息"
# image ='./image/card/1.jpg'
# reasonOpenAI('./image/card/1.jpg', reqText)
# test3('./image/hukou/微信图片_20250117165136.jpg', reqText)
# base641 = resize_and_encode_image(image)
# base642 = getBase64(image)
# test2(imagePath=image, reqText=text)

