#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/10/29 11:42
# @Author  : kai
# @File    : report_llm.py
# @Software: PyCharm
"""
调用大模型分析体检报告并给出建议
"""

from itertools import cycle
from typing import Generator, Tuple

import pdfplumber
from openai import OpenAI
from django.conf import settings

# Round-robin iterator over candidate model names: each next(MODEL_LIST)
# yields the next model, wrapping back to the first after the last —
# a simple client-side way to spread requests across models.
# NOTE(review): module-level mutable iterator shared by all callers;
# presumably fine under typical Django request handling — confirm if
# strict per-request model affinity is ever required.
MODEL_LIST = cycle(
    ["HealthGPT-L14", "Baichuan-M2-32B", "medgemma-4b-it", "Lingshu-32B"]
)


def get_report_suggestion(report_file) -> str:
    """
    Analyze a medical examination report (PDF) with the LLM and return
    health suggestions.

    :param report_file: path or file-like object of a PDF examination report
    :return: the model's suggestion text, or a fallback message when the
             response contains no usable content
    """
    # Extract text page by page. pdfplumber's extract_text() returns None
    # for pages without an extractable text layer (e.g. scanned images),
    # so skip those instead of crashing on `None + "\n"`.
    page_texts = []
    with pdfplumber.open(report_file) as pdf:
        for page in pdf.pages:
            page_text = page.extract_text()
            if page_text:
                page_texts.append(page_text)
    text = "".join(page_text + "\n" for page_text in page_texts)

    client = OpenAI(
        base_url=settings.LLM_BASE_URL,
        api_key=settings.LLM_API_KEY,
    )

    response = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "你是专业医生。请仅根据以下体检报告内容，分点提出健康建议，不要重复或输出报告原文。建议应包括：1) 异常指标的关注点；2) 可能的健康风险；3) 改善建议（如饮食、运动等）。",
            },
            {
                "role": "user",
                "content": text,
            },
        ],
        model=next(MODEL_LIST),
        stream=False,
        max_tokens=1024,
        temperature=0.7,
        top_p=0.7,
        extra_body={
            "top_k": 50,
        },
        frequency_penalty=1,
    )

    # Guard against an empty choices list or a content-less message
    # (consistent with analyze_health_data) instead of risking an
    # IndexError or returning None from a function annotated -> str.
    if response.choices and response.choices[0].message.content:
        return response.choices[0].message.content
    return "无法生成分析结果"



def analyze_health_data(health_data: dict) -> str:
    """
    Analyze a user's health data with the LLM and return advice.

    :param health_data: dict that may contain the optional keys
        'body_records', 'eating_records', 'sleep_records' and
        'sports_records', each mapping to a list of record dicts
    :return: the model's analysis text, or a fallback message when the
        response contains no usable content
    """
    # Accumulate prompt fragments in a list and join once at the end.
    chunks = ["你是专业医生，请根据以下用户的健康数据进行分析并给出建议：\n\n"]

    if 'body_records' in health_data:
        chunks.append("身体指标记录：\n")
        for rec in health_data['body_records']:
            chunks.append(
                f"- 身高: {rec.get('height', 'N/A')}cm, 体重: {rec.get('weight', 'N/A')}kg, "
                f"血压: {rec.get('low_pressure', 'N/A')}/{rec.get('high_pressure', 'N/A')}mmHg, "
                f"血糖: {rec.get('blood_sugar', 'N/A')}mmol/L, 心率: {rec.get('heart_rate', 'N/A')}bpm\n"
            )

    if 'eating_records' in health_data:
        chunks.append("\n饮食记录：\n")
        for rec in health_data['eating_records']:
            chunks.append(
                f"- 食物: {rec.get('foods_name', 'N/A')}, 摄入量: {rec.get('intake', 'N/A')}, "
                f"卡路里: {rec.get('calorie', 'N/A')}kcal\n"
            )

    if 'sleep_records' in health_data:
        chunks.append("\n睡眠记录：\n")
        for rec in health_data['sleep_records']:
            chunks.append(
                f"- 入睡时间: {rec.get('sleep_time', 'N/A')}, 起床时间: {rec.get('wakeup_time', 'N/A')}, "
                f"睡眠时长: {rec.get('sleep_duration', 'N/A')}小时, 睡眠质量: {rec.get('sleep_quality', 'N/A')}\n"
            )

    if 'sports_records' in health_data:
        chunks.append("\n运动记录：\n")
        for rec in health_data['sports_records']:
            chunks.append(
                f"- 运动类型: {rec.get('type', 'N/A')}, 运动时长: {rec.get('during', 'N/A')}分钟, "
                f"运动距离: {rec.get('distance', 'N/A')}km, 卡路里: {rec.get('calorie', 'N/A')}kcal\n"
            )

    prompt = "".join(chunks)

    messages = [
        {
            "role": "system",
            "content": "你是专业医生，请根据用户的健康数据进行分析并给出建议。",
        },
        {
            "role": "user",
            "content": prompt,
        },
    ]

    client = OpenAI(
        base_url=settings.LLM_BASE_URL,
        api_key=settings.LLM_API_KEY,
    )

    response = client.chat.completions.create(
        messages=messages,
        model=next(MODEL_LIST),
        max_tokens=2048,
        temperature=0.7,
        top_p=0.7,
        extra_body={
            "top_k": 50,
        },
        frequency_penalty=1,
    )

    # Fall back to a fixed message when the model returned nothing usable.
    if response.choices and response.choices[0].message.content:
        return response.choices[0].message.content
    return "无法生成分析结果"