import sys
import os

# Add the project root (one directory above this script) to the Python
# path so the local boson_multimodal package can be imported.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
sys.path.insert(0, project_root)

from transformers import pipeline
from openai import OpenAI
from boson_multimodal.serve.serve_engine import HiggsAudioServeEngine, HiggsAudioResponse
from boson_multimodal.data_types import ChatMLSample, Message, AudioContent
import torch
import torchaudio

def img2text(url):
    """Generate an English caption for an image.

    Uses the Salesforce BLIP image-captioning model through the
    transformers ``pipeline`` API.

    Args:
        url (str): Path or URL of the image to caption.

    Returns:
        str: The generated English caption (also printed to stdout).
    """
    # Building the pipeline loads the whole BLIP model, which is expensive.
    # Cache it on the function object so repeated calls reuse one instance.
    if not hasattr(img2text, "_pipeline"):
        img2text._pipeline = pipeline(
            "image-to-text", model="Salesforce/blip-image-captioning-base"
        )
    text = img2text._pipeline(url)[0]['generated_text']
    print(text)
    return text

def chat_with_ai(question):
    """Ask the DeepSeek-R1 model a question via Alibaba Cloud DashScope's
    OpenAI-compatible endpoint.

    Requires the ``DASHSCOPE_API_KEY`` environment variable to be set.

    Args:
        question (str): The question to send to the model.

    Returns:
        dict: ``{'reasoning': str | None, 'answer': str}`` containing the
        model's reasoning trace (when the API exposes it) and its final
        answer.
    """
    client = OpenAI(
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )

    completion = client.chat.completions.create(
        model="deepseek-r1",
        messages=[
            {'role': 'user', 'content': question}
        ]
    )

    message = completion.choices[0].message
    # ``reasoning_content`` is a DashScope/DeepSeek extension, not part of
    # the standard OpenAI message schema — fall back to None instead of
    # risking an AttributeError on SDK/endpoint version mismatches.
    reasoning = getattr(message, 'reasoning_content', None)
    answer = message.content

    print("思考过程：")
    print(reasoning)
    print("\n最终答案：")
    print(answer)

    return {
        'reasoning': reasoning,
        'answer': answer
    }

def text_to_speech(text, output_filename="story_audio.wav"):
    """Synthesize speech from text with the Higgs Audio v2 model.

    Args:
        text (str): The text to convert to speech.
        output_filename (str): Output WAV file name; defaults to
            "story_audio.wav".

    Returns:
        str: Path of the generated audio file (same as *output_filename*).
    """
    MODEL_PATH = "bosonai/higgs-audio-v2-generation-3B-base"
    AUDIO_TOKENIZER_PATH = "bosonai/higgs-audio-v2-tokenizer"

    system_prompt = (
        "Generate audio following instruction.\n\n<|scene_desc_start|>\nAudio is recorded from a quiet room.\n<|scene_desc_end|>"
    )

    messages = [
        Message(
            role="system",
            content=system_prompt,
        ),
        Message(
            role="user",
            content=text,
        ),
    ]

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"使用设备: {device}")

    # Instantiating the serve engine loads a 3B-parameter model — far too
    # expensive to repeat per call. Cache one engine per device on the
    # function object so subsequent calls reuse it.
    engines = getattr(text_to_speech, "_engines", None)
    if engines is None:
        engines = text_to_speech._engines = {}
    serve_engine = engines.get(device)
    if serve_engine is None:
        serve_engine = HiggsAudioServeEngine(MODEL_PATH, AUDIO_TOKENIZER_PATH, device=device)
        engines[device] = serve_engine

    print("正在生成语音...")
    output: HiggsAudioResponse = serve_engine.generate(
        chat_ml_sample=ChatMLSample(messages=messages),
        max_new_tokens=1024,
        temperature=0.3,
        top_p=0.95,
        top_k=50,
        stop_strings=["<|end_of_text|>", "<|eot_id|>"],
    )

    # torchaudio.save expects a (channels, samples) tensor; add a mono
    # channel dimension to the 1-D waveform returned by the engine.
    torchaudio.save(output_filename, torch.from_numpy(output.audio)[None, :], output.sampling_rate)
    print(f"语音文件已保存为: {output_filename}")

    return output_filename

def img_to_story(image_path, generate_audio=True):
    """Turn an image into a (optionally spoken) Chinese story.

    Pipeline: caption the image in English with ``img2text``, expand the
    caption into a short humorous Chinese story with ``chat_with_ai``, and
    optionally synthesize the story as speech with ``text_to_speech``.

    Args:
        image_path (str): Path to the input image file.
        generate_audio (bool): Whether to also produce a WAV file.
            Defaults to True.

    Returns:
        dict: Keys ``image_description``, ``story``, ``reasoning``, and
        ``audio_file`` (None when no audio was generated).
    """
    # Step 1: obtain an English caption of the image.
    print("=== 第一步：分析图片内容 ===")
    caption = img2text(image_path)

    # Step 2: ask the LLM to expand the caption into a short story.
    prompt = f"""你是一个很会讲故事的老人，下面context的内容是一个外国人说的一句英文，请你根据这句话延展出一个中文的故事，最好还能有点小幽默，字数在50到100个字。直接输出故事内容，不要有任何解释，不要有其他故事以外的文字。

context: {caption}"""

    print("\n=== 第二步：生成中文故事 ===")
    ai_result = chat_with_ai(prompt)
    story = ai_result['answer']

    # Step 3 (optional): synthesize the story as speech.
    audio_path = None
    if generate_audio:
        print("\n=== 第三步：生成语音 ===")
        audio_path = text_to_speech(story, "generated_story.wav")

    return {
        'image_description': caption,
        'story': story,
        'reasoning': ai_result['reasoning'],
        'audio_file': audio_path
    }

# Demo: run the image-to-story pipeline.
# Guarded with __name__ so that merely importing this module does not
# trigger the expensive model downloads/loads and API calls.
if __name__ == "__main__":
    print("\n" + "="*50)
    print("开始图片转故事功能测试")
    print("="*50)
    result = img_to_story("girl_with_pearl_earring_input.png")
    print(f"\n完整流程已完成！")
    print(f"故事内容: {result['story']}")
    if result['audio_file']:
        print(f"语音文件: {result['audio_file']}")