from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
import aiofiles
import uuid
from pathlib import Path
import logging
from datetime import datetime
import torch
import torchaudio
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import numpy as np
import librosa
import os
from dotenv import load_dotenv

# Load environment variables from a .env file
load_dotenv()

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# HTTP client imports
import httpx
import json
from typing import Dict, Any

# Kimi (Moonshot) chat-completions endpoint and API key.
KIMI_API_URL = "https://api.moonshot.cn/v1/chat/completions"
KIMI_API_KEY = os.getenv("KIMI_API_KEY", "").strip()  # API key read from the environment (populated by load_dotenv above)

# Startup diagnostics: report whether a key is configured without leaking it
# (only the first 8 characters are ever printed).
print(f"🔑 KIMI_API_KEY已配置: {'是' if KIMI_API_KEY else '否'}")
if KIMI_API_KEY:
    print(f"🔑 API密钥前8位: {KIMI_API_KEY[:8]}...")
print(f"🌐 API地址: {KIMI_API_URL}")

async def call_kimi_api(transcript_text: str, template: str, meeting_title: str) -> str:
    """Generate meeting minutes from a transcript via the Kimi (Moonshot) API.

    Args:
        transcript_text: Full transcript text of the meeting.
        template: Minutes-template name, embedded into the prompt.
        meeting_title: Meeting title, embedded into the prompt.

    Returns:
        The generated minutes text (content of the first choice).

    Raises:
        ValueError: If KIMI_API_KEY is not configured.
        RuntimeError: If the HTTP call fails or the API returns an error
            (subclass of Exception, so existing ``except Exception`` callers
            still catch it).
    """
    if not KIMI_API_KEY:
        raise ValueError("未配置KIMI_API_KEY环境变量")

    # Build the Chinese prompt asking for structured, readable minutes.
    prompt = f"""请根据以下会议转录内容，生成一份专业的会议纪要。

会议标题：{meeting_title}
使用模板：{template}

转录内容：
{transcript_text}

请生成结构化的会议纪要，包括：
1. 会议基本信息
2. 主要议题讨论
3. 关键决策事项
4. 行动计划（责任人、截止时间）
5. 下次会议安排

请用中文回复，格式清晰易读。"""

    headers = {
        "Authorization": f"Bearer {KIMI_API_KEY}",
        "Content-Type": "application/json"
    }

    data = {
        "model": "moonshot-v1-8k",
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "temperature": 0.3,  # low temperature: favor consistent, factual output
        "max_tokens": 2000
    }

    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(KIMI_API_URL, headers=headers, json=data)
            response.raise_for_status()

            result = response.json()
            return result["choices"][0]["message"]["content"]

    except httpx.HTTPStatusError as e:
        logger.error(f"Kimi API HTTP错误: {e.response.status_code} - {e.response.text}")
        # Chain the cause so the original HTTP error is preserved in tracebacks.
        raise RuntimeError(f"Kimi API调用失败: {e.response.status_code}") from e
    except Exception as e:
        logger.error(f"Kimi API调用异常: {e}")
        raise RuntimeError(f"Kimi API调用异常: {str(e)}") from e

def generate_local_minutes(transcript_text: str, template: str, meeting_title: str) -> str:
    """Generate a fallback Markdown meeting-minutes document locally.

    Used when the Kimi API call fails. Fills a fixed skeleton with the title,
    current timestamp, and template name, and appends at most the first 500
    characters of the transcript for reference.

    Args:
        transcript_text: Raw transcript text; may be empty.
        template: Template name echoed into the document.
        meeting_title: Title used as the top-level heading.

    Returns:
        A Markdown string, or a short notice when there is no transcript.
    """
    if not transcript_text or transcript_text == "暂无转录内容":
        return "暂无转录内容，无法生成会议纪要。"

    # Note: the "主要议题讨论" line previously contained a mojibake-corrupted
    # character sequence (`���议`); restored to the intended `会议`.
    return f"""# {meeting_title}

## 会议基本信息
- 时间：{datetime.now().strftime('%Y年%m月%d日 %H:%M')}
- 模板：{template}

## 主要议题讨论
根据转录内容，会议讨论了相关议题。

## 关键决策事项
- 会议达成了相关决策

## 行动计划
- 需要跟进相关事项

## 下次会议安排
- 待定

*注：这是系统生成的会议纪要，建议人工核对和完善。*

---
*原始转录内容：*
{transcript_text[:500]}{'...' if len(transcript_text) > 500 else ''}
"""

# Create the FastAPI application
app = FastAPI(
    title="AI Meeting Minutes API",
    description="简化的智能会议纪要API",
    version="1.0.0"
)

# CORS: wide open for development.
# NOTE(review): "*" origins together with allow_credentials=True is very
# permissive — tighten before any production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Ensure the upload directory exists before mounting it below.
UPLOAD_DIR = Path("uploads")
UPLOAD_DIR.mkdir(exist_ok=True)

# Serve uploaded audio files back at /uploads/<name>.
app.mount("/uploads", StaticFiles(directory="uploads"), name="uploads")

# Whisper processor/model are populated at startup by load_whisper_model().
whisper_processor = None
whisper_model = None

# GPU device selection — change this index to target a different CUDA device.
PREFERRED_GPU_DEVICE = 9  # use CUDA device 9 when available

# In-memory stores keyed by meeting id (stand-in for a database; all data is
# lost on process restart).
meetings_storage = {}
transcripts_storage = {}
minutes_storage = {}

def _select_device() -> str:
    """Pick the torch device string: preferred GPU, cuda:0 fallback, or CPU."""
    if not torch.cuda.is_available():
        return "cpu"
    if torch.cuda.device_count() > PREFERRED_GPU_DEVICE:
        return f"cuda:{PREFERRED_GPU_DEVICE}"
    logger.warning(f"GPU设备{PREFERRED_GPU_DEVICE}不可用，系统只有{torch.cuda.device_count()}个GPU设备，使用cuda:0")
    return "cuda:0"


def load_whisper_model():
    """Load the Hugging Face Whisper model into the module globals.

    Tries openai/whisper-large-v3 first; on any failure falls back to the
    much smaller openai/whisper-base. Device selection (preferred GPU /
    cuda:0 / CPU) is shared between both attempts via _select_device().

    Returns:
        True when a model was loaded successfully, False when both loads fail.
    """
    global whisper_processor, whisper_model

    try:
        logger.info("正在加载Hugging Face Whisper模型...")

        model_name = "openai/whisper-large-v3"

        whisper_processor = WhisperProcessor.from_pretrained(model_name)
        whisper_model = WhisperForConditionalGeneration.from_pretrained(model_name)
        whisper_model.eval()  # inference only — disable dropout etc.

        device = _select_device()
        whisper_model.to(device)

        logger.info(f"Whisper模型加载成功，使用设备: {device}")
        return True

    except Exception as e:
        logger.error(f"模型加载失败: {e}")
        # Fallback: the large model may not fit (or download may fail);
        # retry with the small base model.
        try:
            logger.info("尝试加载小型Whisper模型...")
            model_name = "openai/whisper-base"

            whisper_processor = WhisperProcessor.from_pretrained(model_name)
            whisper_model = WhisperForConditionalGeneration.from_pretrained(model_name)
            whisper_model.eval()

            device = _select_device()
            whisper_model.to(device)

            logger.info(f"小型Whisper模型加载成功，使用设备: {device}")
            return True

        except Exception as e2:
            logger.error(f"小型模型加载也失败: {e2}")
            return False

def transcribe_audio_file(
    audio_file_path: Path,
    language: str = "chinese"
):
    """Transcribe an audio file with the globally loaded Whisper model.

    Decodes the file (librosa for MP3, torchaudio for wav/m4a/ogg/flac),
    normalizes it to 16 kHz mono, truncates to Whisper's 30-second input
    window, runs greedy decoding, and returns a result dict with the text
    and basic metadata.

    Args:
        audio_file_path: Path to the audio file on disk.
        language: Language label echoed into the result (not forwarded to the
            model — Whisper auto-detects the language here).

    Returns:
        Dict with keys: text, confidence, duration, word_count,
        word_timestamps, language, model.

    Raises:
        HTTPException: 400 for audio decoding problems, 500 for anything else.
    """
    try:
        logger.info(f"开始转录音频文件: {audio_file_path}")

        logger.info(f"处理音频文件: {audio_file_path}, 格式: {audio_file_path.suffix}")

        try:
            if audio_file_path.suffix.lower() == '.mp3':
                # librosa decodes and resamples to 16 kHz mono in one step.
                audio, sr = librosa.load(str(audio_file_path), sr=16000)
                logger.info(f"MP3文件加载成功，采样率: {sr}, 音频长度: {len(audio)}")
            elif audio_file_path.suffix.lower() in ['.wav', '.m4a', '.ogg', '.flac']:
                waveform, sr = torchaudio.load(str(audio_file_path))
                logger.info(f"音频文件加载成功，原始采样率: {sr}, 波形形状: {waveform.shape}")

                if sr != 16000:
                    resampler = torchaudio.transforms.Resample(sr, 16000)
                    waveform = resampler(waveform)
                    logger.info(f"重采样到16000Hz完成，新波形形状: {waveform.shape}")

                # Downmix multi-channel audio to mono: squeeze() alone would
                # leave stereo input as a 2-D array, which the processor would
                # misinterpret as a batch of two signals.
                if waveform.dim() > 1 and waveform.size(0) > 1:
                    waveform = waveform.mean(dim=0, keepdim=True)

                audio = waveform.squeeze().numpy()
                logger.info(f"转换为numpy数组完成，长度: {len(audio)}")
            else:
                raise ValueError(f"不支持的音频格式: {audio_file_path.suffix}")

            # Every branch above yields 16 kHz audio; use that rate from here
            # on (the original code divided by the pre-resample rate when
            # computing the duration, overstating it for e.g. 44.1 kHz input).
            sr = 16000

            if len(audio) == 0:
                raise ValueError("音频数据为空")

            # Whisper consumes a 30-SECOND input window per pass:
            # 30 * 16000 samples at 16 kHz is 30 seconds (the old comment and
            # log message incorrectly said 30 minutes).
            max_length = 30 * 16000
            if len(audio) > max_length:
                audio = audio[:max_length]
                logger.info(f"音频过长，截取前30秒")

            logger.info("准备Whisper处理器输入...")
            input_features = whisper_processor(
                audio,
                sampling_rate=16000,
                return_tensors="pt"
            )
            logger.info(f"输入特征准备完成，形状: {input_features.input_features.shape}")

        except Exception as audio_error:
            logger.error(f"音频处理失败: {audio_error}")
            raise HTTPException(status_code=400, detail=f"音频处理失败: {str(audio_error)}")

        # Run on whatever device the model currently lives on.
        device = next(whisper_model.parameters()).device
        logger.info(f"使用设备: {device}")
        input_features = input_features.input_features.to(device)

        with torch.no_grad():
            logger.info("开始Whisper模型推理...")

            # Greedy decoding, language auto-detected by the model.
            try:
                predicted_ids = whisper_model.generate(
                    input_features,
                    max_new_tokens=1024,  # allow longer transcriptions
                    do_sample=False,
                    temperature=0.0,
                    num_beams=1,
                    return_timestamps=False  # timestamps disabled to avoid decoder errors
                )
                logger.info(f"Whisper推理完成，生成token长度: {predicted_ids.shape}")
            except Exception as model_error:
                logger.error(f"Whisper模型推理失败: {model_error}")
                raise Exception(f"模型推理失败: {model_error}")

        transcription = whisper_processor.batch_decode(
            predicted_ids,
            skip_special_tokens=True
        )[0]

        # Whitespace word count — a rough metric, especially for Chinese text.
        words = transcription.split() if transcription else []

        result = {
            "text": transcription,
            "confidence": 0.95,  # fixed placeholder; model provides no score here
            "duration": len(audio) / sr,  # sr is 16000 for all decode paths
            "word_count": len(words),
            "word_timestamps": [],  # intentionally empty (timestamps disabled)
            "language": language,
            # NOTE(review): hard-coded label — not updated when the
            # whisper-base fallback model was loaded instead.
            "model": "whisper-large-v3"
        }

        logger.info(f"转录完成，共 {len(words)} 个词")
        return result

    except Exception as e:
        logger.error(f"音频转录失败: {e}")
        raise HTTPException(status_code=500, detail=f"音频转录失败: {str(e)}")

@app.on_event("startup")
async def startup_event():
    """Log GPU availability and load the Whisper model when the app boots.

    NOTE(review): @app.on_event is deprecated in newer FastAPI in favor of
    lifespan handlers — consider migrating when upgrading.
    """
    if torch.cuda.is_available():
        gpu_total = torch.cuda.device_count()
        logger.info(f"检测到 {gpu_total} 个GPU设备")
        for idx in range(gpu_total):
            logger.info(f"GPU {idx}: {torch.cuda.get_device_name(idx)}")
        logger.info(f"配置使用GPU设备: {PREFERRED_GPU_DEVICE}")
    else:
        logger.info("未检测到GPU设备，将使用CPU运行")

    # The service stays up even if no model could be loaded; transcription
    # endpoints report 503 in that case.
    if load_whisper_model():
        logger.info("模型加载成功，服务准备就绪")
    else:
        logger.error("模型加载失败，但服务将继续运行")

@app.get("/")
async def root():
    """Landing endpoint: service name, version, and liveness status."""
    info = {
        "message": "AI Meeting Minutes API",
        "version": "1.0.0",
        "status": "running",
    }
    return info

@app.post("/api/v1/audio/upload")
async def upload_audio(
    file: UploadFile = File(...),
    title: str = Form(...),
    language: str = Form(default="chinese"),
    template: str = Form(default="MeetingSecretary")
):
    """Upload a meeting audio file and register a new meeting record.

    Validates the content type and size (<=100MB), stores the file under
    uploads/<uuid><ext>, and records the meeting metadata in memory.

    Returns:
        JSON with the new meeting_id and the stored file's name/size/path.

    Raises:
        HTTPException: 400 for invalid type/size, 500 for unexpected errors.
    """
    try:
        # Reject anything that doesn't advertise an audio content type.
        if not file.content_type or not file.content_type.startswith('audio/'):
            raise HTTPException(status_code=400, detail="请上传音频文件")

        # Read the whole payload once; its length is the file size.
        # (The previous dead `file_size = 0` pre-assignment was removed.)
        content = await file.read()
        file_size = len(content)

        if file_size > 100 * 1024 * 1024:  # 100MB limit
            raise HTTPException(status_code=400, detail="文件大小不能超过100MB")

        # The meeting id doubles as the stored file's basename, so the path
        # is safe regardless of the client-supplied filename.
        meeting_id = str(uuid.uuid4())

        file_extension = Path(file.filename).suffix if file.filename else '.webm'
        file_path = UPLOAD_DIR / f"{meeting_id}{file_extension}"

        async with aiofiles.open(file_path, 'wb') as f:
            await f.write(content)

        meetings_storage[meeting_id] = {
            "id": meeting_id,
            "title": title,
            "language": language,
            "template": template,
            "audio_file_path": str(file_path),
            "audio_file_name": file.filename,
            "audio_file_size": file_size,
            "status": "uploaded",
            "created_at": datetime.now().isoformat()
        }

        logger.info(f"音频文件上传成功: {meeting_id}")

        return {
            "message": "文件上传成功",
            "meeting_id": meeting_id,
            "file_name": file.filename,
            "file_size": file_size,
            "file_path": f"/uploads/{meeting_id}{file_extension}"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"文件上传失败: {e}")
        raise HTTPException(status_code=500, detail=f"文件上传失败: {str(e)}")

@app.post("/api/v1/audio/transcribe")
async def transcribe_audio_with_file(
    file: UploadFile = File(...),
    meeting_id: str = Form(None)
):
    """Upload and transcribe an audio file in one call.

    Creates a meeting on the fly when no meeting_id is supplied, saves the
    audio under uploads/, runs Whisper transcription, and appends the
    transcript segment to the in-memory store.

    Raises:
        HTTPException: 400 for invalid type/size, 500 on transcription failure
            (the meeting is then marked "failed").
    """
    try:
        if not file.content_type or not file.content_type.startswith('audio/'):
            raise HTTPException(status_code=400, detail="请上传音频文件")

        content = await file.read()
        file_size = len(content)
        if file_size > 100 * 1024 * 1024:  # 100MB
            raise HTTPException(status_code=400, detail="文件大小不能超过100MB")

        # Without a meeting_id, register a fresh meeting. Use uuid4 rather
        # than a timestamp so concurrent requests cannot collide on the same
        # id — consistent with the /audio/upload endpoint.
        if not meeting_id:
            meeting_id = str(uuid.uuid4())
            meetings_storage[meeting_id] = {
                "id": meeting_id,
                "title": file.filename or "音频转录",
                "description": f"上传文件: {file.filename}",
                "status": "processing",
                "created_at": datetime.now().isoformat(),
                "updated_at": datetime.now().isoformat(),
                "duration": 0
            }

        file_extension = file.filename.split('.')[-1] if file.filename else 'wav'
        audio_filename = f"{meeting_id}.{file_extension}"
        audio_path = Path("uploads") / audio_filename

        Path("uploads").mkdir(exist_ok=True)

        # Async write, consistent with the upload endpoint (the previous
        # version blocked the event loop with a synchronous open/write).
        async with aiofiles.open(audio_path, 'wb') as f:
            await f.write(content)

        meetings_storage[meeting_id]["status"] = "processing"

        logger.info(f"开始转录音频: {audio_filename}")
        result = transcribe_audio_file(audio_path)

        meetings_storage[meeting_id]["status"] = "completed"
        meetings_storage[meeting_id]["updated_at"] = datetime.now().isoformat()

        transcript = {
            "id": str(uuid.uuid4()),  # collision-free segment id
            "meeting_id": meeting_id,
            "text": result["text"],
            "confidence": result.get("confidence"),
            "timestamp": datetime.now().isoformat(),
            "start_time": 0,
            "end_time": result.get("duration", 0)
        }

        transcripts_storage.setdefault(meeting_id, []).append(transcript)

        logger.info(f"音频转录完成: {meeting_id}")
        return {
            "id": transcript["id"],
            "meeting_id": meeting_id,
            "text": result["text"],
            "confidence": result.get("confidence"),
            "timestamp": transcript["timestamp"]
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"音频转录失败: {e}")
        # Mark the meeting as failed so clients can see the terminal state.
        if meeting_id and meeting_id in meetings_storage:
            meetings_storage[meeting_id]["status"] = "failed"
            meetings_storage[meeting_id]["updated_at"] = datetime.now().isoformat()
        raise HTTPException(status_code=500, detail=f"音频转录失败: {str(e)}")

@app.post("/api/v1/audio/transcribe/{meeting_id}")
async def transcribe_audio(meeting_id: str):
    """Transcribe the previously uploaded audio of an existing meeting.

    Raises:
        HTTPException: 404 if the meeting doesn't exist, 503 if the model is
            not loaded, 500 on transcription failure (meeting marked "failed").
    """
    try:
        if meeting_id not in meetings_storage:
            raise HTTPException(status_code=404, detail="会议不存在")

        meeting = meetings_storage[meeting_id]
        meeting["status"] = "processing"

        if whisper_model is None:
            raise HTTPException(status_code=503, detail="模型未加载，请稍后再试")

        audio_file_path = Path(meeting["audio_file_path"])
        result = transcribe_audio_file(audio_file_path, meeting["language"])

        # Store a one-element LIST: the upload-based endpoint stores lists and
        # /minutes/generate only understands lists, so storing the raw result
        # dict here (as before) made minutes generation see no transcript.
        transcripts = [result]
        transcripts_storage[meeting_id] = transcripts

        meeting["status"] = "completed"

        logger.info(f"音频转录完成: {meeting_id}，共 {len(transcripts)}个片段")

        return {
            "message": "转录完成",
            "meeting_id": meeting_id,
            "transcripts": transcripts
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"转录失败: {e}")
        # Leave a terminal state instead of a dangling "processing".
        if meeting_id in meetings_storage:
            meetings_storage[meeting_id]["status"] = "failed"
        raise HTTPException(status_code=500, detail=f"转录失败: {str(e)}")

@app.get("/api/v1/audio/transcripts/{meeting_id}")
async def get_transcripts(meeting_id: str):
    """Return the stored transcript entries for a meeting (404 if absent)."""
    try:
        stored = transcripts_storage.get(meeting_id)
        if stored is None:
            raise HTTPException(status_code=404, detail="转录数据不存在")

        return {"meeting_id": meeting_id, "transcripts": stored}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取转录失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取转录失败")

@app.post("/api/v1/minutes/generate")
async def generate_minutes(request_data: dict):
    """Generate meeting minutes from a meeting's stored transcript.

    Expects a JSON body with "meeting_id" and optional "template" /
    "custom_prompt". Tries the Kimi API first and falls back to a locally
    generated template document when the API call fails.

    Raises:
        HTTPException: 400 for missing id or missing transcript, 404 for an
            unknown meeting, 500 for unexpected errors.
    """
    try:
        meeting_id = request_data.get("meeting_id")
        template = request_data.get("template", "MeetingSecretary")
        # NOTE(review): custom_prompt is accepted but never forwarded to the
        # LLM call — confirm whether it should extend the Kimi prompt.
        custom_prompt = request_data.get("custom_prompt", "")

        if not meeting_id:
            raise HTTPException(status_code=400, detail="缺少meeting_id参数")

        if meeting_id not in meetings_storage:
            raise HTTPException(status_code=404, detail="会议不存在")

        if meeting_id not in transcripts_storage:
            raise HTTPException(status_code=400, detail="请先完成音频转录")

        meeting = meetings_storage[meeting_id]
        transcripts = transcripts_storage[meeting_id]

        # Join ALL transcript segments. The previous version used only the
        # first segment, silently dropping the rest of the meeting.
        segments = []
        if isinstance(transcripts, list):
            for seg in transcripts:
                if isinstance(seg, dict) and 'text' in seg:
                    segments.append(seg['text'])
                else:
                    segments.append(str(seg))
        transcript_text = "\n".join(s for s in segments if s) or "暂无转录内容"

        try:
            minutes_content = await call_kimi_api(transcript_text, template, meeting["title"])
            logger.info(f"Kimi API调用成功，生成了会议纪要")
        except Exception as llm_error:
            logger.error(f"调用Kimi API失败: {llm_error}")
            # Fallback: deterministic local template when the API is down.
            minutes_content = generate_local_minutes(transcript_text, template, meeting["title"])

        minutes_data = {
            "id": meeting_id,
            "meeting_id": meeting_id,
            "content": minutes_content,
            "template": template,
            "created_at": datetime.now().isoformat(),
            "updated_at": datetime.now().isoformat(),
            "status": "completed"
        }

        minutes_storage[meeting_id] = minutes_data

        logger.info(f"会议纪要生成完成: {meeting_id}")

        return {
            "message": "纪要生成成功",
            "meeting_id": meeting_id,
            "minutes": minutes_data
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"生成纪要失败: {e}")
        raise HTTPException(status_code=500, detail=f"生成纪要失败")

@app.get("/api/v1/minutes/{meeting_id}")
async def get_minutes(meeting_id: str):
    """Return the stored minutes record for a meeting (404 if absent)."""
    try:
        minutes = minutes_storage.get(meeting_id)
        if minutes is None:
            raise HTTPException(status_code=404, detail="会议纪要不存在")

        return minutes

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取纪要失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取纪要失败")

@app.post("/api/v1/meetings")
async def create_meeting(meeting_data: dict):
    """Create a new (empty) meeting record.

    The JSON body may contain "title" and "description"; missing fields get
    defaults. Returns the stored meeting dict.
    """
    try:
        # uuid4 instead of a timestamp: timestamp ids collide when two
        # meetings are created in the same instant, and uuid matches the id
        # scheme used by the audio upload endpoint.
        meeting_id = str(uuid.uuid4())

        meeting = {
            "id": meeting_id,
            "title": meeting_data.get("title", "未命名会议"),
            "description": meeting_data.get("description", ""),
            "status": "created",
            "created_at": datetime.now().isoformat(),
            "updated_at": datetime.now().isoformat(),
            "duration": 0
        }

        meetings_storage[meeting_id] = meeting

        logger.info(f"创建会议成功: {meeting_id}")
        return meeting

    except Exception as e:
        logger.error(f"创建会议失败: {e}")
        raise HTTPException(status_code=500, detail=f"创建会议失败")

@app.get("/api/v1/meetings")
async def get_meetings():
    """List all meetings, newest first, annotated with transcript/minutes state."""
    try:
        meetings = [
            {
                **data,
                "has_transcripts": mid in transcripts_storage,
                "has_minutes": mid in minutes_storage,
                "transcript_count": len(transcripts_storage.get(mid, [])),
                "minutes_status": "completed" if mid in minutes_storage else "pending",
            }
            for mid, data in meetings_storage.items()
        ]

        # Newest meetings first.
        meetings.sort(key=lambda m: m["created_at"], reverse=True)

        return {"meetings": meetings, "total": len(meetings)}

    except Exception as e:
        logger.error(f"获取会议列表失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取会议列表失败")

@app.get("/api/v1/meetings/{meeting_id}")
async def get_meeting(meeting_id: str):
    """Return one meeting's details plus its transcripts and minutes."""
    try:
        stored = meetings_storage.get(meeting_id)
        if stored is None:
            raise HTTPException(status_code=404, detail="会议不存在")

        # Copy before augmenting so the stored record stays clean.
        detail = dict(stored)
        detail["transcripts"] = transcripts_storage.get(meeting_id, [])
        detail["minutes"] = minutes_storage.get(meeting_id)

        return detail

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取会议详情失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取会议详情失败")

@app.delete("/api/v1/meetings/{meeting_id}")
async def delete_meeting(meeting_id: str):
    """Delete a meeting plus its audio file, transcripts, and minutes.

    Idempotent: deleting an unknown id still returns success.
    """
    try:
        meeting = meetings_storage.pop(meeting_id, None)
        if meeting is not None and "audio_file_path" in meeting:
            # Remove the audio file from disk along with the record.
            audio_path = Path(meeting["audio_file_path"])
            if audio_path.exists():
                audio_path.unlink()

        transcripts_storage.pop(meeting_id, None)
        minutes_storage.pop(meeting_id, None)

        logger.info(f"会议删除成功: {meeting_id}")

        return {"message": "会议已删除"}

    except Exception as e:
        logger.error(f"删除会议失败: {e}")
        raise HTTPException(status_code=500, detail=f"删除会议失败")

@app.get("/api/v1/health")
async def health_check():
    """Report service health: storage counts, model state, and GPU info."""
    model_loaded = whisper_model is not None
    current_device = str(next(whisper_model.parameters()).device) if model_loaded else "none"
    cuda_ok = torch.cuda.is_available()

    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "meetings_count": len(meetings_storage),
        "transcripts_count": len(transcripts_storage),
        "minutes_count": len(minutes_storage),
        "whisper_model_loaded": model_loaded,
        "current_device": current_device,
        "preferred_gpu_device": PREFERRED_GPU_DEVICE,
        "available_gpu_count": torch.cuda.device_count() if cuda_ok else 0,
        "gpu_available": cuda_ok,
    }

if __name__ == "__main__":
    import uvicorn
    # Development server entry point; the module must be importable as
    # "simple_main" for the reload-capable import string to resolve.
    uvicorn.run(
        "simple_main:app",
        host="0.0.0.0",
        port=8000,
        reload=True,  # auto-reload on code changes (development only)
        log_level="info"
    )