from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
import aiofiles
import uuid
from pathlib import Path
import logging
from datetime import datetime
import torch
import torchaudio
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import numpy as np
import io
import librosa

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create the FastAPI application
app = FastAPI(
    title="AI Meeting Minutes API",
    description="真实AI会议纪要API - 使用Hugging Face Whisper",
    version="1.0.0"
)

# Configure CORS (wide open — NOTE(review): tighten allow_origins for production)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create required directories
UPLOAD_DIR = Path("uploads")
UPLOAD_DIR.mkdir(exist_ok=True)
MODEL_DIR = Path("models")  # used as Hugging Face cache_dir for downloaded weights
MODEL_DIR.mkdir(exist_ok=True)

# Serve uploaded audio files as static content
app.mount("/uploads", StaticFiles(directory="uploads"), name="uploads")

# Globals holding the Whisper processor/model, populated at startup
whisper_processor = None
whisper_model = None

# In-memory storage (stands in for a database; contents are lost on restart)
meetings_storage = {}
transcripts_storage = {}
minutes_storage = {}

def _load_model(model_name: str) -> str:
    """Load Whisper processor + model for *model_name* into the module globals.

    Returns:
        str: The device ("cuda" or "cpu") the model was moved to.
    """
    global whisper_processor, whisper_model

    whisper_processor = WhisperProcessor.from_pretrained(model_name, cache_dir=MODEL_DIR)
    whisper_model = WhisperForConditionalGeneration.from_pretrained(model_name, cache_dir=MODEL_DIR)

    # Inference only — disable dropout etc.
    whisper_model.eval()

    # Fixed: was the hard-coded "cuda:9", which raises on any machine with
    # fewer than 10 GPUs and spuriously forced the small-model fallback.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    whisper_model.to(device)
    return device


def load_whisper_model():
    """Load a Hugging Face Whisper model, preferring large-v3 with a base fallback.

    Populates the module-level ``whisper_processor`` / ``whisper_model`` globals.

    Returns:
        bool: True when some model was loaded successfully, False otherwise.
    """
    try:
        logger.info("正在加载Hugging Face Whisper模型...")
        device = _load_model("openai/whisper-large-v3")
        logger.info(f"Whisper模型加载成功，使用设备: {device}")
        return True
    except Exception as e:
        logger.error(f"模型加载失败: {e}")

    # Large model failed (download / memory); fall back to the small base model.
    try:
        logger.info("尝试加载小型Whisper模型...")
        device = _load_model("openai/whisper-base")
        logger.info(f"小型Whisper模型加载成功，使用设备: {device}")
        return True
    except Exception as e2:
        logger.error(f"小型模型加载也失败: {e2}")
        return False

def _word_segments(transcription: str, duration: float):
    """Split *transcription* into per-word segments with evenly spread timestamps."""
    words = transcription.split()
    # Guard against empty transcriptions (was a ZeroDivisionError).
    per_word = duration / max(len(words), 1)
    return [
        {
            "text": word,
            "timestamp": round(i * per_word, 2),
            "confidence": 0.95  # placeholder confidence; model gives none here
        }
        for i, word in enumerate(words)
    ]


def _sentence_segments(transcription: str, duration: float):
    """Split *transcription* on Chinese full stops into sentence segments."""
    sentences = transcription.split('。')
    per_sentence = duration / max(len(sentences), 1)
    return [
        {
            "text": sentence.strip() + '。',
            "timestamp": round(i * per_sentence, 2),
            "confidence": 0.95
        }
        for i, sentence in enumerate(sentences)
        if sentence.strip()
    ]


def transcribe_audio_file(audio_file_path: Path, language: str = "chinese"):
    """Transcribe an audio file with the loaded Hugging Face Whisper model.

    Args:
        audio_file_path: Path to the audio file; .mp3 is decoded via librosa,
            everything else via torchaudio. All audio is resampled to 16 kHz.
        language: Language name forwarded to Whisper's decoder prompt.

    Returns:
        list[dict]: Segments with "text", "timestamp" (seconds) and
        "confidence" keys. Timestamps are evenly interpolated, not model output.

    Raises:
        HTTPException: 500 when decoding or transcription fails.
    """
    try:
        logger.info(f"开始转录音频文件: {audio_file_path}")

        # Decode audio; Whisper expects 16 kHz mono float samples.
        if audio_file_path.suffix.lower() == '.mp3':
            audio, sr = librosa.load(str(audio_file_path), sr=16000)
        else:
            waveform, sr = torchaudio.load(str(audio_file_path))
            if sr != 16000:
                waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)
            audio = waveform.squeeze().numpy()
            # Fixed: sr must reflect the post-resample rate, otherwise the
            # duration (and every timestamp) was scaled by the wrong factor.
            sr = 16000

        duration = len(audio) / sr  # total audio length in seconds

        # Extract log-mel input features.
        input_features = whisper_processor(
            audio,
            sampling_rate=16000,
            return_tensors="pt"
        )

        # Run on whatever device the model lives on.
        device = next(whisper_model.parameters()).device
        input_features = input_features.input_features.to(device)

        with torch.no_grad():
            # Force the target language / transcription task.
            forced_decoder_ids = whisper_processor.get_decoder_prompt_ids(
                language=language,
                task="transcribe"
            )

            predicted_ids = whisper_model.generate(
                input_features,
                forced_decoder_ids=forced_decoder_ids,
                max_new_tokens=448,
                do_sample=False,
                temperature=0.0
            )

        transcription = whisper_processor.batch_decode(
            predicted_ids,
            skip_special_tokens=True
        )[0]

        # Primary strategy: per-word segments with interpolated timestamps.
        # (The previous unused output_word_offsets decode was dropped — its
        # result was never read.)
        try:
            segments = _word_segments(transcription, duration)
            logger.info(f"转录完成，共 {len(segments)} 个片段")
            return segments
        except Exception as e:
            # Fallback: coarser sentence-level segments.
            logger.warning(f"时间戳提取失败，使用简单分割: {e}")
            return _sentence_segments(transcription, duration)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"音频转录失败: {e}")
        raise HTTPException(status_code=500, detail=f"音频转录失败: {str(e)}")

@app.on_event("startup")
async def startup_event():
    """Load the Whisper model once when the server starts."""
    if load_whisper_model():
        logger.info("模型加载成功，服务准备就绪")
    else:
        # The API stays up so health/list endpoints remain reachable.
        logger.error("模型加载失败，但服务将继续运行")

@app.get("/")
async def root():
    """Service banner: name, version, and whether the model is loaded."""
    model_ready = whisper_model is not None
    return {
        "message": "AI Meeting Minutes API - Real Hugging Face Whisper",
        "version": "1.0.0",
        "status": "running",
        "model_loaded": model_ready,
    }

@app.post("/api/v1/audio/upload")
async def upload_audio(
    file: UploadFile = File(...),
    title: str = Form(...),
    language: str = Form(default="chinese"),
    template: str = Form(default="MeetingSecretary")
):
    """Upload an audio file and register an in-memory meeting record.

    The file is buffered fully in memory, capped at 100 MB, and written to
    UPLOAD_DIR under a name derived from a fresh meeting UUID.

    Returns:
        dict: Upload confirmation with the new meeting_id and file metadata.

    Raises:
        HTTPException: 400 for non-audio content or oversize files,
            500 for unexpected failures.
    """
    try:
        # Reject anything the client did not declare as audio/*.
        if not file.content_type or not file.content_type.startswith('audio/'):
            raise HTTPException(status_code=400, detail="请上传音频文件")

        # Read the whole upload, then enforce the 100 MB cap.
        # (Removed the dead `file_size = 0` pre-assignment.)
        content = await file.read()
        file_size = len(content)
        if file_size > 100 * 1024 * 1024:
            raise HTTPException(status_code=400, detail="文件大小不能超过100MB")

        # Collision-free id also used as the stored file's base name.
        meeting_id = str(uuid.uuid4())

        file_extension = Path(file.filename).suffix if file.filename else '.webm'
        file_path = UPLOAD_DIR / f"{meeting_id}{file_extension}"

        async with aiofiles.open(file_path, 'wb') as f:
            await f.write(content)

        # Register the meeting in the in-memory store.
        meetings_storage[meeting_id] = {
            "id": meeting_id,
            "title": title,
            "language": language,
            "template": template,
            "audio_file_path": str(file_path),
            "audio_file_name": file.filename,
            "audio_file_size": file_size,
            "status": "uploaded",
            "created_at": datetime.now().isoformat()
        }

        logger.info(f"音频文件上传成功: {meeting_id}")

        return {
            "message": "文件上传成功",
            "meeting_id": meeting_id,
            "file_name": file.filename,
            "file_size": file_size,
            "file_path": f"/uploads/{meeting_id}{file_extension}"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"文件上传失败: {e}")
        raise HTTPException(status_code=500, detail=f"文件上传失败: {str(e)}")

@app.post("/api/v1/audio/transcribe/{meeting_id}")
async def transcribe_audio(meeting_id: str):
    """Run Whisper transcription for an uploaded meeting and store the result.

    Raises:
        HTTPException: 404 when the meeting is unknown, 503 when the model is
            not loaded, 500 when transcription itself fails.
    """
    # Validate preconditions before touching the meeting's status.
    if meeting_id not in meetings_storage:
        raise HTTPException(status_code=404, detail="会议不存在")

    meeting = meetings_storage[meeting_id]

    if whisper_model is None:
        raise HTTPException(status_code=503, detail="模型未加载，请稍后再试")

    meeting["status"] = "processing"
    logger.info(f"开始转录会议: {meeting_id}")

    try:
        # Run the actual (blocking) transcription.
        audio_file_path = Path(meeting["audio_file_path"])
        transcripts = transcribe_audio_file(audio_file_path, meeting["language"])

        transcripts_storage[meeting_id] = transcripts
        meeting["status"] = "completed"

        logger.info(f"音频转录完成: {meeting_id}，共 {len(transcripts)} 个片段")

        return {
            "message": "转录完成",
            "meeting_id": meeting_id,
            "transcripts": transcripts
        }

    except HTTPException:
        # Fixed: previously a failed transcription left the meeting stuck
        # in "processing" forever.
        meeting["status"] = "failed"
        raise
    except Exception as e:
        meeting["status"] = "failed"
        logger.error(f"转录失败: {e}")
        raise HTTPException(status_code=500, detail=f"转录失败: {str(e)}")

@app.get("/api/v1/audio/transcripts/{meeting_id}")
async def get_transcripts(meeting_id: str):
    """Return the stored transcript segments for a meeting.

    Raises:
        HTTPException: 404 when no transcript exists for *meeting_id*.
    """
    try:
        if meeting_id not in transcripts_storage:
            raise HTTPException(status_code=404, detail="转录数据不存在")

        return {
            "meeting_id": meeting_id,
            "transcripts": transcripts_storage[meeting_id]
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取转录失败: {e}")
        # Dropped the pointless f-prefix: the detail has no placeholders.
        raise HTTPException(status_code=500, detail="获取转录失败")

@app.post("/api/v1/minutes/generate")
async def generate_minutes(
    meeting_id: str,
    template: str = Form(default="MeetingSecretary"),
    custom_prompt: str = Form(default="")
):
    """Generate meeting minutes from the stored transcript.

    Currently returns a placeholder structure built from the transcript text;
    the real LLM call (Kimi API) is still a TODO. ``custom_prompt`` is accepted
    for forward compatibility but not used yet.

    Raises:
        HTTPException: 404 for an unknown meeting, 400 when no transcript
            exists yet, 500 for unexpected failures.
    """
    try:
        # Both the meeting and its transcript must exist first.
        if meeting_id not in meetings_storage:
            raise HTTPException(status_code=404, detail="会议不存在")

        if meeting_id not in transcripts_storage:
            raise HTTPException(status_code=400, detail="请先完成音频转录")

        meeting = meetings_storage[meeting_id]
        transcripts = transcripts_storage[meeting_id]

        # Flatten the segments into "[t s] speaker: text" lines.
        transcript_text = "\n".join([
            f"[{item['timestamp']}s] {item.get('speaker', 'Unknown')}: {item['text']}"
            for item in transcripts
        ])

        # TODO: call the Kimi API here; for now return a fixed mock structure.
        mock_minutes = {
            "title": meeting["title"],
            "summary": f"基于转录文本生成的会议概要。会议共转录了 {len(transcripts)} 个片段。",
            "keyPoints": [
                "会议主要讨论了相关议题",
                "形成了具体的行动计划",
                "安排了后续工作内容"
            ],
            "decisions": [
                "会议达成了重要共识",
                "确定了下一步工作计划"
            ],
            "actionItems": [
                {
                    "description": "完成相关工作",
                    "assignee": "待分配",
                    "dueDate": "",
                    "priority": "medium"
                }
            ],
            "content": transcript_text,
            "template": template,
            "created_at": datetime.now().isoformat()
        }

        minutes_storage[meeting_id] = mock_minutes

        logger.info(f"会议纪要生成完成: {meeting_id}")

        return {
            "message": "纪要生成成功",
            "meeting_id": meeting_id,
            "minutes": mock_minutes
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"生成纪要失败: {e}")
        # Dropped the pointless f-prefix: the detail has no placeholders.
        raise HTTPException(status_code=500, detail="生成纪要失败")

@app.get("/api/v1/minutes/{meeting_id}")
async def get_minutes(meeting_id: str):
    """Return the stored minutes document for a meeting.

    Raises:
        HTTPException: 404 when no minutes exist for *meeting_id*.
    """
    try:
        if meeting_id not in minutes_storage:
            raise HTTPException(status_code=404, detail="会议纪要不存在")

        return minutes_storage[meeting_id]

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取纪要失败: {e}")
        # Dropped the pointless f-prefix: the detail has no placeholders.
        raise HTTPException(status_code=500, detail="获取纪要失败")

@app.get("/api/v1/meetings")
async def get_meetings():
    """List all meetings, newest first, annotated with transcript/minutes state."""
    try:
        # Enrich each meeting record with derived status fields.
        meetings = [
            {
                **meeting_data,
                "has_transcripts": meeting_id in transcripts_storage,
                "has_minutes": meeting_id in minutes_storage,
                "transcript_count": len(transcripts_storage.get(meeting_id, [])),
                "minutes_status": "completed" if meeting_id in minutes_storage else "pending"
            }
            for meeting_id, meeting_data in meetings_storage.items()
        ]

        # Newest first (ISO-8601 strings sort chronologically).
        meetings.sort(key=lambda m: m["created_at"], reverse=True)

        return {
            "meetings": meetings,
            "total": len(meetings)
        }

    except Exception as e:
        logger.error(f"获取会议列表失败: {e}")
        # Dropped the pointless f-prefix: the detail has no placeholders.
        raise HTTPException(status_code=500, detail="获取会议列表失败")

@app.delete("/api/v1/meetings/{meeting_id}")
async def delete_meeting(meeting_id: str):
    """Delete a meeting, its audio file, transcript, and minutes (idempotent).

    Deleting an unknown meeting_id still returns success, matching the
    idempotent semantics of DELETE.
    """
    try:
        # Remove the meeting record (if any) and its audio file on disk.
        meeting = meetings_storage.pop(meeting_id, None)
        if meeting and "audio_file_path" in meeting:
            file_path = Path(meeting["audio_file_path"])
            if file_path.exists():
                file_path.unlink()

        # Remove derived data; pop with default keeps this idempotent too.
        transcripts_storage.pop(meeting_id, None)
        minutes_storage.pop(meeting_id, None)

        logger.info(f"会议删除成功: {meeting_id}")

        return {"message": "会议已删除"}

    except Exception as e:
        logger.error(f"删除会议失败: {e}")
        # Dropped the pointless f-prefix: the detail has no placeholders.
        raise HTTPException(status_code=500, detail="删除会议失败")

@app.get("/api/v1/health")
async def health_check():
    """Report liveness, in-memory store sizes, and model/device status."""
    model_ready = whisper_model is not None
    device = str(next(whisper_model.parameters()).device) if model_ready else "none"
    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "meetings_count": len(meetings_storage),
        "transcripts_count": len(transcripts_storage),
        "minutes_count": len(minutes_storage),
        "whisper_model_loaded": model_ready,
        "device": device,
    }

if __name__ == "__main__":
    import uvicorn
    # Run the ASGI server directly when executed as a script.
    uvicorn.run(
        "real_main:app",
        host="0.0.0.0",
        port=8000,
        reload=False,  # production mode: no auto-reload
        log_level="info"
    )