from typing import Optional

from flask import Flask, request, jsonify
from flask_cors import CORS
import torch
import os
import tempfile
import time
from moviepy import VideoFileClip
from pydub import AudioSegment
from pydub.effects import normalize
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
import hmac
import hashlib
import base64
import uuid
import requests
import json
import logging

app = Flask(__name__)
CORS(app)  # Allow cross-origin requests from the front-end

# Agent API configuration constants.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a secrets store before sharing/deploying this file.
APP_KEY = "hengnaoZ73S0JFmwFucCyglcBgk"
APP_SECRET = "3imquvvjytpc7ln48frxdz4mk8v4m2zd"
AGENT_ID = "04eeb370-9458-49e4-ab66-05571da7c926"
BASE_URL = "https://www.das-ai.com"
MAX_RETRIES = 3  # attempts per call in call_das_ai_api
# Passed to requests' timeout parameter, which is in SECONDS, so this allows
# ~16.7 minutes per request; presumably milliseconds were intended -- TODO confirm.
TIMEOUT = 1000

# Logging configuration: DEBUG level, formatted records to a stream handler.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)


class MediaTranscriber:
    """Transcribe audio/video files to timestamped Chinese text with Whisper large-v3.

    Media is first converted to 16 kHz mono WAV and lightly enhanced
    (volume normalization + 300-3000 Hz band-pass), then transcribed in
    fixed-length segments.
    """

    def __init__(self):
        # Device and dtype selection: fp16 on GPU, fp32 on CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.torch_dtype = torch.float16 if "cuda" in self.device else torch.float32
        # Only touch the CUDA allocator cache when CUDA is actually available.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        print(f"使用设备: {self.device}, 数据类型: {self.torch_dtype}")

        # Load the Whisper model and assemble the ASR pipeline.
        print("开始加载 Whisper 模型...")
        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
            "openai/whisper-large-v3", torch_dtype=self.torch_dtype, device_map="auto", low_cpu_mem_usage=True
        )
        self.processor = AutoProcessor.from_pretrained("openai/whisper-large-v3")
        self.pipe = pipeline(
            "automatic-speech-recognition",
            model=self.model,
            tokenizer=self.processor.tokenizer,
            feature_extractor=self.processor.feature_extractor,
            torch_dtype=self.torch_dtype,
            model_kwargs={"use_flash_attention_2": False}
        )
        print("模型加载完成")

    def remove_file_with_retry(self, file_path, max_attempts=5, delay=0.5):
        """Delete file_path, retrying on PermissionError (e.g. Windows file locks).

        Args:
            file_path: path of the file to delete.
            max_attempts: how many times to try before giving up.
            delay: seconds to sleep between attempts.
        """
        for attempt in range(max_attempts):
            try:
                os.remove(file_path)
                print(f"成功删除: {file_path}")
                return
            except FileNotFoundError:
                # Already gone -- treat as success instead of crashing.
                return
            except PermissionError:
                if attempt < max_attempts - 1:
                    print(f"删除文件 {file_path} 失败，第 {attempt + 1} 次尝试，等待 {delay} 秒")
                    time.sleep(delay)
                else:
                    print(f"无法删除文件: {file_path}，可能被占用")

    def transcribe(self, input_path, output_txt="output.txt", segment_ms=30000):
        """Transcribe a media file and write timestamped text to output_txt.

        Args:
            input_path: audio or video file to transcribe.
            output_txt: path of the text file to write.
            segment_ms: processing segment length in milliseconds (default 30 s).

        Returns:
            The full transcription text (also saved to output_txt).

        Raises:
            ValueError: if segment_ms is not positive.
        """
        audio_path = None
        try:
            print(f"开始处理文件: {input_path}")
            audio_path = self._preprocess_media(input_path)
            audio = AudioSegment.from_file(audio_path)
            segment_length = segment_ms
            print(f"音频总长度: {len(audio)} 毫秒, 分段长度: {segment_length} 毫秒")
            if segment_length <= 0:
                raise ValueError("segment_length 必须大于 0")
            full_text = ""
            for i in range(0, len(audio), segment_length):
                segment = audio[i:i + segment_length]
                # Audio enhancement: normalize volume and band-pass 300-3000 Hz.
                segment = normalize(segment).low_pass_filter(3000).high_pass_filter(300)
                # Segment start in seconds, used to shift the pipeline's
                # per-segment timestamps onto the global timeline (previously
                # the written timestamps restarted at 0 for every segment).
                offset_s = i / 1000.0
                with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
                    segment.export(tmpfile.name, format="wav")
                    print(f"处理临时文件: {tmpfile.name}")
                    result = self.pipe(
                        tmpfile.name,
                        chunk_length_s=10,
                        batch_size=1,
                        return_timestamps=True,
                        generate_kwargs={
                            "language": "zh",
                            "task": "transcribe",
                            "temperature": 0,
                            "num_beams": 10,
                        }
                    )
                    print(f"转录结果: {result}")
                    if "chunks" in result:
                        for chunk in result["chunks"]:
                            start, end = chunk['timestamp']
                            # The pipeline may report None for the final chunk's
                            # end time; fall back to the segment's own length so
                            # formatting does not crash.
                            start = (start or 0.0) + offset_s
                            end = (len(segment) / 1000.0 if end is None else end) + offset_s
                            full_text += f"[{start:.2f}-{end:.2f}] {chunk['text']}\n"
                    else:
                        full_text += result["text"] + "\n"
                # The with-block already closed the handle; just delete the file.
                print(f"尝试删除: {tmpfile.name}")
                self.remove_file_with_retry(tmpfile.name)
            with open(output_txt, "w", encoding="utf-8") as f:
                f.write(full_text)
            print(f"转录结果已保存到: {output_txt}")
            return full_text  # returned for downstream agent processing
        except Exception as e:
            print(f"转录出错: {e}")
            raise
        finally:
            # Always clean up the intermediate WAV, even on failure.
            if audio_path and os.path.exists(audio_path):
                self.remove_file_with_retry(audio_path)

    def _preprocess_media(self, input_path):
        """Return a 16 kHz mono WAV path for input_path (extracting audio from video if needed)."""
        if input_path.lower().endswith(('.mp4', '.avi', '.mov', '.mkv')):
            return self._extract_audio_from_video(input_path)
        return self._convert_audio_format(input_path)

    def _extract_audio_from_video(self, video_path):
        """Extract a video's audio track into an enhanced mono 16 kHz WAV temp file."""
        print(f"从视频提取音频: {video_path}")
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
            video = VideoFileClip(video_path, target_resolution=(None, 360), fps_source="fps")
            try:
                audio = video.audio
                # -ac 1 / -ar 16000: mono, 16 kHz -- the format Whisper expects.
                audio.write_audiofile(tmpfile.name, codec='pcm_s16le', ffmpeg_params=["-ac", "1", "-ar", "16000"])
            finally:
                # Close the clip even if audio extraction fails (avoids leaking
                # the underlying ffmpeg reader process).
                video.close()
            enhanced_audio = AudioSegment.from_file(tmpfile.name)
            enhanced_audio = normalize(enhanced_audio).low_pass_filter(3000).high_pass_filter(300)
            enhanced_audio.export(tmpfile.name, format="wav")
            print(f"音频提取完成: {tmpfile.name}")
            return tmpfile.name

    def _convert_audio_format(self, input_path):
        """Convert any audio file to an enhanced mono 16 kHz WAV in a temp file."""
        print(f"转换音频格式: {input_path}")
        audio = AudioSegment.from_file(input_path)
        audio = audio.set_frame_rate(16000).set_channels(1)
        audio = normalize(audio).low_pass_filter(3000).high_pass_filter(300)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
            audio.export(tmpfile.name, format="wav")
            print(f"音频转换完成: {tmpfile.name}")
            return tmpfile.name


# 智能体相关函数
def generate_sign(key: str, secret: str) -> str:
    """Build the API auth signature: millisecond timestamp prefix followed by
    the base64-encoded HMAC-SHA256 digest of "timestamp\\n{secret}\\n{key}"."""
    ts_ms = int(time.time() * 1000)
    message = f"{ts_ms}\n{secret}\n{key}".encode('utf-8')
    digest = hmac.new(secret.encode('utf-8'), message, hashlib.sha256).digest()
    encoded = base64.b64encode(digest).decode('utf-8').strip()
    sign = f"{ts_ms}{encoded}"
    logger.debug(f"生成签名: timestamp={ts_ms}, sign={sign}")
    return sign


def call_das_ai_api(payload: dict, session_id: str) -> tuple[Optional[dict], Optional[str]]:
    """POST payload to the DAS AI agent-execute endpoint with retries.

    Args:
        payload: JSON-serializable request body.
        session_id: conversation id, also sent as the X-Session-Id header.

    Returns:
        (result_dict, None) on success, (None, error_message) on failure.
    """
    url = f"{BASE_URL}/open/api/v2/agent/execute"
    last_error = "未知错误"

    for attempt in range(MAX_RETRIES):
        # Rebuild headers on every attempt so a retry does not send a stale
        # timestamped signature (the server presumably rejects old signs --
        # TODO confirm against the API's replay window).
        headers = {
            "appKey": APP_KEY,
            "sign": generate_sign(APP_KEY, APP_SECRET),
            "Content-Type": "application/json",
            "Accept": "application/json",
            "X-Session-Id": session_id
        }
        try:
            logger.debug(
                f"发送请求到 {url}, 尝试 {attempt + 1}/{MAX_RETRIES}, payload={json.dumps(payload, ensure_ascii=False)}")
            response = requests.post(url, headers=headers, json=payload, timeout=TIMEOUT)
            response.raise_for_status()
            result = response.json()
            logger.debug(f"收到响应: {json.dumps(result, ensure_ascii=False, indent=2)}")
            return result, None
        # requests' JSONDecodeError subclasses BOTH json.JSONDecodeError and
        # RequestException (requests >= 2.27), so this clause must come first;
        # otherwise a malformed response body is retried as a "network" error
        # and the raw response text is never logged.
        except json.JSONDecodeError as json_err:
            error_msg = f"响应解析错误: {str(json_err)}, 原始响应: {response.text}"
            logger.error(error_msg)
            return None, error_msg
        except requests.exceptions.RequestException as req_err:
            last_error = f"网络请求错误 (尝试 {attempt + 1}/{MAX_RETRIES}): {str(req_err)}"
            logger.warning(last_error)
            if attempt == MAX_RETRIES - 1:
                return None, last_error

    # Defensive fallback: only reachable if MAX_RETRIES <= 0, but it keeps the
    # declared tuple return type honest instead of returning a bare None.
    return None, last_error


def parse_assistant_response(result: dict) -> tuple[str, bool]:
    """Extract the assistant's reply from an API result dict.

    Returns:
        (reply_text, True) when the API reported success (code == 0),
        otherwise (error_message, False). The reply is looked up in
        session.messages first, then data.content, then data.answer.
    """
    # A missing 'code' is treated as failure.
    if result.get('code', 1) != 0:
        error_msg = result.get('msg', '未知错误')
        logger.warning(f"API 返回错误: {error_msg}")
        return error_msg, False

    data = result.get('data', {})
    session = data.get('session', {})
    if 'messages' in session:
        # Walk the conversation backwards and take the newest assistant turn.
        reply = '无响应'
        for msg in reversed(session['messages']):
            if msg.get('role') == 'assistant':
                reply = msg['content']
                break
        logger.debug(f"从 session.messages 解析回复: {reply}")
    elif 'content' in data:
        reply = data['content']
        logger.debug(f"从 data.content 解析回复: {reply}")
    else:
        reply = data.get('answer', '无响应')
        logger.debug(f"从 data.answer 或默认值解析回复: {reply}")

    return reply, True


def process_with_agent(transcription: str) -> str:
    """Submit transcription text to the agent and return its reply.

    On API or parsing failure, a human-readable error string is returned
    instead of raising, so the HTTP handler can still respond.
    """
    session_id = str(uuid.uuid4())  # fresh conversation per request
    payload = {
        "id": AGENT_ID,
        "sid": session_id,
        "input": transcription,
        "stream": False
    }

    print(f"提交转录文本给智能体处理, session_id: {session_id}")
    result, error = call_das_ai_api(payload, session_id)
    if error is not None:
        print(f"智能体处理失败: {error}")
        return f"转录文本处理失败: {error}"

    reply, ok = parse_assistant_response(result)
    if not ok:
        print(f"智能体返回错误: {reply}")
        return f"转录文本处理错误: {reply}"

    print(f"智能体处理结果: {reply}")
    return reply


# Initialize the transcriber once at import time. NOTE: this loads the Whisper
# model immediately, so process startup is slow and memory-heavy by design.
transcriber = MediaTranscriber()


@app.route('/transcribe', methods=['POST'])
def transcribe_media():
    """Accept an uploaded media file, transcribe it, and post-process with the agent.

    Form fields:
        file: the audio/video file to transcribe (required).
        output_file: name of the transcript text file (optional, default output.txt).

    Returns:
        JSON with the agent-processed text and output file name, or an error
        payload with HTTP 400 (bad request) / 500 (processing failure).
    """
    input_path = None
    try:
        # Validate the upload.
        if 'file' not in request.files:
            print("请求错误: 未提供文件")
            return jsonify({'error': 'No file provided'}), 400

        file = request.files['file']
        if file.filename == '':
            print("请求错误: 未选择文件")
            return jsonify({'error': 'No file selected'}), 400

        # SECURITY: keep only the base name of the client-supplied filename so
        # a crafted name like "../../etc/cron.d/x" cannot escape the uploads
        # folder (normalize backslashes first for Windows-style separators).
        safe_name = os.path.basename(file.filename.replace('\\', '/'))
        if not safe_name:
            return jsonify({'error': 'Invalid filename'}), 400

        # Save the uploaded file.
        upload_folder = 'uploads'
        os.makedirs(upload_folder, exist_ok=True)
        input_path = os.path.join(upload_folder, safe_name)
        file.save(input_path)
        print(f"文件已保存到: {input_path}")

        # Optional output file name, sanitized for the same reason (this field
        # previously allowed an arbitrary-path file write).
        requested_output = request.form.get('output_file', 'output.txt')
        output_file = os.path.basename(requested_output.replace('\\', '/')) or 'output.txt'
        print(f"输出文件设置为: {output_file}")

        # Transcribe, then hand the text to the agent.
        transcription = transcriber.transcribe(input_path, output_file)
        processed_text = process_with_agent(transcription)

        # Read back the raw transcription (the file is kept for the caller).
        with open(output_file, 'r', encoding='utf-8') as f:
            original_transcription = f.read()
        print(f"转录结果读取完成，长度: {len(original_transcription)} 字符")

        return jsonify({
            'status': 'success',
            'processed_text': processed_text,  # agent-processed text
            'output_file': output_file
        })

    except Exception as e:
        print(f"处理请求时出错: {e}")
        return jsonify({'error': str(e)}), 500
    finally:
        # Remove the uploaded file even when transcription fails (previously
        # a failure left the upload behind in the uploads folder).
        if input_path and os.path.exists(input_path):
            transcriber.remove_file_with_retry(input_path)


if __name__ == '__main__':
    print("启动 Flask 服务...")
    # SECURITY NOTE(review): debug=True combined with host='0.0.0.0' exposes
    # the Werkzeug interactive debugger to the whole network (remote code
    # execution risk) -- disable debug or bind to localhost in production.
    app.run(debug=True, host='0.0.0.0', port=5000)