import dashscope
from dashscope import Generation, SpeechSynthesizer
import logging
import os
import uuid
from typing import Optional, Tuple
import aiofiles
import asyncio
from concurrent.futures import ThreadPoolExecutor
import wave
import io

from config import settings

logger = logging.getLogger(__name__)

class DashScopeService:
    """Service wrapper around the DashScope (Tongyi Qianwen) APIs.

    Provides async helpers for chat completion and text-to-speech, plus
    pure-Python utilities for estimating audio duration and generating
    per-sentence timestamps for avatar lip-sync/subtitles.

    The DashScope SDK is synchronous, so all SDK calls are offloaded to a
    thread pool via ``run_in_executor``.
    """

    # Fallback estimate used when real audio data is unavailable:
    # roughly 0.15 seconds of speech per character of (Chinese) text.
    _FALLBACK_SECONDS_PER_CHAR = 0.15

    # Voice presets keyed by poet id: (model, voice, speed, pitch).
    # Speed/pitch are tuned per persona (slower = more solemn/elegant).
    _POET_VOICES = {
        "liqingzhao": ("sambert-zhiyuan-v1", "zhiyuan", 0.9, 1.0),   # female, graceful
        "libai": ("sambert-zhihao-v1", "zhihao", 0.95, 1.0),         # male, bold
        "dufu": ("sambert-zhide-v1", "zhide", 0.85, 0.9),            # male, solemn
        "sushi": ("sambert-zhilun-v1", "zhilun", 0.95, 1.05),        # male, open-minded
        "taoyuanming": ("sambert-zhide-v1", "zhide", 0.85, 0.9),     # same preset as dufu
    }
    # Preset used for any poet id not in the table above.
    _DEFAULT_VOICE = ("sambert-zhide-v1", "zhide", 1.0, 1.0)

    def __init__(self):
        """Configure the DashScope API key and a worker pool for blocking SDK calls."""
        dashscope.api_key = settings.dashscope_api_key
        self.executor = ThreadPoolExecutor(max_workers=4)

    async def generate_response(self, message: str, system_prompt: Optional[str] = None) -> str:
        """Generate an AI chat reply.

        Args:
            message: The user's input message.
            system_prompt: Optional system prompt prepended to the conversation.

        Returns:
            str: The assistant's reply text.

        Raises:
            Exception: If the underlying API call fails.
        """
        try:
            # Build the chat message list.
            messages = []
            if system_prompt:
                messages.append({
                    'role': 'system',
                    'content': system_prompt
                })
            messages.append({
                'role': 'user',
                'content': message
            })

            # Offload the blocking SDK call to the thread pool.
            # get_running_loop() is the non-deprecated way to get the loop
            # from inside a coroutine (get_event_loop() is deprecated here).
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(
                self.executor,
                self._sync_generate_response,
                messages
            )

        except Exception as e:
            logger.error(f"生成回复失败: {str(e)}")
            raise Exception(f"AI回复生成失败: {str(e)}") from e

    def _sync_generate_response(self, messages: list) -> str:
        """Synchronously call the Tongyi Qianwen chat-completion API.

        Args:
            messages: Chat messages in DashScope's role/content dict format.

        Returns:
            str: The content of the first choice on success.

        Raises:
            Exception: On a non-200 API status.
        """
        response = Generation.call(
            model='qwen-turbo',  # qwen-turbo model (old comment wrongly said qwen-plus)
            messages=messages,
            result_format='message',
            stream=False,
            incremental_output=False,
            temperature=0.7,
            max_tokens=1000
        )

        if response.status_code == 200:
            return response.output.choices[0].message.content
        raise Exception(f"API调用失败: {response.message}")

    async def synthesize_speech(self, text: str, voice: str = "zhichu") -> Tuple[Optional[str], float, list]:
        """Synthesize speech for *text* and persist it as an MP3 file.

        Args:
            text: Text to synthesize.
            voice: Despite the name, this is the poet id used to pick a voice
                preset (see ``_POET_VOICES``); defaults to "zhichu".

        Returns:
            Tuple[Optional[str], float, list]: (audio URL, duration in seconds,
            sentence timestamp list). The URL is None when synthesis failed and
            the caller should fall back to browser-side TTS.
        """
        try:
            # Offload the blocking TTS call to the thread pool.
            loop = asyncio.get_running_loop()
            audio_data = await loop.run_in_executor(
                self.executor,
                self._sync_synthesize_speech,
                text,
                voice
            )

            if not audio_data:
                # Degrade gracefully to browser-side TTS.
                logger.warning("DashScope TTS失败，降级到前端浏览器TTS")
                return self._frontend_tts_fallback(text)

            # Persist the audio under a collision-free name.
            audio_filename = f"tts_{uuid.uuid4().hex}.mp3"
            audio_path = os.path.join(settings.audio_dir, audio_filename)
            async with aiofiles.open(audio_path, 'wb') as f:
                await f.write(audio_data)

            # Estimate duration, then derive per-sentence timestamps from it.
            duration = self._calculate_audio_duration(audio_data, text)
            timestamps = self._generate_smart_timestamps(text, duration)

            # Relative URL served by the web layer.
            audio_url = f"/audio/{audio_filename}"

            logger.info(f"语音合成成功: {audio_url}, 时长: {duration:.2f}秒")
            return audio_url, duration, timestamps

        except Exception as e:
            logger.error(f"语音合成失败: {str(e)}")
            # Degrade gracefully to browser-side TTS.
            return self._frontend_tts_fallback(text)

    def _frontend_tts_fallback(self, text: str) -> Tuple[None, float, list]:
        """Build the (None, duration, timestamps) triple for browser-side TTS."""
        duration = len(text) * self._FALLBACK_SECONDS_PER_CHAR
        timestamps = self._generate_smart_timestamps(text, duration)
        return None, duration, timestamps

    def _sync_synthesize_speech(self, text: str, voice: str) -> Optional[bytes]:
        """Synchronously call the DashScope speech-synthesis API.

        Args:
            text: Text to synthesize.
            voice: Poet id selecting a voice preset; unknown ids use the
                default preset. (Named ``voice`` for historical reasons — it
                is NOT a raw DashScope voice name. Passed through unchanged
                from synthesize_speech to avoid a circular import of the
                chat service.)

        Returns:
            Optional[bytes]: MP3 audio bytes on success, None on any failure
            (errors are logged, never raised, so callers can degrade to
            browser TTS).
        """
        try:
            # Map the poet id to (model, voice, speed, pitch).
            current_poet = voice
            model, voice, speed, pitch = self._POET_VOICES.get(
                current_poet, self._DEFAULT_VOICE
            )

            logger.info(f"为诗人 {current_poet} 选择语音模型: {model}, 语音: {voice}, 语速: {speed}, 音调: {pitch}")

            # Call the DashScope speech-synthesis API.
            response = SpeechSynthesizer.call(
                model=model,
                text=text,
                sample_rate=22050,
                format='mp3',
                voice=voice,
                speed=speed,
                pitch=pitch
            )

            # SDK versions differ in how they report status; probe defensively.
            if hasattr(response, 'status_code'):
                status_code = response.status_code
            elif hasattr(response, 'code'):
                status_code = response.code
            else:
                # No status field at all: assume success only if audio data
                # looks retrievable.
                status_code = 200 if hasattr(response, 'get_audio_data') else 400

            if status_code == 200:
                logger.info(f"DashScope TTS调用成功，文本长度: {len(text)}")
                if hasattr(response, 'get_audio_data'):
                    return response.get_audio_data()
                if hasattr(response, 'audio_data'):
                    return response.audio_data
                logger.error("响应中没有找到音频数据")
                return None

            error_msg = getattr(response, 'message', getattr(response, 'msg', '未知错误'))
            logger.error(f"DashScope TTS调用失败: {status_code}, {error_msg}")
            return None

        except Exception as e:
            logger.error(f"DashScope TTS调用异常: {str(e)}")
            return None

    def _calculate_audio_duration(self, audio_data: bytes, text: str) -> float:
        """Estimate audio duration in seconds from text length and data size.

        This is a heuristic, not a decode: it averages a text-rate estimate
        (~3.5 Chinese chars/sec) with a size-based estimate assuming ~10:1
        MP3 compression. A library like pydub/librosa would be exact.

        Args:
            audio_data: Raw MP3 bytes (may be empty).
            text: The synthesized text.

        Returns:
            float: Estimated duration, never below 0.5 seconds.
        """
        try:
            # Text-based estimate: Mandarin speech runs ~3-4 chars/sec.
            estimated_duration = len(text) / 3.5

            # Size-based estimate: 22050 Hz, 16-bit mono ≈ 44 KB/s raw;
            # assume ~10:1 MP3 compression → ~4.4 KB/s compressed.
            bytes_per_second = 22050 * 2
            compressed_bytes_per_second = bytes_per_second / 10

            if len(audio_data) > 0:
                data_based_duration = len(audio_data) / compressed_bytes_per_second
                # Average the two estimates.
                duration = (estimated_duration + data_based_duration) / 2
            else:
                duration = estimated_duration

            return max(duration, 0.5)  # never report less than half a second

        except Exception as e:
            logger.warning(f"音频时长计算失败: {str(e)}, 使用估算值")
            return len(text) * self._FALLBACK_SECONDS_PER_CHAR

    def _generate_smart_timestamps(self, text: str, total_duration: float) -> list:
        """Generate per-sentence timestamps weighted by predicted speech time.

        Args:
            text: Full text.
            total_duration: Total audio duration in seconds.

        Returns:
            list: ``[{"text": sentence, "start": s, "end": e}, ...]``.
        """
        try:
            sentences = self._split_text_into_sentences(text)
            if not sentences:
                return []

            # Relative weight per sentence (length, punctuation, complexity).
            sentence_weights = [self._calculate_sentence_weight(s) for s in sentences]

            total_weight = sum(sentence_weights)
            if total_weight == 0:
                # Degenerate case: distribute time evenly.
                sentence_weights = [1.0] * len(sentences)
                total_weight = len(sentences)

            # Allocate time proportionally to weight.
            timestamps = []
            current_time = 0.0
            for sentence, weight in zip(sentences, sentence_weights):
                sentence_duration = (weight / total_weight) * total_duration

                # Enforce a minimum readable duration. NOTE(review): with many
                # very short sentences this can push the schedule past
                # total_duration; the clamp below may then yield a last entry
                # whose end precedes its start — preserved from the original.
                sentence_duration = max(sentence_duration, 0.5)

                timestamps.append({
                    "text": sentence.strip(),
                    "start": round(current_time, 2),
                    "end": round(current_time + sentence_duration, 2)
                })
                current_time += sentence_duration

            # Pin the last sentence's end to the total duration.
            if timestamps:
                timestamps[-1]["end"] = round(total_duration, 2)

            logger.info(f"生成智能时间戳: {len(timestamps)}个句子")
            return timestamps

        except Exception as e:
            logger.warning(f"智能时间戳生成失败: {str(e)}, 使用简单分割")
            # Fall back to a naive even split on Chinese full stops.
            sentences = text.split('。')
            if not sentences or (len(sentences) == 1 and not sentences[0].strip()):
                sentences = [text]

            sentence_duration = total_duration / len(sentences)
            timestamps = []
            for i, sentence in enumerate(sentences):
                if sentence.strip():
                    timestamps.append({
                        "text": sentence.strip(),
                        "start": round(i * sentence_duration, 2),
                        "end": round((i + 1) * sentence_duration, 2)
                    })

            return timestamps

    def _split_text_into_sentences(self, text: str) -> list:
        """Split text into sentences on CJK/Latin terminal punctuation.

        Falls back to clause-level (comma/semicolon/colon) splitting when
        fewer than two sentences are found, so short texts still yield
        multiple timestamp entries.

        Args:
            text: Text to split.

        Returns:
            list: Non-empty, stripped sentence strings.
        """
        import re

        # Split on Chinese and Latin sentence-ending punctuation.
        parts = re.split(r'[。！？.!?]+', text)
        sentences = [s.strip() for s in parts if s.strip()]

        # Too coarse? Try clause-level punctuation instead.
        if len(sentences) < 2:
            comma_parts = re.split(r'[，,；;：:]', text)
            comma_sentences = [s.strip() for s in comma_parts if s.strip()]
            if len(comma_sentences) > len(sentences):
                sentences = comma_sentences

        return sentences

    def _calculate_sentence_weight(self, sentence: str) -> float:
        """Predict a sentence's relative speech duration.

        Args:
            sentence: Sentence text.

        Returns:
            float: Weight >= 0.1 for non-blank input, 0.0 for blank input.
        """
        import re

        if not sentence.strip():
            return 0.0

        # Base weight: character count.
        base_weight = len(sentence)

        # Clause punctuation adds pause time.
        punctuation_count = sum(
            sentence.count(p) for p in ('，', ',', '；', ';', '：', ':')
        )
        punctuation_weight = punctuation_count * 0.3

        # Digits and English words are typically read more slowly.
        numbers = len(re.findall(r'\d+', sentence))
        english_words = len(re.findall(r'[a-zA-Z]+', sentence))
        special_weight = (numbers + english_words) * 0.2

        # Mood particles ("啊", "呢", "吧", ...) slow the reading slightly.
        mood_particles = ['啊', '呢', '吧', '哦', '嗯', '哈', '呀', '嘛']
        mood_count = sum(sentence.count(particle) for particle in mood_particles)
        mood_weight = mood_count * 0.2

        total_weight = base_weight + punctuation_weight + special_weight + mood_weight
        return max(total_weight, 0.1)  # floor so no sentence is weightless

    def analyze_emotion_and_action(self, text: str) -> Tuple[str, str]:
        """Classify text emotion and pick a matching avatar animation.

        Simple keyword matching; the first matching rule (in order) wins.

        Args:
            text: Text to analyze.

        Returns:
            Tuple[str, str]: (emotion label, animation name).
        """
        text_lower = text.lower()

        # Ordered keyword rules: (keywords, emotion, animation).
        rules = [
            (['开心', '高兴', '哈哈', '笑', '棒', '好的', '太好了'], "happy", "happy"),
            (['惊讶', '哇', '真的吗', '不敢相信', '天哪'], "surprised", "surprised"),
            (['思考', '想想', '让我想想', '嗯', '这个'], "thinking", "thinking"),
            (['你好', '再见', '拜拜', '欢迎'], "friendly", "wave"),
            (['是的', '对', '没错', '同意'], "agreeing", "nod"),
        ]
        for keywords, emotion, animation in rules:
            if any(word in text_lower for word in keywords):
                return emotion, animation

        return "neutral", "talk"