import json
import os
import random
import time
import wave
from datetime import datetime

import jieba
import numpy as np
import torch
from aip import AipSpeech  # Baidu cloud speech services (ASR + TTS)
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
from flask import Flask, request, jsonify, send_file, Response
from flask_cors import CORS
from PIL import Image
from vosk import Model, KaldiRecognizer

# ---------------------- 1. Project configuration ----------------------
# NOTE(review): the Baidu credentials were hard-coded in source. They can now
# be overridden via environment variables; the literals remain only as
# backward-compatible fallbacks and should be rotated and removed from VCS.
CONFIG = {
    "BAIDU_APP_ID": os.environ.get("BAIDU_APP_ID", "119919931"),
    "BAIDU_API_KEY": os.environ.get("BAIDU_API_KEY", "iWLQtltCY6wr5s7e0B3HPHWx"),
    "BAIDU_SECRET_KEY": os.environ.get("BAIDU_SECRET_KEY", "HaA4zykzEurf6cBWX17MUSDRggy41NiZ"),
    "VOSK_MODEL_PATH": r"vosk-model-small-cn-0.22",
    "GENERATE_SAVE_PATH": "generated_paintings/",   # output dir for PNG paintings
    "AUDIO_SAVE_PATH": "generated_audios/",         # output dir for synthesized MP3s
    "SUPPORTED_STYLES": ["水墨", "工笔", "写意", "青绿", "浅绛"],
    # Known art terms, grouped by category; used for jieba dictionary seeding,
    # element extraction and the accuracy heuristic.
    "SUPPORTED_ELEMENTS": {
        # Themes — duplicate "渔船" removed (it appeared twice in the original).
        "主题": [
            "山水", "花鸟", "人物", "茅亭", "松树", "小溪", "远山", "渔船", "牡丹", "梅花",
            "瀑布", "云海", "竹林", "柳树", "桃花", "荷花", "山石", "清泉", "孤峰", "层峦",
            "渡口", "栈道", "古桥", "村落", "飞鸟", "流水", "积雪", "晚霞", "明月",
            "隐士", "行旅", "垂钓", "抚琴", "对弈", "茅屋", "寺庙", "塔", "楼阁", "牌坊"],
        "风格": [
            "水墨", "工笔", "写意", "青绿", "浅绛",
            "金碧", "米家山水", "小青绿", "大青绿", "白描",
            "没骨", "院体", "文人画", "减笔", "重彩"],
        "墨色": [
            "浓墨", "淡墨", "干墨", "湿墨",
            "焦墨", "宿墨", "破墨", "积墨", "泼墨", "飞白"],
        "笔触": [
            "细笔", "粗笔", "皴法", "点染",
            "勾勒", "渲染", "擦", "扫", "擢", "垛",
            "披麻皴", "斧劈皴", "折带皴", "雨点皴", "米点皴"]
    },
    # Default text-to-speech parameters (Baidu API value ranges).
    "TTS_CONFIG": {
        "vol": 5,    # volume (0-15)
        "spd": 5,    # speed (0-9)
        "pit": 5,    # pitch (0-9)
        "per": 4     # voice: 0 female, 1 male, 3 emotional male, 4 emotional female
    },
    "RESPONSE_TIMEOUT": 180,        # seconds; exceeding it only logs a warning
    "STYLE_MATCH_THRESHOLD": 0.85,
    "SD_MODEL": r"D:\SD1.5",
    "LORA_MODEL_PATH": r"D:\Lora_shanshui\梦镜中国 _ 国风无涯山水_v1.0.safetensors",
    "LORA_WEIGHT": 0.8,             # cross-attention scale when LoRA is loaded
    "SD_STEPS": 30,
    "SD_GUIDANCE": 7.5,
    "SD_RESOLUTION": (512, 512),
    "TERM_EXPLANATIONS": {}         # optional term -> explanation map (empty by default)
}

# Create the output folders up front so later writes cannot fail on a missing dir.
os.makedirs(CONFIG["GENERATE_SAVE_PATH"], exist_ok=True)
os.makedirs(CONFIG["AUDIO_SAVE_PATH"], exist_ok=True)

# Painting metadata keyed by saved image path:
# {save_path: {"original_text": "...", "broadcast_text": "..."}}
painting_descriptions = {}

# ---------------------- 2. 核心工具类 ----------------------
class VoiceService:
    """Speech service combining recognition (Baidu cloud with a local Vosk
    fallback) and synthesis (Baidu cloud TTS)."""

    def __init__(self):
        # Baidu cloud client used for both ASR and TTS.
        try:
            self.baidu_client = AipSpeech(
                CONFIG["BAIDU_APP_ID"],
                CONFIG["BAIDU_API_KEY"],
                CONFIG["BAIDU_SECRET_KEY"]
            )
            self.use_baidu = True
            print("百度云语音服务初始化成功")
        except Exception as e:
            print(f"百度云语音服务初始化失败: {str(e)}")
            self.use_baidu = False

        # Local Vosk recognizer, used when the cloud service is unavailable.
        try:
            if not os.path.exists(CONFIG["VOSK_MODEL_PATH"]):
                raise Exception(f"Vosk模型路径不存在：{CONFIG['VOSK_MODEL_PATH']}")
            self.vosk_model = Model(CONFIG["VOSK_MODEL_PATH"])
            self.sample_rate = 16000
            print("Vosk语音识别初始化成功")
        except Exception as e:
            print(f"Vosk语音识别初始化失败: {str(e)}")
            self.vosk_model = None

    def audio_to_text(self, audio_data):
        """Recognize speech from raw audio bytes.

        :param audio_data: mono 16-bit PCM audio bytes at 16 kHz
            (assumed from the WAV header written below — TODO confirm caller).
        :return: (recognized text, keyword-based accuracy in [0, 1], method name)
        :raises Exception: when every recognition backend fails.

        Bug fix: a single Baidu failure used to set ``self.use_baidu = False``
        permanently, disabling the cloud path for all later requests although
        the comment said "temporarily".  A per-call flag is used instead so
        the next request retries Baidu.
        """
        start_time = time.time()
        result_text = ""
        recognition_method = ""
        baidu_failed = False

        try:
            # Prefer the Baidu cloud recognizer.
            if self.use_baidu:
                try:
                    result_text = self._baidu_recognize(audio_data)
                    recognition_method = "百度云"
                except Exception as e:
                    print(f"百度云识别失败: {str(e)}，尝试使用Vosk")
                    baidu_failed = True  # fall back for THIS request only

            # Fall back to local Vosk recognition.
            if (not self.use_baidu or baidu_failed) and self.vosk_model:
                result_text = self._vosk_recognize(audio_data)
                recognition_method = "Vosk本地"

            if not result_text:
                raise Exception("所有语音识别方法均失败")

        except Exception as e:
            raise Exception(f"语音识别失败：{str(e)}")

        # Heuristic "accuracy": fraction of words that are known art terms.
        words = jieba.lcut(result_text)
        valid_words = [w for w in words
                       if any(w in elems for elems in CONFIG["SUPPORTED_ELEMENTS"].values())]
        accuracy = min(len(valid_words) / len(words), 1.0) if words else 0.0

        response_time = time.time() - start_time
        if response_time > CONFIG["RESPONSE_TIMEOUT"]:
            print(f"警告：语音识别响应时间{response_time:.2f}秒，超出限制")

        return result_text, round(accuracy, 2), recognition_method

    def text_to_speech(self, text, **kwargs):
        """Synthesize ``text`` to MP3 via Baidu cloud TTS.

        :param text: text to synthesize (must be non-empty)
        :param kwargs: optional overrides (vol, spd, pit, per) of the defaults
        :return: (audio binary data, path the MP3 was saved to)
        :raises Exception: if Baidu is unavailable, the text is empty, or the
            API returns an error dict.
        """
        if not self.use_baidu:
            raise Exception("百度云服务未初始化，无法使用语音合成")

        if not text.strip():
            raise Exception("合成文本不能为空")

        # Merge defaults with per-call overrides.
        tts_config = CONFIG["TTS_CONFIG"].copy()
        tts_config.update(kwargs)

        # Baidu returns MP3 bytes on success, an error dict on failure.
        result = self.baidu_client.synthesis(
            text,
            'zh',  # language
            1,     # client type
            tts_config
        )
        if isinstance(result, dict):
            raise Exception(f"语音合成失败: {result.get('err_msg', '未知错误')}")

        # Persist the audio next to other generated files.
        save_filename = f"tts_{datetime.now().strftime('%Y%m%d%H%M%S')}.mp3"
        save_path = os.path.join(CONFIG["AUDIO_SAVE_PATH"], save_filename)
        with open(save_path, 'wb') as f:
            f.write(result)

        return result, save_path

    def _baidu_recognize(self, audio_data):
        """Recognize audio via the Baidu cloud ASR API.

        Writes a temporary 16 kHz mono WAV (Baidu expects a proper container),
        removes it in all cases, and returns the first recognition candidate.
        """
        temp_wav_path = f"temp_{datetime.now().strftime('%Y%m%d%H%M%S')}.wav"
        with wave.open(temp_wav_path, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(16000)
            wf.writeframes(audio_data)

        try:
            with open(temp_wav_path, 'rb') as f:
                audio_content = f.read()

            result = self.baidu_client.asr(audio_content, 'wav', 16000, {
                'dev_pid': 1537,  # Mandarin (with basic English support)
            })

            if result['err_no'] == 0:
                return result['result'][0]
            else:
                print(f"百度语音识别错误详情: {result}")
                raise Exception(f"百度云语音识别错误: {result['err_msg']}")
        finally:
            if os.path.exists(temp_wav_path):
                os.remove(temp_wav_path)

    def _vosk_recognize(self, audio_data):
        """Recognize audio locally with Vosk.

        Fixes vs. the original implementation:
        - ``json.loads`` instead of ``eval`` on the recognizer's JSON output
          (``eval`` executes arbitrary code and is unsafe);
        - every accepted segment plus the trailing ``FinalResult`` is kept,
          where the original overwrote previous segments and dropped the
          final partial, losing recognized text;
        - the temp WAV is removed even if recognition raises.
        """
        temp_wav_path = f"temp_{datetime.now().strftime('%Y%m%d%H%M%S')}.wav"
        with wave.open(temp_wav_path, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(self.sample_rate)
            wf.writeframes(audio_data)

        try:
            rec = KaldiRecognizer(self.vosk_model, self.sample_rate)
            segments = []
            with wave.open(temp_wav_path, "rb") as wf:
                while True:
                    data = wf.readframes(4000)
                    if len(data) == 0:
                        break
                    if rec.AcceptWaveform(data):
                        text = json.loads(rec.Result()).get("text", "")
                        if text:
                            segments.append(text)
            final_text = json.loads(rec.FinalResult()).get("text", "")
            if final_text:
                segments.append(final_text)
            return "".join(segments)
        finally:
            if os.path.exists(temp_wav_path):
                os.remove(temp_wav_path)


class TextAnalysisService:
    """Text analysis: extracts painting elements from user text and builds a
    varied spoken broadcast description."""

    def __init__(self):
        # Seed jieba with every known art term so they segment as whole words.
        for elem_type, elem_list in CONFIG["SUPPORTED_ELEMENTS"].items():
            for elem in elem_list:
                jieba.add_word(elem)

    def explain_terms(self, words):
        """Return the words with a parenthesized explanation appended to any
        word that has one configured in ``CONFIG["TERM_EXPLANATIONS"]``."""
        explained_words = []
        for word in words:
            if word in CONFIG["TERM_EXPLANATIONS"]:
                explained_words.append(f"{word}（{CONFIG['TERM_EXPLANATIONS'][word]}）")
            else:
                explained_words.append(word)
        return explained_words

    def generate_broadcast_text(self, analysis_result):
        """Build a randomized spoken description of the analyzed painting.

        :param analysis_result: dict with "风格"/"主题"/"墨色"/"笔触" lists
        :return: one formatted Chinese sentence chosen from several templates
        """
        # Pull the leading element of each category, with graceful defaults.
        style = analysis_result["风格"][0] if analysis_result["风格"] else "水墨"
        themes = analysis_result["主题"]
        ink = analysis_result["墨色"][0] if analysis_result["墨色"] else "浓淡相宜的墨色"
        brush = analysis_result["笔触"][0] if analysis_result["笔触"] else "精妙的笔触"

        # Adjective pools per style, with a generic fallback.
        style_adjectives = {
            "水墨": ["空灵缥缈", "水韵墨章", "清新淡雅", "酣畅淋漓", "墨韵悠长"],
            "工笔": ["细致入微", "工整细腻", "严谨华丽", "富丽堂皇", "精雕细琢"],
            "写意": ["奔放豪迈", "恣意洒脱", "形神兼备", "意趣盎然", "气韵生动"],
            "青绿": ["金碧辉煌", "苍翠欲滴", "设色艳丽", "古朴典雅", "色彩明丽"],
            "浅绛": ["雅致清丽", "淡而不薄", "色墨交融", "温润柔和", "清新雅致"],
            "default": ["意境深远", "笔墨精湛", "韵味独特", "格调高雅", "艺术价值高"]
        }
        adj_list = style_adjectives.get(style, style_adjectives["default"])

        # Sentence templates; each uses style/themes/ink/brush, some use adj2.
        templates = [
            "这幅{style}作品{adj}，画中{themes}栩栩如生。{ink}与{brush}相得益彰，尽显传统书画的无穷魅力。",
            "观此{style}画，{adj}。{themes}跃然纸上，{brush}在{ink}的演绎下，营造出{adj2}的深远意境。",
            "这是一幅{adj}的{style}画作。艺术家以{ink}和{brush}，描绘出{themes}的生动姿态，令人仿佛置身画中。",
            "此作乃{style}佳作，{adj}。{themes}布局精妙，{brush}运用自如，于{ink}变化间传达出{adj2}的审美情趣。",
            "欣赏这幅{style}画，{adj}之感油然而生。{themes}错落有致，{brush}技法娴熟，{ink}浓淡适宜，体现了东方美学的精髓。"
        ]

        # Join at most three themes to keep the sentence readable.
        if themes:
            themes_text = "、".join(themes[:3])
        else:
            themes_text = "山水林木"  # default theme phrase

        # Pick a primary adjective, then a distinct secondary one if possible.
        chosen_adj = random.choice(adj_list)
        remaining_adjs = [a for a in adj_list if a != chosen_adj]
        chosen_adj2 = random.choice(remaining_adjs) if remaining_adjs else chosen_adj

        chosen_template = random.choice(templates)

        return chosen_template.format(
            style=style,
            adj=chosen_adj,
            adj2=chosen_adj2,
            themes=themes_text,
            ink=ink,
            brush=brush
        )

    def analyze_text(self, input_text, user_selected_style=None):
        """Analyze user text into painting elements.

        :param input_text: free-form description of the desired painting
        :param user_selected_style: optional style chosen in the UI, used when
            the text itself names no style
        :return: (analysis dict, style-match score in [0, 1]); on empty input
            returns ({"error": ...}, 0.0)

        Bug fix: the ink ("墨色") and brush ("笔触") defaults used to be
        pre-filled into the result lists, so a term detected in the text was
        appended AFTER the default and ``[0]`` always yielded the default.
        Defaults are now applied only when nothing was detected.
        """
        if not input_text.strip():
            return {"error": "输入文本不能为空"}, 0.0

        words = jieba.lcut(input_text)
        explained_words = self.explain_terms(words)

        analysis_result = {
            "原始文本": input_text.strip(),
            "解释文本": "".join(explained_words),
            "主题": [],
            "风格": [],
            "墨色": [],
            "笔触": [],
            "error": None
        }

        # Collect every known element mentioned in the text (de-duplicated).
        for word in words:
            for elem_type, elem_list in CONFIG["SUPPORTED_ELEMENTS"].items():
                if word in elem_list and word not in analysis_result[elem_type]:
                    analysis_result[elem_type].append(word)

        # Style precedence: detected in text > user selection > default.
        if not analysis_result["风格"]:
            if user_selected_style and user_selected_style in CONFIG["SUPPORTED_STYLES"]:
                analysis_result["风格"].append(user_selected_style)
            else:
                analysis_result["风格"].append("水墨")

        # Ink / brush fall back to defaults only when none were detected.
        if not analysis_result["墨色"]:
            analysis_result["墨色"].append("浓墨")
        if not analysis_result["笔触"]:
            analysis_result["笔触"].append("细笔")

        # Style-match score: share of words related to style terms.
        style_words = analysis_result["风格"]
        style_related_words = [w for w in words
                               if w in CONFIG["SUPPORTED_ELEMENTS"]["风格"] or w in style_words]
        style_match_score = min(len(style_related_words) / len(words), 1.0) if words else 0.0

        # Attach the spoken broadcast text to the analysis.
        analysis_result["播报文本"] = self.generate_broadcast_text(analysis_result)

        return analysis_result, round(style_match_score, 2)


class PaintingGenerateService:
    """Painting generation via a local Stable Diffusion pipeline with an
    optional LoRA adapter."""

    def __init__(self):
        # Checked once here instead of re-stat-ing the filesystem on every
        # generation call; cleared if the LoRA weights fail to load so the
        # generation path never applies a scale for weights that aren't there.
        self.lora_available = os.path.exists(CONFIG["LORA_MODEL_PATH"])
        self.pipe = self._init_sd_pipeline()

    def _init_sd_pipeline(self):
        """Build the SD pipeline, load optional LoRA weights, and move the
        pipeline to GPU when available.

        :return: a ready ``StableDiffusionPipeline``
        :raises Exception: if the base model cannot be loaded.
        """
        try:
            print("正在初始化Stable Diffusion模型...")

            scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
                CONFIG["SD_MODEL"],
                subfolder="scheduler"
            )

            pipe = StableDiffusionPipeline.from_pretrained(
                CONFIG["SD_MODEL"],
                scheduler=scheduler,
                # fp16 only makes sense on GPU; keep fp32 on CPU.
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                safety_checker=None,
                requires_safety_checker=False,
                local_files_only=True
            )

            if self.lora_available:
                try:
                    if CONFIG["LORA_MODEL_PATH"].endswith('.safetensors'):
                        # diffusers expects directory + weight file name for safetensors.
                        pipe.load_lora_weights(
                            os.path.dirname(CONFIG["LORA_MODEL_PATH"]),
                            weight_name=os.path.basename(CONFIG["LORA_MODEL_PATH"])
                        )
                    else:
                        pipe.load_lora_weights(CONFIG["LORA_MODEL_PATH"])
                    print(f"成功加载Lora模型: {CONFIG['LORA_MODEL_PATH']}")
                except Exception as lora_error:
                    # Best-effort: continue with the base model, and make sure
                    # generation does not pass a LoRA scale for unloaded weights
                    # (the original kept applying it after a failed load).
                    print(f"Lora模型加载失败: {str(lora_error)}")
                    self.lora_available = False
            else:
                print(f"警告: Lora模型路径不存在: {CONFIG['LORA_MODEL_PATH']}")

            if torch.cuda.is_available():
                print("使用GPU加速")
                pipe = pipe.to("cuda")
                pipe.enable_attention_slicing()
                try:
                    pipe.enable_xformers_memory_efficient_attention()
                except Exception:
                    print("xformers不可用，将继续使用普通注意力机制")
            else:
                print("使用CPU模式，生成速度较慢")

            print("Stable Diffusion模型初始化成功")
            return pipe
        except Exception as e:
            print(f"Stable Diffusion初始化详细错误: {str(e)}")
            raise Exception(f"Stable Diffusion初始化失败：{str(e)}")

    def _build_prompt(self, analysis_result):
        """Build (positive prompt, negative prompt) from an analysis result.

        Prefers the pre-explained text when present; otherwise composes the
        prompt from the extracted style/theme/ink/brush elements.
        """
        explained_text = analysis_result.get("解释文本", "")

        if explained_text:
            base_prompt = f"{explained_text}，中国古典山水画，传统国画，意境深远，笔法精湛，水墨渲染"
        else:
            style = analysis_result["风格"][0]
            theme = "、".join(analysis_result["主题"]) if analysis_result["主题"] else "山水"
            ink = analysis_result["墨色"][0]
            brush = analysis_result["笔触"][0]
            base_prompt = (
                f"{style}风格山水画，{theme}，{ink}，{brush}，"
                "中国古典山水画，传统国画，意境深远，笔法精湛，水墨渲染"
            )

        negative_prompt = (
            "现代建筑，人物肖像，文字，签名，低分辨率，模糊，变形，色彩失真，"
            "西方绘画风格，照片质感，3D渲染，卡通，动漫"
        )
        return base_prompt, negative_prompt

    def generate_painting(self, analysis_result):
        """Generate a painting image for an analysis result and save it.

        :param analysis_result: dict returned by ``TextAnalysisService.analyze_text``
        :return: (saved PNG path, elapsed seconds rounded to 2 decimals)
        :raises Exception: on a propagated analysis error or SD failure.
        """
        start_time = time.time()
        if analysis_result.get("error"):
            raise Exception(f"文本解析错误：{analysis_result['error']}")

        prompt, negative_prompt = self._build_prompt(analysis_result)
        style = analysis_result["风格"][0]

        try:
            with torch.no_grad():
                generate_kwargs = {
                    "prompt": prompt,
                    "negative_prompt": negative_prompt,
                    "width": CONFIG["SD_RESOLUTION"][0],
                    "height": CONFIG["SD_RESOLUTION"][1],
                    "num_inference_steps": CONFIG["SD_STEPS"],
                    "guidance_scale": CONFIG["SD_GUIDANCE"],
                    # Time-based seed: a different painting on every call.
                    "generator": torch.manual_seed(int(time.time()))
                }

                # Only scale cross-attention when LoRA weights actually loaded.
                if self.lora_available:
                    generate_kwargs["cross_attention_kwargs"] = {"scale": CONFIG["LORA_WEIGHT"]}

                image = self.pipe(**generate_kwargs).images[0]
        except Exception as e:
            raise Exception(f"图像生成失败：{str(e)}")

        # Save the painting under a style-and-timestamp file name.
        save_filename = f"{style}_{datetime.now().strftime('%Y%m%d%H%M%S')}.png"
        save_path = os.path.join(CONFIG["GENERATE_SAVE_PATH"], save_filename)
        image.save(save_path)

        # Remember both the raw input and the broadcast text for later playback.
        painting_descriptions[save_path] = {
            "original_text": analysis_result["原始文本"],
            # Robustness: fall back to the raw text if no broadcast text exists.
            "broadcast_text": analysis_result.get("播报文本", analysis_result["原始文本"])
        }

        generate_cost = time.time() - start_time
        if generate_cost > CONFIG["RESPONSE_TIMEOUT"]:
            print(f"警告：生成耗时{generate_cost:.2f}秒，超出限制")

        return save_path, round(generate_cost, 2)


# ---------------------- 3. Backend API ----------------------
app = Flask(__name__)
CORS(app)

# Global service instances, lazily created on the first request.
voice_service = None
text_service = None
paint_service = None

@app.before_request
def initialize_services():
    """Initialize the heavy services once, on the first incoming request.

    Bug fix: ``before_request`` runs on EVERY request, and the original body
    unconditionally re-created all three services — including reloading the
    whole Stable Diffusion pipeline per request. Guard so construction only
    happens once; a failed attempt leaves the globals ``None`` and is retried
    on the next request.
    """
    global voice_service, text_service, paint_service
    if paint_service is not None:
        return
    try:
        voice_service = VoiceService()  # combined ASR + TTS service
        text_service = TextAnalysisService()
        paint_service = PaintingGenerateService()
        print("所有服务初始化成功")
    except Exception as e:
        print(f"服务初始化失败: {str(e)}")
        raise

@app.route("/")
def index():
    """Health check: report that the backend is alive."""
    payload = {"status": "success", "message": "后端服务正常运行"}
    return jsonify(payload)

# Speech-to-text endpoint
@app.route("/api/voice-to-text", methods=["POST"])
def api_voice_to_text():
    """Accept raw audio bytes in the request body and return recognized text."""
    raw_audio = request.data
    if not raw_audio:
        return jsonify({"code": 400, "msg": "未接收到音频数据", "data": {}})
    try:
        text, acc, method = voice_service.audio_to_text(raw_audio)
    except Exception as err:
        return jsonify({"code": 500, "msg": f"语音识别失败：{str(err)}", "data": {}})
    payload = {
        "text": text,
        "accuracy": acc,
        "method": method,
        "tip": "可在文本框修改识别结果后生成画作",
    }
    return jsonify({"code": 200, "msg": "语音识别成功", "data": payload})

# Text-to-speech endpoint
@app.route("/api/text-to-voice", methods=["POST"])
def api_text_to_voice():
    """Synthesize speech for the posted text and stream back an MP3 download."""
    try:
        body = request.json
        text = body.get("text", "").strip()
        if not text:
            return jsonify({"code": 400, "msg": "文本内容不能为空", "data": {}})

        # Optional per-request overrides for volume/speed/pitch/voice.
        overrides = {}
        for key in ("vol", "spd", "pit", "per"):
            if body.get(key) is not None:
                overrides[key] = body[key]

        audio_bytes, saved_to = voice_service.text_to_speech(text, **overrides)

        filename = os.path.basename(saved_to)
        return Response(
            audio_bytes,
            mimetype="audio/mp3",
            headers={"Content-Disposition": f"attachment; filename={filename}"},
        )
    except Exception as err:
        return jsonify({"code": 500, "msg": f"语音合成失败：{str(err)}", "data": {}})

# Painting-description endpoint (returns the generated broadcast text)
@app.route("/api/get-painting-description/<path:painting_path>", methods=["GET"])
def api_get_painting_description(painting_path):
    """Look up and return the stored broadcast text for a generated painting."""
    try:
        record = painting_descriptions.get(painting_path)
        if record is None:
            return jsonify({"code": 404, "msg": "画作描述不存在", "data": {}})
        return jsonify({
            "code": 200,
            "msg": "获取描述成功",
            "data": {"description": record["broadcast_text"]},
        })
    except Exception as err:
        return jsonify({"code": 500, "msg": f"获取画作描述失败：{str(err)}", "data": {}})

# Painting-generation endpoint
@app.route("/api/generate-painting", methods=["POST"])
def api_generate_painting():
    """Analyze the submitted text and generate a painting from it."""
    try:
        body = request.json
        user_text = body.get("input_text", "").strip()
        chosen_style = body.get("style", "水墨")

        analysis, match_score = text_service.analyze_text(user_text, chosen_style)
        if analysis.get("error"):
            return jsonify({"code": 400, "msg": analysis["error"], "data": {}})

        image_path, elapsed = paint_service.generate_painting(analysis)

        payload = {
            "painting_path": image_path,
            "style_match": match_score,
            "generate_cost": elapsed,
        }
        return jsonify({"code": 200, "msg": "书画生成成功", "data": payload})
    except Exception as err:
        return jsonify({"code": 500, "msg": f"生成失败：{str(err)}", "data": {}})

# Painting-retrieval endpoint
@app.route("/api/get-painting/<path:painting_path>", methods=["GET"])
def api_get_painting(painting_path):
    """Serve a generated painting image.

    Security fix: the original served ANY file that existed on disk from a
    client-supplied path (path traversal). Access is now restricted to files
    inside the configured painting output directory; paths generated by
    ``/api/generate-painting`` continue to work unchanged.
    """
    try:
        paintings_root = os.path.realpath(CONFIG["GENERATE_SAVE_PATH"])
        requested = os.path.realpath(painting_path)
        inside_root = requested.startswith(paintings_root + os.sep)
        if not inside_root or not os.path.exists(requested):
            return jsonify({"code": 404, "msg": "画作不存在", "data": {}})
        return send_file(requested, mimetype="image/png")
    except Exception as e:
        return jsonify({"code": 500, "msg": f"获取画作失败：{str(e)}", "data": {}})

if __name__ == "__main__":
    # NOTE(review): debug=True enables Werkzeug's interactive debugger and
    # auto-reloader — development only; do not bind 0.0.0.0 with debug enabled
    # in production.
    app.run(host="0.0.0.0", port=5000, debug=True)