import time
from flask import Flask, request, jsonify
from flask_cors import CORS
import os
import pymysql
from pymysql import Error
import logging
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# 导入数据库连接函数
from mysql import create_connection

# Create the backend Flask application; files under ./static are served at /static.
app = Flask(__name__, static_folder='static', static_url_path='/static')

# Allow cross-origin requests from any origin for all routes.
CORS(app, resources={r"/*": {"origins": "*", "methods": ["GET", "POST", "OPTIONS"], "allow_headers": ["Content-Type"]}})

# Upload destination and request-size cap (50MB).
app.config['UPLOAD_FOLDER'] = './static'
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024  # 50MB

# Make sure the upload directory exists before the first upload.
if not os.path.exists(app.config['UPLOAD_FOLDER']):
    os.makedirs(app.config['UPLOAD_FOLDER'])

# Logging configuration; handlers below log through `logger`.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Path to the local model directory.
LOCAL_MODEL_PATH = './models/Qwen0.6'

# Preload the model and tokenizer at import time.  On failure the three
# globals are set to None and the request handlers retry loading on demand.
try:
    logger.info("预加载本地模型...")
    tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_PATH)
    model = AutoModelForCausalLM.from_pretrained(LOCAL_MODEL_PATH, torch_dtype=torch.float16)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    logger.info("模型预加载成功")
except Exception as e:
    logger.error(f"模型预加载失败: {type(e).__name__} - {str(e)}")
    tokenizer = None
    model = None
    device = None
# MODEL_TYPE = ''  # model family, e.g. llama, chatglm
# Depending on the model family, the loading code above may need adjustment.

# Fetch the newest messages of a theme, returned oldest-first.
def get_recent_messages(theme_id, limit=5):
    """Return up to *limit* most recent chat_content rows for *theme_id*.

    Rows are selected newest-first and then reversed so the caller receives
    them in chronological order.  Returns [] when the connection cannot be
    created or a pymysql Error occurs.
    """
    connection = None
    cursor = None
    try:
        connection = create_connection()
        if not connection:
            return []
        cursor = connection.cursor()
        sql = "SELECT * FROM chat_content WHERE theme_id = %s ORDER BY created_at DESC LIMIT %s"
        cursor.execute(sql, (theme_id, limit))
        # Reverse into ascending time order for prompt construction.
        return cursor.fetchall()[::-1]
    except Error as e:
        # Consistency fix: the route handlers report errors through the
        # configured logger; print() bypassed the logging setup entirely.
        logger.error(f"获取最近消息错误: {e}")
        return []
    finally:
        if cursor:
            cursor.close()
        if connection and connection.open:
            connection.close()

# Send-message API: store the user's message, ask the local model for a
# reply, store that reply, and return both to the client.
@app.route('/chat/send', methods=['POST'])
def send_message():
    """Handle POST /chat/send.

    Expects a JSON body with: identity, content (both required); theme_id
    (default 0), theme_name, message_type (default 'text'), is_first_message.

    Returns JSON {'success', 'message', 'ai_response', 'theme_name'} on
    success, or an error payload with status 400/500.
    """
    try:
        data = request.json
        identity = data.get('identity')
        content = data.get('content')
        theme_id = data.get('theme_id', 0)
        theme_name = data.get('theme_name')
        message_type = data.get('message_type', 'text')
        is_first_message = data.get('is_first_message', False)  # NOTE(review): read but never used below
        
        # Validate required parameters.
        if not identity or not content:
            return jsonify({'error': '缺少必要参数'}), 400
        
        # A theme_id of 0 marks a brand-new conversation: derive a theme name
        # from the first message via the local model.
        # NOTE(review): theme_id remains 0 afterwards, so all new
        # conversations are stored under theme 0 — confirm a real theme id is
        # assigned elsewhere.
        if theme_id == 0:
            theme_name = generate_theme_with_local_model(content)
            logger.info(f"生成的主题: {theme_name}")
        
        # Persist the user's message.
        saved = save_chat_to_db(identity, content, theme_id, theme_name, message_type)
        
        if saved:
            # Fetch the latest messages of this theme (including the one just
            # saved) as model context; 10 instead of 5 widens the window.
            recent_messages = get_recent_messages(theme_id, 10)
            
            # Convert DB rows into role/content dicts for the model.
            # NOTE(review): assumes rows are dict-like (DictCursor) — verify
            # create_connection() configures the cursor class accordingly.
            messages_history = []
            for msg in recent_messages:
                role = "user" if msg['identity'] == 'user' else "assistant"
                messages_history.append({"role": role, "content": msg['content']})
            
            # System message carrying the theme context.
            system_message = {
                "role": "system", 
                "content": f"这是一个关于'{theme_name}'的对话，请根据上下文和用户最新的问题生成合适的回答。"
            }
            
            # Ask the local model for a reply.
            ai_response = get_local_model_response(system_message, messages_history)
            
            # Persist the AI reply under the same theme.
            save_chat_to_db('ai', ai_response, theme_id, theme_name, 'text')
            
            return jsonify({
                'success': True,
                'message': '消息保存成功',
                'ai_response': ai_response,
                'theme_name': theme_name
            })
        else:
            return jsonify({'error': '保存消息失败'}), 500
    except Exception as e:
        logger.error(f"发送消息错误类型: {type(e).__name__}")
        logger.error(f"发送消息错误详情: {str(e)}")
        return jsonify({'error': f'服务器内部错误: {type(e).__name__}'}), 500

# Run the local causal LM over the recent conversation to produce a reply.
def get_local_model_response(system_message, messages_history):
    """Generate an assistant reply with the locally hosted model.

    Args:
        system_message: {'role': 'system', 'content': str}; the content is
            already a complete Chinese instruction sentence naming the theme.
        messages_history: chronological list of {'role': 'user'|'assistant',
            'content': str} dicts, newest last.

    Returns:
        str: the cleaned-up answer, or a Chinese error message on failure.
    """
    try:
        logger.info("使用预加载的本地模型...")
        global tokenizer, model, device
        
        # Fall back to loading on demand if the startup preload failed.
        if tokenizer is None or model is None:
            logger.warning("预加载模型失败，尝试重新加载...")
            tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_PATH)
            model = AutoModelForCausalLM.from_pretrained(LOCAL_MODEL_PATH, torch_dtype=torch.float16)
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            model.to(device)
        
        # BUG FIX: system_message['content'] already reads "这是一个关于'<theme>'
        # 的对话，请根据……"; the old template embedded that whole sentence inside
        # the identical sentence again, nesting the instruction twice in the
        # prompt.  Insert the system content verbatim instead.
        prompt_template = ("{system_content}\n"
                           "要求：\n"
                           "1. 回答必须独特，不与之前的回复重复\n"
                           "2. 逻辑清晰\n"
                           "3. 针对用户输入中的核心问题进行直接回应\n"
                           "4. 当用户输入包含多个不相关主题时，优先处理最后提到的主题\n"
                           "5. 避免生成重复或无意义的内容\n\n"
                           "对话历史:\n{messages_str}\n\n"
                           "用户最新问题:\n{latest_user_message}\n\n"
                           "请生成回答:\n")
        
        # Most recent user turn; default to "" so a history without any user
        # message never renders the literal string "None" into the prompt.
        latest_user_message = ""
        for msg in reversed(messages_history):
            if msg['role'] == 'user':
                latest_user_message = msg['content']
                break
        
        # Render all but the final entry as plain-text history.
        # NOTE(review): this assumes the final entry is the latest user
        # message — true for the send_message flow; verify other callers.
        messages_str = ""
        for msg in messages_history[:-1]:
            role = "用户" if msg['role'] == 'user' else "助手"
            messages_str += f"{role}: {msg['content']}\n"
        
        prompt = prompt_template.format(
            system_content=system_message['content'],
            messages_str=messages_str,
            latest_user_message=latest_user_message
        )
        
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        
        logger.info("生成回复中...")
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=1500,  # NOTE(review): counts prompt tokens too; consider max_new_tokens
            num_return_sequences=1,
            temperature=1.0,  # high temperature for output diversity
            top_p=0.95,
            repetition_penalty=1.3,  # discourage repeated phrases
            do_sample=True,
            top_k=50
        )
        
        # The decoded sequence echoes the prompt; everything after the
        # "请生成回答:" marker is the model's answer.
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        answer_start = response.find("请生成回答:")
        if answer_start != -1:
            answer = response[answer_start + len("请生成回答:"):].strip()
        else:
            # Fallback: take the last non-empty paragraph.
            paragraphs = [p.strip() for p in response.split('\n') if p.strip()]
            answer = paragraphs[-1] if paragraphs else "抱歉，无法生成合适的回答。"
        
        answer = format_model_output(answer)
        logger.info("本地模型生成回答成功")
        return answer
    except Exception as e:
        logger.error(f"本地模型调用错误: {type(e).__name__} - {str(e)}")
        return f"抱歉，本地模型处理请求时发生错误: {type(e).__name__}"

# Custom handler for oversized uploads (HTTP 413).
@app.errorhandler(413)
def request_entity_too_large(error):
    """Return a JSON error body when the request exceeds MAX_CONTENT_LENGTH."""
    message = '上传的文件过大，请选择小于50MB的文件'
    return jsonify({'error': message}), 413

# Normalize model output before sending it to the client.
def format_model_output(text):
    """Collapse blank lines and guarantee a terminal punctuation mark."""
    stripped_lines = (line.strip() for line in text.split('\n'))
    cleaned = '\n'.join(line for line in stripped_lines if line)
    
    # Append a Chinese full stop when the text does not already end in
    # punctuation.
    if cleaned and cleaned[-1] not in '。！？.,!?:':
        cleaned += '。'
    
    return cleaned
# Initialize database tables (idempotent: only creates missing tables).
def init_db():
    """Create the chat_content and chat_themes tables if they are missing."""
    connection = create_connection()
    if not connection:
        return
    cursor = None
    try:
        cursor = connection.cursor()
        
        # Create chat_content only when it does not already exist.
        cursor.execute("SHOW TABLES LIKE 'chat_content'")
        if not cursor.fetchone():
            cursor.execute('''
            CREATE TABLE chat_content (
                id INT AUTO_INCREMENT PRIMARY KEY,
                identity VARCHAR(255) NOT NULL,
                content TEXT NOT NULL,
                theme_id INT DEFAULT 0,
                theme_name VARCHAR(255) DEFAULT NULL,
                message_type VARCHAR(50) DEFAULT 'text',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            print("创建chat_content表成功")
        else:
            print("chat_content表已存在")
        
        # Create chat_themes only when it does not already exist.
        cursor.execute("SHOW TABLES LIKE 'chat_themes'")
        if not cursor.fetchone():
            cursor.execute('''
            CREATE TABLE chat_themes (
                id INT AUTO_INCREMENT PRIMARY KEY,
                theme_name VARCHAR(255) NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            print("创建chat_themes表成功")
        else:
            print("chat_themes表已存在")
        
        connection.commit()
        print("数据库表初始化完成")
    except Error as e:
        print(f"数据库初始化错误: {e}")
    finally:
        # BUG FIX: the old finally dereferenced `cursor` even when
        # connection.cursor() raised before it was assigned (NameError), and
        # only closed the cursor when the connection was still open.  Close
        # each resource independently.
        if cursor:
            cursor.close()
        if connection.open:
            connection.close()

# Generate a short conversation theme title with the local model.
def generate_theme_with_local_model(content):
    """Produce a theme title (at most 10 characters) for *content*.

    Returns "未分类" (uncategorized) when the model is unavailable, the
    output is empty, or any error occurs.
    """
    try:
        logger.info("使用预加载的本地模型生成主题...")
        global tokenizer, model, device
        
        # Fall back to loading on demand if the startup preload failed.
        if tokenizer is None or model is None:
            logger.warning("预加载模型失败，尝试重新加载...")
            tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_PATH)
            model = AutoModelForCausalLM.from_pretrained(LOCAL_MODEL_PATH, torch_dtype=torch.float16)
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            model.to(device)
        
        prompt = f"请为以下聊天内容生成一个简短的主题，不超过10个中文字符：\n{content}"
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        
        # BUG FIX 1: max_length=50 counted the prompt tokens too, so any
        # prompt longer than 50 tokens made generate() raise and every call
        # fell through to "未分类".  max_new_tokens bounds only the output.
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=24,
            num_return_sequences=1,
            temperature=0.7
        )
        
        # BUG FIX 2: outputs[0] echoes the prompt tokens; decoding the full
        # sequence and truncating to 10 chars always returned the first 10
        # characters of the prompt, never the generated theme.  Decode only
        # the newly generated suffix.
        generated_tokens = outputs[0][inputs.input_ids.shape[-1]:]
        theme = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
        
        # Enforce the 10-character limit.
        if len(theme) > 10:
            theme = theme[:10]
        
        theme = theme.strip()
        if not theme:
            theme = "未分类"
        # Drop anything that is not alphanumeric or common punctuation.
        theme = ''.join([c for c in theme if c.isalnum() or c in ',.!?，。！？'])
        return theme
    except Exception as e:
        # Consistency fix: report through the configured logger, not print().
        logger.error(f"生成主题时出错: {e}")
        return "未分类"

# Persist one chat message.
def save_chat_to_db(identity, content, theme_id=0, theme_name=None, message_type='text'):
    """Insert a single row into chat_content and commit.

    Returns True on success; False when no connection could be made or a
    pymysql Error occurred (the transaction is rolled back in that case).
    """
    connection = None
    cursor = None
    try:
        connection = create_connection()
        if not connection:
            return False
        cursor = connection.cursor()
        insert_sql = '''INSERT INTO chat_content (identity, content, theme_id, theme_name, message_type, created_at) 
                     VALUES (%s, %s, %s, %s, %s, CURRENT_TIMESTAMP)'''
        row = (identity, content, theme_id, theme_name, message_type)
        cursor.execute(insert_sql, row)
        connection.commit()
        return True
    except Error as e:
        print(f"保存聊天记录错误: {e}")
        if connection and connection.open:
            connection.rollback()
        return False
    finally:
        if cursor:
            cursor.close()
        if connection and connection.open:
            connection.close()

# Load the stored conversation for one theme.
def get_chat_history(theme_id=None):
    """Return all chat_content rows for *theme_id*, oldest first.

    A theme_id of None selects the default theme 0.  Returns [] on
    connection failure or pymysql Error.
    """
    connection = None
    cursor = None
    try:
        connection = create_connection()
        if not connection:
            return []
        cursor = connection.cursor()
        if theme_id is not None:
            cursor.execute(
                "SELECT * FROM chat_content WHERE theme_id = %s ORDER BY created_at ASC",
                (theme_id,)
            )
        else:
            cursor.execute("SELECT * FROM chat_content WHERE theme_id = 0 ORDER BY created_at ASC")
        return cursor.fetchall()
    except Error as e:
        print(f"获取聊天记录错误: {e}")
        return []
    finally:
        if cursor:
            cursor.close()
        if connection and connection.open:
            connection.close()

# Whitelist check for uploaded file extensions.
def allowed_file(filename):
    """Return True when *filename* carries a permitted extension."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}

# Routes
@app.route('/user/chat', methods=['GET'])
def user():
    """Health-check style endpoint; always replies with a greeting."""
    payload = {'text': 'hello world'}
    return jsonify(payload)

@app.route('/chat/v1', methods=['GET'])
def chat1():
    """GET smoke-test endpoint; always replies {'text': '123'}."""
    payload = {'text': '123'}
    return jsonify(payload)

@app.route('/chat/v2', methods=['POST'])
def chat2():
    """POST smoke-test endpoint; always replies {'text': 'post数据'}."""
    payload = {'text': 'post数据'}
    return jsonify(payload)

@app.route('/chat/v3', methods=['GET'])
def chat3():
    """Echo the 'chat_text' query parameter back as JSON.

    Responses for valid requests are unchanged; a missing parameter now
    yields a JSON 400 instead of Flask's default HTML BadRequest page.
    """
    # BUG FIX: request.args["chat_text"] raised an uncaught KeyError/BadRequest
    # (HTML body) when the parameter was absent — wrong shape for a JSON API.
    name = request.args.get("chat_text")
    if name is None:
        return jsonify({'error': '缺少参数chat_text'}), 400
    return jsonify({'text': name})

@app.route('/chat/v4', methods=['POST'])
def chat4():
    """Echo the 'chat_text' field from a JSON POST body back as JSON.

    Responses for valid requests are unchanged; a non-JSON body or missing
    field now yields a JSON 400 instead of an uncaught exception.
    """
    # BUG FIX: request.json errors on non-JSON bodies and
    # request.json["chat_text"] raised KeyError; both now return a JSON 400.
    data = request.get_json(silent=True) or {}
    name = data.get("chat_text")
    if name is None:
        return jsonify({'error': '缺少参数chat_text'}), 400
    return jsonify({'text': name})

@app.route('/chat/v5', methods=['POST'])
def chat5():
    """Accept a file upload in the 'face' field and return its static URL.

    Validates presence, non-empty name, and extension whitelist; saves the
    file under UPLOAD_FOLDER with a timestamp prefix.
    """
    try:
        if 'face' not in request.files:
            return jsonify({'error': '没有文件部分'}), 400
        
        file = request.files["face"]
        if file.filename == '':
            return jsonify({'error': '没有选择文件'}), 400
        
        if file and allowed_file(file.filename):
            # Timestamp prefix keeps saved names unique.
            # NOTE(review): file.filename is user-controlled; consider
            # werkzeug.utils.secure_filename to prevent path traversal.
            filename = f"{int(time.time())}_{file.filename}"
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # BUG FIX: the URL previously contained a broken placeholder
            # instead of the saved file's name.
            return jsonify({'url': f'http://localhost:8080/static/{filename}'})
        else:
            return jsonify({'error': '不支持的文件类型'}), 400
    except Exception as e:
        print(f"文件上传错误: {str(e)}")
        # Oversized payloads surface as 413 errors.
        if '413' in str(e):
            return jsonify({'error': '上传的文件过大，请选择小于50MB的文件'}), 413
        return jsonify({'error': f'服务器错误: {str(e)}'}), 500

# Chat history API.
@app.route('/chat/history', methods=['GET'])
def chat_history():
    """Return every message of the requested theme as a JSON list.

    Reads an optional integer 'theme_id' query parameter; rows come back in
    chronological order with timestamps rendered as 'YYYY-MM-DD HH:MM:SS'.
    """
    try:
        theme_id = request.args.get('theme_id', type=int)
        history = get_chat_history(theme_id)
        
        formatted_history = [
            {
                'id': item['id'],
                'identity': item['identity'],
                'content': item['content'],
                'theme_id': item['theme_id'],
                'theme_name': item['theme_name'],
                'message_type': item['message_type'],
                'created_at': item['created_at'].strftime('%Y-%m-%d %H:%M:%S'),
            }
            for item in history
        ]
        
        return jsonify({'success': True, 'history': formatted_history})
    except Exception as e:
        print(f"获取历史记录错误: {e}")
        return jsonify({'error': '服务器内部错误'}), 500

# Run the table-initialization once at import/startup time.
with app.app_context():
    init_db()

# Start the application.
if __name__ == '__main__':
    # NOTE(review): debug=True and host 0.0.0.0 are development settings; do
    # not expose the built-in server like this in production.
    app.run(host='0.0.0.0', port=8080, debug=True)


