# 全局变量
import pickle
from urllib.parse import unquote
from rapidfuzz import fuzz

from app.services.doubao.model_dispatcher import ModelDispatcher
from app.services.ocr_engine.data_loader import load_questions_data
import logging
import os
import traceback
from flask import current_app, request, jsonify
from app.services.ocr.utils.doubao_parser import analysis_doubao
from app.services.doubao.ImageBasedUBALDO import ImageBasedUBALDO
from app.services.ocr_engine.preprocessor import prepare_query_text
from app.services.ocr_engine.result_filter import screen_text

try:
    import numpy as np
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity
except ImportError as e:
    import sys

    # BUG FIX: current_app has no application context at module import time,
    # so current_app.logger would itself raise RuntimeError and hide the real
    # ImportError. Log through the stdlib logging module instead.
    logging.getLogger(__name__).error(f"缺少必要的依赖库: {str(e)}")
    logging.getLogger(__name__).error("请执行: pip install scikit-learn numpy scipy")
    # In a server deployment fail fast; under pytest or Flask debug mode let
    # later code raise a clear error instead of killing the process.
    if 'pytest' not in sys.modules and not os.environ.get('FLASK_DEBUG'):
        sys.exit(1)
ocr_logger = logging.getLogger('ocr_logger')

# Module-level model state, populated by load_model() / train_model().
ai_model = None          # reserved; currently unused
vectorizer = None        # fitted TfidfVectorizer
question_vectors = None  # TF-IDF matrix over the question corpus
questions_data = None    # question bank as a list of dicts


def initialize():
    """Ensure the search model is ready, training a fresh one if loading fails.

    Returns:
        bool: True when vectorizer, vectors and question data are all available.
    """
    global vectorizer, question_vectors, questions_data
    # Fast path: everything already in memory.
    already_loaded = (
        vectorizer is not None
        and question_vectors is not None
        and questions_data is not None
    )
    if already_loaded:
        return True
    # Prefer loading persisted artifacts; fall back to retraining.
    if not load_model():
        current_app.logger.info("模型加载失败，开始训练模型")
        if not train_model():
            current_app.logger.error("模型训练失败")
            return False
    current_app.logger.info("模型初始化成功")
    return True


def train_model():
    """
    Train the AI search model from the existing question bank, tuned for the
    Chinese 交管12123 学法减分 quiz corpus.

    Returns:
        bool: True on successful training and persistence, False otherwise.
    """
    global ai_model, vectorizer, question_vectors, questions_data

    try:
        # Make sure the required third-party libraries are importable.
        try:
            from sklearn.feature_extraction.text import TfidfVectorizer
            from sklearn.metrics.pairwise import cosine_similarity
            import numpy as np
        except ImportError as e:
            current_app.logger.error(f"训练模型失败，缺少必要的库: {str(e)}")
            # Best-effort: try to install the dependencies automatically.
            import subprocess
            import sys
            try:
                current_app.logger.info("尝试自动安装所需依赖...")
                # BUG FIX: invoke pip via the current interpreter so packages
                # land in this environment — a bare 'pip' on PATH may belong
                # to a different Python installation.
                subprocess.run(
                    [sys.executable, '-m', 'pip', 'install',
                     'scikit-learn', 'numpy', 'scipy'],
                    check=True,
                )
                from sklearn.feature_extraction.text import TfidfVectorizer
                from sklearn.metrics.pairwise import cosine_similarity
                import numpy as np
                current_app.logger.info("依赖安装成功")
            except Exception as install_err:
                current_app.logger.error(f"自动安装依赖失败: {str(install_err)}")
                return False

        # Load the question bank.
        questions_data = load_questions_data()
        if not questions_data:
            current_app.logger.error("无法加载题库数据，模型训练失败")
            return False

        corpus = []
        for question in questions_data:
            # BUG FIX: tolerate explicit None values in the question dicts —
            # question.get(key, '') still returns None when the key exists
            # with a null value, and None.strip() would crash training.
            text = (question.get('subject') or '').strip()
            options = []
            for opt in ('optionA', 'optionB', 'optionC', 'optionD'):
                val = (question.get(opt) or '').strip()
                if val:
                    options.append(val)

            # Sort options so matching is independent of option order.
            options.sort()
            # Concatenate stem and options into one training document.
            corpus.append(text + ' ' + ' '.join(options))

        # Build and fit the TF-IDF model. Character-level 2-4 n-grams suit
        # Chinese text, which has no whitespace word boundaries.
        vectorizer = TfidfVectorizer(
            analyzer='char_wb',   # character-level analysis with boundaries
            ngram_range=(2, 4),   # 2-4 character n-grams
            max_features=50000,   # cap the vocabulary size
            use_idf=True,         # weight by inverse document frequency
            norm='l2',            # L2 normalization
        )
        question_vectors = vectorizer.fit_transform(corpus)

        # Persist the artifacts next to this module.
        model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'text')
        os.makedirs(model_dir, exist_ok=True)

        with open(os.path.join(model_dir, 'vectorizer.pkl'), 'wb') as f:
            pickle.dump(vectorizer, f)

        with open(os.path.join(model_dir, 'question_vectors.pkl'), 'wb') as f:
            pickle.dump(question_vectors, f)

        current_app.logger.info("交管12123学法减分AI搜索模型训练完成并已保存")
        return True

    except Exception as e:
        current_app.logger.error(f"训练AI模型失败: {str(e)}")
        current_app.logger.error(traceback.format_exc())
        return False


def load_model():
    """
    Load the pre-trained AI model artifacts from disk.

    Returns:
        bool: True when both pickle files and the question data load cleanly.
    """
    global ai_model, vectorizer, question_vectors, questions_data

    try:
        base_dir = os.path.dirname(os.path.abspath(__file__))
        model_dir = os.path.join(base_dir, 'text')
        vec_path = os.path.join(model_dir, 'vectorizer.pkl')
        mat_path = os.path.join(model_dir, 'question_vectors.pkl')

        # Both artifacts are required; a missing one means we must retrain.
        if not all(os.path.exists(p) for p in (vec_path, mat_path)):
            current_app.logger.warning("预训练模型文件不存在，需要重新训练")
            return False

        # Deserialize the fitted vectorizer.
        with open(vec_path, 'rb') as fh:
            vectorizer = pickle.load(fh)

        # Deserialize the pre-computed question vectors.
        with open(mat_path, 'rb') as fh:
            question_vectors = pickle.load(fh)

        # The question bank itself is always loaded fresh, not pickled.
        questions_data = load_questions_data()
        if not questions_data:
            current_app.logger.error("无法加载题库数据")
            return False

        current_app.logger.info("成功加载交管12123学法减分AI搜索模型")
        return True

    except Exception as e:
        current_app.logger.error(f"加载AI模型失败: {str(e)}")
        current_app.logger.error(traceback.format_exc())
        return False


def search_with_ai(query_text, file_path, threshold=0.5, max_results=40):
    """
    Search the question bank with the AI model (交管12123 学法减分 tuned),
    falling back to the Doubao image model when TF-IDF finds nothing.

    参数:
        query_text: 搜索关键词/OCR文本
        file_path: 原始图片路径，供豆包AI兜底识别使用
        threshold: 相似度阈值
        max_results: 最大返回结果数

    返回:
        匹配的题目列表（每项附带 similarity 字段）；失败时返回空列表
    """

    try:
        # Lazy-initialize the model on first use.
        if vectorizer is None or question_vectors is None or questions_data is None:
            current_app.logger.info("AI模型未加载，正在初始化")
            if not initialize():
                current_app.logger.error("AI模型初始化失败")
                current_app.logger.error(''.join(traceback.format_exc()))
                return []

        # Normalize/clean the OCR text before vectorizing.
        processed_query = prepare_query_text(query_text)
        # Consistency fix: use the app logger like the rest of this module,
        # not the root stdlib logger.
        current_app.logger.info(f"处理后的查询文本: {processed_query}")
        if not processed_query or len(processed_query) < 5:
            current_app.logger.warning(f"查询文本过短或清理后为空: {query_text}")
            return []

        # Vectorize the query text.
        try:
            query_vector = vectorizer.transform([processed_query])
        except Exception as e:
            current_app.logger.error(f"向量化查询文本失败: {str(e)}")
            # The persisted model may be corrupted — reload and retry once.
            current_app.logger.info("尝试重新加载模型")
            initialize()
            query_vector = vectorizer.transform([processed_query])

        # Cosine similarity of the query against every question vector.
        similarities = cosine_similarity(query_vector, question_vectors).flatten()

        # Candidate indices in descending similarity order.
        top_indices = similarities.argsort()[::-1]

        results = []
        for i in top_indices:
            # Indices are similarity-descending: once below the threshold
            # (or the quota is full) no later index can qualify — stop early
            # instead of scanning the whole bank.
            if similarities[i] < threshold or len(results) >= max_results:
                break
            question = questions_data[i].copy()
            question['similarity'] = float(similarities[i])
            results.append(question)
        current_app.logger.info("首次查询: %s", len(results) if results else 'unknown')
        if results:
            # Post-filter the candidates against the raw OCR text.
            results = screen_text(query_text, results)
        if not results:
            ocr_logger.info("原文: %s | 解析: %s | 个数：0", query_text, processed_query)
            current_app.logger.info("没有找到匹配结果，尝试使用豆包AI进行搜索")
            # Fallback: ask the Doubao image model to recognize the question.
            engine = ModelDispatcher.get_model('image')
            ai_results = engine.recognize(file_path, query_text)
            if ai_results:
                results = analysis_doubao(ai_results)
        return results

    except Exception as e:
        current_app.logger.error(f"AI搜索异常: {str(e)}")
        current_app.logger.error("AI搜索异常: total=%s", len(questions_data) if questions_data else 'unknown')
        current_app.logger.error(traceback.format_exc())
        return []


def search_with_text(threshold=0.7, max_results=10):
    """
    Fuzzy-match the question bank against the GET 'query' parameter.

    参数:
        threshold: 最低相似度（0-1）
        max_results: 最大返回结果数

    返回:
        Flask JSON 响应：按相似度降序的匹配题目列表，或错误信息
    """
    # Decode the URL-encoded query parameter.
    query_text = unquote(request.args.get('query', '').strip())

    if not query_text:
        return jsonify({"code": 400, "msg": "请求参数缺失"})

    # BUG FIX: questions_data stays None until initialize() has run; iterating
    # it directly would raise TypeError on the first request to this endpoint.
    if questions_data is None and not initialize():
        return jsonify({"code": 500, "msg": "题库数据未加载"})

    results = []
    for q in questions_data:
        # Tolerate missing or None 'subject' fields instead of crashing.
        subject = q.get('subject') or ''
        similarity = fuzz.partial_ratio(query_text, subject) / 100.0
        if similarity >= threshold:
            item = q.copy()
            item['similarity'] = round(similarity, 3)
            results.append(item)

    # Highest similarity first, capped at max_results.
    results.sort(key=lambda x: x['similarity'], reverse=True)
    return jsonify(results[:max_results])
