import operator

from flask import Blueprint, request
from sqlalchemy import func, distinct
from sqlalchemy.sql import label
from flask import jsonify
import json
from sqlalchemy import case  # conditional aggregation, used in hotspot_matrix
from openai import OpenAI
from algorithm.analyze_utils import translate_with_glm, extract_english_syntax, extract_chinese_syntax, \
    judge_translation_technique
from base.core import db
from hdfs_util import read_hdfs_file
from models.job import Review,  reviews_schema

wordBp = Blueprint("word", __name__)

@wordBp.route('/getReviews', methods=["GET"])
def getReviews():
    """
    Fetch paginated review records.

    Query parameters:
    - page: page number (default 1)
    - per_page: items per page (default 20, capped at 100)
    - sort: sort field (default INDEX)
    - order: sort direction, 'asc' or 'desc' (default asc)
    - book: filter by word book
    - min_rate: minimum forgetting rate
    - max_rate: maximum forgetting rate

    Returns JSON with the serialized rows plus pagination metadata,
    or a 400 response with an error message on failure.
    """
    try:
        # Parse parameters; int() may raise ValueError, handled below.
        page = int(request.args.get('page', 1))
        per_page = min(int(request.args.get('per_page', 20)), 100)
        sort_field = request.args.get('sort', 'INDEX')
        order = request.args.get('order', 'asc').lower()
        book_filter = request.args.get('book')
        min_rate = request.args.get('min_rate', type=float)
        max_rate = request.args.get('max_rate', type=float)

        # Fall back to INDEX for unknown sort fields.
        if not hasattr(Review, sort_field):
            sort_field = 'INDEX'
        # Validate the direction so the getattr() call below cannot be
        # used to invoke arbitrary attributes of the column object
        # (previously any other value raised AttributeError -> 400).
        if order not in ('asc', 'desc'):
            order = 'asc'

        # Build the query.
        query = Review.query

        # Apply optional filters.
        if book_filter:
            query = query.filter(Review.BOOK == book_filter)
        if min_rate is not None:
            query = query.filter(Review.rate >= min_rate)
        if max_rate is not None:
            query = query.filter(Review.rate <= max_rate)

        # Apply ordering, e.g. Review.INDEX.asc().
        order_clause = getattr(getattr(Review, sort_field), order)()
        query = query.order_by(order_clause)

        # Run the paginated query; error_out=False returns an empty
        # page instead of aborting on out-of-range page numbers.
        pagination = query.paginate(
            page=page,
            per_page=per_page,
            error_out=False
        )

        return jsonify({
            'success': True,
            'data': reviews_schema.dump(pagination.items),
            'pagination': {
                'total': pagination.total,
                'pages': pagination.pages,
                'current': page,
                'per_page': per_page
            }
        })

    except Exception as e:
        # Malformed numeric parameters or query errors surface as 400.
        return jsonify({
            'success': False,
            'message': str(e)
        }), 400



# Distribution of forgetting rates, plus the overall average rate.
@wordBp.route('/forget_rate_distribution')
def forget_rate_distribution():
    """
    Bucket review rows into fixed-width bins over [-1, 1] and return
    per-bin counts together with the average forgetting rate.
    """
    # 10 bins of width 0.2 spanning [-1, 1] (the old comment claimed
    # a 0.1 step, which did not match the code).
    bins = [round(-1 + 0.2 * i, 2) for i in range(11)]
    results = []
    last = len(bins) - 2
    for i in range(len(bins) - 1):
        low, high = bins[i], bins[i + 1]
        # Bins are half-open [low, high), except the final bin which is
        # closed so rows with rate exactly 1.0 are not silently dropped.
        if i == last:
            count = Review.query.filter(Review.rate >= low, Review.rate <= high).count()
        else:
            count = Review.query.filter(Review.rate >= low, Review.rate < high).count()
        results.append({
            "range": f"{low:.1f}-{high:.1f}",
            "count": count
        })

    # avg() yields None on an empty table; report 0 in that case.
    avg_rate = db.session.query(func.avg(Review.rate)).scalar() or 0

    return jsonify({
        "distribution": results,
        "avg_rate": avg_rate
    })

# Query words whose forgetting rate lies within a given range.
@wordBp.route('/words_by_range')
def words_by_range():
    """
    Return reviews whose rate falls in the half-open range [low, high).

    `low`/`high` default to the full range [-1, 1). Using type=float
    means malformed values fall back to the defaults instead of a bare
    float() call raising ValueError and producing an HTTP 500.
    """
    low = request.args.get('low', -1.0, type=float)
    high = request.args.get('high', 1.0, type=float)
    words = Review.query.filter(Review.rate >= low, Review.rate < high).all()
    return reviews_schema.jsonify(words)


@wordBp.route('/hotspot_matrix', methods=['GET'])
def hotspot_matrix():
    """生成List-Unit遗忘热点矩阵数据"""
    # 计算每个List-Unit组合的统计指标
    matrix_data = db.session.query(
        Review.LIST,
        Review.INDEX,
        func.avg(Review.rate).label('avg_rate'),
        func.count().label('word_count'),
        func.sum(case([(Review.rate < -0.7, 1)], else_=0)).label('hard_word_count')
    ).group_by(Review.LIST, Review.INDEX).all()

    # 构建热力图所需格式
    heatmap_data = []
    lists = sorted({d.LIST for d in matrix_data})
    index = sorted({d.INDEX for d in matrix_data})

    # 处理每一条数据，确保硬性词数为数字类型
    for d in matrix_data:
        heatmap_data.append({
            'x': index.index(d.INDEX),  # 列坐标（INDEX）
            'y': lists.index(d.LIST),  # 行坐标（对应List）
            'value': d.avg_rate,
            'count': d.word_count,
            'hard': float(d.hard_word_count)  # 转换为浮动数字，确保类型正确
        })

    # 返回数据
    return jsonify({
        'x_labels': [f'Index {u}' for u in index],
        'y_labels': [f'List {l}' for l in lists],
        'data': heatmap_data,
        'max_hard': max(float(d.hard_word_count) for d in matrix_data) if matrix_data else 0  # 转换最大值为数字类型
    })

# Translation-structure statistics across years.
@wordBp.route('/GetTranslationOutput', methods=['GET'])
def GetTranslationOutput():
    """Read the pre-computed translation-analysis JSON from HDFS and return it."""
    # Location of the analysis output on HDFS,
    # e.g. /user/root/yearly_pos_frequency.json style path.
    hdfs_file_path = '/AfterCleanData/translation_analysis_output.json'

    file_content = read_hdfs_file(hdfs_file_path)
    # read_hdfs_file signals failure by returning a message that begins
    # with this marker string ("read failed").
    if file_content.startswith("读取失败"):
        return jsonify({'code': 500, 'msg': file_content})

    try:
        # Parse the raw JSON payload fetched from HDFS.
        parsed = json.loads(file_content)
        return jsonify({'code': 200, 'msg': '读取成功', 'data': parsed})
    except Exception as e:
        return jsonify({'code': 500, 'msg': f'解析JSON失败: {e}'})

# English sentence in -> translation + syntactic structure analysis.
@wordBp.route('/analyze', methods=['POST'])
def analyze():
    """
    Translate an English sentence and analyze its structure.

    Expects a JSON body {"text": "..."}. Returns the translation, the
    English/Chinese syntax analyses, and the detected techniques.
    """
    # silent=True: a missing or non-JSON body yields None instead of
    # raising, so we can return a clean 400 ourselves (request.json
    # previously blew up with an AttributeError -> HTTP 500).
    data = request.get_json(silent=True) or {}
    original_text = data.get('text', '')

    if not original_text:
        return jsonify({"error": "Missing input text"}), 400

    # 1. Translate.
    translation = translate_with_glm(original_text)

    # 2. Structure analysis.
    en_syntax = extract_english_syntax(original_text)
    zh_syntax = extract_chinese_syntax(translation)
    tech = judge_translation_technique(en_syntax, zh_syntax)

    result = {
        "original_text": original_text,
        "translation": translation,
        "syntax": {
            "english": en_syntax,
            "chinese": zh_syntax
        },
        "techniques": tech
    }

    return jsonify(result), 200


@wordBp.route('/evaluate_translation', methods=["POST"])
def evaluate_translation():
    """
    Evaluate a translation of an English sentence.

    JSON body: {"english": "...", "user_translation": "..."(optional)}.
    Without user_translation: auto-translate and return structure analysis.
    With user_translation: analyze its structure and ask the LLM to
    critique accuracy and suggest improvements.
    """
    import os  # local import: only needed to read the API-key env var

    # silent=True avoids an AttributeError/500 when the body is not JSON.
    data = request.get_json(silent=True) or {}
    english = data.get("english", "").strip()
    user_translation = data.get("user_translation", "").strip()

    if not english:
        return jsonify({"success": False, "message": "英文不能为空"}), 400

    # 1. English only: auto-translate and analyze (original behavior).
    if not user_translation:
        auto_translation = translate_with_glm(english)
        eng_syntax = extract_english_syntax(english)
        zh_syntax = extract_chinese_syntax(auto_translation)
        technique = judge_translation_technique(eng_syntax, zh_syntax)
        return jsonify({
            "success": True,
            "translated": auto_translation,
            "english_syntax": eng_syntax,
            "chinese_syntax": zh_syntax,
            "technique": technique
        })

    # 2. User supplied a translation: compare structure and critique.
    eng_syntax = extract_english_syntax(english)
    user_zh_syntax = extract_chinese_syntax(user_translation)
    technique = judge_translation_technique(eng_syntax, user_zh_syntax)

    # Prompt the LLM for a comparative evaluation.
    prompt = f"""你是翻译评估专家，请根据以下英文原文和用户的中文翻译，判断其是否准确，并指出翻译中的错误、不足或不地道的地方。
英文原文：{english}
用户翻译：{user_translation}
请以简明的方式指出问题，并提出更优的翻译建议："""

    # SECURITY: an API key was hardcoded here. It is kept only as a
    # fallback for backward compatibility — set ZHIPU_API_KEY in the
    # environment and rotate/remove the committed key.
    client = OpenAI(
        api_key=os.environ.get(
            "ZHIPU_API_KEY",
            "514c84d600d7448bb9c81cbba77ae28c.E5aMEcijkvLR2Ux1"
        ),
        base_url="https://open.bigmodel.cn/api/paas/v4/"
    )

    try:
        evaluation = client.chat.completions.create(
            model="glm-4",
            messages=[
                {"role": "user", "content": prompt},
            ],
            temperature=0.7
        ).choices[0].message.content.strip()
    except Exception as e:
        # Best-effort: report the LLM failure inline rather than 500-ing.
        evaluation = f"评价出错：{str(e)}"

    return jsonify({
        "success": True,
        "english_syntax": eng_syntax,
        "user_chinese_syntax": user_zh_syntax,
        "technique": technique,
        "evaluation": evaluation
    })
