"""数据预处理路由模块"""
import re
import os
from typing import List, Dict, Optional
from flask import Blueprint, request, jsonify
from werkzeug.utils import secure_filename
from models import db, SentencePair, SensitiveWord

# Blueprint holding all preprocessing routes; registered by the app factory.
preprocess_bp = Blueprint('preprocess', __name__)

# Folder for temporary uploaded files; created at import time if missing.
UPLOAD_FOLDER = 'uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)


def get_sensitive_words() -> List[str]:
    """Fetch all sensitive words from the database.

    Returns:
        The list of word strings; an empty list if the query fails for
        any reason, so callers can proceed without filtering.
    """
    try:
        return [entry.word for entry in SensitiveWord.query.all()]
    except Exception as e:
        # Best-effort: log and degrade to "no filtering" rather than fail.
        print(f"获取敏感词失败: {e}")
        return []


def filter_sensitive_words(text: str, sensitive_words: List[str]) -> str:
    """Mask every occurrence of each sensitive word in the text.

    Args:
        text: the original text.
        sensitive_words: words to mask.

    Returns:
        The text with each sensitive word replaced by asterisks of the
        same length.
    """
    result = text
    for term in sensitive_words:
        # str.replace is a no-op when the term is absent, so no guard
        # is needed.
        result = result.replace(term, '*' * len(term))
    return result


def clean_text(text: str, sensitive_words: Optional[List[str]] = None) -> str:
    """Clean a text fragment and optionally mask sensitive words.

    Normalizes whitespace, strips control characters, trims, then masks
    sensitive words if a word list is supplied.

    Args:
        text: raw input text.
        sensitive_words: optional list of words to mask; when None or
            empty, no masking is performed.

    Returns:
        The cleaned (and possibly masked) text; may be empty.
    """
    # Collapse runs of whitespace into single spaces.
    text = re.sub(r'\s+', ' ', text)
    # Drop ASCII and Latin-1 control characters.
    text = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', text)
    # Trim leading/trailing spaces.
    text = text.strip()

    # Mask sensitive words only when there is text and a word list.
    if sensitive_words and text:
        text = filter_sensitive_words(text, sensitive_words)

    return text


def process_file(file_path: str, uploaded_by: Optional[int] = None) -> Dict:
    """Process an uploaded corpus file: clean each line into a sentence pair.

    Each line is expected to hold two text columns separated by a tab or,
    failing that, by runs of two or more whitespace characters.

    Args:
        file_path: path of the UTF-8 text file to process.
        uploaded_by: optional ID of the uploading user.

    Returns:
        A dict with:
        - processed: list of cleaned sentence-pair dicts
        - total_lines: number of lines read
        - cleaned_lines: number of valid, cleaned lines
        - invalid_lines: number of skipped lines

    Raises:
        ValueError: on encoding errors or any other processing failure.
    """
    processed = []
    total_lines = 0
    cleaned_lines = 0
    invalid_lines = 0

    try:
        # Fetch the sensitive-word list once and reuse it for every line.
        sensitive_words = get_sensitive_words()

        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                total_lines += 1
                line_num = total_lines  # 1-based line number for warnings
                line = line.strip()
                if not line:  # skip blank lines
                    invalid_lines += 1
                    continue

                # Prefer tab separation; otherwise split on runs of two
                # or more whitespace characters.
                if '\t' in line:
                    parts = line.split('\t')
                else:
                    parts = re.split(r'\s{2,}', line)

                if len(parts) < 2:
                    print(f"警告：第 {line_num} 行格式错误（非两列）: {line}")
                    invalid_lines += 1
                    continue

                text1, text2 = parts[0].strip(), parts[1].strip()

                # Fallback when one column came out empty (e.g. doubled
                # separators): take the first whitespace-delimited token as
                # column one and the remainder as column two. (The original
                # "find a split point" loop always stopped at the first
                # token, so this direct split is equivalent.)
                if (not text1 or not text2) and ' ' in line:
                    tokens = line.split()
                    if len(tokens) >= 2:
                        text1, text2 = tokens[0], ' '.join(tokens[1:])

                if not text1 or not text2:
                    print(f"警告：第 {line_num} 行格式错误，无法分割为有效文本: {line}")
                    invalid_lines += 1
                    continue

                clean1 = clean_text(text1, sensitive_words)
                clean2 = clean_text(text2, sensitive_words)

                if clean1 and clean2:  # keep only pairs where both sides survive cleaning
                    processed.append({
                        'lang1': 'zh',   # first column is Chinese
                        'text1': clean1,
                        'lang2': 'vi',   # second column is Vietnamese
                        'text2': clean2,
                        'uploaded_by': uploaded_by,
                    })
                    cleaned_lines += 1
                else:
                    invalid_lines += 1

        return {
            'processed': processed,
            'total_lines': total_lines,
            'cleaned_lines': cleaned_lines,
            'invalid_lines': invalid_lines
        }

    except UnicodeDecodeError as e:
        raise ValueError(f"文件编码错误，请确保是 UTF-8 格式: {e}")
    except Exception as e:
        raise ValueError(f"文件处理失败: {str(e)}")


@preprocess_bp.route('/upload-corpus', methods=['POST', 'OPTIONS'])
def upload_corpus():
    """Upload a corpus file, clean it, and persist the sentence pairs.

    Expects a multipart form with a 'file' field (.txt, UTF-8) and an
    optional 'uploaded_by' user ID. Returns cleaning statistics.
    """
    # Let CORS preflight requests through.
    if request.method == 'OPTIONS':
        return '', 204
    # Validate the upload.
    if 'file' not in request.files:
        return jsonify({'error': '未上传文件'}), 400

    file = request.files['file']

    if file.filename == '':
        return jsonify({'error': '文件名为空'}), 400

    if not file.filename.endswith('.txt'):
        return jsonify({'error': '仅支持 .txt 文件'}), 400

    # Optional uploader ID from the form data.
    uploaded_by = request.form.get('uploaded_by', type=int)

    # secure_filename() strips non-ASCII characters, so a Chinese-only
    # filename collapses to '' or 'txt'; fall back to a unique name so
    # the save path is always valid and collision-free.
    import uuid
    filename = secure_filename(file.filename)
    if not filename or filename == 'txt':
        filename = f"{uuid.uuid4().hex}.txt"
    filepath = os.path.join(UPLOAD_FOLDER, filename)
    file.save(filepath)

    try:
        # Clean the file contents.
        result = process_file(filepath, uploaded_by)

        # Persist every cleaned pair in a single transaction.
        for item in result['processed']:
            pair = SentencePair(
                lang1=item['lang1'],
                text1=item['text1'],
                lang2=item['lang2'],
                text2=item['text2'],
                source='manual',
                status='pending',
                uploaded_by=item['uploaded_by']
            )
            db.session.add(pair)

        db.session.commit()

        return jsonify({
            'message': f'成功清洗并入库 {result["cleaned_lines"]} 条句子对',
            'count': result["cleaned_lines"],
            'total_lines': result["total_lines"],
            'cleaned_lines': result["cleaned_lines"],
            'invalid_lines': result["invalid_lines"]
        }), 201  # 201 Created: new rows were inserted

    except ValueError as e:
        db.session.rollback()
        return jsonify({'error': str(e)}), 400

    except Exception as e:
        db.session.rollback()
        return jsonify({'error': f'服务器内部错误: {str(e)}'}), 500

    finally:
        # Remove the temporary upload regardless of outcome.
        if os.path.exists(filepath):
            os.remove(filepath)


@preprocess_bp.route('/clean-text', methods=['POST', 'OPTIONS'])
def clean_text_file():
    """Clean an uploaded corpus file and produce a downloadable result.

    Unlike /upload-corpus this endpoint persists nothing; it writes the
    cleaned pairs to a file that /download-cleaned serves by file_id.
    """
    # Let CORS preflight requests through.
    if request.method == 'OPTIONS':
        return '', 204
    # Validate the upload.
    if 'file' not in request.files:
        return jsonify({'error': '未上传文件'}), 400

    file = request.files['file']

    if file.filename == '':
        return jsonify({'error': '文件名为空'}), 400

    if not file.filename.endswith('.txt'):
        return jsonify({'error': '仅支持 .txt 文件'}), 400

    # secure_filename() strips non-ASCII characters, so a Chinese-only
    # filename collapses to '' or 'txt'; fall back to a unique name.
    import uuid
    filename = secure_filename(file.filename)
    if not filename or filename == 'txt':
        filename = f"{uuid.uuid4().hex}.txt"
    filepath = os.path.join(UPLOAD_FOLDER, filename)
    file.save(filepath)

    try:
        # Clean the file contents.
        result = process_file(filepath)

        # BUG FIX: the cleaned file used the literal name
        # "cleaned_(unknown)" — no .txt suffix, so the download route's
        # glob 'cleaned_*.txt' could never find it, and every request
        # overwrote the same file. Name it after the file_id instead.
        import time
        file_id = str(int(time.time()))
        cleaned_filename = f"cleaned_{file_id}.txt"
        cleaned_filepath = os.path.join(UPLOAD_FOLDER, cleaned_filename)

        with open(cleaned_filepath, 'w', encoding='utf-8') as f:
            for item in result['processed']:
                f.write(f"{item['text1']}\t{item['text2']}\n")

        return jsonify({
            'message': '文本清洗完成',
            'total_lines': result["total_lines"],
            'cleaned_lines': result["cleaned_lines"],
            'invalid_lines': result["invalid_lines"],
            'file_id': file_id
        }), 200

    except ValueError as e:
        return jsonify({'error': str(e)}), 400

    except Exception as e:
        return jsonify({'error': f'服务器内部错误: {str(e)}'}), 500

    finally:
        # Remove the temporary upload regardless of outcome.
        if os.path.exists(filepath):
            os.remove(filepath)


@preprocess_bp.route('/download-cleaned/<file_id>', methods=['GET'])
@preprocess_bp.route('/download-cleaned', methods=['GET'])
def download_cleaned(file_id=None):
    """Serve a cleaned corpus file produced by /clean-text.

    When *file_id* is supplied and the matching file exists, that exact
    file is served; otherwise the most recently written cleaned file is
    returned (the original behaviour).
    """
    import glob
    from flask import send_file

    # BUG FIX: file_id used to be ignored, so concurrent users could
    # receive each other's results. Only digit ids are accepted (they
    # are timestamps), which also prevents path traversal via file_id.
    if file_id and file_id.isdigit():
        candidate = os.path.join(UPLOAD_FOLDER, f'cleaned_{file_id}.txt')
        if os.path.exists(candidate):
            return send_file(
                candidate,
                as_attachment=True,
                download_name=os.path.basename(candidate),
                mimetype='text/plain'
            )

    # Fallback: newest cleaned file in the upload folder.
    cleaned_files = glob.glob(os.path.join(UPLOAD_FOLDER, 'cleaned_*.txt'))
    if not cleaned_files:
        return jsonify({'error': '没有找到清洗后的文件'}), 404

    cleaned_files.sort(key=os.path.getmtime, reverse=True)
    latest_file = cleaned_files[0]

    return send_file(
        latest_file,
        as_attachment=True,
        download_name=os.path.basename(latest_file),
        mimetype='text/plain'
    )
