import os
import uuid
import json
import csv
import re
from flask import Blueprint, request, jsonify, current_app
from flask_jwt_extended import jwt_required, get_jwt_identity
from werkzeug.utils import secure_filename
from app import db
from app.models.data_model import DataUpload, Summary
from app.models.user_model import User

# Blueprint grouping all data-upload and summary endpoints; mounted under /api
data_bp = Blueprint('data', __name__)

ALLOWED_EXTENSIONS = {'jsonl', 'csv'}
UPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'uploads')

# Make sure the upload directory exists at import time
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    _stem, sep, ext = filename.rpartition('.')
    return sep == '.' and ext.lower() in ALLOWED_EXTENSIONS

def process_jsonl_content(file_content, upload_id):
    """Parse a JSONL upload and persist one Summary row per valid line.

    Args:
        file_content: Raw uploaded bytes (UTF-8 encoded JSON Lines; each line
            may carry 'fname', 'dialogue', 'summary', 'topic' keys — only
            'dialogue' is required).
        upload_id: Primary key of the owning DataUpload record.

    Returns:
        Number of Summary rows written to the database.
    """
    # Imported lazily to avoid a circular import at module load time.
    from app.services.model_service import model_service

    summaries = []
    lines = file_content.decode('utf-8').splitlines()

    for line_num, line in enumerate(lines, 1):
        try:
            data = json.loads(line.strip())
            fname = data.get('fname', '')
            dialogue = data.get('dialogue', '')
            # A pre-existing summary is reused instead of regenerated.
            existing_summary = data.get('summary', '')
            # Topic, when present, doubles as (part of) the keywords.
            topic = data.get('topic', '')

            # Dialogue content is mandatory; skip lines without it.
            if not dialogue:
                print(f"第 {line_num} 行缺少对话内容，已跳过")
                continue

            # Reset per line. The previous implementation used
            # `'coreference_id' in locals()`, which let values generated
            # for an earlier line leak into later, unrelated rows.
            coreference_id = None
            coreference_text = None

            if existing_summary:
                # Keep the provided summary; only derive keywords.
                summary_text = existing_summary
                if topic:
                    keywords = f"{topic}"
                else:
                    # No topic available: ask the model for keywords only.
                    _, keywords, _, _ = model_service.generate_summary_and_keywords(
                        dialogue=dialogue,
                        model_name="model_1"
                    )
            else:
                # Generate summary, keywords and coreference data via the model.
                summary_text, generated_keywords, coreference_id, coreference_text = model_service.generate_summary_and_keywords(
                    dialogue=dialogue,
                    model_name="model_1"  # default model
                )
                # Prepend the topic to the generated keywords when present.
                keywords = f"{topic},{generated_keywords}" if topic else generated_keywords

            summary = Summary(
                upload_id=upload_id,
                fname=fname,
                dialogue=dialogue,
                summary=summary_text,
                keywords=keywords,
                coreference_id=coreference_id,
                coreference_text=coreference_text
            )
            summaries.append(summary)

        except json.JSONDecodeError as e:
            # Log and skip malformed lines rather than aborting the upload.
            print(f"第 {line_num} 行JSON解析错误: {str(e)}")
            continue

    # Bulk-insert everything that parsed successfully.
    if summaries:
        db.session.bulk_save_objects(summaries)
        db.session.commit()
        print(f"成功处理 {len(summaries)} 条JSONL数据")

    return len(summaries)

def process_csv_content(file_content, upload_id):
    """Parse an uploaded CSV and persist one Summary row per data row.

    Args:
        file_content: Raw uploaded bytes (UTF-8 CSV with a header row; a
            'dialogue' column is required, 'fname'/'summary'/'topic' optional).
        upload_id: Primary key of the owning DataUpload record.

    Returns:
        Number of Summary rows written (0 on empty input, missing 'dialogue'
        column, or database failure).
    """
    # Imported lazily to avoid a circular import at module load time.
    from app.services.model_service import model_service
    from io import StringIO

    summaries = []
    content_str = file_content.decode('utf-8')
    csv_file = StringIO(content_str)

    # Detect the delimiter; quoting must stay enabled so newlines inside
    # quoted fields (multi-line dialogues) are handled correctly.
    try:
        dialect = csv.Sniffer().sniff(csv_file.read(1024))
        csv_file.seek(0)
        dialect.quotechar = '"'
        dialect.doublequote = True
        print(f"检测到CSV分隔符: '{dialect.delimiter}'")
    except Exception as e:
        print(f"分隔符检测失败: {str(e)}，使用默认逗号分隔符")
        csv_file.seek(0)
        # Instantiate rather than mutate: the previous code assigned
        # attributes on the shared csv.excel class itself, leaking state to
        # every other csv user in the process. csv.excel's defaults already
        # are quotechar='"' and doublequote=True.
        dialect = csv.excel()

    reader = csv.reader(csv_file, dialect=dialect)

    # First row is the header.
    try:
        headers = next(reader)
        print(f"读取到CSV标题行: {headers}")
    except StopIteration:
        print("CSV文件为空")
        return 0

    # Normalize header names (strip whitespace and stray quotes).
    headers = [h.strip().strip('"\'') for h in headers]
    print(f"处理后的标题行: {headers}")

    # Map each known field to its column index; -1 marks an absent column.
    field_indices = {}
    for field in ['dialogue', 'fname', 'summary', 'topic']:
        try:
            field_indices[field] = headers.index(field)
        except ValueError:
            if field == 'dialogue':  # 'dialogue' is mandatory
                print(f"CSV文件缺少必要的'dialogue'字段")
                return 0
            field_indices[field] = -1

    print(f"字段索引位置: {field_indices}")

    def cell(row, field):
        """Return the de-quoted cell for *field*, or '' if absent/short row."""
        idx = field_indices[field]
        if idx >= 0 and len(row) > idx:
            return row[idx].strip('"\'')
        return ''

    # Process data rows; the header was row 1, so counting starts at 2.
    for row_num, row in enumerate(reader, 2):
        try:
            print(f"处理第 {row_num} 行数据，列数: {len(row)}")

            # Skip rows too short to contain the dialogue column.
            if len(row) <= field_indices['dialogue']:
                print(f"第 {row_num} 行列数不足，已跳过")
                continue

            dialogue = cell(row, 'dialogue')
            fname = cell(row, 'fname')
            existing_summary = cell(row, 'summary')
            topic = cell(row, 'topic')

            # Field-length trace for debugging multi-line/quoting issues.
            print(f"第 {row_num} 行字段长度 - fname: {len(fname)}, dialogue: {len(dialogue)}, "
                  f"summary: {len(existing_summary)}, topic: {len(topic)}")

            if not dialogue:
                print(f"第 {row_num} 行'dialogue'字段为空，已跳过")
                continue

            # Reset per row. The previous implementation used
            # `'coreference_id' in locals()`, which let values generated for
            # an earlier row leak into later, unrelated rows.
            coreference_id = None
            coreference_text = None

            if existing_summary:
                # Keep the provided summary; only derive keywords.
                summary_text = existing_summary
                if topic:
                    keywords = f"{topic}"
                else:
                    print(f"第 {row_num} 行使用现有摘要，正在生成关键词...")
                    _, keywords, _, _ = model_service.generate_summary_and_keywords(
                        dialogue=dialogue,
                        model_name="model_1"
                    )
            else:
                # Generate summary, keywords and coreference data via the model.
                print(f"第 {row_num} 行生成摘要和关键词...")
                summary_text, generated_keywords, coreference_id, coreference_text = model_service.generate_summary_and_keywords(
                    dialogue=dialogue,
                    model_name="model_1"  # default model
                )
                # Prepend the topic to the generated keywords when present.
                keywords = f"{topic},{generated_keywords}" if topic else generated_keywords

            summary = Summary(
                upload_id=upload_id,
                fname=fname,
                dialogue=dialogue,
                summary=summary_text,
                keywords=keywords,
                coreference_id=coreference_id,
                coreference_text=coreference_text
            )
            summaries.append(summary)
            print(f"第 {row_num} 行处理完成，摘要长度: {len(summary_text)}")

        except Exception as e:
            # Log and skip rows that fail; keep processing the rest.
            print(f"处理第 {row_num} 行CSV时出错: {str(e)}")
            continue

    # Bulk-insert everything that parsed successfully.
    if summaries:
        try:
            db.session.bulk_save_objects(summaries)
            db.session.commit()
            print(f"成功处理 {len(summaries)} 条CSV数据，全部保存到数据库")
        except Exception as e:
            db.session.rollback()
            print(f"保存CSV数据到数据库时出错: {str(e)}")
            return 0
    else:
        print("没有有效的CSV数据需要保存")

    return len(summaries)

@data_bp.route('/data/upload', methods=['POST'])
@jwt_required()
def upload_file():
    """Accept a JSONL/CSV upload, store it, and process it into Summary rows.

    Returns 201 with the processed count on success, 400 on validation
    failure, and 500 on processing errors (recording a 'failed' DataUpload
    row when possible).
    """
    current_user_id = get_jwt_identity()
    
    # Reject requests without a file part
    if 'file' not in request.files:
        return jsonify({'error': '没有上传文件'}), 400
    
    file = request.files['file']
    
    # Reject an empty filename (no file selected in the form)
    if file.filename == '':
        return jsonify({'error': '没有选择文件'}), 400
    
    # Only jsonl/csv extensions are accepted
    if not allowed_file(file.filename):
        return jsonify({'error': f'不支持的文件类型，仅支持: {", ".join(ALLOWED_EXTENSIONS)}'}), 400
    
    # Sanitize the client filename and derive a unique storage name
    original_filename = secure_filename(file.filename)
    file_extension = original_filename.rsplit('.', 1)[1].lower()
    unique_filename = f"{uuid.uuid4()}.{file_extension}"
    
    # Read the whole payload into memory
    file_content = file.read()
    file_size = len(file_content)
    
    try:
        # Create the upload record
        upload = DataUpload(
            user_id=current_user_id,
            file_name=unique_filename,
            original_file_name=original_filename,
            file_size=file_size,
            file_type=file_extension,
            file_content=file_content,  # raw bytes stored on the row
            status='processing'  # initial state while parsing runs
        )
        
        db.session.add(upload)
        db.session.flush()  # obtain upload.id without committing yet
        
        # Dispatch to the parser matching the file type
        summary_count = 0
        if file_extension == 'jsonl':
            summary_count = process_jsonl_content(file_content, upload.id)
        elif file_extension == 'csv':
            summary_count = process_csv_content(file_content, upload.id)
        
        # Mark processing as finished
        upload.status = 'completed'
        db.session.commit()
        
        return jsonify({
            'message': f'文件上传成功，已处理 {summary_count} 条摘要',
            'upload_id': upload.id,
            'status': 'completed',
            'summary_count': summary_count
        }), 201
        
    except Exception as e:
        # Roll back the partially-flushed transaction on any failure
        db.session.rollback()
        
        # Best effort: persist a 'failed' record describing the error
        error_message = str(e)
        try:
            upload = DataUpload(
                user_id=current_user_id,
                file_name=unique_filename,
                original_file_name=original_filename,
                file_size=file_size,
                file_type=file_extension,
                status='failed',
                error_message=error_message
            )
            db.session.add(upload)
            db.session.commit()
            
            return jsonify({
                'error': '文件处理失败',
                'message': error_message,
                'upload_id': upload.id,
                'status': 'failed'
            }), 500
            
        except Exception:
            # Even the failure record could not be written; report plainly
            return jsonify({
                'error': '文件上传失败',
                'message': error_message
            }), 500

@data_bp.route('/data/uploads', methods=['GET'])
@jwt_required()
def get_uploads():
    """List the current user's upload records, newest first."""
    user_id = get_jwt_identity()

    records = (DataUpload.query
               .filter_by(user_id=user_id)
               .order_by(DataUpload.upload_time.desc())
               .all())

    return jsonify([record.to_dict() for record in records]), 200

@data_bp.route('/data/uploads/<int:upload_id>', methods=['GET'])
@jwt_required()
def get_upload_details(upload_id):
    """Return one upload record plus a paginated slice of its summaries."""
    user_id = get_jwt_identity()

    upload = DataUpload.query.filter_by(id=upload_id, user_id=user_id).first()
    if upload is None:
        return jsonify({'error': '未找到上传记录或无权访问'}), 404

    # Pagination parameters from the query string.
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 10, type=int)

    base_query = Summary.query.filter_by(upload_id=upload_id)
    total = base_query.count()
    rows = (base_query
            .order_by(Summary.id)
            .offset((page - 1) * per_page)
            .limit(per_page)
            .all())

    payload = upload.to_dict()
    payload['summaries'] = [row.to_dict() for row in rows]
    payload['total_summaries'] = total
    payload['page'] = page
    payload['per_page'] = per_page
    payload['total_pages'] = (total + per_page - 1) // per_page

    return jsonify(payload), 200

@data_bp.route('/summaries', methods=['GET'])
@jwt_required()
def search_summaries():
    """Browse or search summaries visible to the current user.

    Query params:
        query: substring to search for; empty means browse most recent.
        search_type: 'summary' | 'dialogue' | 'keywords' | 'coreference_text',
            anything else searches all four fields combined.
        chat_only: 'true' restricts to rows whose keywords contain 'chat'.
        page / per_page: pagination controls.

    Admins see every upload; other users only their own. Returns a JSON
    object with 'summaries', 'total', 'page', 'per_page', 'total_pages'.
    """
    current_user_id = get_jwt_identity()

    # Admin status widens visibility to all uploads.
    current_user = User.query.get(current_user_id)
    is_admin = current_user and current_user.is_admin()

    query = request.args.get('query', '')
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 10, type=int)
    search_type = request.args.get('search_type', 'all')
    chat_only = request.args.get('chat_only', 'false').lower() == 'true'

    # Resolve the set of accessible uploads once (the browse and search
    # paths previously duplicated this logic wholesale).
    if is_admin:
        uploads = DataUpload.query.all()
    else:
        uploads = DataUpload.query.filter_by(user_id=current_user_id).all()
    upload_ids = [upload.id for upload in uploads]

    if not upload_ids:
        return jsonify({
            'summaries': [],
            'total': 0,
            'page': page,
            'per_page': per_page,
            'total_pages': 0
        }), 200

    summaries_query = Summary.query.filter(Summary.upload_id.in_(upload_ids))

    # Optional restriction to chat-tagged rows.
    if chat_only:
        summaries_query = summaries_query.filter(Summary.keywords.like('%chat%'))

    # Apply the text filter only when a query string was supplied.
    if query:
        pattern = f"%{query}%"
        if search_type == 'summary':
            search_filter = Summary.summary.like(pattern)
        elif search_type == 'dialogue':
            search_filter = Summary.dialogue.like(pattern)
        elif search_type == 'keywords':
            search_filter = Summary.keywords.like(pattern)
        elif search_type == 'coreference_text':
            search_filter = Summary.coreference_text.like(pattern)
        else:  # default: match any of the text fields
            search_filter = (Summary.summary.like(pattern) |
                             Summary.dialogue.like(pattern) |
                             Summary.keywords.like(pattern) |
                             Summary.coreference_text.like(pattern))
        summaries_query = summaries_query.filter(search_filter)

    summaries_count = summaries_query.count()
    summaries = summaries_query.order_by(Summary.created_at.desc()) \
        .offset((page - 1) * per_page).limit(per_page).all()

    # Attach uploader info to each summary.
    # NOTE(review): this is an N+1 query pattern; tolerable at small
    # per_page values, but a join would scale better.
    result_summaries = []
    for summary in summaries:
        summary_dict = summary.to_dict()
        upload = DataUpload.query.get(summary.upload_id)
        if upload:
            uploader = User.query.get(upload.user_id)
            if uploader:
                summary_dict['uploader'] = {
                    'id': uploader.id,
                    'username': uploader.username
                }
        result_summaries.append(summary_dict)

    return jsonify({
        'summaries': result_summaries,
        'total': summaries_count,
        'page': page,
        'per_page': per_page,
        'total_pages': (summaries_count + per_page - 1) // per_page
    }), 200

# /data/search is an alias route for /summaries
@data_bp.route('/data/search', methods=['GET'])
@jwt_required()
def search_data():
    """Alias endpoint that delegates directly to search_summaries()."""
    return search_summaries()

# Update an existing summary's text
@data_bp.route('/summaries/<int:summary_id>', methods=['PUT'])
@jwt_required()
def update_summary(summary_id):
    """Replace the text of one summary.

    Only an admin or the owner of the upload the summary belongs to may
    update it. Expects JSON with a 'summary' key; returns the updated
    record (with uploader info when available).
    """
    current_user_id = get_jwt_identity()

    # Admin status bypasses the ownership check below.
    current_user = User.query.get(current_user_id)
    is_admin = current_user and current_user.is_admin()

    data = request.get_json()
    if not data or 'summary' not in data:
        return jsonify({'error': '请提供摘要内容'}), 400

    summary = Summary.query.get(summary_id)
    if not summary:
        return jsonify({'error': '未找到摘要'}), 404

    # Fetch the owning upload once; it serves both the permission check and
    # the uploader lookup below (previously queried twice).
    upload = DataUpload.query.get(summary.upload_id)

    # Permission: only admins or the upload's owner may update.
    if not is_admin:
        if not upload or upload.user_id != current_user_id:
            return jsonify({'error': '无权更新此摘要'}), 403

    summary.summary = data['summary']
    db.session.commit()

    uploader = User.query.get(upload.user_id) if upload else None

    summary_dict = summary.to_dict()
    if uploader:
        summary_dict['uploader'] = {
            'id': uploader.id,
            'username': uploader.username
        }

    return jsonify({
        'message': '摘要更新成功',
        'summary': summary_dict
    }), 200

# Generate a summary with the selected model
@data_bp.route('/summaries/generate', methods=['POST'])
@jwt_required()
def generate_summary():
    """Run the chosen model over a dialogue and return its summary output.

    Expects JSON with 'dialogue', 'model' and 'summary_id'. The caller must
    be an admin or own the upload the target summary belongs to.
    """
    user_id = get_jwt_identity()

    # Admin status bypasses the ownership check below.
    requester = User.query.get(user_id)
    admin = requester and requester.is_admin()

    payload = request.get_json()
    if not payload or any(key not in payload for key in ('dialogue', 'model', 'summary_id')):
        return jsonify({'error': '请提供对话内容、模型选择和摘要ID'}), 400

    # The target summary must exist.
    target = Summary.query.get(payload['summary_id'])
    if not target:
        return jsonify({'error': '未找到摘要'}), 404

    # Non-admins may only act on summaries from their own uploads.
    if not admin:
        owning_upload = DataUpload.query.get(target.upload_id)
        if not owning_upload or owning_upload.user_id != user_id:
            return jsonify({'error': '无权为此摘要生成内容'}), 403

    # Imported lazily to avoid a circular import at module load time.
    from app.services.model_service import model_service

    # Hand the dialogue straight to the model service.
    summary_text, keywords, coreference_id, coreference_text = model_service.generate_summary_and_keywords(
        dialogue=payload['dialogue'],
        model_name=payload['model']
    )

    return jsonify({
        'message': '摘要生成成功',
        'summary': summary_text,
        'keywords': keywords,
        'coreference_id': coreference_id,
        'coreference_text': coreference_text
    }), 200

# Register this module's blueprint on the application
def register_blueprints(app):
    """Attach the data blueprint to *app* under the /api URL prefix."""
    app.register_blueprint(data_bp, url_prefix='/api') 