import io
import json
import os
import re
import tempfile
import uuid
from collections import defaultdict

from flask import Flask, render_template, request, jsonify, send_file
import pandas
from werkzeug.utils import secure_filename

app = Flask(__name__)
# NOTE(review): placeholder secret — load from config/env before deploying.
app.config['SECRET_KEY'] = 'your-secret-key-here'
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024  # 500MB max file size

# Ensure the upload directory exists
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

# In-memory store of processed datasets, keyed by a uuid (data_id).
# NOTE(review): grows without bound and is lost on restart — acceptable
# for a single-process dev tool only.
processed_data = {}

def detect_language(text):
    """Classify *text* as 'chinese', 'english', or 'unknown'.

    A single CJK unified ideograph anywhere in the text marks it as
    Chinese. Otherwise the leading 100-character sample must consist
    solely of ASCII letters, whitespace and common punctuation to count
    as English; everything else is 'unknown'.
    """
    if not text or not text.strip():
        return 'unknown'

    # Chinese wins if any ideograph appears anywhere in the full text.
    if re.search(r'[\u4e00-\u9fff]', text):
        return 'chinese'

    # English is judged on a leading sample only.
    head = text[:100]
    if re.match(r'^[a-zA-Z\s.,!?;:\'"()\-]+$', head):
        return 'english'

    return 'unknown'

def process_jsonl_data(file_path):
    """Parse a JSONL similarity file into records, stats and a doc lookup.

    Each line must be a JSON object containing doc1_id, doc2_id,
    similarity, doc1_content and doc2_content. Blank lines, malformed
    JSON, records missing fields, and records with a non-numeric
    similarity are skipped (parse errors are reported to stdout).

    Returns a dict with:
        data        -- list of records, each augmented with 'index' and
                       'doc1_language'/'doc2_language'/'pair_language'
        stats       -- pair_count, doc_count, avg/max/min similarity
        unique_docs -- list of distinct document ids
        doc_map     -- id -> first content seen for that id
    """
    data = []
    doc_map = {}
    required_fields = ('doc1_id', 'doc2_id', 'similarity',
                       'doc1_content', 'doc2_content')

    with open(file_path, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            line = line.strip()
            if not line:
                continue

            try:
                item = json.loads(line)
            except json.JSONDecodeError as e:
                print(f"Error parsing line {line_num}: {e}")
                continue

            if not all(field in item for field in required_fields):
                continue

            # BUG FIX: reject records whose similarity is not numeric so
            # the statistics computation below cannot blow up on bad data.
            try:
                item['similarity'] = float(item['similarity'])
            except (TypeError, ValueError):
                print(f"Error parsing line {line_num}: similarity is not numeric")
                continue

            item['index'] = len(data)

            # Detect per-document language and derive the pair language.
            item['doc1_language'] = detect_language(item['doc1_content'])
            item['doc2_language'] = detect_language(item['doc2_content'])
            if (item['doc1_language'] == item['doc2_language']
                    and item['doc1_language'] in ('chinese', 'english')):
                item['pair_language'] = item['doc1_language']
            else:
                item['pair_language'] = 'mixed'

            data.append(item)

            # First occurrence wins; doc_map's keys double as the set of
            # unique document ids (the original kept a redundant set).
            doc_map.setdefault(item['doc1_id'], item['doc1_content'])
            doc_map.setdefault(item['doc2_id'], item['doc2_content'])

    similarities = [item['similarity'] for item in data]
    stats = {
        'pair_count': len(data),
        'doc_count': len(doc_map),
        'avg_similarity': (sum(similarities) / len(similarities)
                           if similarities else 0.0),
        'max_similarity': max(similarities, default=0.0),
        'min_similarity': min(similarities, default=1.0),
    }

    return {
        'data': data,
        'stats': stats,
        'unique_docs': list(doc_map),
        'doc_map': doc_map,
    }

@app.route('/')
def index():
    """Serve the single-page UI shell."""
    template_name = 'index.html'
    return render_template(template_name)

@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept a JSONL upload, process it, and cache the result in memory.

    Returns a JSON payload with a data_id that later API calls use to
    reference the processed dataset. The uploaded file is always removed
    from disk, even when processing fails.
    """
    if 'file' not in request.files:
        return jsonify({'error': '没有选择文件'}), 400

    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': '没有选择文件'}), 400

    # Case-insensitive extension check so '.JSONL' etc. are accepted too.
    if not file.filename.lower().endswith('.jsonl'):
        return jsonify({'error': '不支持的文件格式，请上传JSONL文件'}), 400

    filename = secure_filename(file.filename)
    file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(file_path)

    try:
        result = process_jsonl_data(file_path)

        # Store the result under a fresh opaque id.
        data_id = str(uuid.uuid4())
        processed_data[data_id] = result

        return jsonify({
            'success': True,
            'data_id': data_id,
            'stats': result['stats'],
            'filename': filename
        })
    except Exception as e:
        return jsonify({'error': f'文件处理失败: {str(e)}'}), 500
    finally:
        # BUG FIX: the original leaked the uploaded file on disk whenever
        # processing raised; always clean up.
        if os.path.exists(file_path):
            os.remove(file_path)

@app.route('/api/data/<data_id>')
def get_data(data_id):
    """Return one page of (optionally filtered and sorted) pair records.

    Query parameters: page, per_page (positive ints; bad values fall back
    to defaults), sort_field, sort_order ('asc'/'desc'), language_filter
    ('all'/'chinese'/'english'/'mixed').
    """
    if data_id not in processed_data:
        return jsonify({'error': '数据不存在'}), 404

    data_obj = processed_data[data_id]

    # Parse query parameters defensively: malformed numbers fall back to
    # defaults instead of surfacing a 500.
    try:
        page = max(int(request.args.get('page', 1)), 1)
    except ValueError:
        page = 1
    try:
        per_page = max(int(request.args.get('per_page', 10)), 1)
    except ValueError:
        per_page = 10
    sort_field = request.args.get('sort_field', 'index')
    sort_order = request.args.get('sort_order', 'asc')
    language_filter = request.args.get('language_filter', 'all')

    # Filter by pair language.
    filtered_data = data_obj['data']
    if language_filter != 'all':
        filtered_data = [item for item in filtered_data
                         if item['pair_language'] == language_filter]

    if sort_field != 'index':
        # BUG FIX: use sorted() instead of list.sort(). When no language
        # filter was applied, filtered_data aliased the cached dataset and
        # the in-place sort permanently reordered it, corrupting the
        # 'index'-based document lookups for all later requests.
        reverse = sort_order == 'desc'
        filtered_data = sorted(filtered_data,
                               key=lambda x: x[sort_field],
                               reverse=reverse)

    # Paginate.
    total_items = len(filtered_data)
    total_pages = (total_items + per_page - 1) // per_page
    start_idx = (page - 1) * per_page
    end_idx = min(start_idx + per_page, total_items)

    page_data = filtered_data[start_idx:end_idx]

    return jsonify({
        'data': page_data,
        'pagination': {
            'current_page': page,
            'per_page': per_page,
            'total_items': total_items,
            'total_pages': total_pages,
            'start_index': start_idx + 1,
            'end_index': end_idx
        },
        'stats': data_obj['stats']
    })

@app.route('/api/visualization/<data_id>')
def get_visualization_data(data_id):
    """Build chart payloads: similarity histogram, network graph, scatter."""
    if data_id not in processed_data:
        return jsonify({'error': '数据不存在'}), 404

    data_obj = processed_data[data_id]

    # Similarity histogram over ten equal bins [0.0, 1.0].
    similarities = [item['similarity'] for item in data_obj['data']]
    bin_edges = [i * 0.1 for i in range(10)]
    counts = [0] * 10

    for sim in similarities:
        # BUG FIX: clamp at both ends — a negative similarity used to
        # produce a negative index that silently wrapped into counts[-1].
        bin_index = min(max(int(sim * 10), 0), 9)
        counts[bin_index] += 1

    # Network graph: one node per document, one edge per sufficiently
    # similar pair.
    nodes = [{'id': doc_id, 'label': f'Doc {doc_id}', 'shape': 'dot', 'size': 15}
             for doc_id in data_obj['unique_docs']]

    edges = []
    for item in data_obj['data']:
        if item['similarity'] > 0.5:  # only show pairs with similarity > 0.5
            edges.append({
                'from': item['doc1_id'],
                'to': item['doc2_id'],
                'label': f"{item['similarity']:.2f}",
                'value': item['similarity'] * 5
            })

    return jsonify({
        'similarity_distribution': {
            # Renamed loop variable: 'bin' shadowed the builtin.
            'bins': [f"{low:.1f}-{low + 0.1:.1f}" for low in bin_edges],
            'counts': counts
        },
        'network_data': {
            'nodes': nodes,
            'edges': edges
        },
        # Record index as x-axis; cap at 100 points to keep the chart light.
        'scatter_data': [{'x': i, 'y': item['similarity'], 'index': i}
                         for i, item in enumerate(data_obj['data'][:100])]
    })

@app.route('/api/document/<data_id>/<int:index>')
def get_document_pair(data_id, index):
    """Return the ids, similarity and full contents of one document pair."""
    if data_id not in processed_data:
        return jsonify({'error': '数据不存在'}), 404

    records = processed_data[data_id]['data']

    if not 0 <= index < len(records):
        return jsonify({'error': '索引超出范围'}), 400

    pair = records[index]
    payload = {key: pair[key]
               for key in ('doc1_id', 'doc2_id', 'similarity',
                           'doc1_content', 'doc2_content')}
    return jsonify(payload)

@app.route('/api/export/<data_id>')
def export_data(data_id):
    """Download the processed dataset as a JSON attachment.

    Serves the payload from an in-memory buffer.
    """
    if data_id not in processed_data:
        return jsonify({'error': '数据不存在'}), 404

    data_obj = processed_data[data_id]

    # BUG FIX: the original wrote a NamedTemporaryFile(delete=False) and
    # never removed it, leaking one temp file per export. Build the JSON
    # in memory instead — no file to clean up.
    payload = json.dumps(data_obj, ensure_ascii=False, indent=2)
    buffer = io.BytesIO(payload.encode('utf-8'))
    return send_file(buffer, as_attachment=True,
                     download_name='similarity_data.json',
                     mimetype='application/json')

if __name__ == '__main__':
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger (arbitrary code execution) to the whole
    # network — never run this configuration in production.
    app.run(debug=True, host='0.0.0.0', port=5000)
