#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
智能文本审核系统 v2.0 - 纯本地分析版
支持功能：
1. 本地模型审核（离线）
2. 关键词匹配审核
3. 批量文件审核
4. 情感分析检测
"""

import os
import re
import json
import time
import logging
from datetime import datetime
from flask import Flask, request, jsonify, render_template_string, send_from_directory
from flask_cors import CORS
from werkzeug.utils import secure_filename
import csv
from concurrent.futures import ThreadPoolExecutor
from config import Config

# Logging configuration: emit to both the configured log file and stderr.
logging.basicConfig(
    level=getattr(logging, Config.LOG_LEVEL),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(Config.LOG_FILE),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

app = Flask(__name__)
app.config.from_object(Config)
# Allow cross-origin requests so the API can be consumed from other origins.
CORS(app)

# Ensure the upload/results directories exist.  exist_ok=True makes the
# call idempotent and removes the check-then-create race of the previous
# `os.path.exists` guard.
for folder in [Config.UPLOAD_FOLDER, Config.RESULTS_FOLDER]:
    os.makedirs(folder, exist_ok=True)

class LocalModerator:
    """Local model based moderation (transformer pipelines + keyword lexicon).

    NOTE(review): this class is shadowed by a second ``LocalModerator``
    definition later in this module, so this copy is dead code at runtime.
    It also calls a ``pipeline`` factory (presumably HuggingFace
    ``transformers.pipeline``) that is never imported anywhere in this
    file, so ``load_models`` always falls into its ``except`` branch and
    both analyzers stay ``None``.  Consider deleting this class or
    restoring the missing import.
    """
    def __init__(self):
        self.load_models()
        self.load_sensitive_words()

    def load_models(self):
        """Load the pretrained sentiment / toxicity models.

        NOTE(review): ``pipeline`` is undefined in this module; the
        resulting ``NameError`` is swallowed by the broad ``except`` below,
        logged as a load failure, and both analyzers are disabled.
        """
        try:
            # Sentiment analysis model
            self.sentiment_analyzer = pipeline(
                "sentiment-analysis",
                model="uer/roberta-base-finetuned-jd-binary-chinese",
                tokenizer="uer/roberta-base-finetuned-jd-binary-chinese"
            )

            # Text toxicity / classification model
            self.toxicity_analyzer = pipeline(
                "text-classification",
                model="IDEA-CCNL/Erlangshen-MacBERT-110M-Sentiment-Chinese",
                tokenizer="IDEA-CCNL/Erlangshen-MacBERT-110M-Sentiment-Chinese"
            )

            logger.info("本地模型加载成功")
        except Exception as e:
            logger.error(f"本地模型加载失败: {e}")
            self.sentiment_analyzer = None
            self.toxicity_analyzer = None

    def load_sensitive_words(self):
        """Build the category -> word-list lexicon and compile one regex per category."""
        self.sensitive_words = {
            'political': [
                '反动', '颠覆', '分裂', '暴乱', '政变', '革命', '推翻', '政权',
                '台独', '港独', '藏独', '疆独', '法轮功', '六四', '天安门', '民运'
            ],
            'pornographic': [
                '色情', '淫秽', '性交', '做爱', '自慰', '裸照', '裸体', '乳房',
                '阴茎', '阴道', '肛交', '口交', '乱伦', '强奸', '性虐', '嫖娼'
            ],
            'violence': [
                '杀人', '抢劫', '爆炸', '恐怖袭击', '炸弹', '枪支', '刀具',
                '暴力', '殴打', '伤害', '威胁', '恐吓', '绑架', '谋杀', '砍人'
            ],
            'advertisement': [
                '加微信', 'QQ群', '微信号', '联系方式', '购买', '优惠', '促销',
                '广告', '推广', '代理', '加盟', '赚钱', '投资', '理财', '微商'
            ],
            'privacy': [
                '身份证号', '银行卡号', '手机号', '密码', '验证码', '个人信息',
                '隐私', '泄露', '出售', '购买信息', '黑客', '破解', '盗号', '社工库'
            ],
            'abuse': [
                '傻逼', '混蛋', '白痴', '智障', '脑残', '废物', '垃圾', '去死',
                '他妈的', '操你妈', '王八蛋', '贱人', '婊子', '狗屎', '草泥马'
            ],
            'gambling': [
                '赌博', '赌场', '博彩', '彩票', '下注', '赌球', '赌马', '老虎机',
                '百家乐', '轮盘', '德州扑克', '炸金花', '牛牛', '六合彩'
            ],
            'drugs': [
                '毒品', '吸毒', '大麻', '冰毒', '海洛因', '可卡因', '摇头丸',
                'K粉', '鸦片', '罂粟', '制毒', '贩毒', '吸毒工具', '溜冰'
            ]
        }

        # Pre-compile one case-insensitive alternation pattern per category.
        self.patterns = {}
        for category, words in self.sensitive_words.items():
            pattern = '|'.join(map(re.escape, words))
            self.patterns[category] = re.compile(pattern, re.IGNORECASE)

    def keyword_check(self, text):
        """Match every category pattern against *text*.

        Returns {category: {found, keywords, count, severity}}; severity is
        'high' when more than 5 matches occur, otherwise 'medium'.
        """
        results = {}
        for category, pattern in self.patterns.items():
            matches = pattern.findall(text)
            if matches:
                results[category] = {
                    'found': True,
                    'keywords': list(set(matches)),
                    'count': len(matches),
                    'severity': 'high' if len(matches) > 5 else 'medium'
                }
            else:
                results[category] = {
                    'found': False,
                    'keywords': [],
                    'count': 0,
                    'severity': 'low'
                }
        return results

    def sentiment_analysis(self, text):
        """Model-based sentiment analysis to detect abusive / aggressive text."""
        if not self.sentiment_analyzer:
            return {'negative_score': 0.0, 'is_abusive': False, 'confidence': 0.0}

        try:
            # Truncate to 512 chars to stay within the model input limit.
            result = self.sentiment_analyzer(text[:512])[0]
            # NOTE(review): the label scheme depends on the checkpoint;
            # 'NEGATIVE' may never match for this model -- verify.
            negative_score = result['score'] if result['label'] == 'NEGATIVE' else 0

            # Strong negative sentiment is treated as a sign of abusive content.
            is_abusive = negative_score > 0.85

            return {
                'negative_score': float(negative_score),
                'is_abusive': is_abusive,
                'confidence': float(result['score'])
            }
        except Exception as e:
            logger.error(f"情感分析失败: {e}")
            return {'negative_score': 0.0, 'is_abusive': False, 'confidence': 0.0}

    def toxicity_analysis(self, text):
        """Model-based toxicity detection."""
        if not self.toxicity_analyzer:
            return {'is_toxic': False, 'toxicity_score': 0.0}

        try:
            result = self.toxicity_analyzer(text[:512])[0]
            label = result['label']
            score = result['score']

            # Decide toxicity from the predicted label and its confidence.
            # NOTE(review): the listed labels look like those of a toxicity
            # model, but the configured checkpoint is a sentiment model --
            # confirm the label set.
            is_toxic = label in ['toxic', 'insult', 'threat'] and score > 0.7

            return {
                'is_toxic': bool(is_toxic),
                'toxicity_score': float(score),
                'label': label
            }
        except Exception as e:
            logger.error(f"毒性检测失败: {e}")
            return {'is_toxic': False, 'toxicity_score': 0.0}

    def comprehensive_check(self, text):
        """Run all checks and aggregate them into a risk report dict."""
        if not text or not text.strip():
            return {'error': '文本不能为空'}

        text = text.strip()

        # 1. Keyword matching
        keyword_results = self.keyword_check(text)

        # 2. Sentiment analysis
        sentiment_results = self.sentiment_analysis(text)

        # 3. Toxicity detection
        toxicity_results = self.toxicity_analysis(text)

        # 4. Aggregate into a score and level
        risk_level = 'low'
        risk_score = 0
        violations = []

        # Score keyword hits: +40 per high-severity category, +20 per medium.
        for category, result in keyword_results.items():
            if result['found']:
                violations.append({
                    'type': category,
                    'keywords': result['keywords'],
                    'count': result['count'],
                    'severity': result['severity']
                })

                if result['severity'] == 'high':
                    risk_score += 40
                elif result['severity'] == 'medium':
                    risk_score += 20

        # Score the sentiment result: +15 when flagged abusive.
        if sentiment_results['is_abusive']:
            violations.append({
                'type': 'abuse_sentiment',
                'reason': '负面情感强烈',
                'severity': 'medium',
                'score': sentiment_results['negative_score']
            })
            risk_score += 15

        # Score the toxicity result: +25 when flagged toxic.
        if toxicity_results['is_toxic']:
            violations.append({
                'type': 'toxicity',
                'reason': '毒性内容',
                'severity': 'high',
                'score': toxicity_results['toxicity_score']
            })
            risk_score += 25

        # Map the score to a final risk level (hard-coded 60/30 cutoffs here;
        # the later LocalModerator uses Config.RISK_THRESHOLDS instead).
        if risk_score >= 60:
            risk_level = 'high'
        elif risk_score >= 30:
            risk_level = 'medium'
        else:
            risk_level = 'low'

        # Human-readable handling suggestions per risk level.
        suggestions = []
        if risk_level == 'high':
            suggestions.extend([
                '内容存在严重违规，建议立即删除',
                '可能涉及违法内容，请谨慎处理',
                '建议人工复核确认'
            ])
        elif risk_level == 'medium':
            suggestions.extend([
                '内容需要人工审核',
                '建议进一步检查敏感词汇',
                '可考虑限制发布或添加警告'
            ])
        else:
            suggestions.extend([
                '内容合规，可以发布',
                '建议定期复查内容质量'
            ])

        return {
            'text': text,
            'text_length': len(text),
            'risk_level': risk_level,
            'risk_score': risk_score,
            'violations': violations,
            'keyword_check': keyword_results,
            'sentiment_analysis': sentiment_results,
            'toxicity_analysis': toxicity_results,
            'suggestions': suggestions,
            'timestamp': datetime.now().isoformat(),
            'type': 'local'
        }

class LocalModerator:
    """Keyword/heuristic based local text moderation (fully offline).

    Combines three signals for a piece of text:

    1. category keyword matching against a built-in sensitive-word lexicon,
    2. a lightweight lexicon-based negative-sentiment heuristic, and
    3. a lexicon-based toxicity heuristic,

    then aggregates them into a risk score, a risk level and
    human-readable handling suggestions.

    This definition intentionally shadows the earlier model-based
    ``LocalModerator`` and is the one actually used at runtime.
    """
    def __init__(self):
        self.load_sensitive_words()
        # Score cutoffs for the 'high'/'medium' risk levels, taken from the
        # project config (e.g. {'high': 60, 'medium': 30}).
        self.risk_thresholds = Config.RISK_THRESHOLDS
        # NOTE(review): loaded but currently unused -- comprehensive_check
        # still applies hard-coded weights (40/20/15/25).  TODO: wire these in.
        self.risk_weights = Config.RISK_WEIGHTS

    def load_sensitive_words(self):
        """Build the category -> word-list lexicon and compile one regex per category."""
        self.sensitive_words = {
            'political': [
                '反动', '颠覆', '分裂', '暴乱', '政变', '革命', '推翻', '政权',
                '台独', '港独', '藏独', '疆独', '法轮功', '六四', '天安门', '民运'
            ],
            'pornographic': [
                '色情', '淫秽', '性交', '做爱', '自慰', '裸照', '裸体', '乳房',
                '阴茎', '阴道', '肛交', '口交', '乱伦', '强奸', '性虐', '嫖娼'
            ],
            'violence': [
                '杀人', '抢劫', '爆炸', '恐怖袭击', '炸弹', '枪支', '刀具',
                '暴力', '殴打', '伤害', '威胁', '恐吓', '绑架', '谋杀', '砍人'
            ],
            'advertisement': [
                '加微信', 'QQ群', '微信号', '联系方式', '购买', '优惠', '促销',
                '广告', '推广', '代理', '加盟', '赚钱', '投资', '理财', '微商'
            ],
            'privacy': [
                '身份证号', '银行卡号', '手机号', '密码', '验证码', '个人信息',
                '隐私', '泄露', '出售', '购买信息', '黑客', '破解', '盗号', '社工库'
            ],
            'abuse': [
                '傻逼', '混蛋', '白痴', '智障', '脑残', '废物', '垃圾', '去死',
                '他妈的', '操你妈', '王八蛋', '贱人', '婊子', '狗屎', '草泥马'
            ],
            'gambling': [
                '赌博', '赌场', '博彩', '彩票', '下注', '赌球', '赌马', '老虎机',
                '百家乐', '轮盘', '德州扑克', '炸金花', '牛牛', '六合彩'
            ],
            'drugs': [
                '毒品', '吸毒', '大麻', '冰毒', '海洛因', '可卡因', '摇头丸',
                'K粉', '鸦片', '罂粟', '制毒', '贩毒', '吸毒工具', '溜冰'
            ]
        }

        # Pre-compile one case-insensitive alternation pattern per category
        # so matching is a single C-level scan per category.
        self.patterns = {}
        for category, words in self.sensitive_words.items():
            pattern = '|'.join(map(re.escape, words))
            self.patterns[category] = re.compile(pattern, re.IGNORECASE)

    def keyword_check(self, text):
        """Match every category pattern against *text*.

        Returns {category: {found, keywords, count, severity}}; severity is
        'high' when a category yields more than 5 matches, else 'medium'.
        """
        results = {}
        for category, pattern in self.patterns.items():
            matches = pattern.findall(text)
            if matches:
                results[category] = {
                    'found': True,
                    'keywords': list(set(matches)),
                    'count': len(matches),
                    'severity': 'high' if len(matches) > 5 else 'medium'
                }
            else:
                results[category] = {
                    'found': False,
                    'keywords': [],
                    'count': 0,
                    'severity': 'low'
                }
        return results

    def sentiment_analysis(self, text):
        """Heuristic negative-sentiment detection (abuse / aggression cues).

        Counts how many indicator words occur in the text; the score saturates
        at 1.0 after 10 hits.  Returns negative_score, is_abusive, confidence.
        """
        # Simplified sentiment analysis based on indicator keywords.
        # Bug fix: a duplicate '坏' entry was removed so that word is no
        # longer counted twice.
        negative_indicators = [
            '不', '没', '无', '非', '坏', '差', '糟', '烂', '讨厌',
            '恨', '烦', '怒', '气', '骂', '打', '杀', '死', '滚'
        ]

        text_lower = text.lower()
        negative_count = sum(1 for word in negative_indicators if word in text_lower)
        negative_score = min(negative_count / 10, 1.0)

        # More than 6 distinct indicators present -> treat as abusive.
        is_abusive = negative_score > 0.6

        return {
            'negative_score': float(negative_score),
            'is_abusive': is_abusive,
            'confidence': float(negative_score)
        }

    def toxicity_analysis(self, text):
        """Heuristic toxicity detection based on a profanity word list.

        The score saturates at 1.0 after 5 hits; more than half means toxic.
        """
        # Simplified toxicity detection based on abusive indicator words
        # (mirrors the 'abuse' lexicon category).
        toxic_indicators = [
            '傻逼', '混蛋', '白痴', '智障', '脑残', '废物', '垃圾', '去死',
            '他妈的', '操你妈', '王八蛋', '贱人', '婊子', '狗屎', '草泥马'
        ]

        text_lower = text.lower()
        toxic_count = sum(1 for word in toxic_indicators if word in text_lower)
        toxicity_score = min(toxic_count / 5, 1.0)

        is_toxic = toxicity_score > 0.5

        return {
            'is_toxic': bool(is_toxic),
            'toxicity_score': float(toxicity_score),
            'label': 'toxic' if is_toxic else 'safe'
        }

    def comprehensive_check(self, text):
        """Run all checks and aggregate them into a risk report dict.

        Returns {'error': ...} for empty/whitespace input; otherwise a dict
        with risk_level/risk_score/violations/suggestions plus the raw
        per-check results and an ISO timestamp.
        """
        if not text or not text.strip():
            return {'error': '文本不能为空'}

        text = text.strip()

        # 1. Keyword matching
        keyword_results = self.keyword_check(text)

        # 2. Sentiment heuristic
        sentiment_results = self.sentiment_analysis(text)

        # 3. Toxicity heuristic
        toxicity_results = self.toxicity_analysis(text)

        # 4. Aggregate into a score and level
        risk_level = 'low'
        risk_score = 0
        violations = []

        # Keyword hits: +40 per high-severity category, +20 per medium one.
        for category, result in keyword_results.items():
            if result['found']:
                violations.append({
                    'type': category,
                    'keywords': result['keywords'],
                    'count': result['count'],
                    'severity': result['severity']
                })

                if result['severity'] == 'high':
                    risk_score += 40
                elif result['severity'] == 'medium':
                    risk_score += 20

        # Sentiment flag: +15 when the heuristic marks the text abusive.
        if sentiment_results['is_abusive']:
            violations.append({
                'type': 'abuse_sentiment',
                'reason': '负面情感强烈',
                'severity': 'medium',
                'score': sentiment_results['negative_score']
            })
            risk_score += 15

        # Toxicity flag: +25 when the heuristic marks the text toxic.
        if toxicity_results['is_toxic']:
            violations.append({
                'type': 'toxicity',
                'reason': '毒性内容',
                'severity': 'high',
                'score': toxicity_results['toxicity_score']
            })
            risk_score += 25

        # Map the score to a risk level using the configured cutoffs.
        if risk_score >= self.risk_thresholds['high']:
            risk_level = 'high'
        elif risk_score >= self.risk_thresholds['medium']:
            risk_level = 'medium'
        else:
            risk_level = 'low'

        # Human-readable handling suggestions per risk level.
        suggestions = []
        if risk_level == 'high':
            suggestions.extend([
                '内容存在严重违规，建议立即删除',
                '可能涉及违法内容，请谨慎处理',
                '建议人工复核确认'
            ])
        elif risk_level == 'medium':
            suggestions.extend([
                '内容需要人工审核',
                '建议进一步检查敏感词汇',
                '可考虑限制发布或添加警告'
            ])
        else:
            suggestions.extend([
                '内容合规，可以发布',
                '建议定期复查内容质量'
            ])

        return {
            'text': text,
            'text_length': len(text),
            'risk_level': risk_level,
            'risk_score': risk_score,
            'violations': violations,
            'keyword_check': keyword_results,
            'sentiment_analysis': sentiment_results,
            'toxicity_analysis': toxicity_results,
            'suggestions': suggestions,
            'timestamp': datetime.now().isoformat(),
            'type': 'local'
        }

class BatchModerator:
    """Batch moderation: runs LocalModerator over every record of a file."""
    def __init__(self):
        self.local_moderator = LocalModerator()
        # NOTE(review): currently unused -- process_file runs serially.
        # Kept so the attribute stays available for future parallelisation.
        self.executor = ThreadPoolExecutor(max_workers=Config.MAX_WORKERS)

    def allowed_file(self, filename):
        """Return True if *filename* has an extension in Config.ALLOWED_EXTENSIONS."""
        return '.' in filename and \
               filename.rsplit('.', 1)[1].lower() in Config.ALLOWED_EXTENSIONS

    def process_file(self, filepath):
        """Moderate a file record by record.

        For ``.csv`` files only the first column of each row is checked; any
        other file is treated as plain text, one record per non-empty line.
        Returns a list of per-record result dicts (each tagged with its
        ``line`` number), or ``{'error': ...}`` when the file cannot be read.
        """
        results = []

        try:
            with open(filepath, 'r', encoding=Config.ENCODING) as f:
                if filepath.endswith('.csv'):
                    reader = csv.reader(f)
                    for row_num, row in enumerate(reader, 1):
                        if row and row[0].strip():
                            text = row[0].strip()
                            result = self.local_moderator.comprehensive_check(text)
                            result['line'] = row_num
                            results.append(result)
                else:
                    # Plain text / json / log: one record per non-empty line.
                    for line_num, line in enumerate(f, 1):
                        line = line.strip()
                        if line:
                            result = self.local_moderator.comprehensive_check(line)
                            result['line'] = line_num
                            results.append(result)

        except Exception as e:
            logger.error(f"文件处理失败: {e}")
            return {'error': str(e)}

        return results

    def save_results(self, results, filename):
        """Dump *results* as JSON into RESULTS_FOLDER and return the file name.

        Bug fix: the *filename* parameter was previously unused and the
        literal text "(unknown)" was embedded in the result file name; the
        source file's base name is now interpolated as clearly intended.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        base = os.path.splitext(os.path.basename(filename))[0]
        result_filename = f"moderation_result_{timestamp}_{base}.json"
        result_path = os.path.join(app.config['RESULTS_FOLDER'], result_filename)

        with open(result_path, 'w', encoding=Config.ENCODING) as f:
            json.dump(results, f, ensure_ascii=False, indent=2)

        return result_filename

# Module-level moderator singletons shared by the Flask routes below.
local_moderator = LocalModerator()
batch_moderator = BatchModerator()

@app.route('/')
def index():
    """主页"""
    return render_template_string('''
    <!DOCTYPE html>
    <html lang="zh-CN">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>智能文本审核系统 v2.0</title>
        <script src="https://cdn.tailwindcss.com"></script>
        <link href="https://cdn.bootcdn.net/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
        <style>
            .gradient-bg {
                background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            }
            .glass-effect {
                background: rgba(255, 255, 255, 0.1);
                backdrop-filter: blur(10px);
                border: 1px solid rgba(255, 255, 255, 0.2);
            }
        </style>
    </head>
    <body class="bg-gray-50">
        <div class="gradient-bg min-h-screen py-8">
            <div class="max-w-7xl mx-auto px-4">
                <!-- 头部 -->
                <div class="text-center mb-8">
                    <h1 class="text-5xl font-bold text-white mb-4">
                        <i class="fas fa-shield-alt mr-3"></i>
                        智能文本审核系统
                        <span class="text-sm bg-yellow-400 text-black px-2 py-1 rounded ml-2">v2.0</span>
                    </h1>
                    <p class="text-xl text-gray-200">全方位内容安全检测，支持本地模型和云端API</p>
                </div>
                
                <!-- 统计卡片 -->
                <div class="grid grid-cols-1 md:grid-cols-3 gap-4 mb-8">
                    <div class="glass-effect rounded-lg p-4 text-center">
                        <i class="fas fa-file-alt text-3xl text-white mb-2"></i>
                        <div class="text-2xl font-bold text-white" id="totalChecks">0</div>
                        <div class="text-gray-200">总检测次数</div>
                    </div>
                    <div class="glass-effect rounded-lg p-4 text-center">
                        <i class="fas fa-exclamation-triangle text-3xl text-yellow-400 mb-2"></i>
                        <div class="text-2xl font-bold text-white" id="riskTexts">0</div>
                        <div class="text-gray-200">风险文本</div>
                    </div>
                    <div class="glass-effect rounded-lg p-4 text-center">
                        <i class="fas fa-shield-alt text-3xl text-green-400 mb-2"></i>
                        <div class="text-2xl font-bold text-white">离线</div>
                        <div class="text-gray-200">本地分析</div>
                    </div>
                </div>
                
                <!-- 主要内容区 -->
                <div class="grid grid-cols-1 lg:grid-cols-2 gap-8">
                    <!-- 文本审核 -->
                    <div class="bg-white rounded-xl shadow-2xl p-6">
                        <h2 class="text-2xl font-bold mb-4 flex items-center">
                            <i class="fas fa-edit text-blue-600 mr-2"></i>
                            文本审核
                        </h2>
                        
                        <div class="mb-4">
                            <label class="block text-sm font-medium text-gray-700 mb-2">检测模式</label>
                            <div class="bg-green-50 border border-green-200 rounded-md p-3">
                                <i class="fas fa-shield-alt text-green-600 mr-2"></i>
                                <span class="text-green-800 font-medium">纯本地分析（无需网络）</span>
                            </div>
                        </div>
                        
                        <div class="mb-4">
                            <label class="block text-sm font-medium text-gray-700 mb-2">输入文本</label>
                            <textarea id="textInput" rows="6" 
                                class="w-full px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-500"
                                placeholder="请输入需要审核的文本内容..."></textarea>
                            <div class="text-sm text-gray-500 mt-2">
                                <span id="charCount">0</span> 字符 | 
                                <span id="wordCount">0</span> 词语
                            </div>
                        </div>
                        
                        <button onclick="checkText()" id="checkButton"
                            class="w-full bg-blue-600 text-white py-3 rounded-md hover:bg-blue-700 transition">
                            <i class="fas fa-search mr-2"></i>开始审核
                        </button>
                    </div>
                    
                    <!-- 文件审核 -->
                    <div class="bg-white rounded-xl shadow-2xl p-6">
                        <h2 class="text-2xl font-bold mb-4 flex items-center">
                            <i class="fas fa-file-upload text-green-600 mr-2"></i>
                            文件审核
                        </h2>
                        
                        <div class="mb-4">
                            <label class="block text-sm font-medium text-gray-700 mb-2">选择文件</label>
                            <input type="file" id="fileInput" accept=".txt,.csv,.json,.log"
                                class="w-full px-3 py-2 border border-gray-300 rounded-md">
                            <p class="text-xs text-gray-500 mt-1">支持: txt, csv, json, log (最大16MB)</p>
                        </div>
                        
                        <button onclick="uploadFile()" id="uploadButton"
                            class="w-full bg-green-600 text-white py-3 rounded-md hover:bg-green-700 transition">
                            <i class="fas fa-upload mr-2"></i>上传并审核
                        </button>
                        
                        <div id="uploadProgress" class="hidden mt-4">
                            <div class="bg-gray-200 rounded-full h-2">
                                <div class="bg-green-600 h-2 rounded-full" style="width: 0%"></div>
                            </div>
                        </div>
                    </div>
                </div>
                
                <!-- 结果展示 -->
                <div id="results" class="hidden mt-8">
                    <div class="bg-white rounded-xl shadow-2xl p-6">
                        <h2 class="text-2xl font-bold mb-4 flex items-center">
                            <i class="fas fa-chart-bar text-purple-600 mr-2"></i>
                            审核结果
                        </h2>
                        
                        <div id="resultContent"></div>
                    </div>
                </div>
                
                <!-- 历史记录 -->
                <div class="mt-8">
                    <div class="bg-white rounded-xl shadow-2xl p-6">
                        <h2 class="text-2xl font-bold mb-4 flex items-center">
                            <i class="fas fa-history text-orange-600 mr-2"></i>
                            审核历史
                        </h2>
                        <div id="historyList" class="space-y-2">
                            <p class="text-gray-500 text-center">暂无历史记录</p>
                        </div>
                    </div>
                </div>
            </div>
        </div>
        
        <script>
            let checkCount = 0;
            let riskCount = 0;
            let history = [];
            
            // 文本输入监听
            const textInput = document.getElementById('textInput');
            const charCount = document.getElementById('charCount');
            const wordCount = document.getElementById('wordCount');
            
            textInput.addEventListener('input', () => {
                const text = textInput.value;
                charCount.textContent = text.length;
                wordCount.textContent = text.trim() ? text.trim().split(/\\s+/).length : 0;
            });
            
            // 文本审核
            async function checkText() {
                const text = textInput.value.trim();
                if (!text) {
                    alert('请输入文本内容');
                    return;
                }
                
                const button = document.getElementById('checkButton');
                
                button.disabled = true;
                button.innerHTML = '<i class="fas fa-spinner fa-spin mr-2"></i>审核中...';
                
                try {
                    const response = await fetch('/api/check', {
                        method: 'POST',
                        headers: {
                            'Content-Type': 'application/json',
                        },
                        body: JSON.stringify({ text })
                    });
                    
                    const result = await response.json();
                    displayResults(result);
                    
                    // 更新统计
                    checkCount++;
                    if (result.risk_level !== 'low') riskCount++;
                    updateStats();
                    
                    // 添加到历史
                    history.unshift({
                        text: text.substring(0, 50) + '...',
                        risk: result.risk_level,
                        time: new Date().toLocaleString()
                    });
                    updateHistory();
                    
                } catch (error) {
                    alert('审核失败: ' + error.message);
                } finally {
                    button.disabled = false;
                    button.innerHTML = '<i class="fas fa-search mr-2"></i>开始审核';
                }
            }
            
            // 文件上传审核
            async function uploadFile() {
                const fileInput = document.getElementById('fileInput');
                const file = fileInput.files[0];
                
                if (!file) {
                    alert('请选择文件');
                    return;
                }
                
                const formData = new FormData();
                formData.append('file', file);
                
                const button = document.getElementById('uploadButton');
                const progress = document.getElementById('uploadProgress');
                
                button.disabled = true;
                button.innerHTML = '<i class="fas fa-spinner fa-spin mr-2"></i>上传中...';
                progress.classList.remove('hidden');
                
                try {
                    const response = await fetch('/api/upload', {
                        method: 'POST',
                        body: formData
                    });
                    
                    const result = await response.json();
                    alert('文件审核完成，结果已保存');
                    
                } catch (error) {
                    alert('上传失败: ' + error.message);
                } finally {
                    button.disabled = false;
                    button.innerHTML = '<i class="fas fa-upload mr-2"></i>上传并审核';
                    progress.classList.add('hidden');
                }
            }
            
            // 显示结果
            function displayResults(result) {
                const resultsDiv = document.getElementById('results');
                const contentDiv = document.getElementById('resultContent');
                
                let riskColor = {
                    'low': 'text-green-600',
                    'medium': 'text-yellow-600',
                    'high': 'text-red-600'
                };
                
                let riskText = {
                    'low': '低风险',
                    'medium': '中风险',
                    'high': '高风险'
                };
                
                contentDiv.innerHTML = `
                    <div class="grid grid-cols-1 md:grid-cols-3 gap-4 mb-6">
                        <div class="bg-gray-50 p-4 rounded-lg">
                            <h3 class="font-semibold mb-2">风险等级</h3>
                            <div class="text-2xl font-bold ${riskColor[result.risk_level]}">
                                ${riskText[result.risk_level]}
                            </div>
                            <div class="text-sm text-gray-600">评分: ${result.risk_score || 0}/100</div>
                        </div>
                        
                        <div class="bg-gray-50 p-4 rounded-lg">
                            <h3 class="font-semibold mb-2">文本长度</h3>
                            <div class="text-2xl font-bold text-blue-600">${result.text_length || 0}</div>
                            <div class="text-sm text-gray-600">字符</div>
                        </div>
                        
                        <div class="bg-gray-50 p-4 rounded-lg">
                            <h3 class="font-semibold mb-2">检测方式</h3>
                            <div class="text-2xl font-bold text-purple-600">
                                ${result.type === 'tencent' ? '腾讯云' : '本地模型'}
                            </div>
                            <div class="text-sm text-gray-600">AI审核</div>
                        </div>
                    </div>
                    
                    ${result.violations && result.violations.length > 0 ? `
                        <div class="mb-6">
                            <h3 class="text-lg font-semibold mb-3">违规详情</h3>
                            <div class="space-y-2">
                                ${result.violations.map(v => `
                                    <div class="border-l-4 border-red-500 pl-4 py-2">
                                        <div class="font-medium">${v.type}</div>
                                        <div class="text-sm text-gray-600">${v.keywords ? v.keywords.join(', ') : v.reason}</div>
                                    </div>
                                `).join('')}
                            </div>
                        </div>
                    ` : ''}
                    
                    <div class="mb-6">
                        <h3 class="text-lg font-semibold mb-3">处理建议</h3>
                        <div class="bg-blue-50 p-4 rounded-lg">
                            ${result.suggestions.map(s => `<div class="mb-1">• ${s}</div>`).join('')}
                        </div>
                    </div>
                `;
                
                resultsDiv.classList.remove('hidden');
                resultsDiv.scrollIntoView({ behavior: 'smooth' });
            }
            
            // 更新统计
            function updateStats() {
                document.getElementById('totalChecks').textContent = checkCount;
                document.getElementById('riskTexts').textContent = riskCount;
            }
            
            // 更新历史
            function updateHistory() {
                const historyDiv = document.getElementById('historyList');
                if (history.length === 0) {
                    historyDiv.innerHTML = '<p class="text-gray-500 text-center">暂无历史记录</p>';
                    return;
                }
                
                historyDiv.innerHTML = history.slice(0, 5).map(h => `
                    <div class="flex justify-between items-center p-3 bg-gray-50 rounded">
                        <div>
                            <div class="font-medium">${h.text}</div>
                            <div class="text-sm text-gray-500">${h.time}</div>
                        </div>
                        <span class="px-2 py-1 rounded text-sm ${
                            h.risk === 'high' ? 'bg-red-100 text-red-800' :
                            h.risk === 'medium' ? 'bg-yellow-100 text-yellow-800' :
                            'bg-green-100 text-green-800'
                        }">
                            ${h.risk === 'high' ? '高风险' : h.risk === 'medium' ? '中风险' : '低风险'}
                        </span>
                    </div>
                `).join('');
            }
        </script>
    </body>
    </html>
    ''')

@app.route('/api/check', methods=['POST'])
def check_text():
    """API endpoint: moderate a single text (local analysis only).

    Expects a JSON body of the form ``{"text": "..."}``.

    Returns:
        200 with the moderation result from the local model,
        400 if the body is missing/not JSON or the text is empty,
        500 on unexpected internal errors.
    """
    try:
        # silent=True makes get_json() return None instead of raising on a
        # missing or non-JSON body; the original code would hit an
        # AttributeError on data.get() and answer 500 instead of 400.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': '文本不能为空'}), 400

        text = data.get('text', '')

        if not text or not text.strip():
            return jsonify({'error': '文本不能为空'}), 400

        # Run the local-model pipeline (keywords + sentiment/toxicity).
        result = local_moderator.comprehensive_check(text)

        return jsonify(result)

    except Exception as e:
        logger.error(f"API调用失败: {e}")
        return jsonify({'error': str(e)}), 500

@app.route('/api/upload', methods=['POST'])
def upload_file():
    """API endpoint: upload a file and batch-moderate it (local analysis only).

    Returns:
        200 with a summary (saved result filename, item count),
        400 for a missing file, empty selection, or unsupported type,
        500 if processing fails.
    """
    try:
        if 'file' not in request.files:
            return jsonify({'error': '没有上传文件'}), 400

        file = request.files['file']

        if file.filename == '':
            return jsonify({'error': '没有选择文件'}), 400

        if file and batch_moderator.allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # secure_filename() strips non-ASCII characters, so a purely
            # Chinese filename (the common case for this app) collapses to
            # '' or just an extension.  Fall back to a timestamped name to
            # avoid saving to an empty or colliding path.
            if not filename or filename.startswith('.'):
                ext = os.path.splitext(filename)[1] if filename else ''
                filename = f"upload_{datetime.now().strftime('%Y%m%d%H%M%S')}{ext}"

            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)

            # Run the local-model batch pipeline over the saved file.
            results = batch_moderator.process_file(filepath)

            if 'error' in results:
                return jsonify(results), 500

            # Persist per-item results so they can be downloaded later.
            result_filename = batch_moderator.save_results(results, filename)

            return jsonify({
                'message': '文件处理完成',
                'result_file': result_filename,
                'total_items': len(results)
            })

        return jsonify({'error': '文件类型不支持'}), 400

    except Exception as e:
        logger.error(f"文件上传失败: {e}")
        return jsonify({'error': str(e)}), 500

@app.route('/api/results/<filename>')
def download_result(filename):
    """Serve a previously generated moderation result file as a download.

    Any failure (missing file, invalid name) is reported uniformly as 404.
    """
    try:
        return send_from_directory(
            app.config['RESULTS_FOLDER'], filename, as_attachment=True
        )
    except Exception:
        return jsonify({'error': '文件不存在'}), 404

@app.route('/api/stats')
def get_stats():
    """Return aggregate moderation statistics.

    Currently returns placeholder zeros plus a model-ready flag; richer
    statistics can be wired in here later.
    """
    try:
        stats = {
            'total_checks': 0,
            'risk_texts': 0,
            'local_model_ready': True,
        }
        return jsonify(stats)
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.errorhandler(413)
def too_large(error):
    """Translate an oversized-upload error (HTTP 413) into a JSON body."""
    return jsonify({'error': '文件过大'}), 413

# Script entry point: log startup status, then launch the dev server.
if __name__ == '__main__':
    logger.info("启动文本审核系统...")
    logger.info("系统配置：纯本地分析模式，无需外部API")
    logger.info("本地模型状态：已就绪")
    logger.info("系统启动完成，访问 http://localhost:5000 使用")
    
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (remote code execution if reachable) and host='0.0.0.0' binds all
    # interfaces — confirm this is development-only; use a WSGI server
    # with debug disabled for any real deployment.
    app.run(host='0.0.0.0', port=5000, debug=True)