import json
import re
import nltk
import os
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.base import BaseEstimator, ClassifierMixin  # required by BertClassifierWrapper
from xgboost import XGBClassifier
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
import numpy as np
import warnings
import xgboost as xgb

# Silence library warnings (sklearn/transformers/xgboost are noisy during training).
warnings.filterwarnings('ignore')

class TextDataset(Dataset):
    """Torch dataset that tokenizes raw texts on the fly for BERT.

    Each item is a dict with fixed-length ``input_ids`` / ``attention_mask``
    tensors (padded/truncated to ``max_length``) plus a scalar ``labels`` tensor.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Tokenizer returns tensors shaped (1, max_length); flatten to 1-D
        # so DataLoader batching produces (batch, max_length).
        encoded = self.tokenizer(
            self.texts[idx],
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
        )
        return {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'labels': torch.tensor(self.labels[idx], dtype=torch.long),
        }

# sklearn-compatible wrapper so the fine-tuned BERT model can participate in a VotingClassifier.
class BertClassifierWrapper(BaseEstimator, ClassifierMixin):
    """Adapts a fine-tuned BERT model to the sklearn classifier interface."""

    def __init__(self, model, tokenizer, device):
        self.model = model
        self.tokenizer = tokenizer
        self.device = device
        self._estimator_type = "classifier"  # tells sklearn this is a classifier
        self.classes_ = np.array([0, 1])     # binary task by default

    def fit(self, X, y):
        # BERT is trained separately; only record the observed label set.
        self.classes_ = np.unique(y)
        return self

    def predict_proba(self, texts):
        # sklearn may hand us numeric feature rows instead of raw strings;
        # coerce each row to its string form before tokenizing.
        if isinstance(texts[0], (list, np.ndarray)):
            texts = [str(item) for item in texts]

        loader = DataLoader(
            TextDataset(texts, [0] * len(texts), self.tokenizer),  # dummy labels
            batch_size=32,
        )

        self.model.eval()
        all_probs = []
        with torch.no_grad():
            for batch in loader:
                outputs = self.model(
                    input_ids=batch['input_ids'].to(self.device),
                    attention_mask=batch['attention_mask'].to(self.device),
                )
                all_probs.extend(torch.softmax(outputs.logits, dim=1).cpu().numpy())
        return np.array(all_probs)

    def predict(self, texts):
        return np.argmax(self.predict_proba(texts), axis=1)

class TextDetector:
    def __init__(self):
        """LLM-generated-text detector: cleaning, feature extraction,
        XGBoost + BERT training, and a weighted-probability ensemble."""
        self.tokenizer = None
        self.bert_model = None
        self.xgb_model = None
        self.ensemble_model = None
        self._configure_device()

    def _configure_device(self):
        """Select CUDA when available, otherwise CPU."""
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if torch.cuda.is_available():
            print(f"使用 GPU: {torch.cuda.get_device_name(0)}")
            print(f"CUDA 版本: {torch.version.cuda}")
        else:
            print("未检测到 GPU，使用 CPU 训练")

    def clean_text(self, text):
        """Normalize text: strip HTML tags, drop punctuation, collapse
        whitespace, and lowercase."""
        text = re.sub(r'<.*?>', '', text)
        text = re.sub(r'[^\w\s]', '', text)
        text = re.sub(r'\s+', ' ', text).strip()
        text = text.lower()
        return text

    def extract_basic_features(self, text):
        """Extract count/ratio features from a (cleaned) text.

        NOTE(review): callers feed text already passed through clean_text,
        which removes punctuation — so the punctuation features below are
        effectively always 0; confirm whether they should run on raw text.
        """
        features = {}
        # Length-based features
        features['text_length'] = len(text)
        word_tokens = text.split()  # simple whitespace split; avoids punkt dependency
        features['word_count'] = len(word_tokens)
        sentence_tokens = re.split(r'[.!?]+', text)
        sentence_tokens = [s for s in sentence_tokens if s.strip()]
        features['sentence_count'] = len(sentence_tokens)

        # Punctuation features
        punctuation = re.findall(r'[^\w\s]', text)
        features['punctuation_count'] = len(punctuation)
        features['punctuation_ratio'] = len(punctuation) / features['word_count'] if features['word_count'] > 0 else 0

        # Stopword ratio (English stopword list)
        stop_words = set(stopwords.words('english'))
        features['stopword_ratio'] = len([w for w in word_tokens if w in stop_words]) / len(word_tokens) if len(
            word_tokens) > 0 else 0

        # Digit features
        numbers = re.findall(r'\d+', text)
        features['number_count'] = len(numbers)
        features['number_ratio'] = len(numbers) / len(word_tokens) if len(word_tokens) > 0 else 0

        return features

    def extract_style_features(self, text):
        """Extract style/linguistic features.

        Always emits the same key set (zeros for empty text) so feature
        vectors built from the dict values have a fixed length.
        """
        features = {}
        word_tokens = text.split()  # simple whitespace split

        # Lexical richness. Previously these two keys were omitted for empty
        # text, producing ragged feature vectors downstream in
        # train_xgboost_model / train_ensemble; default them to 0.0 instead.
        if len(word_tokens) > 0:
            unique_words = set(word_tokens)
            features['lexical_diversity'] = len(unique_words) / len(word_tokens)
            features['avg_word_length'] = sum(len(word) for word in word_tokens) / len(word_tokens)
        else:
            features['lexical_diversity'] = 0.0
            features['avg_word_length'] = 0.0

        # Heuristic signals typical of LLM-generated text
        features['list_pattern'] = self._detect_list_pattern(text)
        features['model_keywords'] = self._detect_model_keywords(text)

        return features

    def _detect_list_pattern(self, text):
        """Count list-style markers (numbered items, bullets) — a common
        trait of LLM-generated text."""
        list_patterns = [
            r'\d+\.', r'[a-z]\.', r'[A-Z]\.', r'•', r'\- ', r'\+ ', r'\* '
        ]
        count = 0
        for pattern in list_patterns:
            count += len(re.findall(pattern, text))
        return count

    def _detect_model_keywords(self, text):
        """Count case-insensitive occurrences of phrases frequently produced
        by LLMs."""
        model_keywords = [
            "as an", "to be", "in order to", "it is important to",
            "here are", "let's", "in summary", "Therefore", "以下是"
        ]
        count = 0
        for keyword in model_keywords:
            count += text.lower().count(keyword.lower())
        return count

    def extract_all_features(self, text):
        """Merge basic and style features into a single dict.

        Key insertion order is deterministic, so list(features.values())
        yields a consistent feature vector across samples.
        """
        basic_features = self.extract_basic_features(text)
        style_features = self.extract_style_features(text)
        return {**basic_features, **style_features}

    def load_data(self, file_path):
        """Load a JSONL file; each record keeps the cleaned text, the label
        (-1 when absent, e.g. test data), and the original text."""
        data = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                try:
                    sample = json.loads(line)
                    text = self.clean_text(sample.get('text', ''))
                    label = sample.get('label', -1)
                    data.append({
                        'text': text,
                        'label': label,
                        'original_text': sample.get('text', '')
                    })
                except json.JSONDecodeError:
                    print("警告：JSON解析失败，已跳过")
        return data

    def preprocess_train_data(self, train_file, test_size=0.2, random_state=42):
        """Load training data, extract features, and split train/validation.

        The two train_test_split calls use the same y, random_state and
        stratify, so the feature split and the text split line up row-for-row.
        """
        print("加载训练数据...")
        train_data = self.load_data(train_file)

        print("提取特征...")
        X = [self.extract_all_features(sample['text']) for sample in train_data]
        y = [sample['label'] for sample in train_data]
        texts = [sample['text'] for sample in train_data]

        # Stratified split keeps the label distribution in both partitions.
        X_train, X_val, y_train, y_val = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y
        )
        texts_train, texts_val, _, _ = train_test_split(
            texts, y, test_size=test_size, random_state=random_state, stratify=y
        )

        print(f"训练集大小: {len(X_train)}, 验证集大小: {len(X_val)}")
        return X_train, X_val, y_train, y_val, texts_train, texts_val

    def train_xgboost_model(self, X_train, y_train, X_val, y_val):
        """Train an XGBoost classifier on the hand-crafted feature dicts and
        report validation accuracy/F1."""
        print("训练XGBoost模型...")

        # Feature dicts -> dense arrays (key order is consistent per sample).
        X_train_array = np.array([list(features.values()) for features in X_train])
        X_val_array = np.array([list(features.values()) for features in X_val])

        model = XGBClassifier(
            n_estimators=100,
            learning_rate=0.1,
            max_depth=5,
            min_child_weight=1,
            subsample=0.8,
            colsample_bytree=0.8,
            objective='binary:logistic',
            random_state=42
        )

        model.fit(X_train_array, y_train)

        # Validation metrics
        y_pred = model.predict(X_val_array)
        accuracy = accuracy_score(y_val, y_pred)
        f1 = f1_score(y_val, y_pred)
        print(f"XGBoost 验证集准确率: {accuracy:.4f}, F1分数: {f1:.4f}")

        return model

    def train_bert_model(self, texts_train, y_train, texts_val, y_val, max_length=128, epochs=3):
        """Fine-tune bert-base-uncased for binary classification (PyTorch).

        Reuses 'best_model.pth' when present; otherwise trains for `epochs`
        and checkpoints the best validation-F1 weights.
        """
        print("初始化BERT模型...")
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.bert_model = BertForSequenceClassification.from_pretrained(
            'bert-base-uncased',
            num_labels=2
        ).to(self.device)

        # Reuse a previous checkpoint if available. map_location lets a
        # GPU-saved checkpoint load on a CPU-only machine (and vice versa).
        if os.path.exists('best_model.pth'):
            print("加载已有的最佳模型...")
            self.bert_model.load_state_dict(
                torch.load('best_model.pth', map_location=self.device)
            )
            return self.bert_model

        # Datasets and loaders
        train_dataset = TextDataset(texts_train, y_train, self.tokenizer, max_length)
        val_dataset = TextDataset(texts_val, y_val, self.tokenizer, max_length)

        train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=16)

        # Optimizer and LR schedule (10% decay per epoch)
        optimizer = torch.optim.AdamW(self.bert_model.parameters(), lr=2e-5)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

        # Training loop
        best_val_f1 = 0
        for epoch in range(epochs):
            print(f"\nEpoch {epoch + 1}/{epochs}")

            # Training phase
            self.bert_model.train()
            total_loss = 0
            for batch in train_loader:
                optimizer.zero_grad()

                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                labels = batch['labels'].to(self.device)

                outputs = self.bert_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    labels=labels
                )

                loss = outputs.loss
                total_loss += loss.item()

                loss.backward()
                optimizer.step()

            avg_train_loss = total_loss / len(train_loader)
            print(f"平均训练损失: {avg_train_loss:.4f}")

            # Validation phase
            print("验证中...")
            self.bert_model.eval()
            val_preds = []
            val_labels = []
            val_loss = 0

            with torch.no_grad():
                for batch in val_loader:
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    labels = batch['labels'].to(self.device)

                    outputs = self.bert_model(
                        input_ids=input_ids,
                        attention_mask=attention_mask,
                        labels=labels
                    )

                    val_loss += outputs.loss.item()
                    preds = torch.argmax(outputs.logits, dim=1)
                    val_preds.extend(preds.cpu().numpy())
                    val_labels.extend(labels.cpu().numpy())

            val_f1 = f1_score(val_labels, val_preds)
            avg_val_loss = val_loss / len(val_loader)
            print(f"验证集 Loss: {avg_val_loss:.4f}, F1-score: {val_f1:.4f}")

            # Checkpoint on best validation F1
            if val_f1 > best_val_f1:
                best_val_f1 = val_f1
                print(f"保存最佳模型 (F1: {val_f1:.4f})")
                torch.save(self.bert_model.state_dict(), 'best_model.pth')

            scheduler.step()

        print(f"最佳验证集 F1-score: {best_val_f1:.4f}")
        return self.bert_model

    def predict_with_bert(self, texts):
        """Return P(label==1) from the BERT model for each text (batched)."""
        self.bert_model.eval()
        dataset = TextDataset(texts, [0] * len(texts), self.tokenizer)  # dummy labels
        loader = DataLoader(dataset, batch_size=32)

        predictions = []
        with torch.no_grad():
            for batch in loader:
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)

                outputs = self.bert_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask
                )

                # Probability of the positive class (column 1)
                preds = torch.softmax(outputs.logits, dim=1)[:, 1]
                predictions.extend(preds.cpu().numpy())

        return np.array(predictions)

    def build_ensemble_model(self, models):
        """Build a soft-voting ensemble over (name, estimator) pairs.

        NOTE(review): the pipeline's actual prediction path uses the weighted
        combination from train_ensemble; this VotingClassifier is never fit.
        """
        print("构建集成模型...")
        self.ensemble_model = VotingClassifier(
            estimators=models,
            voting='soft',
            weights=[1, 1]
        )
        return self.ensemble_model

    def train_ensemble(self, X_train, y_train, X_val, y_val, bert_model, xgb_model, texts_train, texts_val):
        """Grid-search the XGBoost/BERT mixing weight on the validation set
        (maximizing F1) instead of fitting a VotingClassifier."""
        print("准备集成模型训练数据...")

        # Keep references for predict_with_ensemble
        self.bert_model = bert_model
        self.xgb_model = xgb_model

        # Per-model positive-class probabilities on the validation set
        X_val_array = np.array([list(features.values()) for features in X_val])
        xgb_preds = self.xgb_model.predict_proba(X_val_array)[:, 1]

        bert_preds = self.predict_with_bert(texts_val)

        # Search the best XGBoost weight in [0.1, 0.9]
        best_f1 = 0
        best_weight = 0.5

        print("寻找最佳集成权重...")
        for weight in np.arange(0.1, 1.0, 0.1):
            ensemble_probs = weight * xgb_preds + (1 - weight) * bert_preds
            ensemble_preds = (ensemble_probs > 0.5).astype(int)

            f1 = f1_score(y_val, ensemble_preds)
            print(f"权重 {weight:.1f} (XGB) / {1-weight:.1f} (BERT): F1 = {f1:.4f}")

            if f1 > best_f1:
                best_f1 = f1
                best_weight = weight

        print(f"最佳集成权重: XGB={best_weight:.2f}, BERT={1-best_weight:.2f}, F1={best_f1:.4f}")
        self.ensemble_weight = best_weight

        return self

    def predict_with_ensemble(self, text):
        """Predict 0/1 for one text via the weighted probability ensemble.

        Requires train_ensemble to have been called (sets ensemble_weight).
        """
        # Hand-crafted features for XGBoost
        features = self.extract_all_features(text)
        features_array = np.array([list(features.values())])

        # Per-model positive-class probabilities
        xgb_prob = self.xgb_model.predict_proba(features_array)[0, 1]
        bert_prob = self.predict_with_bert([text])[0]

        # Weighted combination, thresholded at 0.5
        ensemble_prob = self.ensemble_weight * xgb_prob + (1 - self.ensemble_weight) * bert_prob
        return 1 if ensemble_prob > 0.5 else 0

    def predict_test_set(self, test_file, output_file='submit.txt'):
        """Predict every record of a JSONL test file and write one label per
        line to output_file."""
        print(f"加载测试数据: {test_file}")
        test_data = self.load_data(test_file)

        print("开始预测...")
        predictions = []
        for i, sample in enumerate(test_data):
            text = sample['text']
            pred = self.predict_with_ensemble(text)
            predictions.append(str(pred))

            # Progress every 100 records
            if (i + 1) % 100 == 0:
                print(f"已处理 {i + 1}/{len(test_data)} 条记录")

        # Write results
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write('\n'.join(predictions))

        print(f"预测结果已保存至: {output_file}")
        return predictions


def ensure_nltk_resources():
    """Check for required NLTK resources and download any that are missing."""
    required = ['punkt', 'stopwords', 'wordnet']
    missing = []

    for name in required:
        # punkt lives under tokenizers/, the others under corpora/
        prefix = 'tokenizers' if name == 'punkt' else 'corpora'
        try:
            nltk.data.find(f'{prefix}/{name}')
        except LookupError:
            missing.append(name)

    if missing:
        print(f"正在下载NLTK资源: {', '.join(missing)}")
        nltk.download(missing)
        print("资源下载完成")
    else:
        print("NLTK资源已齐全")


def main():
    """End-to-end pipeline: environment setup, model training, ensemble
    weight search, and test-set prediction."""
    # NLTK corpora must exist before feature extraction
    ensure_nltk_resources()

    # Pin the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    print("配置PyTorch环境...")
    if torch.cuda.is_available():
        print(f"使用 GPU: {torch.cuda.get_device_name(0)}")
        print(f"CUDA 版本: {torch.version.cuda}")
        torch.cuda.set_per_process_memory_fraction(0.8)
        torch.backends.cudnn.benchmark = True
    else:
        print("未检测到 GPU，将使用 CPU 训练")

    detector = TextDetector()

    # Train both base models
    print("\n=== 开始训练模型 ===")
    splits = detector.preprocess_train_data('train.jsonl')
    X_train, X_val, y_train, y_val, texts_train, texts_val = splits

    xgb_clf = detector.train_xgboost_model(X_train, y_train, X_val, y_val)
    bert_clf = detector.train_bert_model(texts_train, y_train, texts_val, y_val)

    # Build the voting ensemble, then search the weighted combination
    wrapper = BertClassifierWrapper(detector.bert_model, detector.tokenizer, detector.device)
    detector.build_ensemble_model([('xgb', xgb_clf), ('bert', wrapper)])
    detector.train_ensemble(X_train, y_train, X_val, y_val, bert_clf, xgb_clf, texts_train, texts_val)

    # Predict the test set
    print("\n=== 开始预测测试集 ===")
    detector.predict_test_set('test.jsonl', 'submit.txt')

    print("\n=== 处理完成 ===")
    print("训练数据: train.jsonl")
    print("测试数据: test.jsonl")
    print("预测结果: submit.txt")


# Script entry point: run the full train-and-predict pipeline.
if __name__ == "__main__":
    main()