# Recorded submission score: 0.7267

import pandas as pd
import numpy as np
import json
import re
import string
import gc
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import SGDClassifier, LogisticRegression, RidgeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.naive_bayes import MultinomialNB, ComplementNB
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
import warnings

warnings.filterwarnings('ignore')


# Closed vocabulary sets used by the feature extractor. Hoisted to module
# level (they were rebuilt inside the per-text loop) and stored as
# frozensets so each membership test is O(1) instead of a list scan.
_COMMON_WORDS = frozenset({
    'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'is',
    'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would',
    'could', 'should', 'can', 'may', 'might', 'must', 'shall', 'this', 'that', 'these', 'those'})

_FORMAL_WORDS = frozenset({
    'however', 'therefore', 'furthermore', 'moreover', 'nevertheless', 'consequently',
    'specifically', 'particularly', 'additionally', 'alternatively', 'significantly', 'substantial',
    'comprehensive', 'fundamental', 'essential', 'critical', 'crucial', 'vital', 'paramount',
    'optimal'})

_CONNECTORS = frozenset({
    'and', 'but', 'or', 'so', 'because', 'since', 'while', 'although', 'though', 'unless', 'until',
    'before', 'after', 'when', 'where', 'why', 'how'})


def advanced_text_features(texts):
    """Extract 24 hand-crafted stylometric features per text for AI detection.

    Parameters
    ----------
    texts : iterable of str
        Documents (raw or cleaned).

    Returns
    -------
    numpy.ndarray of shape (len(texts), 24)
        One row per text; column order matches the append() at the bottom.
    """
    features = []

    for text in texts:
        # --- basic counts (word lengths / lowercased words computed once) ---
        char_count = len(text)
        words = text.split()
        word_count = len(words)
        word_lengths = [len(w) for w in words]
        lower_words = [w.lower() for w in words]

        # Sentence segmentation on terminal punctuation runs.
        sentences = [s.strip() for s in re.split(r'[.!?]+', text) if s.strip()]
        sent_count = len(sentences)

        # 1. word-length complexity
        avg_word_len = np.mean(word_lengths) if words else 0
        word_len_std = np.std(word_lengths) if word_count > 1 else 0
        max_word_len = max(word_lengths) if words else 0

        # 2. sentence-length complexity
        if sentences:
            sent_lengths = [len(s.split()) for s in sentences]
            avg_sent_len = np.mean(sent_lengths)
            sent_len_std = np.std(sent_lengths) if len(sent_lengths) > 1 else 0
            max_sent_len = max(sent_lengths)
        else:
            avg_sent_len = sent_len_std = max_sent_len = 0

        # 3. punctuation patterns (AI text tends to punctuate more regularly)
        punct_counts = [text.count(p) for p in '.,;:!?']
        punct_total = sum(punct_counts)
        punct_density = punct_total / max(char_count, 1)
        punct_variety = sum(1 for c in punct_counts if c > 0)

        # 4. capitalization patterns
        upper_ratio = sum(1 for c in text if c.isupper()) / max(char_count, 1)
        title_ratio = sum(1 for w in words if w.istitle()) / max(word_count, 1)

        # 5. digits and special characters
        digit_ratio = sum(1 for c in text if c.isdigit()) / max(char_count, 1)
        special_ratio = sum(1 for c in text if c in '@#$%^&*()[]{}') / max(char_count, 1)

        # 6. lexical diversity and repetition
        lexical_diversity = len(set(lower_words)) / max(word_count, 1)

        word_freq = {}
        for w in lower_words:
            word_freq[w] = word_freq.get(w, 0) + 1
        repeated_words = sum(1 for count in word_freq.values() if count > 1)
        repetition_ratio = repeated_words / max(len(word_freq), 1)

        # 7. stop-word rate
        stop_words_ratio = sum(1 for w in lower_words if w in _COMMON_WORDS) / max(word_count, 1)

        # 8. formal vocabulary and long-word (>7 chars) rate
        formal_ratio = sum(1 for w in lower_words if w in _FORMAL_WORDS) / max(word_count, 1)
        long_word_ratio = sum(1 for n in word_lengths if n > 7) / max(word_count, 1)

        # 9. connective usage as a fluency proxy
        connector_ratio = sum(1 for w in lower_words if w in _CONNECTORS) / max(word_count, 1)

        # 10. document structure: blank-line paragraphs and quote density
        paragraph_count = text.count('\n\n') + 1
        avg_para_len = word_count / paragraph_count
        quote_ratio = (text.count('"') + text.count("'")) / max(char_count, 1)

        features.append([
            char_count, word_count, sent_count, avg_word_len, word_len_std, max_word_len,
            avg_sent_len, sent_len_std, max_sent_len, punct_density, punct_variety,
            upper_ratio, title_ratio, digit_ratio, special_ratio, lexical_diversity,
            repetition_ratio, stop_words_ratio, formal_ratio, long_word_ratio,
            connector_ratio, paragraph_count, avg_para_len, quote_ratio
        ])

    return np.array(features)


def preprocess_text(text):
    """Normalize text while preserving structural signals for AI detection.

    Masks URLs, emails, and standalone numbers with placeholder tokens,
    then normalizes whitespace. Two fixes over the earlier version:

    * Masking happens BEFORE space collapsing, so the padding in ' [URL] '
      etc. can no longer leave double spaces in the output.
    * Newline runs of 3+ collapse to exactly '\\n\\n' (instead of all
      newlines collapsing to one), preserving the blank-line paragraph
      boundary that advanced_text_features counts via text.count('\\n\\n').

    Parameters
    ----------
    text : str

    Returns
    -------
    str
        Cleaned text, stripped of leading/trailing whitespace.
    """
    # Mask volatile tokens first.
    text = re.sub(r'https?://\S+', ' [URL] ', text)
    text = re.sub(r'\S+@\S+', ' [EMAIL] ', text)
    text = re.sub(r'\b\d+\b', ' [NUM] ', text)

    # Normalize whitespace after masking; keep paragraph breaks intact.
    text = re.sub(r' +', ' ', text)
    text = re.sub(r'\n{3,}', '\n\n', text)

    return text.strip()


print("读取数据...")
with open('data/train.jsonl', 'r', encoding='utf-8') as f:
    train = [json.loads(line) for line in f.readlines()]
    train = pd.DataFrame(train)

with open('data/test.jsonl', 'r', encoding='utf-8') as f:
    test = [json.loads(line) for line in f.readlines()]
    test = pd.DataFrame(test)

print(f"训练集: {len(train)}, 测试集: {len(test)}")

# 预处理
train['text_clean'] = train['text'].apply(preprocess_text)
test['text_clean'] = test['text'].apply(preprocess_text)

# 合并数据
train['is_test'] = 0
test['is_test'] = 1
df_all = pd.concat([train, test], axis=0, ignore_index=True)

print("多层特征提取...")

# 1. 词级TF-IDF (多个配置)
tfidf_word1 = TfidfVectorizer(max_features=1500, ngram_range=(1, 2), min_df=2, max_df=0.95, sublinear_tf=True,
                              stop_words='english')
tfidf_word2 = TfidfVectorizer(max_features=800, ngram_range=(2, 3), min_df=3, max_df=0.9, sublinear_tf=True)

word_features1 = tfidf_word1.fit_transform(df_all['text_clean'])
word_features2 = tfidf_word2.fit_transform(df_all['text_clean'])

# 2. 字符级TF-IDF
tfidf_char = TfidfVectorizer(analyzer='char', ngram_range=(3, 6), max_features=800, min_df=3, max_df=0.9)
char_features = tfidf_char.fit_transform(df_all['text_clean'])

# 3. Count特征
count_vec = CountVectorizer(max_features=500, ngram_range=(1, 1), min_df=5, max_df=0.8, stop_words='english')
count_features = count_vec.fit_transform(df_all['text_clean'])

# 4. 高级统计特征
stat_features = advanced_text_features(df_all['text_clean'].values)

print("特征合并和选择...")
# 合并稀疏特征
from scipy.sparse import hstack, csr_matrix

sparse_features = hstack([word_features1, word_features2, char_features, count_features])

# 特征选择（保留最重要的特征）
train_labels = df_all[df_all['is_test'] == 0]['label']
train_sparse = sparse_features[:len(train)]

selector = SelectKBest(chi2, k=2500)  # 选择前2500个特征
selected_sparse = selector.fit_transform(train_sparse, train_labels)
selected_features_all = selector.transform(sparse_features)

# SVD降维
svd = TruncatedSVD(n_components=300, random_state=42)
svd_features = svd.fit_transform(selected_features_all)

# 合并所有特征
final_features = np.hstack([svd_features, stat_features])

print(f"最终特征维度: {final_features.shape[1]}")

# 特征标准化
scaler = StandardScaler()
final_features_scaled = scaler.fit_transform(final_features)

# 构建数据
feature_df = pd.DataFrame(final_features_scaled)
feature_df['label'] = df_all['label']
feature_df['is_test'] = df_all['is_test']

train_data = feature_df[feature_df['is_test'] == 0].drop(['is_test'], axis=1)
test_data = feature_df[feature_df['is_test'] == 1].drop(['is_test', 'label'], axis=1)

X = train_data.drop(['label'], axis=1)
y = train_data['label']

print("高级模型集成...")

# 多样化模型组合
models = {
    'SGD_modified': SGDClassifier(loss='modified_huber', alpha=0.0001, max_iter=1000, random_state=42),
    'SGD_log': SGDClassifier(loss='log_loss', alpha=0.0001, max_iter=1000, random_state=42),
    'LogisticReg': LogisticRegression(C=1.0, solver='liblinear', random_state=42, max_iter=1000),
    'Ridge': RidgeClassifier(alpha=0.5, random_state=42),
    'RandomForest': RandomForestClassifier(n_estimators=100, max_depth=10, random_state=42, n_jobs=-1),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=100, max_depth=10, random_state=42, n_jobs=-1),
    'MultinomialNB': MultinomialNB(alpha=0.1),
    'ComplementNB': ComplementNB(alpha=0.1),
    'LinearSVC': LinearSVC(C=0.1, random_state=42, max_iter=2000)
}

# Stacking交叉验证
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

print("开始超级集成训练...")
model_predictions = {}
model_scores = {}

for name, model in models.items():
    print(f"\n=== {name} ===")
    fold_scores = []
    test_preds = np.zeros(len(test_data))
    oof_preds = np.zeros(len(X))

    for fold, (train_idx, val_idx) in enumerate(cv.split(X, y)):
        X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]

        try:
            model.fit(X_train, y_train)

            # 预测
            if hasattr(model, 'predict_proba'):
                val_probs = model.predict_proba(X_val)[:, 1]
                test_probs = model.predict_proba(test_data)[:, 1]
            elif hasattr(model, 'decision_function'):
                val_scores = model.decision_function(X_val)
                test_scores = model.decision_function(test_data)
                val_probs = 1 / (1 + np.exp(-val_scores))
                test_probs = 1 / (1 + np.exp(-test_scores))
            else:
                val_probs = model.predict(X_val).astype(float)
                test_probs = model.predict(test_data).astype(float)

            val_preds = (val_probs > 0.5).astype(int)
            fold_f1 = f1_score(y_val, val_preds, average='weighted')
            fold_scores.append(fold_f1)

            oof_preds[val_idx] = val_probs
            test_preds += test_probs / cv.get_n_splits()

            print(f"  Fold {fold + 1}: {fold_f1:.4f}")

        except Exception as e:
            print(f"  模型 {name} 训练失败: {e}")
            fold_scores.append(0)

    if fold_scores and max(fold_scores) > 0:
        mean_score = np.mean(fold_scores)
        print(f"  平均F1: {mean_score:.4f}")
        model_predictions[name] = test_preds
        model_scores[name] = mean_score

# 智能权重分配
print(f"\n{'=' * 50}")
print("模型性能排序:")
sorted_models = sorted(model_scores.items(), key=lambda x: x[1], reverse=True)

for i, (name, score) in enumerate(sorted_models):
    print(f"{i + 1:2d}. {name:15s}: {score:.4f}")

# 选择top5模型进行最终集成
top_n = min(5, len(sorted_models))
top_models = sorted_models[:top_n]

print(f"\n使用Top-{top_n}模型集成:")
final_prediction = np.zeros(len(test_data))
total_weight = 0

for name, score in top_models:
    weight = score ** 2  # 平方权重，更偏向高分模型
    final_prediction += weight * model_predictions[name]
    total_weight += weight
    print(f"  {name}: 权重 {weight:.4f}")

final_prediction /= total_weight

# 自适应阈值
train_ai_ratio = y.mean()
optimal_threshold = 0.5

# 寻找最优阈值
thresholds = np.arange(0.3, 0.8, 0.01)
target_ratio = train_ai_ratio

best_threshold = 0.5
min_diff = float('inf')

for thresh in thresholds:
    pred_ratio = np.mean(final_prediction > thresh)
    diff = abs(pred_ratio - target_ratio)
    if diff < min_diff:
        min_diff = diff
        best_threshold = thresh

print(f"\n训练集AI比例: {train_ai_ratio:.3f}")
print(f"最优阈值: {best_threshold:.3f}")

final_labels = (final_prediction > best_threshold).astype(int)
final_ai_ratio = np.mean(final_labels)

print(f"最终AI比例: {final_ai_ratio:.3f}")
print(f"预测分布: AI={np.sum(final_labels)}, 人类={len(final_labels) - np.sum(final_labels)}")

# 保存结果
with open("submit.txt", "w") as file:
    for label in final_labels:
        file.write(str(label) + "\n")

print(f"\n{'=' * 50}")
print("超级集成完成！期望显著提升分数到0.8+")
print("结果已保存到 submit.txt")
print(f"{'=' * 50}")

gc.collect()