import numpy as np
import json
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Masking, Bidirectional
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from sklearn.utils.class_weight import compute_class_weight
from tensorflow.keras.preprocessing.sequence import pad_sequences
from scipy import stats

# File-system locations: input dataset and artifacts written after training.
DATASET_PATH = 'assets/database/existing_user_database.json'      # raw keystroke records (JSON)
MODEL_SAVE_PATH = 'model/precise_model/lstm_model.keras'          # trained Keras model
LABEL_TO_INDEX_PATH = 'model/precise_model/lstm_label_to_index.json'  # subject -> class index map
USER_STATS_PATH = 'model/precise_model/user_stats.json'           # per-user timing statistics

def load_data(file_path):
    """Read a JSON file of keystroke records and return the parsed object."""
    with open(file_path, 'r') as fh:
        raw = fh.read()
    return json.loads(raw)

def extract_robust_features(data):
    """Build per-keystroke feature sequences and per-user timing statistics.

    Args:
        data: list of dicts, each with 'subject' (user id) and 'keystrokes'
            (list of dicts with 'press_time', 'release_time', 'key_content').
            Assumes times are numeric and press/release are paired — TODO
            confirm against the dataset schema.

    Returns:
        (sequences, labels): `sequences` is a list (one per entry) of lists of
        11-dim float32 feature vectors (one per keystroke); `labels` is the
        matching list of subject ids.

    Side effects:
        Writes JSON-serializable user statistics to USER_STATS_PATH.
    """
    sequences, labels = [], []
    user_stats = defaultdict(lambda: {
        'avg_duration': 0,
        'avg_interval': 0,
        'bigrams': defaultdict(list),
        'trigrams': defaultdict(list),
        'global_percentiles': defaultdict(dict)
    })

    # Stage 1: collect global hold-duration and inter-key-interval samples.
    all_durations = []
    all_intervals = []
    for entry in data:
        keystrokes = entry['keystrokes']
        for i in range(len(keystrokes)):
            duration = keystrokes[i]['release_time'] - keystrokes[i]['press_time']
            all_durations.append(duration)
            if i > 0:
                interval = keystrokes[i]['press_time'] - keystrokes[i-1]['press_time']
                all_intervals.append(interval)

    global_duration_stats = (np.mean(all_durations), np.std(all_durations))
    global_interval_stats = (np.mean(all_intervals), np.std(all_intervals))

    # Stage 2: collect per-user statistics (bigram/trigram timing samples).
    for entry in data:
        keystrokes = entry['keystrokes']
        durations = []
        intervals = []

        for i in range(len(keystrokes)):
            k = keystrokes[i]
            duration = k['release_time'] - k['press_time']
            durations.append(duration)

            if i > 0:
                interval = k['press_time'] - keystrokes[i-1]['press_time']
                intervals.append(interval)

                # Record (duration, interval) samples per key bigram.
                bigram = (keystrokes[i-1]['key_content'], k['key_content'])
                user_stats[entry['subject']]['bigrams'][bigram].append((duration, interval))

                if i > 1:
                    trigram = (keystrokes[i-2]['key_content'],
                              keystrokes[i-1]['key_content'],
                              k['key_content'])
                    prev_interval = keystrokes[i-1]['press_time'] - keystrokes[i-2]['press_time']
                    user_stats[entry['subject']]['trigrams'][trigram].append(
                        (prev_interval, interval, duration)
                    )

        # Per-user central tendency; median is robust to outlier keystrokes.
        if durations:
            user_stats[entry['subject']]['avg_duration'] = np.median(durations)
        if intervals:
            user_stats[entry['subject']]['avg_interval'] = np.median(intervals)

    # Stage 3: drop low-frequency bigrams (<3 samples) and compute where the
    # user's median bigram timings sit within the global distributions.
    for user in user_stats:
        for bigram in list(user_stats[user]['bigrams'].keys()):
            if len(user_stats[user]['bigrams'][bigram]) < 3:  # too few samples to trust
                del user_stats[user]['bigrams'][bigram]
            else:
                durations = [x[0] for x in user_stats[user]['bigrams'][bigram]]
                intervals = [x[1] for x in user_stats[user]['bigrams'][bigram]]
                user_stats[user]['global_percentiles'][bigram] = (
                    stats.percentileofscore(all_durations, np.median(durations)),
                    stats.percentileofscore(all_intervals, np.median(intervals))
                )

    # Stage 4: build fixed-width feature vectors for every keystroke.
    FEATURE_DIM = 11  # features 2-8 stay zero for the first keystroke of a sequence
    for entry in data:
        sequence = []
        keystrokes = entry['keystrokes']

        for i in range(len(keystrokes)):
            k = keystrokes[i]
            duration = k['release_time'] - k['press_time']
            # +1e-6 guards against division by zero for users with avg == 0.
            rel_duration = duration / (user_stats[entry['subject']]['avg_duration'] + 1e-6)

            features = np.zeros(FEATURE_DIM, dtype=np.float32)

            # Base features, present for every keystroke.
            features[0] = duration
            features[1] = rel_duration
            features[10] = i / len(keystrokes)  # relative position in the sequence

            if i > 0:
                prev = keystrokes[i-1]
                interval = k['press_time'] - prev['press_time']
                rel_interval = interval / (user_stats[entry['subject']]['avg_interval'] + 1e-6)
                prev_duration = prev['release_time'] - prev['press_time']
                accel = (duration - prev_duration) / (interval + 1e-6)

                # Bigram-conditioned features: prefer the user's own samples,
                # fall back to global percentiles / neutral ratios.
                bigram = (prev['key_content'], k['key_content'])
                bigram_data = user_stats[entry['subject']]['bigrams'].get(bigram, None)

                if bigram_data and len(bigram_data) >= 3:
                    median_duration = np.median([x[0] for x in bigram_data])
                    median_interval = np.median([x[1] for x in bigram_data])
                    duration_percentile = stats.percentileofscore(
                        [x[0] for x in bigram_data], duration)
                    interval_percentile = stats.percentileofscore(
                        [x[1] for x in bigram_data], interval)

                    features[2] = interval
                    features[3] = rel_interval
                    features[4] = accel
                    features[5] = duration_percentile / 100.0
                    features[6] = interval_percentile / 100.0
                    features[7] = duration / (median_duration + 1e-6)
                    features[8] = interval / (median_interval + 1e-6)
                else:
                    global_percentile = user_stats[entry['subject']]['global_percentiles'].get(
                        bigram, (50.0, 50.0))
                    features[2] = interval
                    features[3] = rel_interval
                    features[4] = accel
                    features[5] = global_percentile[0] / 100.0
                    features[6] = global_percentile[1] / 100.0
                    features[7] = 1.0  # neutral ratio: no user-specific baseline
                    features[8] = 1.0

            sequence.append(features)

        sequences.append(sequence)
        labels.append(entry['subject'])

    # Persist JSON-serializable statistics for use at inference time.
    serializable_stats = {
        # FIX: the original used enumerate(set(labels)), whose ordering is
        # run-dependent (string hash randomization), so the saved mapping was
        # nondeterministic and could disagree with encode_labels()/the mapping
        # saved in main(). dict.fromkeys keeps first-seen order, matching
        # encode_labels exactly.
        'label_to_index': {k: i for i, k in enumerate(dict.fromkeys(labels))},
        'user_stats': defaultdict(dict),
        'global_stats': {
            'durations': global_duration_stats,
            'intervals': global_interval_stats
        }
    }

    for user in user_stats:
        serializable_stats['user_stats'][user] = {
            'avg_duration': user_stats[user]['avg_duration'],
            'avg_interval': user_stats[user]['avg_interval'],
            # Tuple keys are not valid JSON keys; join them with underscores.
            'bigrams': {f"{k[0]}_{k[1]}": v for k, v in user_stats[user]['bigrams'].items()},
            'trigrams': {f"{k[0]}_{k[1]}_{k[2]}": v for k, v in user_stats[user]['trigrams'].items()},
            'global_percentiles': {f"{k[0]}_{k[1]}": v for k, v in user_stats[user]['global_percentiles'].items()}
        }

    with open(USER_STATS_PATH, 'w') as f:
        json.dump(serializable_stats, f, indent=2)

    return sequences, labels

def create_advanced_model(input_shape, num_classes):
    """Build and compile the bidirectional-LSTM sequence classifier.

    Args:
        input_shape: (timesteps, feature_dim) of the padded input sequences.
        num_classes: number of subjects (softmax output width).

    Returns:
        A compiled Keras model.
    """
    model = Sequential()
    # Masking skips zero-padded timesteps produced by pad_sequences.
    model.add(Masking(mask_value=0.0, input_shape=input_shape))
    model.add(Bidirectional(LSTM(128, return_sequences=True)))
    model.add(Dropout(0.4))
    model.add(Bidirectional(LSTM(64)))
    model.add(Dropout(0.3))
    model.add(Dense(64, activation='relu', kernel_regularizer='l2'))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        weighted_metrics=['accuracy']
    )
    return model

def evaluate_model(model, X_test, y_test, label_to_index):
    """Print accuracy and a per-class classification report on the test split.

    Args:
        model: trained Keras classifier (softmax output).
        X_test: padded feature sequences.
        y_test: one-hot encoded true labels.
        label_to_index: mapping from subject id to class index.
    """
    probabilities = model.predict(X_test, verbose=0)
    true_indices = np.argmax(y_test, axis=1)
    pred_indices = np.argmax(probabilities, axis=1)

    # Invert the mapping so the report shows subject ids, not class numbers.
    index_to_label = {idx: name for name, idx in label_to_index.items()}
    true_names = [index_to_label[i] for i in true_indices]
    pred_names = [index_to_label[i] for i in pred_indices]

    print("\nEvaluation results:")
    print(f"Accuracy: {accuracy_score(true_indices, pred_indices):.4f}")

    print("\nClassification report:")
    print(classification_report(true_names, pred_names, digits=4))

def main():
    """End-to-end pipeline: load data, train the LSTM, save and evaluate it."""
    # 1. Load raw records and derive per-keystroke feature sequences.
    print("Loading and preprocessing data...")
    raw_records = load_data(DATASET_PATH)
    sequences, labels = extract_robust_features(raw_records)

    # 2. Integer-encode subjects, then one-hot for categorical_crossentropy.
    encoded_labels, label_to_index = encode_labels(labels)
    y = to_categorical(encoded_labels)

    # 3. Pad every sequence to the longest one; zeros are masked by the model.
    longest = max(len(s) for s in sequences)
    X = pad_sequences(sequences, maxlen=longest, padding='post', dtype='float32')

    # 4. Stratified 80/20 split.
    # NOTE(review): the test split below doubles as the validation set for
    # early stopping, so the final reported metrics are mildly optimistic —
    # consider a separate validation split.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=encoded_labels
    )

    # 5. Balance the loss contribution of under-represented subjects.
    weight_values = compute_class_weight(
        'balanced',
        classes=np.unique(encoded_labels),
        y=encoded_labels
    )
    class_weights = dict(enumerate(weight_values))

    # 6. Build and fit the model.
    print("\nTraining model...")
    model = create_advanced_model(
        (X_train.shape[1], X_train.shape[2]),
        y_train.shape[1]
    )

    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=15,
        restore_best_weights=True,
        verbose=1
    )
    lr_schedule = ReduceLROnPlateau(
        monitor='val_loss', factor=0.2, patience=5, min_lr=1e-5, verbose=1
    )

    model.fit(
        X_train, y_train,
        epochs=100,
        batch_size=32,
        validation_data=(X_test, y_test),
        class_weight=class_weights,
        callbacks=[lr_schedule, early_stopping],
        verbose=1
    )

    # 7. Persist the trained model and the label mapping used at inference.
    model.save(MODEL_SAVE_PATH)
    with open(LABEL_TO_INDEX_PATH, 'w') as f:
        json.dump(label_to_index, f)

    # 8. Report test-set metrics.
    evaluate_model(model, X_test, y_test, label_to_index)

    # Say whether early stopping fired (stopped_epoch stays 0 otherwise).
    if early_stopping.stopped_epoch > 0:
        print(f"\nEarly stopping triggered at epoch {early_stopping.stopped_epoch + 1}")
    else:
        print("\nTraining completed without early stopping")

def encode_labels(labels):
    """Map each distinct label to a dense integer index in first-seen order.

    Args:
        labels: iterable of hashable label values.

    Returns:
        (codes, label_to_index): `codes` is an np.ndarray of integer class
        indices aligned with `labels`; `label_to_index` maps each label to
        its index.
    """
    label_to_index = {}
    for name in labels:
        label_to_index.setdefault(name, len(label_to_index))
    encoded = np.array([label_to_index[name] for name in labels])
    return encoded, label_to_index

# Run the full training pipeline when executed as a script.
if __name__ == "__main__":
    main()