import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
import os
import matplotlib

# Configure fonts so CJK characters render correctly in plots
# (falls back through the list until an installed font is found).
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'SimHei', 'Arial Unicode MS', 'Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs as ASCII '-' with CJK fonts
matplotlib.rcParams['font.family'] = 'sans-serif'

def create_sequences(data, targets_data, sequence_length):
    """Build sliding-window samples for sequence models.

    Each sample is a window of `sequence_length` consecutive rows of `data`;
    its target is the `targets_data` entry immediately after the window.

    Args:
        data: array-like of shape (n_rows, n_features).
        targets_data: array-like of per-row targets, aligned with `data`.
        sequence_length: number of time steps per window.

    Returns:
        tuple: (sequences, targets) as numpy arrays of shapes
        (n_rows - sequence_length, sequence_length, n_features) and
        (n_rows - sequence_length,).
    """
    n_windows = len(data) - sequence_length
    windows = [data[start:start + sequence_length] for start in range(n_windows)]
    labels = [targets_data[start + sequence_length] for start in range(n_windows)]
    return np.array(windows), np.array(labels)

def load_and_preprocess_data(data_path, feature_columns, target_column, sequence_length, train_split=0.8):
    """Load a CSV, standardize features, build sequences, and split train/test.

    Args:
        data_path: path to the CSV file.
        feature_columns: list of feature column names.
        target_column: target column name.
        sequence_length: number of time steps per sequence sample.
        train_split: fraction of sequences assigned to the training set.

    Returns:
        tuple: (X_train, X_test, y_train, y_test, scaler) where X_* are
        FloatTensors of shape (n, sequence_length, n_features), y_* are
        LongTensors, and scaler is the fitted StandardScaler.
    """
    df = pd.read_csv(data_path)

    features = df[feature_columns].values
    targets = df[target_column].values

    # Train/test boundary in sequence space (create_sequences yields
    # len(features) - sequence_length samples).
    n_sequences = len(features) - sequence_length
    split_idx = int(n_sequences * train_split)

    # BUGFIX: fit the scaler ONLY on the rows that feed training sequences.
    # Fitting on the full dataset leaks test-set statistics into training
    # (the sibling load_preprocessed_dataset already fits on train only).
    # The last raw row touched by a training sequence is split_idx + sequence_length - 1.
    scaler = StandardScaler()
    train_row_end = split_idx + sequence_length
    scaler.fit(features[:train_row_end])
    features_scaled = scaler.transform(features)

    X, y = create_sequences(features_scaled, targets, sequence_length)

    X_train, X_test = X[:split_idx], X[split_idx:]
    y_train, y_test = y[:split_idx], y[split_idx:]

    # Convert to PyTorch tensors (LongTensor targets: classification labels).
    X_train = torch.FloatTensor(X_train)
    X_test = torch.FloatTensor(X_test)
    y_train = torch.LongTensor(y_train)
    y_test = torch.LongTensor(y_test)

    return X_train, X_test, y_train, y_test, scaler

def load_preprocessed_dataset(train_path, test_path, feature_columns, target_column):
    """Load an already-split dataset and prepare it as sequence tensors.

    The scaler is fitted on the training set only, then applied to both
    splits. Each row is treated as a univariate time series whose steps are
    spread across `feature_columns`, so samples are reshaped to
    (n_samples, len(feature_columns), 1).

    Args:
        train_path: path to the training-set CSV.
        test_path: path to the test-set CSV.
        feature_columns: feature column names (one per time step).
        target_column: target column name.

    Returns:
        tuple: (X_train, X_test, y_train, y_test, scaler)
    """
    train_df = pd.read_csv(train_path)
    test_df = pd.read_csv(test_path)

    y_train = train_df[target_column].values
    y_test = test_df[target_column].values

    # Standardize: fit on train, transform both splits.
    scaler = StandardScaler()
    train_scaled = scaler.fit_transform(train_df[feature_columns].values)
    test_scaled = scaler.transform(test_df[feature_columns].values)

    # One time step per feature column; univariate series => 1 feature dim.
    seq_len = len(feature_columns)
    feature_dim = 1

    X_train_tensor = torch.FloatTensor(train_scaled.reshape(-1, seq_len, feature_dim))
    X_test_tensor = torch.FloatTensor(test_scaled.reshape(-1, seq_len, feature_dim))
    y_train_tensor = torch.LongTensor(y_train)
    y_test_tensor = torch.LongTensor(y_test)

    print(f"训练集大小: {X_train_tensor.shape}")
    print(f"测试集大小: {X_test_tensor.shape}")
    print(f"序列长度: {seq_len}")
    print(f"特征数量: {feature_dim}")

    return X_train_tensor, X_test_tensor, y_train_tensor, y_test_tensor, scaler

def plot_training_history(train_losses, val_losses, train_accuracies, val_accuracies):
    """Plot per-epoch loss and accuracy curves side by side.

    Saves the figure to 'training_history.png' (300 dpi) and shows it.
    """
    fig, axes = plt.subplots(1, 2, figsize=(15, 5))

    # (train series, validation series, metric name) per panel.
    panels = (
        (train_losses, val_losses, 'Loss'),
        (train_accuracies, val_accuracies, 'Accuracy'),
    )
    for ax, (train_vals, val_vals, metric) in zip(axes, panels):
        ax.plot(train_vals, label='Train ' + metric)
        ax.plot(val_vals, label='Validation ' + metric)
        ax.set_title('Training and Validation ' + metric)
        ax.set_xlabel('Epoch')
        ax.set_ylabel(metric)
        ax.legend()
        ax.grid(True)

    plt.tight_layout()
    plt.savefig('training_history.png', dpi=300, bbox_inches='tight')
    plt.show()

def evaluate_model(model, X_test, y_test, device):
    """Evaluate a classifier on the test set and report metrics.

    Prints the accuracy and a classification report, saves a
    confusion-matrix heatmap to 'confusion_matrix.png', and returns the
    accuracy as a float.

    Args:
        model: trained PyTorch module producing class logits of shape (n, C).
        X_test: input tensor.
        y_test: LongTensor of true class labels.
        device: device to run inference on.
    """
    model.eval()
    with torch.no_grad():
        outputs = model(X_test.to(device))
        # argmax over the class dimension; replaces the legacy
        # `torch.max(outputs.data, 1)` idiom.
        predicted = outputs.argmax(dim=1)

    # Convert once; reused by every metric call below.
    y_true = y_test.cpu().numpy()
    y_pred = predicted.cpu().numpy()

    accuracy = accuracy_score(y_true, y_pred)

    print(f"Test Accuracy: {accuracy:.4f}")
    print("\nClassification Report:")
    print(classification_report(y_true, y_pred))

    # Confusion-matrix heatmap.
    cm = confusion_matrix(y_true, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('Confusion Matrix')
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix.png', dpi=300, bbox_inches='tight')
    plt.show()

    return accuracy

def save_model(model, filepath):
    """Save a model checkpoint (state dict + optional config) to `filepath`.

    Creates missing parent directories. If the model exposes `get_config()`
    its result is stored under 'model_config' so `load_model` can rebuild it.
    """
    # BUGFIX: os.path.dirname returns '' for a bare filename, and
    # os.makedirs('') raises FileNotFoundError — only create the
    # directory when one is actually present in the path.
    directory = os.path.dirname(filepath)
    if directory:
        os.makedirs(directory, exist_ok=True)
    torch.save({
        'model_state_dict': model.state_dict(),
        'model_config': model.get_config() if hasattr(model, 'get_config') else {}
    }, filepath)
    print(f"模型已保存到: {filepath}")

def load_model(model_class, filepath, device):
    """Rebuild a model from a checkpoint written by `save_model`.

    Instantiates `model_class` with the stored 'model_config' kwargs, loads
    the stored weights, and moves the model to `device`.
    """
    checkpoint = torch.load(filepath, map_location=device)
    config = checkpoint['model_config']
    restored = model_class(**config)
    restored.load_state_dict(checkpoint['model_state_dict'])
    # Module.to returns the module itself, so this is the loaded model.
    return restored.to(device)