# Example script: dataset splitting
# Demonstrates how to use the dataset-splitting functionality of the IVIDS-server system.

import os
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split

# 添加项目根目录到系统路径
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# 导入项目中的数据处理函数
from app.ml.models import load_and_preprocess_data


def split_dataset(dataset_path, test_size=0.2, random_state=42, output_dir=None):
    """Split a dataset into stratified train and test sets.

    Args:
        dataset_path: Path to the dataset file.
        test_size: Fraction of samples placed in the test set (default 0.2, i.e. 20%).
        random_state: Random seed, for reproducible splits.
        output_dir: If provided, the split sets are also saved there as CSV files.

    Returns:
        tuple: (X_train, X_test, y_train, y_test) — train features, test
        features, train labels, test labels.
    """
    # Load and preprocess the data.
    print(f"加载数据集: {dataset_path}")
    X, y, feature_names = load_and_preprocess_data(dataset_path)

    # Stratified split keeps the class distribution identical in both sets.
    print(f"将数据集划分为训练集({100-test_size*100:.0f}%)和测试集({test_size*100:.0f}%)")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state, stratify=y
    )

    print(f"训练集样本数: {X_train.shape[0]}, 测试集样本数: {X_test.shape[0]}")

    # Optionally persist the split sets as CSV files.
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

        # np.asarray works whether the labels come back as a pandas Series
        # or a plain ndarray; the original `.values` access crashed on ndarrays.
        train_df = pd.DataFrame(X_train, columns=feature_names)
        train_df['target'] = np.asarray(y_train)

        test_df = pd.DataFrame(X_test, columns=feature_names)
        test_df['target'] = np.asarray(y_test)

        train_path = os.path.join(output_dir, 'train_set.csv')
        test_path = os.path.join(output_dir, 'test_set.csv')

        train_df.to_csv(train_path, index=False)
        test_df.to_csv(test_path, index=False)

        print(f"训练集已保存到: {train_path}")
        print(f"测试集已保存到: {test_path}")

    return X_train, X_test, y_train, y_test


def main():
    """主函数，用于演示数据集划分"""
    import argparse
    
    parser = argparse.ArgumentParser(description="数据集划分示例")
    parser.add_argument("--dataset", type=str, required=True, help="数据集路径")
    parser.add_argument("--test_size", type=float, default=0.2, help="测试集比例")
    parser.add_argument("--random_state", type=int, default=42, help="随机种子")
    parser.add_argument("--output_dir", type=str, default="split_data", help="输出目录")
    
    args = parser.parse_args()
    
    # 划分数据集
    X_train, X_test, y_train, y_test = split_dataset(
        args.dataset, args.test_size, args.random_state, args.output_dir
    )
    
    # 显示类别分布
    print("\n类别分布:")
    print("原始数据集:")
    unique, counts = np.unique(y, return_counts=True)
    for u, c in zip(unique, counts):
        print(f"  类别 {u}: {c} 样本 ({c/len(y)*100:.2f}%)")
    
    print("训练集:")
    unique, counts = np.unique(y_train, return_counts=True)
    for u, c in zip(unique, counts):
        print(f"  类别 {u}: {c} 样本 ({c/len(y_train)*100:.2f}%)")
    
    print("测试集:")
    unique, counts = np.unique(y_test, return_counts=True)
    for u, c in zip(unique, counts):
        print(f"  类别 {u}: {c} 样本 ({c/len(y_test)*100:.2f}%)")


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()