import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from collections import Counter


def _report_class_balance(name: str, labels) -> None:
    """Print the size of a label array and, per class value, its count and fraction.

    Classes are listed most-frequent first. Works for any number of classes
    (the original inline version assumed exactly two and would IndexError
    on a single-class split).
    """
    print(f'{name} 总共: {len(labels)} 条数据')
    counts = sorted(Counter(labels).items(), key=lambda item: item[1], reverse=True)
    for value, count in counts:
        print(f'值：{value}: {count}条 {count / len(labels)}占比')


def main():
    """Load the credit-scoring CSVs, impute and scale features, then report
    class balance for the full set and a stratified train/validation split.

    Layout assumption (from the slicing below): column 0 of the CSV is a row
    index, column 1 is the binary target, the rest are features.
    """
    # Load training data and impute missing values with column medians.
    train_df = pd.read_csv('dataset/cs-training.csv')
    median = train_df.median()
    print("median:", median)
    print("\n")
    train_df = train_df.fillna(median)
    # Impute the test set with the *training* medians to avoid data leakage.
    test_df = pd.read_csv('dataset/cs-test.csv')
    test_df = test_df.fillna(median)

    print(train_df.columns)
    # Drop the leading row-index column.
    train = train_df.values[:, 1:]
    test = test_df.values[:, 1:]

    # First remaining column is the target; the rest are features.
    X, y = train[:, 1:], train[:, 0]
    X_test, y_test = test[:, 1:], test[:, 0]

    # Min-max scaling fitted on the training features only.
    scaler = MinMaxScaler().fit(X)
    X = scaler.transform(X)
    # BUG FIX: transform() returns a new array and does not modify its input;
    # the original discarded the result, leaving X_test unscaled.
    X_test = scaler.transform(X_test)

    print(type(y))
    _report_class_balance('全集', y)
    print(f'=====进行切分=====')
    # Stratified split so both partitions keep the (imbalanced) class ratio.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
    _report_class_balance('验证集', y_val)
    _report_class_balance('训练集', y_train)
    print(f'=====切分完成=====')


# Run the data-loading/splitting report only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
