# Import the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.preprocessing import StandardScaler



# Load the training and test data
data_train = pd.read_csv(r"G:\Course Materials\数据挖掘\HW3 分类模型\forest_train_data.csv")
data_test = pd.read_csv(r"G:\Course Materials\数据挖掘\HW3 分类模型\forest_test_data.csv")


# Classify each column as continuous or discrete by its number of distinct
# values: more than 10 unique values -> treated as continuous.
continuity_count = 0
discrete_count = 0

for column_name, column_data in data_train.items():
    # nunique() is equivalent to value_counts().count() (both skip NaN)
    # but does the distinct-count in one step.
    if column_data.nunique() > 10:
        print(f'{column_name} is continuity feature')  # fixed typo: "fearure"
        continuity_count += 1
    else:
        print(f'{column_name} is discrete feature')  # fixed typo: "fearure"
        discrete_count += 1

print(f'continuity count: {continuity_count},\n discrete count: {discrete_count},\n all count: {len(data_train.columns)}')


# Export a per-column statistical summary to CSV for offline inspection.
from pandas_summary import DataFrameSummary
data_s = DataFrameSummary(data_train)
data_s.summary().to_csv('summary.csv')





# Simple data visualization: histogram (with KDE overlay) of the
# features in columns 1..10 of the training data.
for i in range(1, 11):
    col_name = data_train.columns[i]  # hoisted: used three times below
    sns.histplot(data_train.iloc[:, i], bins=30, kde=True, color='blue', alpha=0.5)
    plt.title(f'{col_name} Histogram')  # fixed: missing space before "Histogram"
    plt.xlabel(f'{col_name}')
    plt.ylabel('Frequency')
    plt.show()


def data_split(data):
    """Split the combined frame back into train/test feature/target tensors.

    Expects a 'ch' column marking each row as 'train' or 'test' and a
    'Cover_Type' target column, and assumes the last two columns of
    `data` are 'Cover_Type' and 'ch' (so the features are every column
    except the last two).

    Args:
        data: pandas DataFrame containing both train and test rows.

    Returns:
        (X_train, y_train, X_test, y_test) as float32 tensors; the
        targets are shaped (n, 1).
    """
    train = data[data['ch'] == 'train']
    test = data[data['ch'] == 'test']

    X_train = train.iloc[:, :-2]
    y_train = train.loc[:, ['Cover_Type']]
    X_test = test.iloc[:, :-2]
    y_test = test.loc[:, ['Cover_Type']]

    def _to_tensor(frame):
        # A single-column DataFrame's to_numpy() is already (n, 1), so
        # the original reshape(-1, 1).view(-1, 1) was redundant; one
        # conversion covers both the feature and target frames.
        return torch.tensor(frame.to_numpy(), dtype=torch.float32)

    return (_to_tensor(X_train), _to_tensor(y_train),
            _to_tensor(X_test), _to_tensor(y_test))



# Merge train and test, standardize the continuous columns, then split
# back into training and test tensors.
# NOTE(review): the scalers are fit on the combined train+test frame,
# which leaks test-set statistics into preprocessing — consider fitting
# on the training rows only and applying transform() to the test rows.
data_train['ch'] = 'train'
data_test['ch'] = 'test'
data = pd.concat([data_train, data_test], ignore_index=True)
data = data.drop(columns=['Unnamed: 0'])  # drop the CSV index column

stand_var = {}
for column_name, column_data in data.items():
    # Only continuous columns (> 10 distinct values) are standardized;
    # create and store a scaler only when it is actually fitted, instead
    # of filling stand_var with unfitted (unusable) scalers.
    if column_data.nunique() > 10:
        scaler = StandardScaler()
        data[column_name] = scaler.fit_transform(data[[column_name]])
        stand_var[f'{column_name}_scaler'] = scaler

X_train, y_train, X_test, y_test = data_split(data)


def train_data(model, optimizer, criterion,
               X_train, y_train, X_test, y_test,
               batch_size = 32, epoches = 100):
    """Train `model` with mini-batch gradient descent and track losses.

    Args:
        model: torch.nn.Module to train (updated in place).
        optimizer: optimizer constructed over `model`'s parameters.
        criterion: loss function called as criterion(prediction, target).
        X_train, y_train: training features/targets as tensors.
        X_test, y_test: held-out features/targets as tensors.
        batch_size: mini-batch size for the DataLoader.
        epoches: number of full passes over the training set.

    Returns:
        (train_losses, test_losses, grad_list): per-epoch mean training
        loss, per-epoch test loss, and per-epoch snapshots of each
        parameter's gradient (from the last batch of that epoch).
    """
    train_losses = []
    test_losses = []
    grad_list = []

    train_dataset = TensorDataset(X_train, y_train)
    train_loader = DataLoader(train_dataset, batch_size = batch_size, shuffle = True)

    for epoch in range(epoches):  # fixed typo: was `rpoch`
        model.train()
        running_loss = 0.0
        for batch_X, batch_y in train_loader:
            optimizer.zero_grad()
            preds = model(batch_X)  # renamed from `batch_x` (it's a prediction, not input)
            loss = criterion(preds, batch_y)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        avg_loss = running_loss / len(train_loader)
        train_losses.append(avg_loss)

        # Snapshot gradients after the epoch's final batch.
        # BUG FIX: `param.grad.numpy()` returns a view that SHARES memory
        # with the grad tensor; later backward() calls overwrite it in
        # place, corrupting every previously stored snapshot. Copy it.
        for param in model.parameters():
            if param.grad is not None:
                grad_list.append(param.grad.detach().numpy().copy())

        model.eval()
        with torch.no_grad():
            test_pred = model(X_test)
            test_loss = criterion(test_pred, y_test)
            test_losses.append(test_loss.item())

    return train_losses, test_losses, grad_list









