import torch
import pandas as pd
import torch.nn as nn
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from torch.utils.data import DataLoader,TensorDataset
import matplotlib.pyplot as plt

def create_dataset():
    """Load the house-prices CSV, preprocess features, and build TensorDatasets.

    Numeric columns are mean-imputed and standardized; categorical columns
    are constant-imputed and one-hot encoded. The preprocessor is fit on the
    training split only, to avoid test-set leakage.

    Returns:
        (train_dataset, test_dataset, n_features): float32 TensorDatasets of
        (features, target) plus the preprocessed feature-column count.
    """
    data = pd.read_csv('dataset/house_prices.csv')
    # Drop the row identifier — it carries no predictive signal.
    data.drop(columns=['Id'], inplace=True)
    # Separate features from the regression target.
    x = data.drop(columns=['SalePrice'])
    y = data['SalePrice']
    # Column names by dtype: non-object -> numeric, object -> categorical.
    num_features = x.select_dtypes(exclude=['object']).columns
    cat_features = x.select_dtypes(include=['object']).columns
    # Hold out 20% for evaluation; fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42)
    # Numeric pipeline: fill missing values with the column mean, then standardize.
    num_transformer = Pipeline(steps=[
        ("fillna", SimpleImputer(strategy="mean")),
        ("std", StandardScaler())
    ])
    # Categorical pipeline: replace missing values with the literal string
    # "NaN", then one-hot encode (categories unseen at fit time map to all-zeros).
    cat_transformer = Pipeline(steps=[
        ("fillna", SimpleImputer(strategy="constant", fill_value="NaN")),
        ("onehot", OneHotEncoder(handle_unknown="ignore"))
    ])
    # sparse_threshold=0 forces a dense ndarray output, so no .toarray() call
    # is needed. (The original called .toarray() unconditionally, which raises
    # AttributeError whenever the transformer happens to return a dense array.)
    preprocessor = ColumnTransformer(transformers=[
        ("num", num_transformer, num_features),
        ("cat", cat_transformer, cat_features)
    ], sparse_threshold=0)
    # Fit on the training split only; the test split is transform-only.
    x_train = pd.DataFrame(preprocessor.fit_transform(x_train),
                           columns=preprocessor.get_feature_names_out())
    x_test = pd.DataFrame(preprocessor.transform(x_test),
                          columns=preprocessor.get_feature_names_out())
    # Wrap as TensorDatasets of float32 tensors for DataLoader consumption.
    train_dataset = TensorDataset(torch.tensor(x_train.values, dtype=torch.float32),
                                  torch.tensor(y_train.values, dtype=torch.float32))
    test_dataset = TensorDataset(torch.tensor(x_test.values, dtype=torch.float32),
                                 torch.tensor(y_test.values, dtype=torch.float32))
    return train_dataset, test_dataset, x_train.shape[1]


train_dataset,test_dataset,n_feature=create_dataset()
# print(n_feature)

model = nn.Sequential(
    nn.Linear(n_feature,128),
    nn.BatchNorm1d(128),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(128,1)
)
# Loss function: RMSE in log-space (penalizes relative, not absolute, error).
def log_rmse(pred, target):
    """Return the root-mean-squared error between log(pred) and log(target).

    pred may be shaped (N, 1) or (N,); it is flattened to match the 1-D
    target. Predictions are clamped to >= 1 so the log is always finite.
    """
    mse = nn.MSELoss()
    # Flatten with reshape instead of the original in-place squeeze_(), which
    # mutated the caller's tensor and collapsed a (1, 1) batch to a 0-d
    # tensor (broadcasting wrongly against the 1-d target in MSELoss).
    pred = torch.clamp(pred.reshape(-1), min=1.0)
    return torch.sqrt(mse(torch.log(pred), torch.log(target)))

# Model training loop
def train(model, train_dataset, test_dataset, lr, epoch_num, batch_size, device):
    """Train `model` with Adam on log-RMSE, evaluating on the test set each epoch.

    Args:
        model: the network to train (weights are Xavier re-initialized first).
        train_dataset: TensorDataset of (features, target) for optimization.
        test_dataset: TensorDataset of (features, target) for evaluation.
        lr: Adam learning rate.
        epoch_num: number of passes over the training data.
        batch_size: mini-batch size for both loaders.
        device: torch device string, e.g. 'cpu' or 'cuda'.

    Returns:
        (train_loss_list, test_loss_list): per-epoch mean batch log-RMSE.
    """
    def init_weight(m):
        # Xavier-uniform initialization on every Linear layer's weight matrix.
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
    model.apply(init_weight)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    # Build the loaders once. A shuffle=True DataLoader reshuffles at the
    # start of every epoch anyway, so re-creating both loaders inside the
    # epoch loop (as the original did) was pure wasted work.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    train_loss_list = []
    test_loss_list = []
    for epoch in range(epoch_num):
        # --- training pass ---
        model.train()
        train_loss_accumulate = 0
        for x, y in train_loader:
            x, y = x.to(device), y.to(device)
            output = model(x)
            loss_value = log_rmse(output, y)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            train_loss_accumulate += loss_value.item()
        this_train_loss = train_loss_accumulate / len(train_loader)
        train_loss_list.append(this_train_loss)

        # --- evaluation pass: no gradients; dropout/batch-norm in eval mode ---
        model.eval()
        test_loss_accumulate = 0
        with torch.no_grad():
            for x, y in test_loader:
                x, y = x.to(device), y.to(device)
                output = model(x)
                loss_value = log_rmse(output, y)
                test_loss_accumulate += loss_value.item()
        this_test_loss = test_loss_accumulate / len(test_loader)
        test_loss_list.append(this_test_loss)
        print("train_loss:{:.4f}, test_loss:{:.4f}".format(this_train_loss, this_test_loss))
    return train_loss_list, test_loss_list
    
# Fit the model on CPU, then visualize the per-epoch learning curves.
history_train, history_test = train(
    model, train_dataset, test_dataset,
    lr=0.1, epoch_num=100, batch_size=64, device='cpu',
)
plt.plot(history_train, "r-", label='Train Loss', linewidth=3)
plt.plot(history_test, "b--", label='Test Loss', linewidth=2)
plt.legend()
plt.show()