import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

# 1. Data preparation
DATA_DIR = "data/"
train_path = os.path.join(DATA_DIR, "used_car_train_20200313.csv")
testB_path = os.path.join(DATA_DIR, "used_car_testB_20200421.csv")

# The raw files are space-separated
train_data = pd.read_csv(train_path, sep=' ')
testB_data = pd.read_csv(testB_path, sep=' ')

# Log-transform the target to reduce its skew; predictions are mapped
# back with expm1 before the final MAE / submission.
train_data['price'] = np.log1p(train_data['price'])

# 2. Feature engineering
# Repair invalid date components in the raw 'YYYYMMDD' strings.
def fix_month(date_str):
    """Repair an 8-character 'YYYYMMDD' date string.

    The raw dataset contains invalid dates whose month part is '00';
    those are replaced with '01'. A day part of '00' is repaired the
    same way (defensive — an unrepaired zero day would make
    pd.to_datetime(format='%Y%m%d') raise downstream).

    Args:
        date_str: date as an 8-character string, e.g. '20040012'.

    Returns:
        The repaired 'YYYYMMDD' string.
    """
    year = date_str[:4]
    month = date_str[4:6]
    day = date_str[6:]
    if month == '00':
        month = '01'
    if day == '00':
        day = '01'
    return year + month + day

# Repair invalid dates in both date columns of both frames
for _date_col in ('regDate', 'creatDate'):
    train_data[_date_col] = train_data[_date_col].astype(str).apply(fix_month)
    testB_data[_date_col] = testB_data[_date_col].astype(str).apply(fix_month)

# Extract year / month / day features from the date columns.
# pd.to_datetime is the expensive step; parse each column ONCE and split
# the parsed result, instead of re-parsing it for every component as the
# original code did (3 parses per column per frame).
def _add_date_parts(df, col):
    """Add '<col>_year', '<col>_month', '<col>_day' columns (in place)
    parsed from the 'YYYYMMDD' string column `col`."""
    parsed = pd.to_datetime(df[col], format='%Y%m%d')
    df[col + '_year'] = parsed.dt.year
    df[col + '_month'] = parsed.dt.month
    df[col + '_day'] = parsed.dt.day

for _df in (train_data, testB_data):
    _add_date_parts(_df, 'regDate')
    _add_date_parts(_df, 'creatDate')

# Vehicle age (whole years) at listing time
train_data['used_years'] = train_data['creatDate_year'] - train_data['regDate_year']
testB_data['used_years'] = testB_data['creatDate_year'] - testB_data['regDate_year']

# Brand encoding (target encoding)
def target_encode(train, test, col, target):
    """Mean target encoding of categorical column `col`.

    Adds a '<col>_encoded' column to both frames containing the mean of
    `target` per category, computed on `train` only (no leakage from
    `test`). Categories unseen in the training data fall back to the
    global target mean.

    Args:
        train: training DataFrame (must contain `col` and `target`).
        test: test DataFrame (must contain `col`).
        col: name of the categorical column to encode.
        target: name of the numeric target column in `train`.

    Returns:
        The (train, test) pair, for convenient reassignment.
    """
    mean_values = train.groupby(col)[target].mean()

    # True global mean of the target as the fallback for unseen
    # categories. The original used mean_values.mean() — the unweighted
    # mean of per-category means — despite its comment claiming the
    # global mean; this is the row-weighted (actual) global mean.
    global_mean = train[target].mean()

    encoded = col + '_encoded'
    train[encoded] = train[col].map(mean_values).fillna(global_mean)
    test[encoded] = test[col].map(mean_values).fillna(global_mean)

    return train, test

train_data, testB_data = target_encode(train_data, testB_data, 'brand', 'price')

# One-hot encode the low-cardinality categorical columns
_dummy_cols = ['gearbox', 'notRepairedDamage']
train_data = pd.get_dummies(train_data, columns=_dummy_cols)
testB_data = pd.get_dummies(testB_data, columns=_dummy_cols)

# 3. Feature alignment
# get_dummies was applied to train and test independently, so each frame
# may contain one-hot columns the other lacks. Align both frames on the
# union of their columns, filling missing ones with 0.
#
# sorted() instead of iterating the raw set: set iteration order varies
# between interpreter runs, which made the column layout nondeterministic.
all_columns = sorted(set(train_data.columns) | set(testB_data.columns))

# reindex adds any missing columns (filled with 0) and fixes the order,
# replacing the original manual add-missing-then-reorder loop.
train_data = train_data.reindex(columns=all_columns, fill_value=0)
testB_data = testB_data.reindex(columns=all_columns, fill_value=0)

# Candidate feature list: date parts, derived age, encoded brand, raw
# numeric columns, the anonymised v_0..v_14 features, and the one-hot
# gearbox / notRepairedDamage flags.
features = (
    ['regDate_year', 'regDate_month', 'regDate_day',
     'creatDate_year', 'creatDate_month', 'creatDate_day',
     'used_years', 'brand_encoded', 'power', 'kilometer']
    + [f'v_{i}' for i in range(15)]
    + ['gearbox_0', 'gearbox_1', 'notRepairedDamage_0', 'notRepairedDamage_1']
)

# Keep only the features actually present in the aligned data
_available = set(train_data.columns)
features = [f for f in features if f in _available]

# 3. Preprocessing
# Split features from the (log-transformed) target
X = train_data[features]
y = train_data['price']
X_testB = testB_data[features]

# Standardise the features; the scaler is fitted on the training set
# only and then reused for test B.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_testB = scaler.transform(X_testB)

# 4. PyTorch dataset wrapper
class CarDataset(Dataset):
    """Wraps a feature matrix (and optional targets) as float32 tensors.

    When `y` is None the dataset yields features only — this is how the
    unlabeled test set is served to the DataLoader.
    """

    def __init__(self, X, y=None):
        self.X = torch.tensor(X, dtype=torch.float32)
        self.y = None if y is None else torch.tensor(y, dtype=torch.float32)

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        row = self.X[idx]
        if self.y is None:
            return row
        return row, self.y[idx]

train_dataset = CarDataset(X, y)
testB_dataset = CarDataset(X_testB)

# 80/20 train/validation split.
# A fixed generator seed makes the split (and therefore the reported
# validation MAE) reproducible across runs; the original unseeded
# random_split produced a different validation set on every execution.
train_size = int(0.8 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(
    train_dataset, [train_size, val_size],
    generator=torch.Generator().manual_seed(42),
)

# DataLoaders: shuffle only the training data
batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
testB_loader = DataLoader(testB_dataset, batch_size=batch_size, shuffle=False)

# 5. PyTorch model definition
class CarModel(nn.Module):
    """MLP regressor: input -> 128 -> 64 -> 32 -> 1.

    ReLU activations throughout, dropout (p=0.3) after the first two
    hidden layers; the output layer is linear and predicts log-price.
    Layer attribute names are part of the checkpoint format
    (state_dict keys), so they are kept as fc1..fc4 / dropout1-2.
    """

    def __init__(self, input_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.dropout1 = nn.Dropout(0.3)
        self.fc2 = nn.Linear(128, 64)
        self.dropout2 = nn.Dropout(0.3)
        self.fc3 = nn.Linear(64, 32)
        self.fc4 = nn.Linear(32, 1)

    def forward(self, x):
        h = self.dropout1(self.fc1(x).relu())
        h = self.dropout2(self.fc2(h).relu())
        h = self.fc3(h).relu()
        return self.fc4(h)

# 6. Model training
# Initialize model, optimizer and loss
input_size = X.shape[1]
model = CarModel(input_size)
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.MSELoss()

# Training loop with early stopping on validation loss
epochs = 100
best_val_loss = float('inf')
patience = 10
counter = 0

for epoch in range(epochs):
    # --- training ---
    model.train()
    train_loss = 0.0
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        # view(-1) instead of squeeze(): squeeze() collapses a final
        # batch of size 1 to a 0-d tensor, which then silently
        # broadcasts against the (1,)-shaped labels.
        loss = criterion(outputs.view(-1), labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    # Average over batches: the original printed only the LAST batch's
    # loss, a noisy and misleading "training loss".
    train_loss /= len(train_loader)

    # --- validation ---
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for inputs, labels in val_loader:
            outputs = model(inputs)
            val_loss += criterion(outputs.view(-1), labels).item()
    val_loss /= len(val_loader)

    print(f'Epoch {epoch+1}, Training Loss: {train_loss:.4f}, Validation Loss: {val_loss:.4f}')

    # Early stopping: checkpoint the best model, stop after `patience`
    # epochs without validation improvement.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        counter = 0
        torch.save(model.state_dict(), 'best_model.pth')  # save best checkpoint
    else:
        counter += 1
        if counter >= patience:
            print("Early stopping!")
            break

# 7. Model evaluation
# Reload the best checkpoint saved during training
model = CarModel(input_size)
model.load_state_dict(torch.load('best_model.pth'))
model.eval()

# Predict the validation set
y_val_true = []
y_val_pred = []
with torch.no_grad():
    for inputs, labels in val_loader:
        outputs = model(inputs)
        # view(-1), not squeeze(): squeeze() turns a size-1 final batch
        # into a 0-d array, and list.extend() cannot iterate a 0-d array.
        y_val_pred.extend(outputs.view(-1).numpy())
        y_val_true.extend(labels.numpy())

y_val_true = np.array(y_val_true)
y_val_pred = np.array(y_val_pred)

# MAE in the original price scale (invert the log1p transform)
mae = mean_absolute_error(np.expm1(y_val_true), np.expm1(y_val_pred))
print(f'MAE: {mae}')

# 8. Predict test set B
y_pred_testB = []
with torch.no_grad():
    for inputs in testB_loader:
        outputs = model(inputs)
        # view(-1) is safe for a final batch of size 1, unlike squeeze()
        # which would yield a 0-d array that extend() cannot iterate.
        y_pred_testB.extend(outputs.view(-1).numpy())

y_pred_testB = np.array(y_pred_testB)
y_pred_testB = np.expm1(y_pred_testB)  # invert the log1p transform

# 9. Build the submission file
submission = pd.DataFrame({
    'SaleID': testB_data['SaleID'],
    'price': y_pred_testB,
})

# Round predicted prices to two decimals
submission['price'] = submission['price'].round(2)

# Write the CSV without the index column
submission.to_csv('submission.csv', index=False)

print("Submission file generated: submission.csv")
