import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import random


# Fix random seeds across all RNG sources used by this script
def set_seed(seed=42):
    """Seed every random number generator used here for reproducibility.

    Covers Python's `random`, NumPy, and PyTorch (CPU and, when available,
    all CUDA devices).

    Args:
        seed: Integer seed applied to every generator (default 42).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # manual_seed_all seeds every GPU, not just the current device;
        # plain manual_seed left other devices unseeded on multi-GPU boxes.
        torch.cuda.manual_seed_all(seed)


set_seed(42)  # fix the global random seeds for reproducibility

# Load the dataset from a hard-coded absolute path
df = pd.read_csv('/home/liudd/deeplearing/processed_data_with_bands_all9.18.csv')
# Keep only rows with fy_clt == 2 (presumably one cloud-type class — TODO confirm)
df = df[df['fy_clt'] == 2]

# Feature columns (pre-scaled/encoded upstream, judging by the suffixes)
# and the regression target (CloudSat cloud-base height)
features = ['fy_cth_log', 'fy_ctt_scaled', 'fy_ctp_scaled',
            'fy_lat_sin', 'fy_lat_cos', 'fy_lon_sin', 'fy_lon_cos',
            'fy_band1_scaled', 'fy_band2_scaled', 'fy_band5_scaled', 'fy_band6_scaled', 'fy_band7_scaled',
            'fy_band12_scaled']
target = 'cloudsat_cbh'

X = df[features].values
y = df[target].values

# Standardize the target; scaler_y is reused later to invert the transform
# before computing fold R² scores
scaler_y = StandardScaler()
y = scaler_y.fit_transform(y.reshape(-1, 1)).flatten()

# 3-fold shuffled cross-validation splitter
kfold = KFold(n_splits=3, shuffle=True, random_state=42)


# 模型定义
# Model definition
class OptimizedCNN_DNN(nn.Module):
    """1-D CNN feature extractor followed by a fully connected regression head.

    Expects input of shape (batch, 1, input_size) — the feature vector is
    treated as a length-`input_size` sequence with a single channel — and
    produces a (batch, 1) regression output.
    """

    def __init__(self, input_size):
        super(OptimizedCNN_DNN, self).__init__()
        # Three conv stages, each widening the channel count (1 -> 64 -> 128 -> 256);
        # padding=1 with kernel_size=3 keeps the sequence length at input_size.
        self.conv1 = nn.Conv1d(1, 64, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = nn.Conv1d(64, 128, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm1d(128)
        self.conv3 = nn.Conv1d(128, 256, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm1d(256)
        # Fully connected head: flattened conv features -> scalar prediction
        self.fc1 = nn.Linear(256 * input_size, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 1)
        self.relu = nn.LeakyReLU(0.01)
        self.dropout = nn.Dropout(p=0.3)

    def forward(self, x):
        # Conv stages: conv -> batch norm -> LeakyReLU, in order.
        stages = ((self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3))
        for conv, bn in stages:
            x = self.relu(bn(conv(x)))
        # Collapse (batch, 256, input_size) to (batch, 256 * input_size).
        x = torch.flatten(x, 1)
        # Dense head with dropout between the first three layers.
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.dropout(self.relu(self.fc2(x)))
        x = self.relu(self.fc3(x))
        return self.fc4(x)


# 训练和验证过程
# Training and validation loop with early stopping
def train_and_validate(model, train_loader, val_loader, criterion, optimizer, scheduler, device, num_epochs=100,
                       early_stopping_patience=20):
    """Train `model`, track per-epoch metrics, and restore the best-val weights.

    Uses mixed-precision training (no-op scaling when CUDA is unavailable),
    a plateau LR scheduler stepped on the validation loss, and early stopping
    once validation loss fails to improve for `early_stopping_patience` epochs.

    Args:
        model: nn.Module to train (already on `device`).
        train_loader / val_loader: DataLoaders yielding (X, y) float batches.
        criterion: loss function (expects model output vs. target shapes to match).
        optimizer: optimizer over model.parameters().
        scheduler: ReduceLROnPlateau-style scheduler stepped with val_loss.
        device: torch.device for batch transfer.
        num_epochs: maximum epochs (default 100).
        early_stopping_patience: epochs without val improvement before stopping.

    Returns:
        (model, train_losses, val_losses, train_r2_scores, val_r2_scores) —
        the model with its best validation-loss weights loaded, plus the
        per-epoch metric histories.
    """
    best_val_loss = float('inf')
    early_stopping_counter = 0
    scaler = torch.cuda.amp.GradScaler()  # mixed-precision loss scaling

    # BUG FIX: model.state_dict() returns *references* to the live parameter
    # tensors, so snapshotting it directly means the "best" weights silently
    # track every subsequent update and the final load restores the *last*
    # epoch, not the best one. Clone every tensor to take a true snapshot.
    # Also snapshot up front so the name is defined even if num_epochs <= 0.
    def _snapshot_weights():
        return {k: v.detach().clone() for k, v in model.state_dict().items()}

    best_model_weights = _snapshot_weights()

    # Per-epoch histories for later plotting
    train_losses = []
    val_losses = []
    train_r2_scores = []
    val_r2_scores = []

    for epoch in range(num_epochs):
        model.train()
        train_loss = 0
        y_train_true, y_train_pred = [], []

        for X_batch, y_batch in train_loader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            optimizer.zero_grad()

            with torch.cuda.amp.autocast():  # automatic mixed precision
                # unsqueeze(1) adds the single conv channel dimension
                outputs = model(X_batch.unsqueeze(1))
                loss = criterion(outputs, y_batch)

            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            train_loss += loss.item()

            # Collect train predictions/targets for the epoch R²
            y_train_true.extend(y_batch.cpu().numpy())
            y_train_pred.extend(outputs.cpu().detach().numpy())

        # Mean train loss and R² for this epoch
        train_loss /= len(train_loader)
        train_losses.append(train_loss)
        train_r2 = r2_score(np.array(y_train_true), np.array(y_train_pred))
        train_r2_scores.append(train_r2)

        # Validation phase
        val_loss = 0
        y_val_true, y_val_pred = [], []
        model.eval()
        with torch.no_grad():
            for X_batch, y_batch in val_loader:
                X_batch, y_batch = X_batch.to(device), y_batch.to(device)
                outputs = model(X_batch.unsqueeze(1))
                val_loss += criterion(outputs, y_batch).item()

                # Collect val predictions/targets for the epoch R²
                y_val_true.extend(y_batch.cpu().numpy())
                y_val_pred.extend(outputs.cpu().numpy())

        # Mean val loss and R² for this epoch
        val_loss /= len(val_loader)
        val_losses.append(val_loss)
        val_r2 = r2_score(np.array(y_val_true), np.array(y_val_pred))
        val_r2_scores.append(val_r2)

        print(
            f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Train R²: {train_r2:.4f}, Val R²: {val_r2:.4f}')

        scheduler.step(val_loss)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            early_stopping_counter = 0
            best_model_weights = _snapshot_weights()  # true copy of the best weights
        else:
            early_stopping_counter += 1
            if early_stopping_counter >= early_stopping_patience:
                print(f'Early stopping at epoch {epoch + 1}')
                break

    # Restore the best-validation-loss weights before returning
    model.load_state_dict(best_model_weights)
    return model, train_losses, val_losses, train_r2_scores, val_r2_scores


# 绘制损失和R²曲线
# Plot per-epoch loss and R² curves
def plot_metrics(train_losses, val_losses, train_r2_scores, val_r2_scores):
    """Show side-by-side loss and R² curves for the training and validation sets."""
    epoch_axis = range(1, len(train_losses) + 1)

    # (subplot index, train series, train label, val series, val label, title, y-label)
    panels = [
        (1, train_losses, 'Train Loss', val_losses, 'Val Loss',
         'Loss per Epoch', 'Loss (MSE)'),
        (2, train_r2_scores, 'Train R²', val_r2_scores, 'Val R²',
         'R² per Epoch', 'R² Score'),
    ]

    plt.figure(figsize=(12, 5))
    for index, train_series, train_label, val_series, val_label, title, ylabel in panels:
        plt.subplot(1, 2, index)
        plt.plot(epoch_axis, train_series, label=train_label)
        plt.plot(epoch_axis, val_series, label=val_label)
        plt.title(title)
        plt.xlabel('Epochs')
        plt.ylabel(ylabel)
        plt.legend()

    plt.tight_layout()
    plt.show()


# Cross-validation over the 3 folds defined above
r2_scores = []
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

for fold, (train_idx, val_idx) in enumerate(kfold.split(X, y)):
    print(f"\nFold {fold + 1}")
    X_train, X_val = X[train_idx], X[val_idx]
    y_train, y_val = y[train_idx], y[val_idx]

    # Wrap the numpy splits as float32 tensors; targets become column vectors
    X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
    y_train_tensor = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
    X_val_tensor = torch.tensor(X_val, dtype=torch.float32)
    y_val_tensor = torch.tensor(y_val, dtype=torch.float32).view(-1, 1)

    train_loader = DataLoader(TensorDataset(X_train_tensor, y_train_tensor), batch_size=32, shuffle=True)
    val_loader = DataLoader(TensorDataset(X_val_tensor, y_val_tensor), batch_size=32, shuffle=False)

    # Fresh model and optimizer per fold; LR halves after 5 stagnant epochs
    model = OptimizedCNN_DNN(X_train.shape[1]).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)

    # Train with early stopping; returns the model with best-val weights loaded
    model, train_losses, val_losses, train_r2_scores, val_r2_scores = train_and_validate(
        model, train_loader, val_loader, criterion, optimizer, scheduler, device
    )

    plot_metrics(train_losses, val_losses, train_r2_scores, val_r2_scores)

    # Score this fold on the whole validation set in one forward pass.
    # NOTE(review): this relies on train_and_validate leaving the model in
    # eval mode (its last phase is validation); an explicit model.eval()
    # here would be safer — confirm.
    with torch.no_grad():
        X_val_tensor = X_val_tensor.to(device)
        y_pred = model(X_val_tensor.unsqueeze(1)).cpu().numpy()
        y_val_true = y_val_tensor.cpu().numpy()

    # Undo the target standardization before computing the fold R²
    y_pred_true = scaler_y.inverse_transform(y_pred)
    y_val_true = scaler_y.inverse_transform(y_val_true)

    r2 = r2_score(y_val_true, y_pred_true)
    print(f'Fold {fold + 1} R² Score: {r2:.4f}')
    r2_scores.append(r2)

# Report the cross-validation summary, skipping any NaN folds
if r2_scores:
    valid_scores = [score for score in r2_scores if not np.isnan(score)]
    if not valid_scores:
        print("No valid R² scores to average.")
    else:
        mean_r2 = np.mean(valid_scores)
        print(f'Average R² Score across {len(valid_scores)} valid folds: {mean_r2:.4f}')
