from sklearn.model_selection import train_test_split
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import xarray as xr
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt


# Read one .nc file and extract its sea-ice-concentration grid.
def read_nc_data(nc_file):
    """Return the sea ice concentration array stored in *nc_file*.

    The observation date (YYYYMMDD) is parsed from the file name and
    selects which sensor variable (F08/F11/F13/F17 ICECON) holds the
    data, since the SSM/I satellite sensor changed over the mission.

    Parameters
    ----------
    nc_file : str
        Path to a daily NetCDF file whose base name contains the date
        as the 5th underscore-separated token — TODO confirm naming.

    Returns
    -------
    numpy.ndarray
        The first (and only) time step of the selected variable.
    """
    # Parse the date from the file's base name instead of a fixed
    # path-depth index ([3] after splitting on "\\"), so the function
    # works for any base directory and on any OS.
    date = int(os.path.basename(nc_file).split("_")[4])
    # Context manager closes the dataset promptly (original leaked it).
    with xr.open_dataset(nc_file) as ds:
        if date <= 19911218:
            var = 'F08_ICECON'
        elif date <= 19950929:
            var = 'F11_ICECON'
        elif date <= 20071231:
            var = 'F13_ICECON'
        else:
            var = 'F17_ICECON'
        return ds[var].values[0]


# Collect daily file paths and build sliding 7-day input / 7-day target pairs.
def load_data(base_dir, start_year=2023, end_year=2024):
    """Scan per-year sub-directories of *base_dir* for .nc files and
    build sliding-window path pairs for training.

    Parameters
    ----------
    base_dir : str
        Directory containing one sub-directory per year (e.g. "2023").
    start_year, end_year : int
        Inclusive year range to scan; defaults match the original
        hard-coded range(2023, 2025).

    Returns
    -------
    (all_data, all_labels) : tuple of lists
        all_data[i] holds 7 consecutive input-day paths and
        all_labels[i] the following 7 target-day paths.
    """
    # Gather every file path first, in chronological (file-name) order.
    file_paths = []
    for year in range(start_year, end_year + 1):
        year_dir = os.path.join(base_dir, str(year))
        files = sorted(f for f in os.listdir(year_dir) if f.endswith('.nc'))
        file_paths.extend(os.path.join(year_dir, f) for f in files)

    # Build the pairs ONCE over the complete list.  The original ran
    # this loop inside the year loop over the growing list, which
    # re-emitted (duplicated) every pair of earlier years.  The bound
    # is len - 13 (not len - 14): the target window ends at i + 13, so
    # i may go up to len - 14 inclusive — the original dropped the last
    # valid window.
    all_data = []
    all_labels = []
    for i in range(len(file_paths) - 13):
        all_data.append(file_paths[i:i + 7])          # 7 input days
        all_labels.append(file_paths[i + 7:i + 14])   # next 7 target days

    return all_data, all_labels


# PyTorch Dataset that lazily loads (7-day input, 7-day target) grids.
class SeaIceDataset(Dataset):
    """Lazily reads sea-ice grids from disk for each sample.

    X_paths[i] / y_paths[i] are lists of 7 .nc file paths each; the
    grids are read on demand in __getitem__, so the full archive never
    has to be held in memory at once.
    """

    def __init__(self, X_paths, y_paths):
        self.X_paths = X_paths  # list of 7-path input windows
        self.y_paths = y_paths  # list of 7-path target windows

    def __len__(self):
        return len(self.X_paths)

    def __getitem__(self, idx):
        # Read the 7 input days and the 7 target days for this sample.
        X = np.stack([read_nc_data(path) for path in self.X_paths[idx]])
        y = np.stack([read_nc_data(path) for path in self.y_paths[idx]])
        # Cast to float32: the default DataLoader collation would
        # otherwise produce float64 tensors, which do not match the
        # model's float32 Conv2d weights and raise at forward time.
        return X.astype(np.float32), y.astype(np.float32)


# Normalization helper (currently unused by the pipeline).
def normalize_data(X):
    """Scale raw sea-ice concentration values (assumed 0–100) into 0–1."""
    scaled = X / 100
    return scaled


# Encoder-decoder CNN with additive skip connections.
class SeaIceCNN(nn.Module):
    """Maps 7 daily sea-ice grids to the next 7 daily grids.

    Encoder: three conv+pool stages (7 -> 32 -> 64 -> 128 channels,
    spatial size divided by 8).  Decoder: three conv+upsample stages
    back to 7 channels at full resolution, adding the matching encoder
    feature maps before the last two convolutions.
    """

    def __init__(self):
        super().__init__()
        # --- encoder ---
        self.conv1 = nn.Conv2d(7, 32, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # --- decoder ---
        self.conv4 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.upsample1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv5 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
        self.upsample2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv6 = nn.Conv2d(32, 7, kernel_size=3, padding=1)
        self.upsample3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, x):
        # Encoder — keep the two intermediate maps for the skip adds.
        skip_a = self.pool1(torch.relu(self.conv1(x)))       # (32, H/2, W/2)
        skip_b = self.pool2(torch.relu(self.conv2(skip_a)))  # (64, H/4, W/4)
        out = self.pool3(torch.relu(self.conv3(skip_b)))     # (128, H/8, W/8)

        # Decoder — upsample and fuse the encoder features by addition.
        out = self.upsample1(self.conv4(out))            # (64, H/4, W/4)
        out = self.upsample2(self.conv5(out + skip_b))   # (32, H/2, W/2)
        out = self.upsample3(self.conv6(out + skip_a))   # (7,  H,   W)
        return out


# Select the compute device (GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Build the lists of 7-day input / 7-day target file-path pairs.
base_dir = 'H:\\NC-daily'
X_paths, y_paths = load_data(base_dir)

# Split into training and validation sets (80% / 20%).
# NOTE(review): the sliding windows overlap day-by-day, so a random
# split leaks near-identical samples between train and validation —
# a chronological split would give an honest estimate. TODO confirm.
X_train, X_val, y_train, y_val = train_test_split(X_paths, y_paths, test_size=0.2, random_state=42)


train_dataset = SeaIceDataset(X_train, y_train)
val_dataset = SeaIceDataset(X_val, y_val)

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Normalization note: samples are read lazily inside SeaIceDataset, so
# any scaling (e.g. normalize_data) would have to be applied there; as
# written, the raw concentration values are fed to the network.

# Initialize the model.
model = SeaIceCNN().to(device)

# Loss function and optimizer.
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train the model.
num_epochs = 20
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for inputs, labels in train_loader:
        # Cast to float32 defensively: numpy arrays from the dataset
        # collate to float64 tensors by default, which would not match
        # the model's float32 weights.
        inputs = inputs.float().to(device)
        labels = labels.float().to(device)

        optimizer.zero_grad()

        # Forward pass.
        outputs = model(inputs)

        # Loss, backpropagation, parameter update.  (The original also
        # printed the raw loss tensor every batch — debug leftover,
        # removed; the per-epoch average below is the useful signal.)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    avg_loss = running_loss / len(train_loader)
    print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}')

# Persist the trained weights.
torch.save(model.state_dict(), '20250103.pth')
# Evaluate the model on the validation set.
model.eval()
with torch.no_grad():
    test_loss = 0.0
    for inputs, labels in val_loader:
        # float32 cast mirrors training (float64 collation would fail).
        inputs = inputs.float().to(device)
        labels = labels.float().to(device)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        test_loss += loss.item()

    # BUG FIX: average over the VALIDATION loader — the original
    # divided by len(train_loader), skewing the reported loss.
    avg_test_loss = test_loss / len(val_loader)
    print(f'Test Loss: {avg_test_loss:.4f}')

# Visualize one sample: load its 7 input grids, run the model, and
# compare the first predicted day with the first true target day.
sample_idx = 0
# BUG FIX: the original passed the file-path STRINGS straight to
# torch.tensor (raises) and handed a list of paths to read_nc_data.
# Load the grids first, then build the (1, 7, H, W) input batch.
sample_X = np.stack([read_nc_data(p) for p in X_paths[sample_idx]]).astype(np.float32)
true_values = np.stack([read_nc_data(p) for p in y_paths[sample_idx]])
with torch.no_grad():
    predicted = model(torch.from_numpy(sample_X).unsqueeze(0).to(device))

plt.subplot(1, 2, 1)
plt.title("True Values")
plt.imshow(true_values[0], cmap='Blues')  # true concentration, target day 1
plt.subplot(1, 2, 2)
plt.title("Predicted Values")
plt.imshow(predicted[0, 0].cpu().numpy(), cmap='Blues')  # predicted day 1
plt.show()
