import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import torch.nn.functional as F
import pandas as pd

# Model definition
class CNN(nn.Module):
    """Pointwise-convolution CNN emitting a single sigmoid probability.

    Expects input of shape (batch, channels, height, width). All kernels
    are 1x1, so the spatial dimensions are unchanged through the conv
    stack and the fully connected head sees 16 * height * width features.
    """

    def __init__(self, input_shape):
        super().__init__()
        channels, height, width = input_shape
        # Stack of pointwise (1x1) convolutions: per-pixel feature mixing.
        self.conv1 = nn.Conv2d(in_channels=channels, out_channels=64, kernel_size=1)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=1)
        # Fully connected head over the flattened feature map.
        self.fc1 = nn.Linear(16 * height * width, 8)
        self.fc2 = nn.Linear(8, 1)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        x = x.flatten(1)  # collapse all dims except batch
        x = F.relu(self.fc1(x))
        # Sigmoid squashes the logit into (0, 1) for binary classification.
        return torch.sigmoid(self.fc2(x))

# Load data (x_train_reshape and y_train_matrix are assumed to already be
# torch.Tensor instances shaped (N, C, H, W) and (N, 1) respectively
# -- TODO confirm against the preprocessing step that produces them).
train_data = TensorDataset(x_train_reshape, y_train_matrix)
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)

# Derive (channels, height, width) from the data itself instead of
# hard-coding a single channel: the original `(1, shape[2], shape[3])`
# silently broke for any input with more than one channel.
input_shape = tuple(x_train_reshape.shape[1:])
model = CNN(input_shape)

# BCELoss matches the model's sigmoid output (probabilities, not logits).
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train the model
epochs = 40
for epoch in range(epochs):
    model.train()
    running_loss = 0.0
    # The batch index from enumerate() was unused, so iterate directly.
    for inputs, labels in train_loader:
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()  # backpropagate
        optimizer.step()  # apply the parameter update

        running_loss += loss.item()
    # Report the mean batch loss for the epoch.
    print(f'Epoch [{epoch+1}/{epochs}], Loss: {running_loss/len(train_loader)}')

# Predict on the validation set
model.eval()
with torch.no_grad():
    probabilities = model(x_val_reshape)
    y_pred = probabilities > 0.5  # threshold the sigmoid output at 0.5
    # Move to CPU, convert to numpy, and wrap in a DataFrame for downstream use.
    y_pred_df = pd.DataFrame(y_pred.cpu().numpy())
