import os
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.optim
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, accuracy_score

class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., 2017): alpha * (1 - pt)**gamma * CE.

    With the defaults (alpha=1, gamma=0) this reduces to plain cross-entropy.

    Args:
        alpha: scalar weighting factor applied to every sample's loss.
        gamma: focusing parameter; larger values down-weight easy examples.
        size_average: if True return the batch mean, otherwise the sum.
        ignore_index: optional target value excluded from the loss
            (None means "nothing ignored").
    """

    def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=None):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.size_average = size_average

    def forward(self, inputs, targets):
        # BUG FIX: `reduce=False` is a deprecated legacy argument (removed in
        # newer torch) -> use reduction='none'.  cross_entropy also requires
        # an *int* ignore_index, so substitute the framework default (-100)
        # when the caller left it as None instead of passing None through.
        ignore_index = -100 if self.ignore_index is None else self.ignore_index
        ce_loss = nn.functional.cross_entropy(inputs, targets,
                                              ignore_index=ignore_index,
                                              reduction='none')
        pt = torch.exp(-ce_loss)  # estimated probability of the true class
        focal_loss = self.alpha * (1 - pt) ** self.gamma * ce_loss
        return focal_loss.mean() if self.size_average else focal_loss.sum()
class LoadData(Dataset):
    """Thin Dataset wrapper over a pandas feature frame and a label series.

    Rows are addressed positionally via `.iloc`, so the inputs may carry an
    arbitrary (e.g. shuffled train/test-split) index.
    """

    def __init__(self, X, y):
        self.X = X  # feature rows (pandas DataFrame)
        self.y = y  # per-row labels (pandas Series)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        # Convert one positional row and its label to tensors on the fly.
        row = self.X.iloc[index]
        label = self.y.iloc[index]
        return torch.tensor(row), torch.tensor(label)
# ---- Data loading and preparation -----------------------------------------
# Expects a normalized CSV with a 'Label' column; everything else is a feature.
df = pd.read_csv('Normalization_total_extend.csv')
y = df['Label']
X = df.drop(columns=['Label'])
# Fixed random_state keeps the 80/20 split reproducible between runs.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=50)
train_data = LoadData(X_train, y_train)
test_data = LoadData(X_test, y_test)
X_dimension = len(X_train.columns)   # number of input features
y_dimension = y_train.nunique()      # number of distinct classes
print(f"X的维度：{X_dimension}")
print(f"y的维度：{y_dimension}")
batch_size = 128
train_dataloader = DataLoader(train_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel attention.

    Accepts 3-D (batch, C, L) or 4-D (batch, C, H, W) feature maps and
    rescales each channel by a learned weight in (0, 1).

    Args:
        in_features: number of input channels C.
        reduction_ratio: bottleneck ratio of the two-layer gating MLP.
    """

    def __init__(self, in_features, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.in_features = in_features
        # Kept for state-dict/backward compatibility (no parameters).
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_features, in_features // reduction_ratio, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_features // reduction_ratio, in_features, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        # BUG FIX: the original unconditionally unpacked `b, c, _, _` (4-D
        # only), yet this model feeds 3-D Conv1d features; it also expanded a
        # (b, c) weight with expand_as, which cannot broadcast to trailing
        # spatial dims.  Pool over all trailing dims and reshape explicitly.
        b, c = x.size(0), x.size(1)
        squeezed = x.mean(dim=tuple(range(2, x.dim())))  # global avg pool -> (b, c)
        weights = self.fc(squeezed)
        return x * weights.view(b, c, *([1] * (x.dim() - 2)))

class SpatialAttention(nn.Module):
    """Spatial attention: a 1x1 conv over channel-wise mean and max maps.

    Returns a single-channel sigmoid attention map with the same spatial
    shape as the input; the caller multiplies it with the features (CBAM).
    """

    def __init__(self):
        super(SpatialAttention, self).__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # BUG FIX: Conv2d needs 4-D batched input, but this model feeds 3-D
        # Conv1d features; view 3-D input as (B, C, L, 1) and squeeze back.
        is_3d = x.dim() == 3
        if is_3d:
            x = x.unsqueeze(-1)
        avg_out = torch.mean(x, dim=1, keepdim=True)   # (B, 1, H, W)
        max_out, _ = torch.max(x, dim=1, keepdim=True)  # (B, 1, H, W)
        attn = torch.sigmoid(self.conv(torch.cat([avg_out, max_out], dim=1)))
        return attn.squeeze(-1) if is_3d else attn

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel then spatial attention.

    Output has the same shape as the input feature map.

    Args:
        in_features: number of input channels.
        reduction_ratio: bottleneck ratio for the channel-attention MLP.
    """

    def __init__(self, in_features, reduction_ratio=16):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttention(in_features, reduction_ratio)
        self.spatial_attention = SpatialAttention()

    def forward(self, x):
        x = self.channel_attention(x)
        # BUG FIX: the spatial-attention map must be multiplied with the
        # features, not returned directly.  The original returned the
        # single-channel sigmoid map, whose flattened size could never match
        # the downstream Linear(2304, ...) (128 channels * 18 positions).
        x = x * self.spatial_attention(x)
        return x

class CNN(nn.Module):
    """1-D CNN with a CBAM attention block for 15-class classification.

    Expects input of shape (batch, 1, X_dimension) and returns (batch, 15)
    logits.
    """

    def __init__(self):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv1d(1, 32, kernel_size=2),
            nn.Conv1d(32, 64, kernel_size=2),
            nn.MaxPool1d(2, 2),
            nn.Conv1d(64, 64, kernel_size=2),
            nn.Conv1d(64, 128, kernel_size=2),
            nn.MaxPool1d(2, 2),
        )
        self.cbam = CBAM(128)  # channel + spatial attention over backbone output
        self.flatten = nn.Flatten()
        self.fc = nn.Sequential(
            # 2304 = 128 channels * 18 positions — assumes a fixed input
            # width; TODO confirm against X_dimension.
            nn.Linear(2304, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 15))

    # BUG FIX: `forward` was defined at module level (wrong indentation), so
    # calling the model raised NotImplementedError.  It must be a method.
    def forward(self, x):
        x = self.backbone(x)
        x = self.cbam(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x

def loss_value_plot(losses, iter):
    """Plot the sampled loss values on a fresh figure.

    `losses` holds one value per 100 training batches; `iter` is how many
    samples were taken.  The caller is responsible for saving the figure.
    """
    plt.figure()
    plt.plot(list(range(1, iter + 1)), losses)
    plt.xlabel('Iterations (×100)')
    plt.ylabel('Loss Value')
# Instantiate the model on the selected device (Module.to returns self).
CNN_model = CNN().to(device=device)
def train(model, optimizer, loss_fn, epochs):
    """Train `model` on the module-level `train_dataloader`.

    Samples the loss every 100 batches and returns (losses, iter): the
    sampled values and how many samples were taken.
    """
    sampled_losses = []
    sample_count = 0
    for epoch in range(epochs):
        print(f"epoch {epoch+1}\n-----------------")
        for batch_idx, (features, labels) in enumerate(train_dataloader):
            features = features.to(device).to(torch.float32)
            labels = labels.to(device).to(torch.float32)
            # Add the single input channel expected by the Conv1d backbone.
            features = features.reshape(features.shape[0], 1, X_dimension)
            optimizer.zero_grad()
            loss = loss_fn(model(features), labels.long())
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print(f"loss: {loss.item()}\t[{(batch_idx+1)*len(features)}/{len(train_data)}]")
                sample_count += 1
                sampled_losses.append(loss.item())
    return sampled_losses, sample_count
def test(model, loss_fn):
    """Evaluate `model` on the module-level `test_dataloader` and print
    confusion matrix, macro P/R/F1, accuracy and the average loss."""
    true_labels = []
    pred_labels = []
    total_loss = 0.0
    batch_count = 0
    with torch.no_grad():
        for features, labels in test_dataloader:
            features = features.to(device).to(torch.float32)
            labels = labels.to(device).to(torch.long)
            # Add the single input channel expected by the Conv1d backbone.
            features = features.reshape(features.shape[0], 1, X_dimension)
            logits = model(features)
            total_loss += loss_fn(logits, labels).item()
            batch_count += 1
            true_labels.extend(labels.cpu().numpy())
            pred_labels.extend(torch.argmax(logits, dim=1).cpu().numpy())

    conf_mat = confusion_matrix(true_labels, pred_labels)
    precision = precision_score(true_labels, pred_labels, average='macro')
    recall = recall_score(true_labels, pred_labels, average='macro')
    f1 = f1_score(true_labels, pred_labels, average='macro')
    accuracy = accuracy_score(true_labels, pred_labels)
    avg_loss = total_loss / batch_count

    print("Confusion Matrix:\n", conf_mat)
    print("Macro-average Precision:", precision)
    print("Macro-average Recall:", recall)
    print("Macro-average F1 Score:", f1)
    print("Accuracy:", accuracy)
    print("Average Loss:", avg_loss)
# Load a previously trained checkpoint if one exists; otherwise train, save
# the weights and the loss curve, then evaluate either way.
model_path = 'NEW_CNNModel/NEW_CNN_model.pth'
if os.path.exists(model_path):
    # map_location lets a GPU-trained checkpoint load on a CPU-only host.
    CNN_model.load_state_dict(torch.load(model_path, map_location=device))
else:
    optimizer = torch.optim.Adam(CNN_model.parameters(), lr=0.001)
    loss_fn = FocalLoss(alpha=1, gamma=2)
    epochs = 10
    losses, iter = train(CNN_model, optimizer, loss_fn, epochs)
    # BUG FIX: torch.save/plt.savefig do not create directories; ensure the
    # output folder exists before writing the checkpoint and the figure.
    os.makedirs('NEW_CNNModel', exist_ok=True)
    torch.save(CNN_model.state_dict(), model_path)
    loss_value_plot(losses, iter)
    plt.savefig('NEW_CNNModel/NEW_CNN_loss.png')
loss_fn = FocalLoss(alpha=1, gamma=2)
test(CNN_model, loss_fn)


