import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from transformers import ViTModel, ViTFeatureExtractor, Swinv2Model, AutoImageProcessor, Dinov2Model, AutoModel
from datasets import load_dataset
from tqdm import tqdm
from PIL import Image


class CustomViTClassifier(nn.Module):
    """Image classifier: a frozen pretrained backbone plus a small trainable MLP head.

    NOTE(review): despite the class name, the checkpoint loaded here is
    ConvNeXtV2 ('facebook/convnextv2-tiny-1k-224'), not a ViT. The class
    name is kept unchanged for caller compatibility.
    """

    def __init__(self, num_labels):
        """Build the frozen backbone and the trainable classification head.

        Args:
            num_labels: number of output classes.
        """
        super(CustomViTClassifier, self).__init__()
        # Load the pretrained ConvNeXtV2 backbone.
        self.vit = AutoModel.from_pretrained('facebook/convnextv2-tiny-1k-224')
        print(self.vit.config)
        # Freeze every backbone parameter so only the head below is trained.
        for param in self.vit.parameters():
            param.requires_grad = False

        # BUGFIX: ConvNeXtV2 configs expose per-stage widths as `hidden_sizes`
        # (a list), not a single `hidden_size`. Use `hidden_size` when present
        # (ViT-style configs), otherwise the last stage's width.
        hidden_dim = getattr(self.vit.config, 'hidden_size', None)
        if hidden_dim is None:
            hidden_dim = self.vit.config.hidden_sizes[-1]

        # Trainable head: hidden layer -> ReLU -> dropout -> classifier.
        self.fc1 = nn.Linear(hidden_dim, 128)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.3)
        self.classifier = nn.Linear(128, num_labels)

    def forward(self, pixel_values):
        """Return raw classification logits of shape (batch, num_labels).

        BUGFIX: the original applied Softmax here and the training script then
        fed the result to nn.CrossEntropyLoss, which applies log-softmax
        itself — the double softmax flattens gradients and slows training.
        CrossEntropyLoss and torch.max both operate on raw logits, so no
        caller behavior breaks.
        """
        outputs = self.vit(pixel_values=pixel_values)
        # BUGFIX: ConvNeXt's last_hidden_state is a (B, C, H, W) feature map
        # with no [CLS] token, so `last_hidden_state[:, 0]` selected channel 0
        # rather than a pooled vector. Use the model's pooled output instead.
        pooled_output = outputs.pooler_output

        # Trainable head.
        x = self.fc1(pooled_output)
        x = self.relu(x)
        x = self.dropout(x)

        logits = self.classifier(x)
        return logits


# Placeholder: a label-to-name mapping was assumed here.


def transform(examples):
    """Dataset map-callback: preprocess a batch of examples for the model.

    Runs the module-global image processor (`feature_extractor`) over the
    batch's images and attaches the integer labels as a tensor.

    Args:
        examples: batch dict with 'image' (list of PIL images) and 'label'
            (list of ints) — assumed schema; confirm against the dataset.

    Returns:
        dict containing 'pixel_values' (plus any processor extras) and 'labels'.
    """
    inputs = feature_extractor(examples['image'], return_tensors='pt')
    inputs['labels'] = torch.tensor(examples['label'])
    # BUGFIX: removed the leftover debug print of the label tensor, which
    # spammed stdout once per mapped batch over the whole dataset.
    return inputs


# Load the image processor matching the pretrained backbone checkpoint.
feature_extractor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
model = CustomViTClassifier(num_labels=5)  # 5 classes (original comment said 10, contradicting the code)
print(model)
# Load the dataset from the local 'data' directory and preprocess every split.
dataset = load_dataset('data')
print(dataset['train'][0])
transformed_dataset = dataset.map(transform, batched=True)
train_dataset = transformed_dataset['train'].with_format('torch')
eval_dataset = transformed_dataset['test'].with_format('torch')

# Build the data loaders.
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
eval_loader = DataLoader(eval_dataset, batch_size=16)

# Optimizer and loss. NOTE(review): nn.CrossEntropyLoss expects raw logits —
# verify that the model's forward does not already apply softmax.
optimizer = optim.Adam(model.parameters(), lr=5e-5)
loss_fn = nn.CrossEntropyLoss()

# Move the model to GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


# 训练模型
def train(model, train_loader):
    """Run one training epoch and return the mean per-batch loss.

    Uses the module-global `optimizer`, `loss_fn` and `device`.

    Args:
        model: the classifier being trained (already on `device`).
        train_loader: DataLoader yielding dicts with 'pixel_values'/'labels'.

    Returns:
        float: total loss divided by the number of batches.
    """
    model.train()
    total_loss = 0
    for batch in tqdm(train_loader):
        pixel_values = batch['pixel_values']
        # BUGFIX: the original called bare .squeeze(), which also drops the
        # batch dimension when the final batch holds a single sample,
        # producing a 3-D tensor the model cannot consume. Only collapse a
        # spurious size-1 axis in a (B, 1, C, H, W) layout.
        if pixel_values.dim() == 5:
            pixel_values = pixel_values.squeeze(1)
        inputs = pixel_values.to(device)
        labels = batch['labels'].to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
    return total_loss / len(train_loader)


# 评估模型
def evaluate(model, eval_loader):
    """Evaluate the model and return (mean per-batch loss, accuracy in %).

    Uses the module-global `loss_fn` and `device`. Runs under torch.no_grad().

    Args:
        model: the classifier to evaluate (already on `device`).
        eval_loader: DataLoader yielding dicts with 'pixel_values'/'labels'.

    Returns:
        tuple[float, float]: (total loss / num batches, 100 * correct / total).
    """
    model.eval()
    total_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch in tqdm(eval_loader):
            pixel_values = batch['pixel_values']
            # BUGFIX: bare .squeeze() also removed the batch dimension for a
            # final batch of size 1; only collapse a (B, 1, C, H, W) axis.
            if pixel_values.dim() == 5:
                pixel_values = pixel_values.squeeze(1)
            inputs = pixel_values.to(device)
            labels = batch['labels'].to(device)

            outputs = model(inputs)
            loss = loss_fn(outputs, labels)
            total_loss += loss.item()

            # Predicted class = argmax over the class dimension.
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return total_loss / len(eval_loader), accuracy


# Train/evaluate loop with simple early stopping: stop as soon as validation
# accuracy drops, and persist the BEST model seen — not the degraded last one.
pre_acc = 0
best_state = None  # state_dict snapshot from the best-accuracy epoch
for epoch in range(20):
    print(f'Epoch {epoch + 1}')
    train_loss = train(model, train_loader)
    eval_loss, eval_accuracy = evaluate(model, eval_loader)

    print(f'Training Loss: {train_loss:.4f}')
    print(f'Validation Loss: {eval_loss:.4f}, Accuracy: {eval_accuracy:.2f}%')
    if pre_acc > eval_accuracy:
        break
    pre_acc = eval_accuracy
    # BUGFIX: the original broke out of the loop on an accuracy drop and then
    # saved the *current* (worse) weights. Snapshot the weights whenever
    # accuracy does not regress so the best checkpoint can be restored below.
    best_state = {k: v.detach().clone() for k, v in model.state_dict().items()}

# Restore the best-performing weights (if any epoch completed) before saving.
if best_state is not None:
    model.load_state_dict(best_state)
torch.save(model, 'ConvNextV2.pth')
