from hyperopt import fmin, tpe, hp, Trials
import argparse
import os
import os
import timm
import torch
import torch.nn as nn
import torch.optim as optim

# Image datasets and image manipulation
import torchvision
import torchvision.transforms as transforms

# PyTorch TensorBoard support
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets
from timm.models.vision_transformer import VisionTransformer

import argparse


# Module-level TensorBoard writer shared by train() and main() across all
# hyperopt trials; event files land under logs/selectv1.
writer = SummaryWriter('logs/selectv1')


class CustomViTModel(VisionTransformer):
    """Thin wrapper around timm's VisionTransformer that exposes the
    hyperparameter names used by the search space (num_layers, hidden_size,
    dropout/drop rate) and records them as instance attributes."""

    def __init__(self, img_size=224, num_classes=1000, patch_size=16, hidden_size=768,
                 num_layers=12, num_heads=12, mlp_ratio=4, drop_rate=0.1):
        # Record the configuration. These are plain (non-module) attributes,
        # so assigning them before super().__init__() is legal for nn.Module.
        # NOTE(review): the base constructor may later set attributes with the
        # same names (e.g. num_heads) -- confirm against the installed timm.
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.drop_rate = drop_rate  # renamed from dropout_rate to match timm's kwarg

        self.depth = num_layers  # VisionTransformer calls the layer count "depth"

        super().__init__(img_size=img_size, num_classes=num_classes,
                         patch_size=patch_size, embed_dim=hidden_size,
                         depth=self.depth, num_heads=num_heads, mlp_ratio=mlp_ratio,
                         drop_rate=drop_rate)

def train(dataloader, model, loss_fn, optimizer, device, epoch, n):
    """Run one training epoch, logging the mean loss to TensorBoard.

    Args:
        dataloader: yields (inputs, labels) mini-batches.
        model: network being optimized (switched to train mode here).
        loss_fn: criterion comparing predictions to labels.
        optimizer: optimizer stepping the model's parameters.
        device: torch.device the batches are moved to.
        epoch: current epoch index, used to build the global step.
        n: batches per epoch (len(dataloader)), used to build the global step.
    """
    model.train()
    running_loss = 0.0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation; gradients are cleared after the step so the next
        # iteration starts fresh (a new model's grads start out empty anyway).
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        running_loss += loss.item()

        if batch % 100 == 99:  # every 100 mini-batches...
            # ...log the loss averaged over the last 100 batches. The original
            # divided the 100-batch sum by 1000, under-reporting the loss 10x.
            writer.add_scalar('training loss',
                              running_loss / 100,
                              epoch * n + batch)
            running_loss = 0.0

def val(dataloader, model, loss_fn, device, save_path, global_correct):
    """Evaluate the model and checkpoint it when accuracy improves.

    Args:
        dataloader: validation mini-batches of (inputs, labels).
        model: network to evaluate (switched to eval mode here).
        loss_fn: criterion used for the average validation loss.
        device: torch.device inputs are moved to.
        save_path: directory where the best model is written as "model.pth".
        global_correct: best accuracy seen so far.

    Returns:
        The updated best accuracy. The original discarded this value, so the
        caller's best-so-far never advanced and the model was re-saved on
        every epoch whose accuracy beat the stale initial value.
    """
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    if correct > global_correct:
        global_correct = correct
        # Saves the entire module (not just state_dict), matching the
        # original behavior.
        torch.save(model, os.path.join(save_path, "model.pth"))
        print("Saved PyTorch Model State to model.pth")
    return global_correct

def main(batch_size=128, lr=1e-5, epochs=50, num_workers=0, save_path='models/vit/20231206/',
         data_dir='combinations/11', patch_size=None, num_layers=None, hidden_size=None,
         num_heads=None, mlp_ratio=None, dropout_rate=None):
    """Train a CustomViTModel on an ImageFolder dataset; return best accuracy.

    Args:
        batch_size: mini-batch size for both loaders.
        lr: Adam learning rate.
        epochs: number of training epochs.
        num_workers: DataLoader worker process count.
        save_path: directory where the best checkpoint is written.
        data_dir: root containing 'train' and 'val' ImageFolder subdirs.
        patch_size, num_layers, hidden_size, num_heads, mlp_ratio,
            dropout_rate: ViT architecture hyperparameters. The None defaults
            are placeholders; callers (the hyperopt objective) always supply
            them, and `patch_size * patch_size` below crashes on None.

    Returns:
        Best validation accuracy observed across the epochs, in [0, 1].
    """
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))
    print(patch_size * patch_size)  # debug print of the per-patch pixel area

    data_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    # Load the training and validation sets.
    train_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms)
    val_dataset = datasets.ImageFolder(os.path.join(data_dir, 'val'), data_transforms)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    n = len(train_loader)

    # Build the ViT model (binary classification head).
    model = CustomViTModel(num_classes=2, patch_size=patch_size,
                           num_layers=num_layers, hidden_size=hidden_size,
                           num_heads=num_heads, mlp_ratio=mlp_ratio,
                           drop_rate=dropout_rate)
    model.to(device)

    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # Log one sample batch and the model graph to TensorBoard.
    images, labels = next(iter(train_loader))
    img_grid = torchvision.utils.make_grid(images)
    writer.add_image('four_fashion_mnist_images', img_grid)
    writer.add_graph(model, images.to(device))

    global_correct = 0.0
    for e in range(epochs):
        print(f"Epoch {e + 1}\n-------------------------------")
        train(train_loader, model, loss_fn, optimizer, device, e, n)
        # Track the best accuracy returned by val(). The original ignored the
        # return, so global_correct stayed 0.0 and the function always
        # reported 0.0 to the hyperopt objective.
        best = val(val_loader, model, loss_fn, device, save_path, global_correct)
        if best is not None:  # tolerate a val() that returns nothing
            global_correct = best

    # NOTE(review): this closes the module-level writer after the first
    # hyperopt trial; later trials write to a closed writer -- confirm that
    # SummaryWriter reopens its file writer on the next add_* call.
    writer.close()

    return global_correct

# Hyperparameter search space for hyperopt's TPE sampler. Note that
# hp.choice reports the *index* of the chosen option, not the value itself.
space = {
    'lr': hp.choice('lr', [0.00001, 0.0001, 0.001, 0.1]),
    'batch_size': hp.choice('batch_size', [32, 64, 128, 256]),
    'patch_size': hp.choice('patch_size', [8, 16, 32]),
    'num_layers': hp.choice('num_layers', [6, 12, 24]),
    'hidden_size': hp.choice('hidden_size', [256, 512, 768, 1024]),
    'num_heads': hp.choice('num_heads', [8, 12, 16]),
    'mlp_ratio': hp.choice('mlp_ratio', [2, 3, 4]),
    'dropout_rate': hp.choice('dropout_rate',[0.1, 0.2, 0.3, 0.4, 0.5])
}

# Trials object that records the search history across evaluations.
trials = Trials()

def objective(params):
    """Hyperopt objective: run one full training with `params` and return the
    negated best validation accuracy (fmin minimizes, we want to maximize)."""
    return -main(**params)

# Run the TPE hyperparameter search; since `objective` returns negated
# accuracy, fmin effectively maximizes validation accuracy.
best = fmin(
    fn=objective,
    space=space,
    algo=tpe.suggest,
    max_evals=50,  # number of hyperparameter evaluations
    trials=trials
)

# Persist the best configuration. hp.choice entries are stored as the index
# into their options list, not the option value itself.
with open('parm.txt', 'w') as f:
    for key, value in best.items():
        f.write(f'{key}: {value}\n')

# Persist the full search history: one line per evaluation with its sampled
# parameter indices and its (negated-accuracy) loss.
with open('search_history.txt', 'w') as file:
    for i, result in enumerate(trials.results):
        # The sampled values live on the trial document itself;
        # trial_attachments() returns binary attachments and has no 'misc'
        # key, so the original line raised at runtime.
        params = trials.trials[i]['misc']['vals']
        file.write(f"Iteration {i + 1}: {params} - {result['loss']}\n")