from torch.utils.data import DataLoader
import torch.optim as optim
from torch.optim import SGD,Adam,lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from utils import *
import torch
from cls_dataset import *
from torch.utils.data import DataLoader
from lib.utils.label_smooth import LabelSmoothingCrossEntropy
import argparse
import os
import shutil
import time
from torchvision import models
from pthflops import count_ops
from torchsummary import summary
from loss import *
from datetime import datetime
from tools.common_tools import show_confMat
from lib.utils.resnext_29 import ResNeXt, ResNeXt29_2x64d
from torch.utils.data import WeightedRandomSampler
# Best top-1 validation accuracy seen so far; updated globally by validate().
best_pred = 0.0

# Project root, resolved from the current working directory at import time.
BASE_DIR = os.path.abspath('.')
print(BASE_DIR)


class MyModel(nn.Module):
    """ResNet-18 backbone with the ImageNet head replaced by a
    ``cls_num``-way linear classifier.

    Args:
        cls_num: number of output classes for the final FC layer.
    """

    def __init__(self, cls_num):
        super(MyModel, self).__init__()

        self.cls_num = cls_num

        model = models.resnet18()
        # Replace the 1000-way ImageNet head with a cls_num-way classifier.
        # (The output size was previously hard-coded to 7; using cls_num
        # keeps the default behavior — main() passes cls_num=7 — while
        # generalizing to other class counts.)
        model.fc = torch.nn.Sequential(
            # torch.nn.Dropout(),
            torch.nn.Linear(512, cls_num)
        )
        self.base_model = model

    def forward(self, input, p_id):
        """Run the backbone on ``input`` and return class logits.

        NOTE(review): ``p_id`` is accepted for signature compatibility with
        existing callers but is ignored — the original code overwrote it
        with the backbone output before any use.
        """
        return self.base_model(input)

timestr = time.strftime('%m%d%H%M')

def main():
    """Evaluate a trained disc-classification checkpoint on the validation
    split and write a confusion matrix into a timestamped results directory.

    Side effects: creates ``results/<timestamp>/`` under BASE_DIR and saves
    the confusion-matrix plot there via show_confMat.
    """
    cls_num = 7
    now_time = datetime.now()
    time_str = datetime.strftime(now_time, '%m-%d_%H-%M')
    log_dir = os.path.join(BASE_DIR, "results", time_str)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(log_dir, exist_ok=True)
    class_names = ('verbose_v1', 'verbose_v2', 'disc_v1', 'disc_v2', 'disc_v3', 'disc_v4', 'disc_v5')

    val_json_path = "/home/san/mnt/GitHubClone/lumbar_project/data/DatasetA/train/lumbar_train51/lumbar_train51_annotation.json"
    val_data_root = "/home/san/mnt/GitHubClone/lumbar_project/data/DatasetA/train/lumbar_train51/train"

    test_dataset = DiscClsDataset(val_json_path, val_data_root, mode="test", split=0.75, transform=test_transforms)
    test_dataloader = DataLoader(test_dataset, batch_size=16, drop_last=True)

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"
        print("cuda")

    model = MyModel(cls_num=cls_num)
    model.to(device)

    model_path = "/home/san/mnt/GitHubClone/lumbar_project/codes/cls_estimation/trained_models/best_checkpoint.pth.tar"
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine;
    # without it torch.load raises when CUDA tensors can't be deserialized.
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])

    criterion = LabelSmoothingCrossEntropy()

    # (The original tracked best_acc1/is_best here but never used them.)
    mat_valid, acc1 = validate(test_dataloader, model, criterion)

    show_confMat(mat_valid, class_names, "valid", log_dir, verbose=True)

def validate(val_loader, model, criterion):
    """Run a full evaluation pass over ``val_loader``.

    Returns:
        (conf_mat, avg_acc1): a (7, 7) numpy confusion matrix
        (rows = ground truth, cols = prediction) and the average
        top-1 accuracy over the loader.

    Side effect: updates the module-level ``best_pred`` with the best
    accuracy observed so far.
    """
    batch_time = AverageMeter("Time", ":6.3f")
    losses = AverageMeter("Loss", ":.4e")
    top1 = AverageMeter("Acc@1", ":6.2f")
    rec1 = AverageMeter("Rec@1", ":6.2f")
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, rec1],
        prefix="Test: ")

    global best_pred
    conf_mat = np.zeros((7, 7))

    # Evaluation mode: disables dropout / fixes batch-norm statistics.
    model.eval()
    with torch.no_grad():
        tic = time.time()
        for step, (inputs, target, p_id) in enumerate(val_loader):

            if torch.cuda.is_available():
                inputs = inputs.cuda().float()
                target = target.cuda().long()
                p_id = p_id.cuda().float()

            output = model(inputs, p_id)
            loss = criterion(output, target)

            acc1 = accuracy(output, target)
            batch_size = inputs.size(0)
            losses.update(loss.item(), batch_size)
            top1.update(acc1.item(), batch_size)

            batch_time.update(time.time() - tic)
            tic = time.time()

            predicted = output.argmax(dim=1)

            # Accumulate the confusion matrix one sample at a time.
            for true_lbl, pred_lbl in zip(target, predicted):
                conf_mat[int(true_lbl), int(pred_lbl)] += 1.

            if step % 5 == 0:
                progress.display(step)

        print(" * Acc@1 {top1.avg:.3f}"
              .format(top1=top1))

    best_pred = max(top1.avg, best_pred)
    return conf_mat, top1.avg

class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count average."""

    def __init__(self, name, fmt=":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record a new value ``val`` observed over ``n`` samples."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=":6.2f": "Acc@1  87.50 ( 85.00)"
        template = "{name} {val%s} ({avg%s})" % (self.fmt, self.fmt)
        return template.format(**self.__dict__)


class ProgressMeter(object):
    """Formats a batch counter plus a list of AverageMeters as one
    tab-separated progress line printed to stdout."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the progress line for the given batch index."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print("\t".join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Right-align the batch index to the width of the total count,
        # e.g. num_batches=100 -> "[{:3d}/100]".
        width = len(str(num_batches))
        fmt = "{:%dd}" % width
        return "[" + fmt + "/" + fmt.format(num_batches) + "]"


def recall(output, target):
    """Binary recall in percent, treating class 1 as the positive class.

    ``output`` is a (batch, classes) logit tensor; ``target`` holds the
    ground-truth class indices. A tiny epsilon in the denominator avoids
    division by zero when there are no positives.
    """
    predicted = output.max(1)[1]
    positives = target == 1
    true_pos = (positives & (predicted == 1)).sum().item()
    false_neg = (positives & (predicted == 0)).sum().item()
    return true_pos / (true_pos + false_neg + 0.0000001) * 100.0



def accuracy(output, target, topk=(1,)):
    """Computes the accuracy (in percent) over the k top predictions for
    the specified values of k.

    The original body computed ``maxk`` but then only ever measured top-1,
    silently ignoring the ``topk`` argument; this version honors it.

    Args:
        output: (batch, classes) score/logit tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        A single scalar tensor when ``topk`` has one entry (backward
        compatible with existing callers, e.g. validate()), otherwise a
        list of scalar tensors, one per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # pred: (maxk, batch) — k-th row holds each sample's k-th guess.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = [correct[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size)
               for k in topk]
        return res[0] if len(topk) == 1 else res

# Script entry point: run the evaluation only when executed directly.
if __name__ == '__main__':
    main()