import os
from datetime import datetime
from os.path import join

import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from DAP.data.dataset import MyDataset
from DAP.models.confg_model import config_model
from common_utils.Evaluator import Evaluator
from common_utils.utils import load_args
from DAP.flags import parser

# Select the compute device once at import time: GPU if available, else CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Extend the shared DAP parser with the evaluation phase flag
# (default 'test'; help text is Chinese for "training phase").
parser.add_argument('--phase', default='test', help='训练阶段')
def main():
    """Evaluate a trained checkpoint on the test split.

    Parses CLI args (merged with the YAML/config via ``load_args``), builds the
    test dataset/loader, restores model weights from ``cv_dir/name/checkpoint``,
    runs inference under ``torch.no_grad()`` and reports attr/label metrics via
    :class:`Evaluator` and :func:`printMetric`. The mean test loss per epoch is
    logged to TensorBoard.
    """
    print(os.getcwd())
    args = parser.parse_args()
    load_args(args.config, args)

    # Timestamped log dir, e.g. <cv_dir>/<name>/tensorboard/test/2024-01-01T00-00-00/
    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    logpath = os.path.join(args.cv_dir, args.name, 'tensorboard/' + args.phase + "/" + TIMESTAMP)
    os.makedirs(logpath, exist_ok=True)

    testSet = MyDataset(data_dir=join(args.data_root, args.data_dir), dataset=args.dataset,
                        phase=args.phase, model=args.image_extractor)
    # Evaluation should be deterministic; shuffling adds nothing to aggregate metrics.
    testLoader = DataLoader(testSet, batch_size=args.batch_size, shuffle=False)
    writer = SummaryWriter(log_dir=logpath, flush_secs=30)

    model, optimizer = config_model(args=args, dataset=testSet)
    # Restore weights first, then switch to eval mode (explicit, conventional order).
    modelPath = os.path.join(args.cv_dir, args.name)
    checkpoint = torch.load(join(modelPath, "checkpoint"), map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    evaluator = Evaluator(len(testSet.classes))
    for epoch in tqdm(range(0, args.epochs + 1), desc="Current epoch"):
        test_loss = 0.0
        # Pure inference: disable autograd to save memory and time.
        with torch.no_grad():
            for idx, data in tqdm(enumerate(testLoader), total=len(testLoader),
                                  desc=f'Testing epoch {epoch}', position=0, leave=True):
                imgs, labels, attrs = [d.to(device) for d in data]
                loss, attr_preds, label_preds = model(imgs=imgs, labels=labels, attrs=attrs)
                test_loss += loss.item()
                evaluator.evaluate_predictions(attr_preds=attr_preds.detach(),
                                               label_preds=label_preds.detach(),
                                               attrs=testSet.attrs, labels=labels)
        # Mean loss over the epoch's batches (guard against an empty loader).
        writer.add_scalar("loss/test_total", test_loss / max(len(testLoader), 1), epoch)
        metric_attr, metric_label = evaluator.compute()
        printMetric(metric=metric_attr, phase='test', metric_type='attr')
        printMetric(metric=metric_label, phase='test', metric_type='label')
        evaluator.reset()

def printMetric(metric, phase, metric_type):
    """Print acc/precision/recall for one metric dict; no-op when it is None.

    NOTE(review): unpacking assumes the dict's insertion order is
    (acc, prec, rec) — confirm against Evaluator.compute().
    """
    if metric is None:
        return
    scores = tuple(metric.values())
    acc, prec, rec = scores
    print(f"{phase} {metric_type}  Accuracy on all data: {acc}, {prec}, {rec}")

# Script entry point: run evaluation when executed directly.
if __name__ == '__main__':
    main()
