import os
import sys

import cv2
import yaml
import torch
from torch.utils.data.dataloader import DataLoader
from progressbar import *
import pandas as pd

from WSOL.model import build_detector
from WSOL.dataset import build_dataset, build_preprocess_op
from WSOL.log import build_image_saver, build_logger
from WSOL.utils import Meter, get_predict_metrices


def test(cfg):
    """Evaluate a WSOL detector checkpoint on the test split.

    Runs a localization pass and a classification pass over the test set,
    accumulates GT-known localization accuracy, top-1 localization and top-1
    classification accuracy, optionally saves visualization images, and can
    auto-search the best CAM threshold.

    Args:
        cfg: parsed YAML config dict with 'Exp', 'Model', 'Dataset' and
            'Test_cfg' sections.
    """
    exp_cfg = cfg['Exp']
    model_cfg = cfg['Model']
    dataset_cfg = cfg['Dataset']
    test_cfg = cfg['Test_cfg']

    ''' MODEL INIT '''
    check_point_path = test_cfg['check_point']
    model = build_detector(model_cfg).cuda()
    # NOTE(review): this dumps the freshly built (pre-checkpoint) weights to
    # 'sal_model.pth' — looks like leftover debug code; confirm before removing.
    torch.save(model.state_dict(), 'sal_model.pth')
    # Partial load: keep only checkpoint entries whose keys exist in the current
    # model, so checkpoints with extra/renamed heads still load what matches.
    # map_location='cpu' avoids failures when the checkpoint was saved on a
    # different GPU index; load_state_dict copies onto the model's device anyway.
    model_dict = model.state_dict()
    check_point_dict = torch.load(check_point_path, map_location='cpu')
    check_point_dict = {k: v for k, v in check_point_dict.items() if k in model_dict}
    model_dict.update(check_point_dict)
    model.load_state_dict(model_dict)
    model.eval()

    ''' LOGGER INIT '''
    log_cfg = test_cfg['log']
    logger = build_logger({**log_cfg['logger'], **exp_cfg})
    logger.log([{'Exp': exp_cfg, 'Model': model_cfg, 'Dataset': dataset_cfg, 'Test': test_cfg}])

    ''' TEST SET INIT '''
    # Two dataset instances over the same split: one preprocessed for
    # localization, one for classification (e.g. ten-crop), iterated in lockstep.
    dataset_cfg['mode'] = 'test'
    loc_dataset = build_dataset(dataset_cfg)
    cls_dataset = build_dataset(dataset_cfg)

    loc_data_pipe_cfgs = test_cfg['loc_data_pipe']
    cls_data_pipe_cfgs = test_cfg['cls_data_pipe']
    batch_size = test_cfg['batch_size']
    thres = [test_cfg['thres']]
    auto_search_thres = test_cfg['auto_search_thres']

    loc_dataset.set_data_pipe([build_preprocess_op(c) for c in loc_data_pipe_cfgs])
    loc_data_loader = DataLoader(dataset=loc_dataset, batch_size=batch_size, shuffle=False, drop_last=False)

    cls_dataset.set_data_pipe([build_preprocess_op(c) for c in cls_data_pipe_cfgs])
    cls_data_loader = DataLoader(dataset=cls_dataset, batch_size=batch_size, shuffle=False, drop_last=False)

    ''' SETTING FOR VISUALIZATION '''
    save_img_cfgs = test_cfg['save_img']
    if_save_img = save_img_cfgs['if_save_img']
    img_dir = save_img_cfgs['img_dir']
    savers = [build_image_saver(c) for c in save_img_cfgs['img_saver_cfgs']]

    ''' Test '''
    if auto_search_thres:
        # Sweep thresholds 0.01 .. 0.99; metrics are tracked per threshold.
        thres = [0.01 * t for t in range(1, 100)]
        logger.log('Search on {}'.format(thres))
    else:
        logger.log('Test threshold: {}'.format(thres[0]))

    sample_count = 0
    gt_loc_acc = [Meter() for _ in range(len(thres))]
    top1_loc = [Meter() for _ in range(len(thres))]
    top1_cls = [Meter() for _ in range(len(thres))]

    widgets = ['testing: ', Percentage(), ' ', Bar('|'), ' ', Timer(),
               ' ', ETA(), ' ']
    pbar = ProgressBar(widgets=widgets, maxval=len(loc_data_loader)).start()
    for idx, (loc_data, cls_data) in enumerate(zip(loc_data_loader, cls_data_loader)):
        # Localization forward pass: one predicted bbox set per threshold.
        input_data, annotation, size, name = loc_data
        input_data = input_data.cuda()
        cls_label = annotation[:, 0].cuda().long()
        bbox_label = annotation[:, 1:].numpy()
        _, predict_bbox_list, addition_data = model(feature=input_data, label=cls_label, mode="loc", thres=thres)

        # Classification forward pass; crops are flattened into the batch dim
        # and the scores averaged back over them.
        input_cls, _, _, name = cls_data
        input_cls = input_cls.view(-1, input_data.size(1), input_cls.size(2), input_cls.size(3)).cuda()
        predict_cls = model(feature=input_cls, mode="cls").unsqueeze(0).mean(1)

        # Holds the metrics of the last threshold, reused for visualization.
        metric_dict_bk = None

        for bbox_idx, predict_bbox in enumerate(predict_bbox_list):
            metric_dict = get_predict_metrices(sizes=size, input_size=input_data.shape[-2:],
                                               predict_bboxes=predict_bbox,
                                               bbox_labels=bbox_label,
                                               predict_clses=predict_cls.cpu().detach().numpy(),
                                               cls_labels=cls_label.cpu().detach().numpy(),
                                               addition_data=addition_data)
            metric_dict_bk = metric_dict
            gt_loc_acc[bbox_idx].update(metric_dict["gt_loc_acc"])
            top1_loc[bbox_idx].update(metric_dict["top1_loc"])
            top1_cls[bbox_idx].update(metric_dict["top1_cls"])

        if if_save_img and not auto_search_thres:
            img = cv2.imread(os.path.join(img_dir, name[0]))
            for saver in savers:
                tmp_img = saver.process(img.copy(), metric_dict_bk)
                saver.save(os.path.join(exp_cfg['save_dir'], exp_cfg['exp_name'],
                                        check_point_path.split('/')[-1].replace('.pth', '')), name, tmp_img)

        sample_count += cls_label.shape[0]
        pbar.update(sample_count)
    pbar.finish()

    if auto_search_thres:
        # Greedy search: walk thresholds in increasing order and stop at the
        # first drop in gt_loc_acc (assumes a unimodal accuracy curve).
        best_thres_id = 0
        best_acc = -1
        search_end = False
        for idx, t in enumerate(thres):
            logger.log('threshold:{} || gt_loc_acc:{} || top1_loc:{} || top1_cls:{}'.format
                       (thres[idx], round(gt_loc_acc[idx].avg, 5), round(top1_loc[idx].avg, 5),
                        round(top1_cls[idx].avg, 5)))
            if gt_loc_acc[idx].avg >= best_acc:
                best_acc = gt_loc_acc[idx].avg
                logger.log('{} better than {}'.format(t, thres[best_thres_id]))
                best_thres_id = idx
            else:
                logger.log('{} better than {}'.format(thres[best_thres_id], t))
                search_end = True
                break
        if search_end:
            logger.log('Best threshold:{} || gt_loc_acc:{} || top1_loc:{} || top1_cls:{}'.format
                       (thres[best_thres_id], round(gt_loc_acc[best_thres_id].avg, 5),
                        round(top1_loc[best_thres_id].avg, 5), round(top1_cls[best_thres_id].avg, 5)))
        else:
            logger.log("Didn't find best ")
    else:
        logger.log('threshold:{} || gt_loc_acc:{} || top1_loc:{} || top1_cls:{}'.format
                   (thres[0], round(gt_loc_acc[0].avg, 5), round(top1_loc[0].avg, 5), round(top1_cls[0].avg, 5)))


if __name__ == '__main__':
    # Usage: python <script>.py <gpu_id>
    torch.cuda.set_device(int(sys.argv[1]))
    config = "./configs/cub/sal_sep_cam_detector_cub.yaml"
    print('Config file: {}'.format(config))
    # Context manager guarantees the config file handle is closed even if
    # parsing raises (the original open/close pair leaked on error).
    with open(config, 'r', encoding='utf-8') as f:
        cfg = yaml.safe_load(f)
    test(cfg)
