import argparse
import os
import os.path as osp
import sys
from functools import partial
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import mmcv
import numpy
import numpy as np
import torch
from mmcv import Config
from mmcv.runner import load_checkpoint

sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../'))

# from mmdet.cv_core import (Config, load_checkpoint, FeatureMapVis, show_tensor, imdenormalize, show_img, imwrite,traverse_file_paths)

from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.models import build_detector
from mmdet.datasets.builder import build_dataset
from mmdet.datasets.pipelines import Compose
from BboxToolkit.visualization.show import imshow_bboxes


def parse_args(argv=None):
    """Parse command-line arguments for the visualization script.

    Args:
        argv (list[str] | None): Argument list to parse; defaults to
            ``sys.argv[1:]`` when ``None`` (standard argparse behavior).

    Returns:
        argparse.Namespace: Parsed arguments (config, checkpoint, img_dir,
        show, output_dir).
    """

    def str2bool(value):
        # BUGFIX: the original used ``type=bool`` for --show, but argparse
        # applies ``bool()`` to the raw string, so even "--show False"
        # yielded True. Parse the text explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('--config', type=str,
                        default='configs/nwpuv2/oicr.py',
                        help='test config file path')
    parser.add_argument('--checkpoint', type=str,
                        default='/media/deep/Data/4_WSOD/work_dirs/oicr_vgg16_test/latest.pth',
                        help='checkpoint file')
    parser.add_argument('--img_dir', type=str,
                        default='../_DATASET/NWPUv2/JPEGImages',
                        help='show img dir')
    # Whether to show prediction results.
    parser.add_argument('--show', type=str2bool, default=True,
                        help='show results')
    # Directory where visualized images are saved.
    parser.add_argument(
        '--output_dir', default='./_vis_pp',
        help='directory where painted images will be saved')
    args = parser.parse_args(argv)
    return args


def forward(self, img, img_metas=None, return_loss=False, **kwargs):
    """Replacement forward pass that returns only backbone features.

    Intended to be monkey-patched onto a detector for feature-map
    visualization: the RPN / ROI heads are deliberately skipped and the
    raw output of ``extract_feat`` is returned instead.
    """
    # NOTE: head invocations are intentionally omitted here.
    return self.extract_feat(img)

def create_model(cfg, use_gpu=True, checkpoint=None):
    """Build the detector, load weights, and switch it to eval mode.

    Args:
        cfg (Config): mmcv config with ``model``/``train_cfg``/``test_cfg``.
        use_gpu (bool): Move the model to GPU when True.
        checkpoint (str | None): Checkpoint path. Falls back to the global
            ``args.checkpoint`` (set under ``__main__``) when None, which
            preserves the original behavior.

    Returns:
        torch.nn.Module: The detector ready for inference.
    """
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    if checkpoint is None:
        # Original implicit dependency on the module-level ``args`` global.
        checkpoint = args.checkpoint
    # BUGFIX: ``load_checkpoint`` was never imported (only a commented-out
    # import existed), so this raised NameError; it is now imported from
    # ``mmcv.runner`` at the top of the file.
    load_checkpoint(model, checkpoint, map_location='cpu')
    model.eval()
    if use_gpu:
        model = model.cuda()
    return model



def show_featuremap_from_datalayer():
    """Collect pairwise GT-box IoU statistics over the test dataset.

    For every image with more than one GT box, counts the box pairs whose
    IoU falls into the bins ``>0.5``, ``(0.1, 0.5)`` and ``[0, 0.1)``,
    appends a tab-separated line to ``text.txt`` and saves a pie chart of
    the three counts per image.
    """
    dataset = build_dataset(cfg.data.test)
    total_boxes = 0  # renamed from ``sum`` to avoid shadowing the builtin
    sum1 = 0  # running count of pairs with IoU > 0.5
    sum2 = 0  # running count of pairs with 0.1 < IoU < 0.5
    sum3 = 0  # running count of pairs with 0 <= IoU < 0.1
    # BUGFIX: the file handle was never closed; ``with`` guarantees it.
    with open('text.txt', mode='w') as file:
        for item in dataset:
            gt_labels = item['gt_labels']
            img_metas = item['img_metas'][0].data
            ori_filename = img_metas['ori_filename']
            gt_bboxes = item['gt_bboxes'][0]
            size = len(gt_bboxes)
            num1 = 0
            num2 = 0
            num3 = 0
            if size > 1:
                # Upper-triangular IoU matrix; lower triangle stays zero
                # and is never visited by the counting loops below.
                overlaps = np.zeros([size, size])
                for i in range(size):
                    for j in range(i + 1, size):
                        overlaps[i][j] = bbox_overlaps1(
                            torch.from_numpy(gt_bboxes[i]),
                            torch.from_numpy(gt_bboxes[j]))
                for i in range(size):
                    for j in range(i + 1, size):
                        if overlaps[i][j] > 0.5:
                            num1 += 1
                        if 0.1 < overlaps[i][j] < 0.5:
                            num2 += 1
                        if 0 <= overlaps[i][j] < 0.1:
                            num3 += 1
                file.writelines(ori_filename + '\t' + str(size) + '\t' +
                                str(num1) + '\t' + str(num2) + '\t' +
                                str(num3) + '\t' + str(gt_labels) + '\n')
                sum1 += num1
                sum2 += num2
                sum3 += num3
                total_boxes += size
                y = [num1, num2, num3]
                plt.title(str(y))
                plt.pie(y, labels=['>0.5', '0.1-0.5', '0-0.1'])
                path = '/media/deep/Data/4_WSOD/pie/' + ori_filename
                plt.savefig(path)
                plt.close()

def show_proposal():
    """Visualize GT boxes (red) and well-matched proposals (blue).

    For every test image containing a GT object with label 7, draws all GT
    boxes in red, then overlays in blue every proposal whose IoU with the
    matched GT box exceeds 0.5, writing the result under ``_vis_pp``.
    """
    dataset = build_dataset(cfg.data.test)
    for item in dataset:
        gt_labels = item['gt_labels'][0]
        proposal = item['proposals'][0]
        gt_boxes = item['gt_bboxes'][0]
        img_metas = item['img_metas'][0].data
        img_1 = img_metas['filename']
        out_file = "_vis_pp" + img_metas["ori_filename"][10:]
        matched = []  # proposals with IoU > 0.5 against a selected GT box
        for i, label in enumerate(gt_labels):
            # label 3 = bridge, 9 = vehicle, 7 = storage tank
            if label == 7:
                drawed_img = imshow_bboxes(img_1, gt_boxes, out_file=out_file,
                                           show=False, colors='red',
                                           with_text=False, thickness=2)
                for proposal_ in proposal:
                    overlaps = bbox_overlaps1(torch.from_numpy(gt_boxes[i]),
                                              torch.from_numpy(proposal_))
                    if overlaps > 0.5:
                        matched.append(proposal_)
                # BUGFIX: the original seeded ``boxes`` with a dummy
                # [0, 0, 0, 0] row (which then got drawn) and guarded with
                # ``len(boxes) != 4`` — wrong once the array is 2-D with
                # exactly 4 rows. Draw only the real matches instead.
                if matched:
                    imshow_bboxes(drawed_img, np.vstack(matched),
                                  out_file=out_file, show=False,
                                  colors='blue', with_text=False,
                                  thickness=1)


def show_proposal_txt():
    """Compute proposal-vs-GT IoUs over the test set.

    NOTE(review): the thresholding / writing logic is commented out in the
    original, so the loop currently only computes ``overlaps`` and discards
    it — this function is effectively a skeleton kept for later use.
    """
    dataset = build_dataset(cfg.data.test)
    # BUGFIX: the file handle was never closed; ``with`` guarantees it.
    with open('text.txt', mode='w') as file:
        for item in dataset:
            gt_labels = item['gt_labels'][0]
            proposal = item['proposals'][0]
            gt_boxes = item['gt_bboxes'][0]
            img_metas = item['img_metas'][0].data
            img_1 = img_metas['filename']
            for i, label in enumerate(gt_labels):  # typo fix: lable -> label
                # if label == 3:
                for proposal_ in proposal:
                    overlaps = bbox_overlaps1(
                        torch.from_numpy(gt_boxes[i]),
                        torch.from_numpy(proposal_))
                    # if overlaps > 0.8:


def bbox_overlaps1(bboxes1, bboxes2):
    """Compute the IoU between two single boxes in ``(x1, y1, x2, y2)`` form.

    Args:
        bboxes1 (Tensor): 1-D tensor of 4 coordinates.
        bboxes2 (Tensor): 1-D tensor of 4 coordinates.

    Returns:
        Tensor: 0-dim tensor holding the intersection-over-union value.
    """
    # Individual box areas (no +1 pixel convention).
    first_area = (bboxes1[2] - bboxes1[0]) * (bboxes1[3] - bboxes1[1])
    second_area = (bboxes2[2] - bboxes2[0]) * (bboxes2[3] - bboxes2[1])

    # Intersection rectangle: max of the top-left corners, min of the
    # bottom-right corners; clamp so disjoint boxes yield zero extent.
    top_left = torch.max(bboxes1[:2], bboxes2[:2])
    bottom_right = torch.min(bboxes1[2:], bboxes2[2:])
    extent = (bottom_right - top_left).clamp(min=0)
    intersection = extent[0] * extent[1]

    union = first_area + second_area - intersection
    return intersection / union

def concat():
    """Stitch same-named images from four directories into a 2x2 grid.

    Layout per output image: ``[img2 | img1]`` on the top row and
    ``[img3 | img4 resized to 400x400]`` on the bottom row. The grid is
    written to ``output`` under the original file name. Files are
    enumerated from ``dir_path2``; matching names are assumed to exist in
    the other directories.
    """
    # dir_path1 = '/media/deep/Data/test/0.3-ST'
    # dir_path2 = '/media/deep/Data/test/0.5-ST'
    # dir_path3 = '/media/deep/Data/test/0.8-ST'
    dir_path1 = '/media/deep/Data/4_WSOD/work_dirs/oicr_vgg16_test/ss_/0.5-GTF'
    dir_path2 = '/media/deep/Data/4_WSOD/work_dirs/oicr_vgg16_test/ss_/0.8-GTF'
    dir_path3 = '/media/deep/Data/4_WSOD/work_dirs/oicr_vgg16_test/pre/JPEGImages'
    dir_path4 = '/media/deep/Data/4_WSOD/work_dirs/oicr_vgg16_test/cam + GT'
    output = '/media/deep/Data/4_WSOD/work_dirs/oicr_vgg16_test/ss+pre+cam/GTF'
    for name in os.listdir(dir_path2):
        tiles = [mmcv.imread(d + '/' + name)
                 for d in (dir_path1, dir_path2, dir_path3, dir_path4)]
        # The fourth source has a different resolution; normalize it.
        tiles[3] = mmcv.imresize(tiles[3], (400, 400))
        top_row = np.concatenate((tiles[1], tiles[0]), axis=1)
        bottom_row = np.concatenate((tiles[2], tiles[3]), axis=1)
        grid = np.concatenate((top_row, bottom_row), axis=0)
        cv2.imwrite(output + '/' + name, grid)



if __name__ == '__main__':
    # ``args`` and ``cfg`` are deliberately created at module level here:
    # the helper functions above (create_model, show_proposal, ...) read
    # them as globals rather than taking them as parameters.
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Uncomment the desired entry point below.
#     show_featuremap_from_datalayer()
    show_proposal()
#     concat()