import datetime
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import torch
import torch.utils.data
from torch import nn
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import FoodLoader
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from engine import train_one_epoch
import numpy as np
import utils
import transforms as T
from PIL import Image,ImageDraw,ImageFont
import itertools
import glob,time
from torchvision.models.detection.rpn import AnchorGenerator
import itertools
from lxml import etree as ET
import shutil
import tqdm

def get_transform(train):
    """Return the image preprocessing pipeline.

    Always converts PIL images to tensors; when *train* is truthy, a random
    horizontal flip (p=0.5) is appended for data augmentation.
    """
    pipeline = [T.ToTensor()]
    if train:
        pipeline.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(pipeline)


def generate_object_bndbox(xmin, ymin, xmax, ymax):
    """Build a Pascal-VOC ``<object>`` element for one bounding box.

    The class ``name`` is hard-coded to "0" (single foreground class) and
    ``pose``/``truncated``/``difficult`` get the usual VOC defaults.
    Coordinates are stringified with ``str`` as-is.

    Args:
        xmin, ymin, xmax, ymax: box corner coordinates.

    Returns:
        The new ``<object>`` Element, ready to be appended to an
        annotation's root.
    """
    # Renamed from `object`, which shadowed the builtin of the same name.
    obj = ET.Element("object")

    ET.SubElement(obj, "name").text = "0"
    ET.SubElement(obj, "pose").text = "Unspecified"
    ET.SubElement(obj, "truncated").text = "0"
    ET.SubElement(obj, "difficult").text = "0"

    bndbox = ET.SubElement(obj, "bndbox")
    ET.SubElement(bndbox, "xmin").text = str(xmin)
    ET.SubElement(bndbox, "ymin").text = str(ymin)
    ET.SubElement(bndbox, "xmax").text = str(xmax)
    ET.SubElement(bndbox, "ymax").text = str(ymax)

    return obj

#pretty print method
def indent(elem, level=0):
    """Recursively insert newline/indent whitespace so serialized XML is
    human-readable.

    Fix: the original mixed one-space units for children (``level * " "``,
    ``i + " "``) with two-space units for closing tags
    (``(level - 1) * "  "``), producing mismatched indentation in the
    output; both now use two spaces per level.

    Only whitespace-empty ``text``/``tail`` fields are touched, so real
    element content is never clobbered.

    Returns *elem*, allowing chaining such as ``ET.ElementTree(indent(root))``.
    """
    i = "\n" + level * "  "        # indentation for this element's children
    j = "\n" + (level - 1) * "  "  # indentation for the enclosing close tag
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for subelem in elem:
            indent(subelem, level + 1)
        # Dedent after the children so the closing tag lines up one level out.
        if not elem.tail or not elem.tail.strip():
            elem.tail = j
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
    return elem


def main(args) -> None:
    """Auto-label a folder of images with a trained Faster R-CNN detector.

    For every ``*.jpg`` matched by the hard-coded ``files`` glob below:
      * the image is copied into ``generate_annotation_path/JPEGImages``;
      * a Pascal-VOC XML annotation (one <object> per detection with
        score > 0.9) is written into ``generate_annotation_path/Annotations``.

    Requires a VOC template ``sample.xml`` in the working directory and the
    model checkpoint at the hard-coded path below.

    Args:
        args: argparse.Namespace built in ``__main__``; this function reads
            ``args.device``, ``args.model`` and ``args.pretrained``.
    """
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    # dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True), args.data_path)
    # dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False), args.data_path)
    # Inference-time transform: ToTensor only, no flip augmentation.
    transform = get_transform(False)
    # dataset_test = CharacterLoader.CharacterDataset(data_dir="/NetDisk/SSD/SSD-研发部/项目工作目录/OCR项目/拍摄的数据集/邦纳提供图片整理/小图/**/*.jpg",
    #                                                 split="test",
    #                                                 transforms=transform)
    num_classes = 2  # background + 1 foreground class (matches name "0" in generate_object_bndbox)

    print("Creating model")

    # Anchor configuration for the resnet backbone.
    anchor_sizes = ((48,), (96,), (128,), (256,))
    aspect_ratios = ((0.5, 1., 2.),) * len(anchor_sizes)  # must be floats

    # for mobile net
    # anchor_sizes = ((16 ,32, 48, 64))
    # aspect_ratios = ((1., 2.),) * len(anchor_sizes)
    min_size = 416
    max_size = 1200
    rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
    model = torchvision.models.detection.__dict__[args.model](num_classes=num_classes,
                                                              backbone_name="resnet18",
                                                              pretrained=args.pretrained,
                                                              ## remaining **kwargs
                                                              min_size=min_size, max_size=max_size,
                                                              image_mean=(0.485, 0.456, 0.406),
                                                              image_std=(0.229, 0.224, 0.225),

                                                              # RPN parameters
                                                              rpn_anchor_generator=rpn_anchor_generator, rpn_head=None,
                                                              rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                                                              rpn_post_nms_top_n_train=2000,
                                                              rpn_post_nms_top_n_test=1000,
                                                              rpn_nms_thresh=0.7,
                                                              rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
                                                              rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,

                                                              # Box parameters
                                                              box_roi_pool=None, box_head=None, box_predictor=None,
                                                              box_score_thresh=0.05, box_nms_thresh=0.5,
                                                              box_detections_per_img=100,
                                                              box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
                                                              box_batch_size_per_image=512, box_positive_fraction=0.25,
                                                              bbox_reg_weights=None
                                                              )
    model.to(device)
    model.eval()  # inference mode: forward returns detections, not losses

    # NOTE(review): checkpoint path is hard-coded; presumably the 'model' key
    # matches how the training script saved the state dict — TODO confirm.
    checkpoint = torch.load("model/2020-04-09-11-33-17/fasterrcnn_resnet50_fpn_model_2.pth")
    model.load_state_dict(checkpoint['model'])
    # NOTE(review): `colors` is defined but never used in the loop below.
    colors = itertools.cycle(["red", "green", "blue", "yellow", "cyan", "gold", "purple", "violet", "pink"])

    ## save path
    xml_save_path = "generate_annotation_path/Annotations"
    img_save_path = "generate_annotation_path/JPEGImages"
    os.makedirs(xml_save_path, exist_ok=True)
    os.makedirs(img_save_path,exist_ok=True)

    with torch.no_grad():
        # for path in glob.glob("/media/retoo/RetooDisk/wanghui/Data/Food/北雅中学/20200118-test/*.jpg"):
        # files =  glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/単机菜品识别/菜品识别图片/检测端/原始数据_只可拷贝/湖南日报现场数据0321-0327图片汇总/一次性餐盒/*.jpg",recursive=True)
        # files =  glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/小碗菜/泽西城小碗1217/*.jpg",recursive=True)
        # files =  glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/20200111_第29批_泽西城门店43010132_1224-0110测试汇总/*.jpg", recursive=True)
        # files = glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/大铁锅/*.jpg",recursive=True)
        # files = glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/20191223_第28批_九峰小区门店43010146_1217-1223测试汇总/*.jpg",recursive=True)
        files = glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/単机菜品识别/菜品识别图片/检测端/原始数据_只可拷贝/湖南日报现场数据/湖南日报现场数据0410/*.jpg")
        for path in tqdm.tqdm(files, total=len(files)):
            # copy image into the JPEGImages output directory
            shutil.copy2(path, os.path.join(img_save_path, os.path.basename(path)))

            ### parse the template annotation (`root` is an ElementTree, not an Element)
            root = ET.parse("sample.xml")

            ## fill in the per-image metadata fields of the template
            root.find("folder").text = os.path.dirname(path)
            root.find("filename").text = os.path.basename(path)
            root.find("path").text = path

            ## load the image and build a batched tensor on the target device
            src = Image.open(path).convert("RGB")
            data, _ = transform(src, None)
            data = data.unsqueeze(0).to(device)

            ## forward
            start = time.time()
            # In eval mode the model returns a list of per-image detection
            # dicts; the name "loss_dict" is a leftover from training code.
            loss_dict = model(data)
            end = time.time()
            # NOTE(review): the elapsed time (end - start) is never reported.

            det = loss_dict[0]
            boxes = det["boxes"].cpu().numpy()
            labels = det["labels"].cpu().numpy()
            scores = det["scores"].cpu().numpy()
            # NOTE(review): save_image/draw are created but nothing is ever
            # drawn onto or saved from them — apparently dead code.
            save_image = src.copy()
            draw = ImageDraw.Draw(save_image)
            for idx in range(boxes.shape[0]):
                xmin, ymin, xmax, ymax = boxes[idx]
                score = scores[idx]
                label = labels[idx]

                if score > 0.9:
                    ### append one <object> per confident detection
                    object = generate_object_bndbox(int(xmin), int(ymin), int(xmax), int(ymax))
                    root.getroot().append(object)

            # pretty_print is an lxml-specific keyword (stdlib ElementTree lacks it).
            root.write(xml_save_path + "/%s" % (os.path.basename(path).replace(".jpg", ".xml")),
                       pretty_print=True,xml_declaration=True)

            ### re-read the file and re-indent for the final formatting pass
            tree = ET.parse(xml_save_path + "/%s" % (os.path.basename(path).replace(".jpg", ".xml")))
            root = tree.getroot()

            tree = ET.ElementTree(indent(root))
            tree.write(xml_save_path + "/%s" % (os.path.basename(path).replace(".jpg", ".xml")),
                       xml_declaration=True,encoding='utf-8')

            del draw, src, save_image


if __name__ == '__main__':
    import argparse

    # Command-line interface. NOTE(review): many of these flags are training
    # options not read by main() — presumably inherited from the training
    # script this annotation generator was copied from; verify before pruning.
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('--data-path', default='', help='dataset')
    parser.add_argument('--dataset', default='pascal_voc', help='dataset')
    parser.add_argument('--model', default='fasterrcnn_resnet50_fpn',
                        help='model, choice of [fasterrcnn_mobilenetv2_fpn, fasterrcnn_resnet50_fpn]')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=2, type=int,
                        help='images per gpu, the total batch size is $NGPU x batch_size')
    parser.add_argument('--epochs', default=10, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--lr', default=0.02, type=float,
                        help='initial learning rate, 0.02 is the default value for training '
                             'on 8 gpus and 2 images_per_gpu')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='.', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--aspect-ratio-group-factor', default=-1, type=int)
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    # If --pretrained is not given (False), only the backbone's pre-trained
    # weights are loaded; otherwise the pre-trained weights for the whole
    # faster_rcnn_resnet50_fpn detector are loaded.
    parser.add_argument(
        "--pretrained",
        default=False,
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    args = parser.parse_args()

    # Create the output directory before running (utils.mkdir is a project helper).
    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
