import argparse
import json
import math
import os

import cv2
import numpy as np
from numpy import matrix
from scipy import ndimage


from tqdm import tqdm



from xml.dom.minidom import parse
import xml.dom.minidom
import xml.etree.ElementTree as ET


# Command-line interface: paths to the Darknet model files, the images to
# label, and the output directory for the generated VOC XML annotations.
parser = argparse.ArgumentParser("yolo_class")
parser.add_argument('--weights', type=str, default='/home/hetao/darknet-master/backup/person_last.weights', help="yolov3 weights")
parser.add_argument('--namefile', type=str, default='/home/hetao/darknet-master/cfg/person.names', help="class file")
parser.add_argument('--cfgfile', type=str, default='/home/hetao/darknet-master/cfg/person.cfg', help="yolov3 config file")
parser.add_argument('--imgpath', type=str, default='/home/hetao/darknet-master/person/JPEGImages', help="img path file")
parser.add_argument('--savepath', type=str, default='./test', help="savepath")
# BUG FIX: type=list split a single argument string into characters
# (--classes person -> ['p','e','r','s','o','n']). nargs='+' collects whole
# words instead; the default value is unchanged.
parser.add_argument('--classes', nargs='+', type=str, default=['person', 'people'], help='required classes')
args = parser.parse_args()


# Create the output directory (including any missing parents) if needed;
# os.mkdir would fail when the parent directory does not exist.
if not os.path.exists(args.savepath):
    os.makedirs(args.savepath)
print("xml files will be saved in: ", args.savepath)
def get_output_layers(net):
    """Return the names of the network's unconnected (output) layers.

    Handles both OpenCV APIs: versions before 4.5.4 return the indices as
    nested one-element arrays (e.g. [[200], [267]]), while newer versions
    return a flat array (e.g. [200, 267]).  np.ravel normalizes both shapes.
    Layer indices are 1-based, hence the ``- 1``.
    """
    layer_names = net.getLayerNames()

    return [layer_names[int(np.ravel(i)[0]) - 1]
            for i in net.getUnconnectedOutLayers()]


def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
    """Draw one detection onto *img* in place: a class-colored rectangle
    from (x, y) to (x_plus_w, y_plus_h) with a "label confidence" caption
    just above the box."""
    name = str(classes[class_id])
    box_color = COLORS[class_id]

    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), box_color, 2)
    caption = name + " " + str(round(confidence, 3))
    cv2.putText(img, caption, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2)
    # OpenCV drawing operations on the image

# Pixel scale factor for blobFromImage (approximately 1/255).
scale = 0.00392

# Class names, one per line in the names file.
with open(args.namefile, 'r') as f:
    classes = [line.strip() for line in f]

# One random BGR color per class, used when drawing predictions.
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

# Load the Darknet model (trained weights + network config).
net = cv2.dnn.readNet(args.weights, args.cfgfile)



# Template node layout used below:
#   object -> name
#   bndbox -> xmin, ymin, xmax, ymax

def createnode(name, points, template='./2.xml'):
    """Build a Pascal-VOC <object> element for one detection.

    Parameters
    ----------
    name : str
        Class label written into the <name> tag.
    points : sequence
        [x, y, w, h] box in pixels; (x, y) is the top-left corner.
    template : str
        Path to a template XML file containing one <object> with a <name>
        and a <bndbox> holding xmin/ymin/xmax/ymax text nodes
        (defaults to the original hard-coded './2.xml').

    Returns
    -------
    xml.dom.minidom.Element
        The populated <object> element.
    """
    dom = xml.dom.minidom.parse(template)
    # Renamed from `object` to avoid shadowing the builtin.
    obj = dom.documentElement.getElementsByTagName('object')[0]
    obj.getElementsByTagName('name')[0].childNodes[0].data = name
    bndbox = obj.getElementsByTagName('bndbox')[0]
    # BUG FIX: minidom's writexml requires text-node data to be str --
    # assigning raw numbers crashes serialization later. VOC also expects
    # integer pixel coordinates, while the detector produces floats
    # (x = cx - w/2), so round before stringifying.
    x, y, w, h = points
    bndbox.getElementsByTagName('xmin')[0].childNodes[0].data = str(int(round(x)))
    bndbox.getElementsByTagName('ymin')[0].childNodes[0].data = str(int(round(y)))
    bndbox.getElementsByTagName('xmax')[0].childNodes[0].data = str(int(round(x + w)))
    bndbox.getElementsByTagName('ymax')[0].childNodes[0].data = str(int(round(y + h)))

    return obj






def _xml(name, results, img_shape):
    """Write one Pascal-VOC annotation file for an image.

    Parameters
    ----------
    name : str
        Image filename (e.g. 'foo.jpg'); output is <savepath>/foo.xml.
    results : list
        [class_name, [x, y, w, h]] entries, one per kept detection.
    img_shape : sequence
        [width, height] of the image in pixels.
    """
    # Load the header template (hard-coded './1.xml') and fill in metadata.
    dom = xml.dom.minidom.parse('./1.xml')
    root = dom.documentElement
    root.getElementsByTagName('filename')[0].childNodes[0].data = name
    root.getElementsByTagName('path')[0].childNodes[0].data = os.path.join("./", name)
    size = root.getElementsByTagName('size')[0]
    # BUG FIX: minidom's writexml requires text-node data to be str;
    # assigning raw ints crashed serialization below.
    size.getElementsByTagName("width")[0].childNodes[0].data = str(img_shape[0])
    size.getElementsByTagName("height")[0].childNodes[0].data = str(img_shape[1])

    # One <object> node per detection.
    for class_name, points in results:
        root.appendChild(createnode(class_name, points))

    # os.path.splitext keeps filenames containing extra dots intact,
    # unlike name.split(".")[0].
    out_path = os.path.join(args.savepath, os.path.splitext(name)[0] + '.xml')
    with open(out_path, 'w', encoding='utf-8') as f:
        dom.writexml(f, encoding='utf-8')







def __img__(path):
    """Run the detector over every file in *path* and write one VOC XML
    annotation per image into args.savepath.

    Only detections whose class name is listed in args.classes and whose
    confidence exceeds 0.5 are kept.
    """
    samples = [[os.path.join(path, fname), fname] for fname in os.listdir(path)]
    for full_path, fname in tqdm(samples):
        image = cv2.imread(full_path)
        # BUG FIX: cv2.imread returns None for unreadable / non-image files;
        # skip them instead of crashing on .shape.
        if image is None:
            continue
        height, width = image.shape[:2]
        blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(get_output_layers(net))

        results = []
        for out in outs:
            for detection in out:
                # detection = [cx, cy, w, h, objectness, class scores...]
                scores = detection[5:]
                class_id = int(np.argmax(scores))
                confidence = scores[class_id]
                # Keep only requested classes above the confidence threshold.
                if classes[class_id] in args.classes and confidence > 0.5:
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # Integer top-left corner (VOC expects pixel coords).
                    x = int(detection[0] * width) - w // 2
                    y = int(detection[1] * height) - h // 2
                    results.append([classes[class_id], [x, y, w, h]])

        _xml(fname, results, [width, height])


if __name__ == "__main__":
    # Only run the batch labeling when executed as a script.
    __img__(args.imgpath)
