# encoding: utf-8

from xml.etree.ElementTree import Element, SubElement, ElementTree
import pdb
import os,shutil
import csv
import pickle
import numpy as np
import cv2




# Directory holding the flattened image files to annotate.
imgPath = "./img/"
# Output directory for the VOC ImageSets split lists (train/val/test).
dataSplitPath = './ImageSets/Main/'
# Output directory for the generated VOC XML annotation files.
annotationsPath = "./Annotations/"
# Label CSV (DeepLesion DL_info.csv style: col 0 = image name, col 6 = bbox)
lablePath = "./lable/DL_info.csv"
# Root of the raw, nested source dataset to flatten via getImg()
imgDataSetPath = 'F:/dataset/key_slince_no_mark/'
# Fraction of all annotation files assigned to train+val (rest is test).
trainRatio = 0.8
# Fraction of the train+val portion assigned to validation.
trainValRatio = 0.4



# imgName -> bounding-box string "xmin,ymin,xmax,ymax", filled by loadLables().
lables = {}
# NOTE(review): shadows the builtin `list`; other functions reference this
# global by name, so it is documented rather than renamed here.
list = os.listdir(imgPath)




def loadLables():
    """Load bounding-box labels from the CSV into the global `lables` dict.

    Reads `lablePath` (assumed layout: column 0 = image file name,
    column 6 = "xmin,ymin,xmax,ymax" string — TODO confirm against
    DL_info.csv, the header row is stored too) and then pickles the dict
    to ./lables.npz.

    NOTE(review): the output is a pickle, not an .npz archive, despite
    the extension.
    """
    # `with` guarantees both handles are closed; the original leaked
    # the CSV file and the pickle file handle.
    with open(lablePath, 'r') as csv_file:
        for line in csv.reader(csv_file):
            # key: image file name, value: comma-separated bbox string
            lables[line[0]] = line[6]

    with open("./lables.npz", 'wb') as lb_store:
        pickle.dump(lables, lb_store)

def createXml():
    """Write one PASCAL-VOC style XML annotation file per labelled image.

    For every file name in the global image list that has an entry in
    `lables`, read the image to obtain its size, build a VOC annotation
    tree with a single object of class 'focus', and write it to
    `annotationsPath` as <stem>.xml.  Images whose bounding box contains
    any non-positive coordinate are deleted from `imgPath` and skipped.
    """
    for imgName in list:
        if imgName not in lables:
            continue

        img = cv2.imread(imgPath + imgName)
        # OpenCV ndarray shape is (rows, cols, channels) = (height, width, depth).
        # BUGFIX: the original assigned shape[0] to the width and shape[1]
        # to the height, swapping the two values in the XML output.
        ht = img.shape[0]
        wd = img.shape[1]

        coords = lables[imgName].split(',')
        xmin_coordinate = coords[0]
        ymin_coordinate = coords[1]
        xmax_coordinate = coords[2]
        ymax_coordinate = coords[3]
        # Discard (and delete) images whose box falls outside the image area.
        if any(float(c) <= 0 for c in
               (xmin_coordinate, ymin_coordinate, xmax_coordinate, ymax_coordinate)):
            print(xmin_coordinate,ymin_coordinate,xmax_coordinate,ymax_coordinate)
            print(imgName)
            os.remove(imgPath+imgName)
            print("----------remove------------")
            continue

        # Build the VOC annotation tree.
        root = Element('annotation')
        SubElement(root, 'folder').text = 'VOC2007'
        SubElement(root, 'filename').text = imgName
        # <source>
        source = SubElement(root, 'source')
        SubElement(source, 'database').text = 'The VOC2007 Database'
        SubElement(source, 'annotation').text = 'PASCAL VOC2007'
        SubElement(source, 'image').text = 'flickr'
        # <size>
        size_el = SubElement(root, 'size')
        SubElement(size_el, 'width').text = str(wd)
        SubElement(size_el, 'height').text = str(ht)
        SubElement(size_el, 'depth').text = '3'
        # <segmented>
        SubElement(root, 'segmented').text = '0'
        # <object> — single fixed class 'focus' (renamed local: the
        # original shadowed the builtin `object`).
        obj = SubElement(root, 'object')
        SubElement(obj, 'name').text = 'focus'
        SubElement(obj, 'pose').text = 'Unspecified'
        SubElement(obj, 'truncated').text = '0'
        SubElement(obj, 'difficult').text = '0'
        bndbox = SubElement(obj, 'bndbox')
        SubElement(bndbox, 'xmin').text = xmin_coordinate
        SubElement(bndbox, 'ymin').text = ymin_coordinate
        SubElement(bndbox, 'xmax').text = xmax_coordinate
        SubElement(bndbox, 'ymax').text = ymax_coordinate

        # Write the annotation next to the others, keyed by the file stem.
        ElementTree(root).write(annotationsPath + imgName.split('.')[0] + '.xml',
                                encoding='utf-8')



def splitDataSet():
    """Shuffle the annotation files and write the VOC ImageSets split lists.

    Produces train.txt, val.txt, test.txt and trainval.txt under
    `dataSplitPath`; each line is a file stem (name without extension).
    `trainRatio` of all files go to train+val; of those, `trainValRatio`
    go to validation and the remainder to train; everything left is test.
    """
    files = np.asarray(os.listdir(annotationsPath))
    np.random.shuffle(files)

    trainNum = int(len(files) * trainRatio)
    trainValNum = int(trainNum * trainValRatio)
    trainNum = trainNum - trainValNum

    trainData = files[:trainNum]
    valData = files[trainNum:trainNum + trainValNum]
    testData = files[trainNum + trainValNum:]

    _writeSplit(dataSplitPath + 'train.txt', trainData)
    _writeSplit(dataSplitPath + 'test.txt', testData)
    _writeSplit(dataSplitPath + 'val.txt', valData)
    _writeSplit(dataSplitPath + 'trainval.txt', np.r_[trainData, valData])


def _writeSplit(path, names):
    """Write one file stem per line of *names* to *path*."""
    # `with` guarantees the handle is closed even on error; the original
    # repeated four open/close pairs with no cleanup on failure.
    with open(path, 'w') as f:
        f.writelines(name.split('.')[0] + "\n" for name in names)

def copyfile(srcfile, dstfile):
    """Copy *srcfile* to *dstfile*, creating the destination directory
    tree first when it does not exist yet.

    Best-effort: a source that is not a regular file is silently
    ignored, no error is raised.
    """
    # Guard clause: nothing to do when the source is missing.
    if not os.path.isfile(srcfile):
        return
    dst_dir = os.path.split(dstfile)[0]
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    shutil.copyfile(srcfile, dstfile)


def getImg(max=1000000):
    """Flatten the nested dataset at `imgDataSetPath` into `imgPath`.

    Each image <dir>/<name> found one level below the dataset root is
    copied to imgPath as "<dir>_<name>", logging every copy, and the
    function stops once *max* images have been collected.

    NOTE(review): the parameter name shadows the builtin `max`; it is
    kept to preserve the call signature for existing callers.
    """
    imgs = []
    for imgDirName in os.listdir(imgDataSetPath):
        imgDir = imgDataSetPath + imgDirName + "/"
        # Skip stray files at the dataset root; only subdirectories
        # contain images.
        if not os.path.isdir(imgDir):
            continue
        for imgName in os.listdir(imgDir):
            src = imgDir + imgName
            dst = imgPath + imgDirName + "_" + imgName
            imgs.append(src)
            print(src, dst)
            copyfile(src, dst)
            if len(imgs) >= max:
                return


# Pipeline entry point — runs at import time (no __main__ guard):
# optionally flatten the raw dataset, then load labels, generate the
# VOC XML annotations, and write the train/val/test split lists.
#getImg(5000)
loadLables()
createXml()
splitDataSet()








