'''
Split a dataset of images into train / test / validation ("vail") subsets
and record the assignment in a CSV file.

NOTE: kept from the original author's notes — a related data-loader option:

parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                    help='number of data loading workers (default: 16)')
'''
import os
import pandas as pds
import argparse
import xml.etree.ElementTree as Etree
import math
import random


# ---------------------------------------------------------------------------
# Command-line configuration: dataset location and split fractions.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description="split dataset")
parser.add_argument("--root", default="/media/gis/data/jupyterlabhub/gitcode/hrx/dataset/hastrack", type=str, help="the root dir of the dataset")
parser.add_argument("--labeldir", default="biaoqian", type=str, help="the dir name of label")
parser.add_argument("--imagedir", default="images", type=str, help="the dir name of image")
parser.add_argument("--train", default=0.7, type=float, help="the number of train is divided by all images")
parser.add_argument("--test", default=0.1, type=float, help="the number of test is divided by all images")
parser.add_argument("--vail", default=0.2, type=float, help="the number of vail is divided by all images")

args = parser.parse_args()  # resolve CLI options

rootdir = args.root
anndir = args.labeldir      # NOTE(review): assigned but not used below — kept for compatibility
imgdir = args.imagedir

# Begin splitting: collect the candidate image files.
splitcache = []             # NOTE(review): unused below — kept for compatibility
imglist = os.listdir(os.path.join(rootdir, imgdir))

csvname = "train_test_vail.csv"
csvpath = os.path.join(rootdir, csvname)

result={"id":[],"annotation":[],"images":[],"sig":[],"class":[]}
i=1
# 构建训练csv
for imgname in imglist:
    imgpath=os.path.join(rootdir,imgdir,imgname)
    if not os.path.exists(os.path.join(rootdir,imgdir,imgname)):
        print("图片没有找到：{}".format(os.path.join(rootdir,imgdir,imgname)))
        continue
    result["id"].append(i)
    result["annotation"].append(".")
    result["images"].append(imgpath)
    sig=random.random()
    if sig<args.train:
        result["sig"].append("train")
    elif sig<args.train+args.test:
        result["sig"].append("test")
    elif sig<args.train+args.test+args.vail:
        result["sig"].append("vail")
    # 图片分类
    clsid=int(imgname.split(".")[0].split("_")[1])
    if clsid==0:
        result["class"].append(0)
    elif clsid==1:
        result["class"].append(1)
    i=i+1
for keys in result.keys():
    print("{}:{}".format(keys,len(result[keys])))

result = pds.DataFrame(result)
result.to_csv(csvpath,encoding="utf-8")
