import pandas as pd
import numpy as np
import os
from os import listdir, getcwd
from os.path import join
import random
from shutil import copyfile
import cv2
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
from glob import glob  # glob returns a list of all file paths matching a pattern string (the pattern may be an absolute or a relative path)
import yaml
import os
from shutil import copyfile
import time

# Dataset layout (VOC-style folder names holding YOLO-format .txt labels).
IMG_DIR='JPEGImages'
LB_DIR='Annotations'
PATH = os.getcwd()
IMG_Path = os.path.join(PATH, f"{IMG_DIR}/")  # image folder; the trailing "/" is required by the string-concat path building below
LB_Path = os.path.join(PATH, f"{LB_DIR}/")   # label (.txt) folder; the trailing "/" is required as well
CSV_Path = os.path.join(PATH, 'img_label.csv')
# Timestamped name for the generated YOLOv5 data YAML.
# NOTE(review): the ":" from "%H:%M" is invalid in Windows file names -- confirm the target OS is POSIX.
YAML_NAME='data '+time.strftime("%Y-%m-%d %H:%M", time.localtime())

Selected_fold=1  # which of the 5 stratified folds becomes the validation set (1-based)
# CLASSES=['cattle_profile','cattle_face','cattle_head']
YOLO_DIR_NAME='yolov5'
YOLO_PATH=os.path.join(os.getcwd(),YOLO_DIR_NAME)+'/'

def mkDir(img_path, lb_path):
    """Create ``train``/``valid`` sub-directories under the image and label folders.

    Directories that already exist are left untouched (``exist_ok=True``).
    """
    for base_dir in (img_path, lb_path):
        for subset in ("train", "valid"):
            os.makedirs(os.path.join(base_dir, subset), exist_ok=True)

def getCsv(img_path, lb_path):
    """Scan YOLO-format .txt label files and build a bounding-box DataFrame.

    Every line of every label file under ``lb_path`` becomes one row holding
    the matching image file name plus the raw YOLO fields (class id and the
    normalized x/y/w/h values, all kept as strings). ``classes.txt`` and
    sub-directories are skipped, as are labels with no matching image.

    Args:
        img_path: directory containing the images (trailing separator optional).
        lb_path: directory containing the YOLO .txt label files.

    Returns:
        pandas.DataFrame with columns ['image','label','x','y','w','h'],
        duplicates dropped and the index reset.
    """
    columns = ['image', 'label', 'x', 'y', 'w', 'h']

    # Build a stem -> image-file-name map once (first match wins, like the
    # original first-hit scan) instead of rescanning the image directory for
    # every label file (was O(labels * images)).
    stem_to_image = {}
    for file_name in os.listdir(img_path):
        stem_to_image.setdefault(file_name.split('.')[0], file_name)

    rows = []
    files = os.listdir(lb_path)
    with tqdm(total=len(files), colour='green', leave=False) as pbar:
        for name in files:
            pbar.update(1)
            if name == 'classes.txt' or os.path.isdir(os.path.join(lb_path, name)):
                continue
            img_file = stem_to_image.get(name[0:-4])  # strip ".txt"
            if img_file is None:
                continue  # label has no matching image

            # 'with' guarantees the handle is closed (the original leaked it).
            with open(os.path.join(lb_path, name)) as txt_file:
                for line in txt_file:
                    fields = line.split()  # robust to trailing spaces/newlines
                    if not fields:
                        continue  # ignore blank lines
                    rows.append([img_file] + fields)

    # Build the DataFrame once: repeated pd.concat inside the loop was O(n^2).
    df = pd.DataFrame(rows, columns=columns)
    return df.drop_duplicates().reset_index(drop=True)

def div_TrainData(delete_src=False):
    """Split labelled images into train/valid sets and emit a YOLOv5 data YAML.

    Reads the CSV produced by ``getCsv`` (one row per bounding box), assigns
    each unique image to the train or valid split with a stratified 5-fold
    scheme (module-level ``Selected_fold`` picks which fold becomes the
    validation set), copies images together with their YOLO .txt labels into
    ``train``/``valid`` sub-folders of the image directory, writes
    ``train.txt``/``valid.txt`` path lists, and dumps the YOLOv5 data YAML.

    Args:
        delete_src: when True, remove the original image and label files
            after copying them into their split folder.

    Returns:
        pandas.DataFrame with one row per image plus a ``split`` column.
    """
    train_df = pd.read_csv(CSV_Path, index_col=0)
    print('Number of ground truth bounding boxes: ', len(train_df))

    category_name_to_id = {label: i for i, label in enumerate(train_df.label.unique())}
    print('Classes_id: ', category_name_to_id)

    # One row per image; the first bounding-box label of each image is used
    # for stratification (assumes it is representative -- TODO confirm).
    train_meta = train_df.drop_duplicates(subset=['image'])
    train_meta.reset_index(inplace=True)
    train_meta["split"] = "train"

    sfolder = StratifiedKFold(n_splits=5, random_state=42, shuffle=True)
    X = train_meta[['image']]
    y = train_meta[['label']]

    # Mark the selected fold's rows as validation; stop once found instead of
    # exhausting the remaining folds (same result, less work).
    for fold_no, (_, valid_idx) in enumerate(sfolder.split(X, y), start=1):
        if fold_no == Selected_fold:
            train_meta.loc[valid_idx, "split"] = "valid"
            break

    print("Dataset Size:", len(train_meta),
          "Training Images:", len(train_meta[train_meta.split == 'train']),
          "Validation Images:", len(train_meta[train_meta.split == 'valid']))

    mkDir(IMG_Path, LB_Path)

    for row in tqdm(train_meta.itertuples(index=False), total=len(train_meta), leave=False):
        stem = row.image.split('.')[0]  # hoisted: was recomputed up to 3x per row
        dest = 'train' if row.split == 'train' else 'valid'
        # NOTE(review): labels are copied next to the images, not into
        # LB_Path/{train,valid} -- YOLOv5 falls back to the image directory
        # when the path has no 'images' component, so this layout appears
        # intentional; confirm before relocating.
        copyfile(f'{IMG_Path}{row.image}', f'{IMG_Path}/{dest}/{row.image}')
        copyfile(f'{LB_Path}{stem}.txt', f'{IMG_Path}/{dest}/{stem}.txt')
        if delete_src:
            os.remove(f'{IMG_Path}{row.image}')
            os.remove(f'{LB_Path}{stem}.txt')

    print(PATH)
    with open(f'{PATH}/train.txt', 'w') as f:
        f.writelines(path + '\n' for path in glob(f'{IMG_Path}train/*'))

    with open(f'{PATH}/valid.txt', 'w') as f:
        f.writelines(path + '\n' for path in glob(f'{IMG_Path}valid/*'))

    data = dict(
        train=f'{IMG_Path}train',
        val=f'{IMG_Path}valid',

        nc=3,  # number of classes (kept hard-coded, matching the list below)
        # Fix: emit a real YAML list. The original dumped the Python-repr
        # *string* "['cattle_profile',...]", which YOLOv5 cannot parse as a
        # names list.
        names=['cattle_profile', 'cattle_face', 'cattle_head'],
    )

    with open(f'{YOLO_PATH}data/' + YAML_NAME + '.yaml', 'w') as outfile:
        yaml.dump(data, outfile, default_flow_style=False)

    return train_meta


if __name__ == "__main__":
    # Pipeline: ensure split folders exist, index all labels into a CSV,
    # then perform the stratified train/valid split (removing the originals).
    mkDir(IMG_Path, LB_Path)
    label_df = getCsv(IMG_Path, LB_Path)
    label_df.to_csv(CSV_Path, encoding="utf-8")
    train_meta = div_TrainData(delete_src=True)
