from PIL import Image
import os
import time
import json
from glob import glob
from tqdm import tqdm
import datetime
import Augmentor
import fire

# Load the shared runtime configuration (source/target paths, last build
# time) from config.json at import time; `config` is used module-wide and
# is written back by integration().
with open('config.json', 'r', encoding='utf-8') as f:
    config = json.load(f)


# Automatically copy sampled files into the training folders.
def CopySamplingToTrain():
    """Top-level pipeline entry point.

    Merges newly sampled data into the training directory, mirrors document
    samples into the recall system, feeds ID-card crops to the deskew
    trainer, pads/balances sample counts, and logs the total elapsed time.
    Reads `targetPath`/`trainPath` from the module-level `config`.
    """
    # Source of the sampled data (per-day sub-folders).
    targetPath = config['targetPath']
    # Destination training directory.
    trainPath = config['trainPath']
    startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Merge the sampled data into the training directory.
    integration(targetPath)
    # Copy Document-folder files into the recall system's matching '1' folders.
    Document2Recall(trainPath)
    # Copy ID-card post-crop originals into the deskew training folder.
    sfzqpq2Preprocessing(trainPath)
    # Pad document data up to 1000 samples per class.
    DocumentExtendedData(trainPath)
    # Balance training data to a 1:1 ratio.
    extendedData(trainPath)
    endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # BUGFIX: getTimeDiff(a, b) computes a - b and returns 0 whenever a <= b,
    # so the original call with (startTime, endTime) always logged 0 seconds.
    # Pass (endTime, startTime) to get the actual elapsed time.
    info = '开始时间：{};结束时间：{};总共用时：{}'.format(
        startTime, endTime, getTimeDiff(endTime, startTime))
    print(info)


# Merge the sampled data into the training directory.
def integration(targetPath):
    """Merge all day folders under *targetPath* into config['trainPath'].

    Processes each day-named sub-directory whose name lies between the last
    recorded build date (config['TrainBuildTime']) and today, copies every
    dataset category into the training tree, then persists the updated
    TrainBuildTime marker back to config.json.
    """
    # Last completed merge date (YYYYMMDD string). BUGFIX: an absent key
    # returned None and `None <= filename` raised TypeError; '' means
    # "merge everything".
    trainBuildTime = config.get('TrainBuildTime') or ''
    # Destination training directory.
    trainPath = config['trainPath']
    nowtime = time.strftime('%Y%m%d', time.localtime(time.time()))
    # Dataset categories that share the generic <category>/<label>/<files>
    # layout. BUGFIX: the original literals ('AreaDetect\datasets\...')
    # relied on invalid escape sequences (\d, \D, \T, ...) which are a
    # SyntaxWarning and slated to become errors; os.path.join builds the
    # same Windows paths safely.
    categoryDirs = [
        # Area tagging: documents.
        os.path.join('AreaDetect', 'datasets', 'DocumentClassifier', 'datasets'),
        # Area tagging: layouts.
        os.path.join('AreaDetect', 'datasets', 'DLayoutClassifier', 'datasets'),
        # Document-type samples.
        os.path.join('DocumentClassifier', 'datasets'),
        # Layout-type samples.
        os.path.join('LayoutClassifier', 'datasets'),
        # Deskew (preprocessing) samples.
        'Preprocessing',
        # Image-type samples.
        os.path.join('TypeClassifier', 'datasets'),
    ]
    # BUGFIX: sorted() guarantees TrainBuildTime ends on the latest
    # processed day; os.listdir order is arbitrary. Also honor the
    # `targetPath` parameter instead of silently re-reading it from config
    # (the original overwrote the argument, making it dead).
    for filename in sorted(os.listdir(targetPath)):
        fp = os.path.join(targetPath, filename)
        if trainBuildTime <= filename <= nowtime and os.path.isdir(fp):
            for rel in categoryDirs:
                copy_files(os.path.join(fp, rel), fp, trainPath)
            # Recall datasets have an extra <name>/0 level; handled separately.
            recall_copy_files(os.path.join(fp, 'Recall', 'datasets'), fp, trainPath)
            config['TrainBuildTime'] = filename

    # Persist the updated build marker (the `with` block already closes the
    # file; the original's explicit close() was redundant).
    with open('config.json', 'w', encoding='utf-8') as outfile:
        outfile.write(json.dumps(config, sort_keys=False, indent=2,
                                 separators=(',', ':'), ensure_ascii=False))

def recall_copy_files(RecallDatasets, sourcePath, targetPath):
    """Copy recall-system samples into the training tree.

    For every category under *RecallDatasets*, copies the files in its '0'
    sub-folder to the same relative location under *targetPath* (the
    *sourcePath* prefix of each file is swapped for *targetPath*).
    No-op when *RecallDatasets* does not exist.
    """
    if not os.path.exists(RecallDatasets):
        return
    for filename_ in os.listdir(RecallDatasets):
        # Each recall category keeps its sampled files under a '0' sub-folder.
        fp_ = os.path.join(RecallDatasets, filename_, '0')
        datasetsFiles = glob(os.path.join(fp_, '*'))
        # BUGFIX: the original desc string had no '{}' placeholder, so
        # .format(fp_) never showed the folder being synced.
        for src in tqdm(datasetsFiles, desc='召回数据同步-->>{}'.format(fp_)):
            copy_file(src, src.replace(sourcePath, targetPath))

def copy_files(datasets, sourcePath, targetPath):
    """Copy every file of every label folder under *datasets* into the
    training tree, swapping the *sourcePath* prefix for *targetPath*.

    No-op when *datasets* does not exist.
    """
    if not os.path.exists(datasets):
        return
    for filename_ in os.listdir(datasets):
        fp_ = os.path.join(datasets, filename_)
        datasetsFiles = glob(os.path.join(fp_, '*'))
        # Iterate the files directly instead of range(len(...)) indexing.
        for src in tqdm(datasetsFiles, desc='训练数据同步-->{}'.format(fp_)):
            copy_file(src, src.replace(sourcePath, targetPath))

def copy_file(src, target):
    """Copy one image from *src* to *target*, re-encoding through PIL.

    Creates the target directory if needed; skips the copy when *target*
    already exists or *src* is not a regular file.
    """
    # BUGFIX: the original derived the parent directory with
    # target.replace(filename, ''), which corrupts the path whenever the
    # file name also occurs earlier in it; os.path.dirname is correct.
    targetDir = os.path.dirname(target)
    if targetDir:
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(targetDir, exist_ok=True)
    if not os.path.exists(target) and os.path.isfile(src):
        # Context manager closes the image file (the original leaked the
        # handle). Note: saving via PIL re-encodes the image rather than
        # byte-copying it — preserved from the original behavior.
        with Image.open(src) as image:
            image.save(target)


def DocumentExtendedData(trainPath):
    """Pad every document-classifier category up to 1000 samples via
    augmentation (categories that are empty are left untouched)."""
    pattern = os.path.join(trainPath, 'DocumentClassifier\\datasets\\*')
    for path in tqdm(glob(pattern), desc='证件扩展-->>{}'.format(pattern)):
        # BUGFIX: the original called glob(os.path.join(path)), which returns
        # the directory itself (a 1-element list), not the files inside it —
        # so every non-empty class was padded by exactly 999 regardless of
        # its real size. Glob the directory's contents instead.
        fileList = glob(os.path.join(path, '*'))
        if 0 < len(fileList) < 1000:
            dataAugmentor(path, path, 1000 - len(fileList))

# Automatically copy files from the Document folder into the matching
# recall-system category's '1' folder.
def Document2Recall(trainPath):
    """Mirror each document-classifier class's files into the recall
    system's same-named category, under its '1' (positive) folder.

    Classes without a matching recall category directory are skipped.
    """
    for path in tqdm(glob(os.path.join(trainPath, 'DocumentClassifier\\datasets\\*'))):
        filename = os.path.split(path)[-1]
        fp = os.path.join(trainPath, 'Recall\\datasets\\{}'.format(filename))
        if os.path.exists(path) and os.path.isdir(fp):
            files = glob(os.path.join(path, '*'))
            # BUGFIX: the original desc had no '{}' placeholder (and
            # misspelled "Document"), so the source path was never shown.
            for file in tqdm(files, desc='Document同步到召回系统-->{}'.format(path)):
                if os.path.isfile(file):
                    copyFile(file, os.path.join(fp, '1'))

# Automatically copy post-crop ID-card originals into the deskew
# (Preprocessing) training folder.
def sfzqpq2Preprocessing(trainPath):
    """Copy every file from the ID-card post-crop originals folder into
    `Preprocessing\\datasets` under *trainPath*."""
    # Hoist the (long) glob pattern and the fixed destination out of the loop.
    pattern = os.path.join(
        trainPath, 'AreaDetect\\datasets\\DocumentClassifier\\datasets\\身份证切片后-原图\\*')
    destination = os.path.join(trainPath, 'Preprocessing\\datasets')
    # Iterate the matches directly instead of range(len(...)) indexing.
    for sfzqphpath in tqdm(glob(pattern), desc='身份证切片->纠偏训练：{}'.format(pattern)):
        copyFile(sfzqphpath, destination)

# Recursively walk a folder tree.
def search(src, parentPath):
    """Recursively mirror every file under *src* into config['trainPath'],
    preserving the directory layout relative to *parentPath*."""
    for path in tqdm(glob(os.path.join(src, '*'))):
        if os.path.isfile(path):
            # Absolute directory of the file. (The original appended
            # os.sep + "." before abspath — equivalent, just noisier.)
            father_path = os.path.abspath(os.path.dirname(path))
            newpath = father_path.replace(parentPath, config['trainPath'])
            copyFile(path, newpath)
        elif os.path.isdir(path):
            search(path, parentPath)

# Copy a single file.
def copyFile(src, target):
    """Copy image *src* into directory *target* (re-encoded through PIL),
    keeping its file name; skips files that already exist there."""
    filename = os.path.split(src)[-1]
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(target, exist_ok=True)
    path2 = os.path.join(target, filename)
    if not os.path.exists(path2):
        # BUGFIX: context manager closes the image file handle — the
        # original Image.open(...) leaked it.
        with Image.open(src) as image:
            image.save(path2)


# Extend the recall-system data.
def extendedData(src):
    """Balance every recall category under *src*: grow its '0'/'1' folders
    to at least 3000 samples each and toward a 1:1 positive/negative ratio
    via augmentation."""
    pathList = glob(os.path.join(src, 'Recall\\datasets\\*'))
    desc = '扩展召回系统数据==>>{}'.format(os.path.join(src, 'Recall\\datasets'))
    for path in tqdm(pathList, desc=desc):
        path0 = os.path.join(path, '0')
        path1 = os.path.join(path, '1')
        # Robustness: skip malformed categories instead of crashing on
        # os.listdir of a missing '0'/'1' folder.
        if not (os.path.isdir(path0) and os.path.isdir(path1)):
            continue
        len0 = len(os.listdir(path0))
        len1 = len(os.listdir(path1))
        # Minimum sample count per label.
        minPageSize = 3000
        # BUGFIX: the original used `return` here, aborting ALL remaining
        # categories as soon as one empty category was encountered;
        # `continue` only skips the empty one.
        if len0 == len1 == 0:
            continue
        # Both labels below 3000: pad each up to 3000.
        if len0 < minPageSize and len1 < minPageSize:
            dataAugmentor(path1, path1, minPageSize - len1)
            dataAugmentor(path0, path0, minPageSize - len0)
        # Negatives fewer than positives and positives above 3000: pad negatives.
        if len0 < len1 and len1 > minPageSize:
            dataAugmentor(path0, path0, len1 - len0)
        # Negatives more than positives and negatives above 3000: pad positives.
        if len0 > len1 and len0 > minPageSize:
            dataAugmentor(path1, path1, len0 - len1)

# Generate additional samples for a folder of images.
def dataAugmentor(source_dir, output_dir='output', sample_num=None):
    """Write *sample_num* augmented images from *source_dir* into
    *output_dir* (random rotation, zoom, and erasing); when *sample_num*
    is falsy, generate twice the number of source images."""
    pipeline = Augmentor.Pipeline(source_dir, output_directory=output_dir)

    # Light geometric + occlusion augmentations.
    pipeline.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
    pipeline.zoom(probability=0.5, min_factor=0.8, max_factor=1.2)
    pipeline.random_erasing(probability=0.5, rectangle_area=0.15)

    if not sample_num:
        sample_num = 2 * len(pipeline.augmentor_images)
    pipeline.sample(sample_num)
    print('\tsample end!')


# Time a minus time b: returns the difference in seconds. Arguments are
# time strings, e.g. "2017-03-30 16:54:01".
def getTimeDiff(timeStra, timeStrb):
    """Return int seconds of (timeStra - timeStrb), or 0 when a <= b.

    Both arguments must be "%Y-%m-%d %H:%M:%S" strings (the ISO-like layout
    makes the string comparison below equivalent to a chronological one).
    """
    if timeStra <= timeStrb:
        return 0
    # datetime.strptime parses directly — no need to round-trip through a
    # struct_time tuple as the original did.
    dataTimea = datetime.datetime.strptime(timeStra, "%Y-%m-%d %H:%M:%S")
    dataTimeb = datetime.datetime.strptime(timeStrb, "%Y-%m-%d %H:%M:%S")
    # BUGFIX: the original used timedelta.seconds, which silently drops the
    # days component (a 2-day run reported 0s); total_seconds() is correct.
    return int((dataTimea - dataTimeb).total_seconds())


# Expose CopySamplingToTrain as a command-line entry point via python-fire
# when the module is run as a script.
if __name__ == '__main__':
    fire.Fire(CopySamplingToTrain)
