import os
import pandas as pd

import random

from code.utils.utils import getRootPath

# Write the competition image labels and paths into a single txt index file.
def get_train_txt(imgPath='', txtPath='../train.txt', annosPath=r'../annos.csv'):
    """Build the training index file for the competition data.

    Each output line has the form
    ``<folder_id>;<label>;<image_a_path>;<image_b_path>``
    where label is 0 for "same person" and 1 for "different person"
    (inverted relative to the CSV to match the paper's convention).

    Args:
        imgPath: data folder path relative to the project root; must be
            non-empty (the original code left ``dataPath`` unbound and
            crashed with UnboundLocalError when it was empty).
        txtPath: destination txt file, overwritten on each call.
        annosPath: CSV with ``id`` and ``label`` columns, one row per folder.

    Raises:
        ValueError: if ``imgPath`` is empty.
    """
    if not imgPath:
        raise ValueError("imgPath must be a non-empty project-relative path")
    df = pd.read_csv(annosPath)                 # read the label file
    dataPath = getRootPath() + imgPath          # project root + relative path
    # sub-folders are named with integer ids; sort them numerically
    dataset = sorted(os.listdir(dataPath), key=int)
    absDataPath = os.path.abspath(dataPath)     # hoisted out of the loop
    with open(txtPath, 'w') as f:
        for file_id, file_name in enumerate(dataset):
            photos_path = os.path.join(dataPath, file_name)
            if not os.path.isdir(photos_path):
                continue
            photos_name = os.listdir(photos_path)     # image names in the pair folder
            if len(photos_name) < 2:
                continue                        # need two images to form a pair
            # look up whether this folder's two images are the same person
            label = df[df['id'] == int(file_name)]['label']
            # flip the label to the paper's convention: 0 = same, 1 = different
            revers_label = 0 if label.item() == 1 else 1
            f.write("{};{};{};{}\n".format(
                    file_id,
                    revers_label,
                    os.path.join(absDataPath, file_name, photos_name[0]),
                    os.path.join(absDataPath, file_name, photos_name[1])
                    ))

# Write the LFW index file: one "class_id;image_path" line per image.
def get_lfw_txt(imgPath, txtPath='../init_data/lfw.txt'):
    """Index every LFW image under the project-relative folder ``imgPath``.

    Each line of ``txtPath`` is ``<class_id>;<image_path>`` where class_id is
    the enumeration index of the person's sub-folder.

    Args:
        imgPath: LFW root folder, relative to the project root; must be
            non-empty (the original code left ``dataPath`` unbound and
            crashed with UnboundLocalError when it was empty).
        txtPath: destination txt file, overwritten on each call.

    Raises:
        ValueError: if ``imgPath`` is empty.
    """
    if not imgPath:
        raise ValueError("imgPath must be a non-empty project-relative path")
    dataPath = getRootPath() + imgPath          # project root + relative path
    dataset = os.listdir(dataPath)              # one sub-folder per person
    with open(txtPath, 'w') as f:
        for people_class, folder in enumerate(dataset):
            # every image inside this person's folder shares the class id
            for img_info in get_info_from_dir(os.path.join(dataPath, folder)):
                f.write("{};{}\n".format(people_class, img_info))

# Collect the full paths of every file in one folder (helper for LFW indexing).
def get_info_from_dir(dirPath):
    """Return the full path of every entry directly inside ``dirPath``."""
    base = os.path.normpath(dirPath)            # normalize the folder path
    return [os.path.join(base, entry) for entry in os.listdir(base)]

# Build LFW pair samples: ~7000 same-person pairs and ~7000 different-person pairs.
def get_flw_pair_txt(readtxt=r'../init_data/lfw.txt', loadtxt=r'../init_data/lfw_pair.txt'):
    """Generate positive/negative pairs from the LFW index file.

    Reads ``readtxt`` (lines ``<class_id>;<image_path>``) and appends pair
    lines ``<label>;<path_a>;<path_b>`` to ``loadtxt`` — label 0 for same
    person, label 1 for different people, with a budget of 7000 pairs each.

    NOTE(review): ``loadtxt`` is opened in append mode, so repeated runs
    accumulate duplicate pairs — confirm that is intended.
    """
    img_dic = dict()                            # class id -> list of image paths
    with open(readtxt, 'r') as f:
        for line in f:
            if not line.strip():
                continue                        # tolerate blank lines
            class_str, img_path = line.split(';', 1)
            img_dic.setdefault(int(class_str), []).append(img_path.strip())
    length = len(img_dic)                       # number of classes

    # Same-person pairs (label 0).
    count = 7000
    with open(loadtxt, 'a') as f:
        for i in range(length - 1):
            candidates = get_same_pair(img_dic[i])
            chosen = []
            take = 0
            if candidates:
                # take about half of a small pool, but at most 4 per person
                take = len(candidates) // 2 + 1 if len(candidates) < 7 else 4
                chosen = random.sample(candidates, take)
            for sample in chosen:
                f.write("{};{};{}\n".format(0, sample[0], sample[1]))
            count -= take
            if count <= 0:
                break

    # Different-person pairs (label 1).
    # Fix: the budget is reset here unconditionally (the original only reset
    # it when the loop above broke early), and the random upper bound is
    # length - 1 (the original used length, an out-of-range dict key that
    # made get_diff_pair raise KeyError).
    count = 7000
    with open(loadtxt, 'a') as f:
        for i in range(length - 1):
            for _ in range(2):
                pair = get_diff_pair(img_dic, i, random.randint(i, length - 1))
                f.write("{};{};{}\n".format(1, pair[0], pair[1]))
            count -= 2
            if count <= 0:
                break

# Helper for lfw_pair: enumerate every same-person image pair.
def get_same_pair(imglis):
    """Return all unordered pairs ``[imglis[a], imglis[b]]`` with ``a < b``."""
    n = len(imglis)
    return [[imglis[a], imglis[b]]
            for a in range(n - 1)
            for b in range(a + 1, n)]

# Helper for lfw_pair: build one different-person pair.
def get_diff_pair(imgdic, start, end):
    """Pair one image of class ``start`` with one image of another class.

    A partner class is drawn uniformly from ``(start, end]`` (clamped to the
    valid key range). Fixes two defects of the original: the partner could be
    ``start`` itself, silently producing a same-person pair labeled as
    "different", and an out-of-range ``end`` (callers passed ``len(imgdic)``)
    raised KeyError.

    Args:
        imgdic: dict mapping consecutive int class ids 0..len-1 to lists of
            image paths (assumed from how get_flw_pair_txt builds it).
        start: anchor class id.
        end: inclusive upper bound for the partner class id.

    Returns:
        ``[anchor_path, partner_path]``. Degenerate case: when no class other
        than ``start`` lies in range, both images come from ``start``.
    """
    last = min(end, len(imgdic) - 1)            # clamp out-of-range upper bounds
    # prefer any class other than the anchor; fall back to start if none exists
    candidates = [k for k in range(start, last + 1) if k != start] or [start]
    chose = random.choice(candidates)
    anchor = random.choice(imgdic[start])
    partner = random.choice(imgdic[chose])
    return [anchor, partner]


# CASIA-WebFace: build the training index file.
def get_casia_txt(datasets_path="../init_data/toUser/train",
                  txtPath='../init_data/casia_train.txt'):
    """Index the CASIA-WebFace training set.

    Writes one ``<class_id>;<absolute_image_path>`` line per image, where
    class_id is the enumeration index of the (sorted) person folder.

    Args:
        datasets_path: root folder of the training set, one sub-folder per
            person (defaults to the original hard-coded path).
        txtPath: destination txt file, overwritten on each call.
    """
    people = sorted(os.listdir(datasets_path))
    abs_root = os.path.abspath(datasets_path)   # hoisted out of the loop

    with open(txtPath, 'w') as f:
        for pop_id, pop_name in enumerate(people):
            photos_path = os.path.join(datasets_path, pop_name)
            if not os.path.isdir(photos_path):
                continue                        # skip stray files at the root
            for photo_name in os.listdir(photos_path):
                f.write("{};{}\n".format(
                    pop_id, os.path.join(abs_root, pop_name, photo_name)))


if __name__ == '__main__':
    # Entry point: build the competition training index by default.
    # Uncomment the calls below to (re)generate the LFW index, the LFW
    # pair file, or the CASIA-WebFace index instead.
    get_train_txt(imgPath='/init_data/toUser/train', txtPath='../train.txt')
    # imgPath = '/init_data/lfw'
    # get_lfw_txt(imgPath=imgPath)
    # get_flw_pair_txt()
    # get_casia_txt()
