"""
@Time    : 2019/9/29 12:48
@Author  : CcH
"""
import os
from torchvision import transforms

import numpy as np
from tqdm import tqdm
import numpy as np
import torch
from PIL import Image

# Image preprocessing pipeline: force a single grayscale channel, convert the
# PIL image to a float tensor in [0.0, 1.0], then normalize to [-1.0, 1.0].
# (Dataset-specific statistics mean=0.398 / std=0.116 were tried previously;
# the generic 0.5 / 0.5 normalization is the one in effect.)
_pipeline_steps = [
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
]
transform1 = transforms.Compose(_pipeline_steps)


def process_picture(input_img):
    """Load one image file and run it through the preprocessing pipeline.

    Args:
        input_img: Path to an image file readable by PIL.

    Returns:
        A normalized single-channel float tensor (shape 1 x H x W,
        values roughly in [-1.0, 1.0] after the 0.5/0.5 normalization).
    """
    # Use a context manager so the underlying file handle is closed promptly;
    # PIL opens files lazily and the original code never closed them.
    with Image.open(input_img) as raw:
        # 'L' = 8-bit single-channel grayscale; convert() forces the pixel
        # data to load, so it is safe to use the result after the file closes.
        img = raw.convert('L')
    return transform1(img)

def data_mean_var(path, mean_v=0.398, img_pixels=50 * 100):
    """Compute (and print) the pixel mean and standard deviation of a dataset.

    Args:
        path: Root directory; each sub-directory holds the image frames
            of one sample.
        mean_v: Pre-computed dataset mean used for the variance pass.
            The default 0.398 is the value previously measured for the
            original training set; pass ``None`` to recompute the mean
            from the data instead of using the cached constant.
        img_pixels: Number of pixels per preprocessed frame
            (the frames are assumed to be 50 x 100 — TODO confirm).

    Returns:
        A ``(mean, std)`` tuple of floats.
    """
    if mean_v is None:
        # First pass: accumulate the per-pixel sum to obtain the global mean.
        mean_sum = 0.0
        frame_count = 0
        for train_id in tqdm(os.listdir(path)):
            input_path = os.path.join(path, train_id)
            for pct_id in os.listdir(input_path):
                frame_count += 1
                img = process_picture(os.path.join(input_path, pct_id))
                mean_sum += np.sum(img.squeeze().numpy())
        mean_v = mean_sum / (frame_count * img_pixels)
    print(mean_v)

    # Second pass: accumulate squared deviations from the mean.
    var_val = 0.0
    pct_num = 0
    for train_id in tqdm(os.listdir(path)):
        input_path = os.path.join(path, train_id)
        for pct_id in os.listdir(input_path):
            pct_num += 1
            img = process_picture(os.path.join(input_path, pct_id))
            var_val += np.sum((img.squeeze().numpy() - mean_v) ** 2)
    std = np.sqrt(var_val / (pct_num * img_pixels))
    print(std)
    return mean_v, std

def cut_pad_image(pct_len):
    """Return the 20 frame indices (1-based) to sample from a clip.

    For clip lengths 20-24 a hand-tuned selection table is used (identical
    to the original tables); for any longer clip, 20 evenly spaced indices
    across [1, pct_len] are computed.  This fixes the KeyError the plain
    dict lookup raised for lengths >= 25.

    Args:
        pct_len: Number of frames in the clip; must be >= 20.

    Returns:
        A list of 20 distinct indices in [1, pct_len].
    """
    pad_pos = {
        20: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
        21: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
        22: [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22],
        23: [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23],
        # NOTE(review): the original table lists 13 before 12 — preserved as-is.
        24: [1, 2, 3, 5, 6, 7, 8, 10, 11, 13, 12, 14, 15, 17, 18, 19, 20, 22, 23, 24],
    }
    if pct_len in pad_pos:
        return pad_pos[pct_len]
    # Evenly spaced fallback: map k in [0, 19] onto [1, pct_len].  Because
    # pct_len >= 20 the step between samples is >= 1, so the 20 rounded
    # indices are guaranteed distinct.
    return [round(k * (pct_len - 1) / 19) + 1 for k in range(20)]


def file_2_allpicture(path):
    """Load every frame image of one sample directory into a stacked tensor.

    Frame files are named "<index>.png".  Clips with more than 20 frames
    are sub-sampled via cut_pad_image(); shorter clips are zero-padded on
    the time axis up to 20 frames.

    Args:
        path: Directory containing the frame images of one sample.

    Returns:
        (tensor, length): the stacked clip tensor (after unsqueeze(0)) and
        the effective clip length, clamped to at most 20.
    """
    max_length = 20
    pct_listid = pit2id_sort(os.listdir(path))
    pct_len = len(pct_listid)

    def _load(frame_id):
        # os.path.join is portable; the original hard-coded "\\" (Windows only).
        frame_path = os.path.join(path, str(frame_id) + ".png")
        return process_picture(frame_path).unsqueeze(1)

    if pct_len >= max_length:
        # Sub-sample down to exactly max_length frames.
        frame_ids = [pct_listid[i - 1] for i in cut_pad_image(pct_len)]
        picture_list = [_load(fid) for fid in frame_ids]
        pct_len = max_length
    else:
        picture_list = [_load(fid) for fid in pct_listid]
        # Zero-pad the time dimension up to max_length frames.
        # NOTE(review): assumes preprocessed frames are 50x100 — confirm
        # against the transform pipeline / input data.
        picture_list.append(torch.zeros((1, max_length - pct_len, 50, 100)))
    output = torch.cat(picture_list, dim=1)
    return output.unsqueeze(0), pct_len


def pit2id_sort(pidlist):
    """Convert frame filenames like "12.png" to ints and sort ascending.

    Args:
        pidlist: Iterable of filenames whose stem is an integer frame index.

    Returns:
        Sorted list of integer frame indices.
    """
    # Renamed the loop variable: the original shadowed the builtin `id`.
    return sorted(int(name.strip().split(".")[0]) for name in pidlist)


if __name__ == '__main__':
    # Smoke-test the frame-loading pipeline on a single sample directory.
    path = r"D:\XW_Bank\LipRecognition\train\lip_112_train\000f62ff4a21cf6480cc6eb4bd9bcd5a"
    file_2_allpicture(path)
