from glob import glob
from os.path import join

import numpy as np
import torch
import yaml
from PIL.Image import Image
from torchvision.transforms import transforms
from tqdm import tqdm


def chunks(l, n):
    """Yield successive n-sized chunks (slices) of the sequence l.

    :param l: sequence to split (anything sliceable with a len())
    :param n: maximum size of each chunk
    :return: generator of consecutive slices; the last may be shorter than n
    """
    start = 0
    end = len(l)
    while start < end:
        yield l[start:start + n]
        start += n
def load_args(filename, args):
    """Load a YAML config file and copy every leaf option onto ``args``.

    The file is expected to be a mapping of groups, each group itself a
    mapping of option-name -> value.  Group names are discarded; every
    option becomes an attribute of ``args``.

    :param filename: path to a YAML configuration file
    :param args: object (e.g. an argparse.Namespace) that receives the options
    :return: None — ``args`` is mutated in place
    """
    with open(filename, 'r', encoding='utf-8') as stream:
        data_loaded = yaml.safe_load(stream)
    # BUG FIX: the original reused the name `key` for both loop levels,
    # shadowing the group name with each option name; use distinct names.
    for _group_name, group in data_loaded.items():
        for key, val in group.items():
            setattr(args, key, val)
def image():
    """Extract normalized 224x224 image tensors for every JPEG under the
    AwA2 ``JPEGImages`` directory, batching the work in chunks of 512 files.

    Reads the class list from ``classes.txt`` (tab-separated index/name
    pairs), globs all jpgs recursively, loads each as RGB, applies the
    standard ImageNet resize/crop/normalize transform, and concatenates
    the per-chunk tensors into one big feature tensor.
    """
    classesFileName = 'classes.txt'
    classes = []
    # NOTE(review): path looks like it is missing a separator after the
    # drive letter — likely meant 'E:/datasets/...'; confirm before running.
    data_dir = 'E:datasets/Animals_with_Attributes2'
    with open(join(data_dir, classesFileName), 'r') as f:
        # each line is "<index>\t<class name>"; keep only the name
        pairs = [p.split('\t')[1].strip() for p in f.read().strip().split('\n')]
        classes.extend(pairs)
    dataPath = join(data_dir, 'JPEGImages')
    files_all = []
    # recursive glob over all class subdirectories
    files_before = glob(join(dataPath, '**', '*.jpg'), recursive=True)
    for current in files_before:
        # keep only "<class dir>/<file name>", relative to dataPath
        parts = current.replace('\\', '/').split('/')
        files_all.append(join(parts[-2], parts[-1]))
    image_feats = []
    image_files = []
    # standard ImageNet normalization constants
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean, std)
            ])
    # BUG FIX: the original called ImageLoader2() without the required
    # `root` argument, which raises TypeError on the first use.
    imageLoader = ImageLoader2(dataPath)
    # ceil division so the trailing partial chunk is counted in the bar
    n_chunks = (len(files_all) + 511) // 512
    for files in tqdm(chunks(files_all, 512), total=n_chunks, desc='EXTRACTING features '):
        imgs = [transform(imageLoader(f)) for f in files]
        # BUG FIX: the original called `.data.cpu()` on a plain Python
        # list; stack the per-image tensors into one batch tensor first.
        batch = torch.stack(imgs)
        image_feats.append(batch.cpu())
        image_files += files
    image_feats = torch.cat(image_feats, 0)
    print('features for %d images generated' % (len(image_files)))
    # torch.save({Constants.FEATURES: image_feats, Constants.FILES: image_files}, out_file)
class ImageLoader2:
    """Callable that opens an image (path relative to ``root``) as RGB PIL."""

    def __init__(self, root):
        # directory that relative image paths are resolved against
        self.root_dir = root

    def __call__(self, img):
        """Open ``img`` relative to the root dir and return an RGB PIL image.

        :param img: image path relative to ``self.root_dir``
        :return: PIL image converted to RGB (drops any alpha channel)
        """
        # BUG FIX: the module-level `from PIL.Image import Image` binds the
        # Image *class* (which has no usable `open`); import the module-level
        # factory function locally so `Image.open` resolves correctly.
        from PIL import Image
        return Image.open(join(self.root_dir, img)).convert('RGB')  # We don't want alpha
def l2_normalize(x, dim=1):
    """Return ``x`` scaled to unit L2 norm along ``dim``.

    :param x: input tensor, e.g. (N, d) with d the feature dimension
    :param dim: dimension along which to normalize (default 1)
    :return: tensor of the same shape with unit L2 norm along ``dim``
    """
    # small epsilon keeps all-zero slices from dividing by zero
    x = x + 1e-10
    # BUG FIX / generalization: keepdim=True broadcasts correctly for any
    # `dim`; the original `.unsqueeze(-1)` was only correct when `dim` was
    # the last dimension (identical result for the default 2-D, dim=1 case).
    feature_norm = torch.sum(x ** 2, dim=dim, keepdim=True) ** 0.5
    return x / feature_norm

def standardScaler(x: torch.Tensor, dim=0):
    """Z-score standardize ``x`` along ``dim``: (x - mean) / std.

    Uses torch's default unbiased (n-1) standard deviation.
    Reference idea: https://blog.csdn.net/a384504062/article/details/103827166

    :param x: input tensor, e.g. (N, d) with N instances
    :param dim: dimension over which statistics are computed (default 0)
    :return: standardized tensor, same shape as ``x``
    """
    # BUG FIX: keepdim=True keeps the statistics broadcastable against x for
    # any `dim`; the original `.unsqueeze(-1)` misaligned them unless `dim`
    # was the last dimension (e.g. the default dim=0 on an (N, d) batch).
    # Dead verification variables (mean2/std2) removed.
    mean = torch.mean(x, dim=dim, keepdim=True)
    std = torch.std(x, dim=dim, keepdim=True)
    return (x - mean) / std