#!/usr/bin/env python3
# -*- encoding: utf-8 -*-


from pathlib import Path
import requests
import gzip
import numpy as np
import pickle


# Directory containing this script; downloaded archives and the pickle cache live here.
work_dir = Path(__file__).resolve().parent
# Cached, pre-converted dataset (written once by init_mnist()).
save_file = work_dir / 'mnist.pkl'
# NOTE(review): yann.lecun.com has been unreliable for direct downloads in
# recent years (often answers 403) -- confirm, or point this at a mirror.
base_url = 'http://yann.lecun.com/exdb/mnist/'
# The four IDX archives: train/test images and labels, in that order.
file_names = (
    'train-images-idx3-ubyte.gz',
    'train-labels-idx1-ubyte.gz',
    't10k-images-idx3-ubyte.gz',
    't10k-labels-idx1-ubyte.gz'
)
# Pixels per flattened 28x28 image.
img_size = 28 * 28


def _download(file_name):
    """Download one MNIST archive into ``work_dir``, skipping cached files.

    :param file_name: archive name, appended to ``base_url``.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    # Already downloaded earlier -- nothing to do.
    file_path = work_dir / file_name
    if file_path.exists():
        return

    print("Downloading %s ..." % file_name)
    r = requests.get(base_url + file_name, timeout=60)
    # Without this check, an HTTP error page (e.g. a 403 from the server)
    # would be saved as a corrupt .gz and silently break the conversion step.
    r.raise_for_status()
    with file_path.open('wb') as f:
        f.write(r.content)
    print('Done')


def download_mnist():
    """Fetch every MNIST archive that is not yet cached locally."""
    for archive in file_names:
        _download(archive)


def _load_img(file_name):
    """Read a gzipped IDX image file and return a (N, 784) uint8 array.

    :param file_name: archive name inside ``work_dir``.
    :return: 2-D numpy array, one flattened 28x28 image per row.
    """
    path = work_dir / file_name

    print('Converting %s to Numpy Array ...' % file_name)
    with gzip.open(path, 'rb') as fh:
        raw = fh.read()
    # The IDX image format carries a 16-byte header before the pixel bytes.
    images = np.frombuffer(raw, np.uint8, offset=16).reshape(-1, img_size)
    print('Done')

    return images


def _load_label(file_name):
    """Read a gzipped IDX label file and return a 1-D uint8 array.

    :param file_name: archive name inside ``work_dir``.
    :return: 1-D numpy array of digit labels.
    """
    path = work_dir / file_name

    print('Converting %s to Numpy Array ...' % file_name)
    with gzip.open(path, 'rb') as fh:
        raw = fh.read()
    # The IDX label format carries an 8-byte header before the label bytes.
    labels = np.frombuffer(raw, np.uint8, offset=8)
    print('Done')

    return labels


def _convert_numpy():
    """Assemble the four downloaded archives into a nested dict of arrays.

    :return: {'train'|'test': {'img': array, 'label': array}}
    """
    train_img, train_label, test_img, test_label = file_names
    return {
        'train': {
            'img': _load_img(train_img),
            'label': _load_label(train_label),
        },
        'test': {
            'img': _load_img(test_img),
            'label': _load_label(test_label),
        },
    }


def init_mnist():
    """Download, convert, and pickle the full dataset to ``save_file``."""
    download_mnist()
    converted = _convert_numpy()
    print('Creating pickle file ...')
    with save_file.open('wb') as fh:
        pickle.dump(converted, fh, pickle.HIGHEST_PROTOCOL)
    print('Done')


def save_file_not_exists():
    """Report the dataset directory and tell whether the pickle cache is missing.

    :return: True when ``save_file`` does not exist yet.
    """
    print('Dataset directory is %s' % work_dir)
    missing = not save_file.exists()
    return missing


def _change_one_hot_label(data):
    res = np.zeros((data.size, 10))
    # 关于enumerate，用法举例如下
    #   >>> seasons = ['Spring', 'Summer', 'Fall', 'Winter']
    #   >>> list(enumerate(seasons))
    #   [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]
    for idx, row in enumerate(res):
        row[data[idx]] = 1

    return res


def _shuffle_mnist(data):
    permutation = np.random.permutation(data['img'].shape[0])
    for key in data:
        data[key] = data[key][permutation]
    return data


def test_dir():
    """Print this module's path and the dataset directory (debug helper)."""
    for line in ('__file__ = %s' % __file__,
                 'Dataset directory is %s' % work_dir):
        print(line)


def load_mnist(normalize=True, flatten=True, one_hot_label=False, shuffle=False):
    """Load the MNIST dataset, building the pickle cache on first use.

    :param normalize: scale pixel values into the 0.0-1.0 range (float32).
    :param flatten: keep images as flat 784-vectors; otherwise (1, 28, 28).
    :param one_hot_label: return labels as one-hot rows such as [0, 0, 1, 0].
    :param shuffle: randomly permute each split (images and labels together).
    :return: (train images, train labels), (test images, test labels)
    """
    # Build the processed pickle the first time around.
    if save_file_not_exists():
        init_mnist()

    # Read back the NumPy arrays converted earlier.
    with save_file.open('rb') as fh:
        dataset = pickle.load(fh)

    # The splits are independent, so each one can be post-processed in full.
    # Option order within a split matches the original pipeline:
    # normalize -> flatten -> one-hot -> shuffle.
    for split in dataset.values():
        if normalize:
            # Cast to float first so the division yields fractions.
            split['img'] = split['img'].astype(np.float32) / 255.0
        if not flatten:
            # Restore the (channel, height, width) image layout.
            split['img'] = split['img'].reshape(-1, 1, 28, 28)
        if one_hot_label:
            split['label'] = _change_one_hot_label(split['label'])
        if shuffle:
            _shuffle_mnist(split)

    return (dataset['train']['img'], dataset['train']['label']), \
           (dataset['test']['img'], dataset['test']['label'])
