# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from matplotlib import pyplot as plt
import numpy as np
import os, lmdb, shutil, glob, math
import os.path as osp
from imageio import imread
from random import shuffle
# BUG FIX: "from package.module as alias" is a SyntaxError; the valid forms are
# "import package.module as alias" or "from package import module as alias".
from caffe2.python.predictor import predictor_exporter as pe
from caffe2.proto import caffe2_pb2
from caffe2.python.predictor import mobile_exporter
from caffe2.python import (
        brew, core, model_helper, net_drawer, optimizer, visualize, workspace,
)

core.GlobalInit(['caffe2', '--caffe2_log_level=0'])
print('Necessities imported!')

# Project root: two directory levels above this file.
# BUG FIX: the original line was missing a closing parenthesis (SyntaxError).
ROOT_dir        = osp.dirname(osp.dirname(osp.abspath(__file__)))
# Where the cifar10 dataset is stored
data_folder         = osp.join(ROOT_dir, 'dataset', 'cifar10')
# Where model parameters are stored
model_folder        = osp.join(ROOT_dir, 'model', 'cifar10')
# LMDB database locations
training_lmdb_path, validation_lmdb_path, testing_lmdb_path = \
        osp.join(data_folder, 'training_lmdb'), \
        osp.join(data_folder, 'validation_lmdb'), \
        osp.join(data_folder, 'testing_lmdb')

# download cifar dataset
def download_cifar10():
    """Download the CIFAR-10 (png) tarball into data_folder and extract it.

    Both the download and the extraction are skipped when the extracted
    directory already exists.
    """
    import requests
    import tarfile

    url = "http://pjreddie.com/media/files/cifar.tgz"   # url to data
    filename = url.split("/")[-1]                       # download file name
    download_path = os.path.join(data_folder, filename) # path to extract data to
    # BUG FIX: str.strip('.tgz') strips any of the characters '.', 't', 'g',
    # 'z' from both ends of the string, not the '.tgz' suffix; use splitext
    # to drop the extension instead.
    extracted_path = os.path.splitext(download_path)[0]

    # Create data_folder if not already there
    if not os.path.isdir(data_folder):
        os.makedirs(data_folder)
    # If data does not already exist, download and extract
    if not os.path.exists(extracted_path):
        # Download data
        r = requests.get(url, stream=True)
        print("Downloading... {} to {}".format(url, download_path))
        # BUG FIX: close the output file deterministically instead of relying
        # on garbage collection of the anonymous file object.
        with open(download_path, 'wb') as f:
            f.write(r.content)
        print("Finished downloading...")

        # Unpack images from tgz file
        print('Extracting images from tarball...')
        tar = tarfile.open(download_path, 'r')
        for item in tar:
            tar.extract(item, data_folder)
        tar.close()
        print("Completed download and extraction!")
    else:
        print("Image directory already exists. Moving on...")

def pre_train_test():
    """Split the extracted CIFAR-10 images into three label files.

    Reads the class names from labels.txt, maps each name (sorted) to an
    integer id, shuffles the training images, then writes
    "<image_path> <label_id>" lines:
      - first 6000 shuffled training images -> validation_dictionary.txt
      - remaining training images           -> training_dictionary.txt
      - all test images                     -> testing_dictionary.txt
    """
    # Image directories produced by the tarball extraction
    training_dir    = osp.join(data_folder, 'cifar', 'train')
    testing_dir     = osp.join(data_folder, 'cifar', 'test')

    # Output label files
    training_label_path     = osp.join(data_folder, 'training_dictionary.txt')
    validation_label_path   = osp.join(data_folder, 'validation_dictionary.txt')
    testing_label_path      = osp.join(data_folder, 'testing_dictionary.txt')

    # Original labels file, i.e. the file listing every class name
    labels_path     = osp.join(data_folder, 'cifar', 'labels.txt')

    # Map each class name to an integer id; sort for a deterministic mapping.
    with open(labels_path, 'r') as labels_handler:
        classes = {
            line.strip(): i
            for i, line in enumerate(sorted(labels_handler.readlines()))
        }
    print("classes:", classes)

    def label_of(img):
        # File names look like "<index>_<classname>.png".
        return classes[img.split('_')[-1].split('.')[0]]

    # Randomly split the training images into training/validation parts.
    validation_count    = 6000
    img_list            = glob.glob(training_dir + "/*.png")
    shuffle(img_list)
    # IMPROVEMENT: use context managers so the three handles are closed even
    # when an exception (e.g. an unexpected file name) is raised mid-write.
    with open(training_label_path, 'w') as training_labels_handler, \
         open(validation_label_path, 'w') as validation_labels_handler, \
         open(testing_label_path, 'w') as testing_labels_handler:
        for img in img_list[:validation_count]:
            validation_labels_handler.write(img + ' ' + str(label_of(img)) + '\n')
        for img in img_list[validation_count:]:
            training_labels_handler.write(img + ' ' + str(label_of(img)) + '\n')
        for img in glob.glob(testing_dir + "/*.png"):
            testing_labels_handler.write(img + ' ' + str(label_of(img)) + '\n')

def generate_lmdb():
    """Write the training/validation/testing LMDBs if not already present.

    BUG FIX: the original referenced undefined names (training_labels_path,
    validation_labels_path, testing_labels_path) and raised NameError. The
    label-file locations are re-derived from data_folder exactly as
    pre_train_test() writes them.
    """
    splits = [
        ("training", osp.join(data_folder, 'training_dictionary.txt'), training_lmdb_path),
        ("validation", osp.join(data_folder, 'validation_dictionary.txt'), validation_lmdb_path),
        ("testing", osp.join(data_folder, 'testing_dictionary.txt'), testing_lmdb_path),
    ]
    for split_name, labels_path, lmdb_path in splits:
        if not os.path.exists(lmdb_path):
            print("Writing {} LMDB".format(split_name))
            write_lmdb(labels_path, lmdb_path)
        else:
            print(lmdb_path, "already exists!")

def write_lmdb(labels_file_path, lmdb_path):
    """Serialize every "<image_path> <label>" line of a labels file into an
    LMDB of caffe2 TensorProtos.

    Each record holds a float image tensor (BGR, CHW order) plus an int32
    label; keys are the ascii-encoded row index.
    """
    with open(labels_file_path) as labels_handler:
        print(">>> Write database...")
        LMDB_MAP_SIZE = 1 << 40
        print("LMDB_MAP_SIZE", LMDB_MAP_SIZE)
        env = lmdb.open(lmdb_path, map_size=LMDB_MAP_SIZE)
        with env.begin(write=True) as txn:
            count = 0
            for line in labels_handler.readlines():
                fields = line.rstrip().split()
                im_path = fields[0]
                im_label = int(fields[1])

                # Load the image as float RGB, flip channels to BGR, then
                # reorder HWC -> CHW (the batch dim N is added by AddInput).
                img_data = imread(im_path).astype(np.float32)
                img_data = img_data[:, :, (2, 1, 0)]
                img_data = np.transpose(img_data, (2, 0, 1))

                # Pack image + label into one TensorProtos message.
                tensor_protos = caffe2_pb2.TensorProtos()
                img_tensor = tensor_protos.protos.add()
                img_tensor.dims.extend(img_data.shape)
                img_tensor.data_type = 1  # FLOAT in caffe2's TensorProto enum
                img_tensor.float_data.extend(
                    img_data.reshape(np.prod(img_data.shape)))
                label_tensor = tensor_protos.protos.add()
                label_tensor.data_type = 2  # INT32 in caffe2's TensorProto enum
                label_tensor.int32_data.append(im_label)

                txn.put(
                    '{}'.format(count).encode('ascii'),
                    tensor_protos.SerializeToString()
                )
                if count % 1000 == 0:
                    print("Inserted {} rows".format(count))
                count += 1

        print("Inserted {} rows".format(count))
        print("\nLMDB saved at " + lmdb_path + "\n\n")

def AddInput(model, batch_size, db, db_type):
    """Add a DB input reader to `model`.

    Reads (uint8 image, label) batches from the database, casts the image
    data to float, scales it into [0, 1), and stops gradients at the data
    blob.

    Returns:
        (data, label) blob references.
    """
    # Load raw uint8 data and labels from the DB
    data_uint8, label = brew.db_input(
        model, blobs_out=["data_uint8", "label"],
        batch_size=batch_size, db=db, db_type=db_type,
    )
    # Convert data to float, then scale into the [0, 1) range
    # BUG FIX: `core.DataTyple` was a typo for `core.DataType` (AttributeError).
    data    = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
    data    = model.Scale(data, data, scale=float(1.0 / 256))
    # No gradient should flow back from data to data_uint8
    data    = model.StopGradient(data, data)
    return data, label

# Tracks the spatial size of a feature map across conv/pool layers using the
# standard formula: out = floor((in - kernel + 2*pad) / stride) + 1.
def update_dims(height, width, kernel, stride, pad):
    """Return the (height, width) a conv/pool layer produces from its input."""
    def out_size(dim):
        return (dim - kernel + 2 * pad) // stride + 1
    return out_size(height), out_size(width)

def Add_Original_CIFAR10_Model(model, data, num_classes, image_height, image_width, image_channels):
    """Build the classic CIFAR-10 convnet on `model`.

    Three conv stages (conv/max-pool/relu, then twice conv/relu/avg-pool),
    followed by two fully connected layers and a softmax. Feature-map size
    is tracked with update_dims so fc1's input dimension is exact.

    Returns the softmax output blob.
    """
    fm_h, fm_w = image_height, image_width  # running feature-map size

    # Stage 1: conv -> max pool -> relu
    conv1 = brew.conv(model, data, 'conv1', dim_in=image_channels, dim_out=32, kernel=5, stride=1, pad=2)
    fm_h, fm_w = update_dims(height=fm_h, width=fm_w, kernel=5, stride=1, pad=2)
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=3, stride=2)
    fm_h, fm_w = update_dims(height=fm_h, width=fm_w, kernel=3, stride=2, pad=0)
    relu1 = brew.relu(model, pool1, 'relu1')

    # Stage 2: conv -> relu -> average pool
    conv2 = brew.conv(model, relu1, 'conv2', dim_in=32, dim_out=32, kernel=5, stride=1, pad=2)
    fm_h, fm_w = update_dims(height=fm_h, width=fm_w, kernel=5, stride=1, pad=2)
    relu2 = brew.relu(model, conv2, 'relu2')
    pool2 = brew.average_pool(model, relu2, 'pool2', kernel=3, stride=2)
    fm_h, fm_w = update_dims(height=fm_h, width=fm_w, kernel=3, stride=2, pad=0)

    # Stage 3: conv -> relu -> average pool
    conv3 = brew.conv(model, pool2, 'conv3', dim_in=32, dim_out=64, kernel=5, stride=1, pad=2)
    fm_h, fm_w = update_dims(height=fm_h, width=fm_w, kernel=5, stride=1, pad=2)
    relu3 = brew.relu(model, conv3, 'relu3')
    pool3 = brew.average_pool(model, relu3, 'pool3', kernel=3, stride=2)
    fm_h, fm_w = update_dims(height=fm_h, width=fm_w, kernel=3, stride=2, pad=0)

    # Classifier head
    fc1 = brew.fc(model, pool3, 'fc1', dim_in=64 * fm_h * fm_w, dim_out=64)
    fc2 = brew.fc(model, fc1, 'fc2', dim_in=64, dim_out=num_classes)
    return brew.softmax(model, fc2, 'softmax')

def AddTrainingOperators(model, softmax, label):
    """Attach the loss, gradient operators, and an SGD optimizer to `model`."""
    # Per-example cross entropy, averaged into the scalar training loss
    cross_entropy = model.LabelCrossEntropy([softmax, label], 'xent')
    avg_loss = model.AveragedLoss(cross_entropy, "loss")
    # Back-propagate from the averaged loss
    model.AddGradientOperators([avg_loss])
    # Plain SGD: fixed learning rate with momentum and weight decay
    optimizer.build_sgd(
        model,
        base_learning_rate=0.01,
        policy="fixed",
        momentum=0.9,
        weight_decay=0.004,
    )

def AddAccuracy(model, softmax, label):
    """Attach an accuracy operator comparing `softmax` output to `label`."""
    return brew.accuracy(model, [softmax, label], "accuracy")

def train_val_save_func():
    """Train the CIFAR-10 network, validate periodically, and export the
    deploy nets.

    Side effects: creates model_folder and a timestamped checkpoint
    directory inside it; writes periodic training checkpoints plus the final
    cifar10_init_net.pb / cifar10_predict_net.pb files there.
    """
    # BUG FIX: datetime was never imported anywhere in the file; a local
    # import keeps this fix self-contained.
    import datetime

    init_net_out    = 'cifar10_init_net.pb'
    predict_net_out = 'cifar10_predict_net.pb'

    # Dataset parameters
    img_width, img_height       = 32, 32
    img_channels, num_classes   = 3, 10
    # Training parameters
    training_iters, training_net_batch_size = 2000, 100
    validation_images, validation_interval  = 6000, 100
    # BUG FIX: was named `checkpoints_iters` but used as `checkpoint_iters`
    # below, raising NameError.
    checkpoint_iters    = 1000
    # BUG FIX: the condition was inverted — makedirs only ran when the folder
    # already existed (raising OSError) and never on a fresh run.
    if not osp.exists(model_folder):
        os.makedirs(model_folder)
    workspace.ResetWorkspace(model_folder)

    # ---- Model definitions ------------------------------------------------
    arg_scope = {"order": "NCHW"}

    # TRAINING MODEL
    train_model = model_helper.ModelHelper(
        name="train_net", arg_scope=arg_scope)
    # Data layer reading from training_lmdb
    data, label = AddInput(
        train_model, batch_size=training_net_batch_size,
        db=training_lmdb_path,
        db_type='lmdb')
    # BUG FIX: the original passed the undefined names image_height /
    # image_width / image_channels; the locals are img_height / img_width /
    # img_channels (same fix for the validation and deploy models below).
    softmax = Add_Original_CIFAR10_Model(
        train_model, data, num_classes, img_height, img_width, img_channels)
    # Add training operators using the softmax output from the model
    AddTrainingOperators(train_model, softmax, label)

    # Create uniquely named directory under model_folder for checkpoints
    unique_timestamp = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    checkpoint_dir = os.path.join(model_folder, unique_timestamp)
    os.makedirs(checkpoint_dir)
    print("Checkpoint output location: ", checkpoint_dir)

    def AddCheckpoints(model, every_iters, db_type):
        # Periodically snapshot the iteration counter and all parameters.
        # BUG FIX: write into checkpoint_dir — the original joined on the
        # bare timestamp, producing a path relative to the working directory.
        ITER = brew.iter(model, "iter")
        model.Checkpoint(
            [ITER] + model.params, [],
            db=os.path.join(checkpoint_dir, "cifar10_checkpoint_%05d.lmdb"),
            db_type=db_type, every=every_iters)
    # Add periodic checkpoint outputs to the model
    AddCheckpoints(train_model, checkpoint_iters, db_type="lmdb")

    # VALIDATION MODEL (shares parameters with the training model)
    val_model = model_helper.ModelHelper(
        name="val_net", arg_scope=arg_scope, init_params=False)
    data, label = AddInput(
        val_model, batch_size=validation_images,
        db=validation_lmdb_path,
        db_type='lmdb')
    softmax = Add_Original_CIFAR10_Model(
        val_model, data, num_classes, img_height, img_width, img_channels)
    AddAccuracy(val_model, softmax, label)

    # DEPLOY MODEL (expects an externally fed blob called "data")
    deploy_model = model_helper.ModelHelper(
        name="deploy_net", arg_scope=arg_scope, init_params=False)
    Add_Original_CIFAR10_Model(
        deploy_model, "data", num_classes, img_height, img_width, img_channels)

    print("Training, Validation, and Deploy models all defined!")

    # ---- Training loop ----------------------------------------------------
    # Initialize and create the training and validation networks
    workspace.RunNetOnce(train_model.param_init_net)
    workspace.CreateNet(train_model.net, overwrite=True)
    workspace.RunNetOnce(val_model.param_init_net)
    workspace.CreateNet(val_model.net, overwrite=True)
    # Trackers for loss / validation accuracy, one slot per validation run
    num_val_points = int(math.ceil(training_iters / validation_interval))
    loss = np.zeros(num_val_points)
    val_accuracy = np.zeros(num_val_points)
    iteration_list = np.zeros(num_val_points)
    val_count = 0

    # Run the network (forward & backward pass)
    for i in range(training_iters):
        workspace.RunNet(train_model.net)
        # Validate every <validation_interval> training iterations
        if i % validation_interval == 0:
            print("Training iter: ", i)
            loss[val_count] = workspace.FetchBlob('loss')
            workspace.RunNet(val_model.net)
            val_accuracy[val_count] = workspace.FetchBlob('accuracy')
            print("Loss: ", str(loss[val_count]))
            print("Validation accuracy: ", str(val_accuracy[val_count]) + "\n")
            iteration_list[val_count] = i
            val_count += 1

    # ---- Export the deploy model ------------------------------------------
    workspace.RunNetOnce(deploy_model.param_init_net)
    workspace.CreateNet(deploy_model.net, overwrite=True)
    # mobile_exporter's Export produces the init_net and predict_net protos
    init_net, predict_net = mobile_exporter.Export(
        workspace, deploy_model.net, deploy_model.params)

    # Write both nets next to the checkpoints
    full_init_net_out = osp.join(checkpoint_dir, init_net_out)
    full_predict_net_out = osp.join(checkpoint_dir, predict_net_out)
    with open(full_init_net_out, 'wb') as f:
        f.write(init_net.SerializeToString())
    with open(full_predict_net_out, 'wb') as f:
        f.write(predict_net.SerializeToString())
    print("Model saved as " + full_init_net_out + " and " + full_predict_net_out)
if __name__ == "__main__":
    # Full pipeline: fetch data, build label splits, pack LMDBs, then train.
    for step in (download_cifar10, pre_train_test, generate_lmdb, train_val_save_func):
        step()