# lenet demo
# build and test with caffe

import sys
import os
import caffe
import numpy as np
from numpy import *
import matplotlib.pyplot as plt
# Absolute working directory where all generated prototxt files and snapshots live.
root_file_path = "/home/sunzy/workspace/pyhome/CaffeProject/CaffeProject/pulsar/deep_learning/temp"

# Paths (relative to root_file_path) of the generated network/solver prototxt files.
train_net_path = 'mnist/custom_auto_train.prototxt'
test_net_path = 'mnist/custom_auto_test.prototxt'
solver_config_path = 'mnist/custom_auto_solver.prototxt'

from caffe import layers as L, params as P


def lenet(lmdb, batch_size):
    """Build a LeNet-style network definition reading from an LMDB source.

    Args:
        lmdb: path to the LMDB database providing (image, label) pairs.
        batch_size: number of samples consumed per forward pass.

    Returns:
        The NetParameter protobuf describing the network (serializable via str()).
    """
    net = caffe.NetSpec()

    # Data layer: pull batches from LMDB, scaling raw pixel values into [0, 1].
    net.data, net.label = L.Data(
        batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
        transform_param=dict(scale=1. / 255), ntop=2)

    # Two conv + max-pool stages, then two fully connected layers with a ReLU
    # in between; all weights use Xavier initialization.
    net.conv1 = L.Convolution(net.data, kernel_size=5, num_output=20,
                              weight_filler=dict(type='xavier'))
    net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.conv2 = L.Convolution(net.pool1, kernel_size=5, num_output=50,
                              weight_filler=dict(type='xavier'))
    net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.fc1 = L.InnerProduct(net.pool2, num_output=500,
                             weight_filler=dict(type='xavier'))
    net.relu1 = L.ReLU(net.fc1, in_place=True)
    net.score = L.InnerProduct(net.relu1, num_output=10,
                               weight_filler=dict(type='xavier'))

    # Softmax cross-entropy loss over the 10 digit classes.
    net.loss = L.SoftmaxWithLoss(net.score, net.label)

    return net.to_proto()


def lenet_solver():
    """Construct the SolverParameter used to train the custom LeNet.

    Returns:
        A caffe_pb2.SolverParameter with train/test nets, SGD hyperparameters,
        learning-rate schedule, and snapshot settings filled in.
    """
    from caffe.proto import caffe_pb2
    solver = caffe_pb2.SolverParameter()

    # Fix the RNG seed so training runs are reproducible.
    solver.random_seed = 0xCAFFE

    # Point the solver at the generated train/test network prototxt files.
    solver.train_net = os.path.join(root_file_path, train_net_path)
    solver.test_net.append(os.path.join(root_file_path, test_net_path))
    solver.test_interval = 500    # evaluate every 500 training iterations
    solver.test_iter.append(100)  # forward 100 test batches per evaluation

    solver.max_iter = 10000  # total number of weight updates

    # Solver algorithm; alternatives include "Adam", "Nesterov", and others.
    solver.type = "SGD"

    # SGD hyperparameters: step size, momentum, and L2 weight decay.
    solver.base_lr = 0.01
    solver.momentum = 0.9
    solver.weight_decay = 5e-4

    # 'inv' learning-rate policy (same as the stock LeNet example); swap in
    # lr_policy = 'fixed' to keep the learning rate constant instead.
    solver.lr_policy = 'inv'
    solver.gamma = 0.0001
    solver.power = 0.75

    # Log the current training loss every 1000 iterations.
    solver.display = 1000

    # Snapshot the trained weights every 1000 iterations.
    solver.snapshot = 1000
    solver.snapshot_prefix = os.path.join(root_file_path, 'mnist/custom_net')

    # Run the solver on the CPU.
    solver.solver_mode = caffe_pb2.SolverParameter.CPU

    return solver


def lenet_train(inner_solver_config_path):
    """Train the net described by a solver prototxt and plot the curves.

    Runs `niter` SGD steps, recording the training loss every step and the
    test-set accuracy every `test_interval` steps, then displays both curves
    in a single matplotlib figure (loss on the left axis, accuracy on the
    right).

    Args:
        inner_solver_config_path: absolute path to the solver prototxt file.
    """
    # Workaround for LMDB data: drop any previous solver before creating a new
    # one (two solvers cannot be instantiated on the same data).
    solver = None
    solver = caffe.get_solver(inner_solver_config_path)

    niter = 1000  # EDIT HERE increase to train for longer
    # Floor division: `niter / 10` is an int on Py2 but a float on Py3, which
    # would later break numpy indexing with `it // test_interval`.
    test_interval = niter // 10

    # Per-iteration training loss and per-evaluation test accuracy.
    train_loss = np.zeros(niter)
    # True ceiling division so the array is sized correctly even when niter is
    # not a multiple of test_interval.
    test_acc = np.zeros(int(np.ceil(float(niter) / test_interval)))

    # The main solver loop.
    for it in range(niter):
        solver.step(1)  # one SGD update in Caffe

        # The training loss comes straight from the net's 'loss' blob.
        train_loss[it] = solver.net.blobs['loss'].data

        # Periodically run a full evaluation over 100 test batches.
        # (Caffe can also do this and write to a log, but doing it in Python
        # makes more complicated bookkeeping easy.)
        if it % test_interval == 0:
            # Single-argument print works identically on Py2 and Py3.
            print('Iteration {} testing...'.format(it))
            correct = 0
            for _ in range(100):
                solver.test_nets[0].forward()
                correct += np.sum(solver.test_nets[0].blobs['score'].data.argmax(1)
                                  == solver.test_nets[0].blobs['label'].data)
            # 100 batches of (presumably) 100 samples each = 1e4 samples;
            # assumes the test net's batch size is 100 (see main()) -- TODO
            # confirm if the test prototxt changes.
            test_acc[it // test_interval] = correct / 1e4

    # Plot training loss and test accuracy on twin y-axes.
    _, loss_ax = plt.subplots()
    acc_ax = loss_ax.twinx()
    loss_ax.plot(np.arange(niter), train_loss)
    acc_ax.plot(test_interval * np.arange(len(test_acc)), test_acc, 'r')
    loss_ax.set_xlabel('iteration')
    loss_ax.set_ylabel('train loss')
    acc_ax.set_ylabel('test accuracy')
    acc_ax.set_title('Custom Test Accuracy: {:.2f}'.format(test_acc[-1]))
    plt.show()



def main():
    """Generate the network and solver prototxt files, then train the net."""
    train_proto = os.path.join(root_file_path, train_net_path)
    test_proto = os.path.join(root_file_path, test_net_path)
    solver_proto = os.path.join(root_file_path, solver_config_path)

    # Serialize the train-time network definition (batch size 64).
    with open(train_proto, 'w') as out:
        out.write(str(lenet(os.path.join(root_file_path, 'mnist/mnist_train_lmdb'), 64)))

    # Serialize the test-time network definition (batch size 100).
    with open(test_proto, 'w') as out:
        out.write(str(lenet(os.path.join(root_file_path, 'mnist/mnist_test_lmdb'), 100)))

    # Serialize the solver configuration.
    with open(solver_proto, 'w') as out:
        out.write(str(lenet_solver()))

    # Kick off training with the solver we just wrote out.
    lenet_train(solver_proto)



# Script entry point: only run training when executed directly, not on import.
if __name__ == "__main__":
    main()