# lenet demo
# build and test with caffe

import sys
import os
import caffe
import numpy as np
from numpy import *
import matplotlib.pyplot as plt

from caffe import layers as L, params as P

from pulsar.deep_learning.htru1_subints import htru1_subints_test

name = "subints"

# root_file_path = "/home/sunzy/workspace/pyhome/CaffeProject/CaffeProject/pulsar/deep_learning/temp"
root_file_path = "/home/dataology/workspace/caffe/CaffeProject/pulsar/deep_learning/temp"

train_net_path = '%s/custom_auto_train.prototxt' % name
test_net_path = '%s/custom_auto_test.prototxt' % name
solver_config_path = '%s/custom_auto_solver.prototxt' % name


# train pos: 800, neg: 60000 all: 60800
# test  pos: 396, neg: 30392 all: 30788

def lenet(lmdb, batch_size):
    """Build a LeNet-style classification network and return its proto.

    :param lmdb: path to the LMDB database supplying images and labels
    :param batch_size: mini-batch size for the Data layer
    :return: the network definition as a NetParameter protobuf message
    """
    net = caffe.NetSpec()

    # Two-top data layer: images and labels both come from the LMDB source.
    net.data, net.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
                                 source=lmdb, ntop=2)

    # Two conv + max-pool stages followed by a 500-unit fully connected layer.
    net.conv1 = L.Convolution(net.data, kernel_size=5, num_output=20,
                              weight_filler=dict(type='xavier'))
    net.pool1 = L.Pooling(net.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.conv2 = L.Convolution(net.pool1, kernel_size=5, num_output=50,
                              weight_filler=dict(type='xavier'))
    net.pool2 = L.Pooling(net.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    net.fc1 = L.InnerProduct(net.pool2, num_output=500,
                             weight_filler=dict(type='xavier'))
    net.relu1 = L.ReLU(net.fc1, in_place=True)
    # Binary head: pulsar vs. non-pulsar, hence num_output=2.
    net.score = L.InnerProduct(net.relu1, num_output=2,
                               weight_filler=dict(type='xavier'))
    net.loss = L.SoftmaxWithLoss(net.score, net.label)

    return net.to_proto()


def lenet_solver(model_id, hyper_para):
    """
    Build the solver configuration for one grid-search run.

    :param model_id: the id of model (used in the snapshot prefix)
    :param hyper_para: dict providing "base_lr" and "snapshot" entries
    :return: a caffe_pb2.SolverParameter message
    """
    from caffe.proto import caffe_pb2
    solver = caffe_pb2.SolverParameter()

    # Fixed seed so training runs are reproducible.
    solver.random_seed = 0xCAFFE

    # Point the solver at the generated train/test network prototxt files.
    solver.train_net = os.path.join(root_file_path, train_net_path)
    solver.test_net.append(os.path.join(root_file_path, test_net_path))
    solver.test_interval = 500  # run the test phase every 500 training iterations
    solver.test_iter.append(308)  # forward 308 test batches per test phase

    solver.max_iter = 10000  # total number of net updates (training iterations)

    # Solver algorithm; alternatives include "SGD" and "Adam" among others.
    solver.type = "Nesterov"

    # Initial learning rate comes from the hyper parameter search.
    solver.base_lr = hyper_para["base_lr"]
    # Momentum: weighted average of current and previous updates.
    solver.momentum = 0.9
    # Weight decay regularizes the net and helps prevent overfitting.
    solver.weight_decay = 5e-4

    # 'inv' policy: learning rate decays smoothly over the course of training
    # (same policy as the default LeNet example).
    solver.lr_policy = 'inv'
    solver.gamma = 0.01
    solver.power = 0.75

    # Log the current training loss every 1000 iterations.
    solver.display = 1000

    # Snapshot interval comes from the hyper parameter search; the prefix
    # embeds the model id so grid-search runs do not overwrite each other.
    solver.snapshot = hyper_para["snapshot"]
    solver.snapshot_prefix = os.path.join(root_file_path, '%s/%d_custom_net' % (name, model_id))

    # Train on the GPU.
    solver.solver_mode = caffe_pb2.SolverParameter.GPU

    return solver


def lenet_train(model_id, inner_solver_config_path, hyper_para):
    """
    Run the training loop, periodically evaluating precision/recall on the
    test net.

    :param model_id: the id of model (unused here; kept for interface parity)
    :param inner_solver_config_path: path to the generated solver prototxt
    :param hyper_para: dict providing "niter" (number of training iterations)
    :return: 1 if the final test pass gives precision == recall == 0
             (degenerate model, skip full evaluation), else 0
    """
    solver = None  # ignore this workaround for lmdb data (can't instantiate two solvers on the same data)
    solver = caffe.get_solver(inner_solver_config_path)

    niter = hyper_para["niter"]
    test_interval = 100
    # losses will also be stored in the log
    train_loss = np.zeros(niter)
    test_acc = np.zeros(int(np.ceil(niter / test_interval)))

    # the main solver loop
    for it in range(niter):
        solver.step(1)  # one SGD update by Caffe

        # store the train loss
        train_loss[it] = solver.net.blobs['loss'].data

        # run a full test pass every test_interval iterations and at the end
        if it % test_interval == 0 or it == niter - 1:
            print('Iteration', it, 'testing...')
            correct = 0
            total = 0
            tp = 0
            real_p = 0
            pre_p = 0
            for test_it in range(308):
                solver.test_nets[0].forward()
                predictions = solver.test_nets[0].blobs['score'].data.argmax(1)
                labels = solver.test_nets[0].blobs['label'].data
                correct += sum(predictions == labels)
                total += labels.shape[0]
                tp_temp, real_p_temp, pre_p_temp = assessment(predictions, labels)
                tp += tp_temp
                real_p += real_p_temp
                pre_p += pre_p_temp
            # epsilon avoids division by zero when there are no positives
            precision = tp / (pre_p + 1e-10)
            recall = tp / (real_p + 1e-10)
            print("P:%f R:%f" % (precision, recall))
            print(correct)
            if it == niter - 1 and precision == 0.0 and recall == 0.0:
                return 1
            # BUG FIX: previously divided by the hard-coded 1e4, but the test
            # pass forwards 308 batches (~3.08e4 samples); normalize by the
            # actual sample count instead.
            test_acc[it // test_interval] = correct / total

    print("training done")
    return 0

def assessment(predict_label, ground_truth):
    """
    Count confusion-matrix statistics for the positive class (label 1).

    Vectorized with NumPy instead of an element-by-element loop; also
    accepts plain sequences, which are converted via np.asarray.

    :param predict_label: 1-D array-like of predicted class labels (0/1)
    :param ground_truth: 1-D array-like of true class labels (0/1)
    :return: (tp, real_p, pre_p) — true positives, actual positives and
             predicted positives, as plain Python ints
    """
    predicted = np.asarray(predict_label)
    actual = np.asarray(ground_truth)
    pre_p = int(np.sum(predicted == 1))
    real_p = int(np.sum(actual == 1))
    tp = int(np.sum((predicted == 1) & (actual == 1)))
    return tp, real_p, pre_p



def train(model_id, train_batch_size, test_batch_size, solver_hyper_para):
    """
    Generate the prototxt files, train the net, then evaluate and log results.

    :param model_id: the id of model (used in file naming and evaluation)
    :param train_batch_size: SGD batch size for training
    :param test_batch_size: SGD batch size for testing
    :param solver_hyper_para: hyper parameters of the solver
    :return: None
    """
    # define the lenet model
    # lmdb_root_path = "/home/sunzy/workspace/data/MedlatTrainingData/lmdb/"
    lmdb_root_path = "/usr/data/MedlatTrainingData/lmdb/"

    # write the train net prototxt
    with open(os.path.join(root_file_path, train_net_path), 'w') as proto_file:
        proto_file.write(str(lenet(os.path.join(lmdb_root_path, '%s_train_lmdb' % name),
                                   train_batch_size)))
    # write the test net prototxt
    with open(os.path.join(root_file_path, test_net_path), 'w') as proto_file:
        proto_file.write(str(lenet(os.path.join(lmdb_root_path, '%s_test_lmdb' % name),
                                   test_batch_size)))

    # write the solver prototxt
    with open(os.path.join(root_file_path, solver_config_path), 'w') as proto_file:
        proto_file.write(str(lenet_solver(model_id, solver_hyper_para)))

    # train the lenet; a status of 1 means the final training-time test had
    # P == 0 and R == 0, so the full evaluation is skipped
    status = lenet_train(model_id, os.path.join(root_file_path, solver_config_path),
                         solver_hyper_para)

    if status == 0:
        print("testing")
        # evaluate the trained snapshot on the held-out test set
        p, r = htru1_subints_test.test(model_id, solver_hyper_para["niter"])
        # BUG FIX: guard against ZeroDivisionError when p + r == 0 (the
        # training-time early exit does not cover this evaluation).
        f1 = 2 * p * r / (p + r) if (p + r) > 0 else 0.0

        print(p, r, f1)

        with open(os.path.join(root_file_path, "%s/result.txt" % name), 'a') as result_file:
            result_file.write("id %d=> P:%f R:%f F1:%f\n" % (model_id, p, r, f1))
    else:
        print("P==0 and R==0")

def select_best_hyper_para():
    """
    Grid-search over hyper parameter combinations, training one model each.

    Each entry is (model id, train batch size, test batch size,
    training iterations, base learning rate, snapshot interval).
    Earlier grid rounds (ids 0-7, lr 1e-3 / 1e-4, 2000 iterations) have
    already been run and were removed from the active list.
    :return: None
    """
    hyper_paras = [
        (8, 16, 100, 10000, 0.00001, 5000),
        (9, 32, 100, 10000, 0.00001, 5000),
        (10, 64, 100, 10000, 0.00001, 5000),
        (11, 8, 100, 10000, 0.00001, 5000),
    ]
    # NOTE: the unused `n = 12` counter from the original was removed;
    # tuple unpacking replaces manual positional indexing.
    for (model_id, train_batch_size, test_batch_size,
         niter, base_lr, snapshot) in hyper_paras:
        solver_hyper_para = {
            "niter": niter,
            "base_lr": base_lr,
            "snapshot": snapshot,
        }
        print(model_id, solver_hyper_para)
        train(model_id, train_batch_size, test_batch_size, solver_hyper_para)



def main():
    """Entry point: run the hyper parameter grid search."""
    select_best_hyper_para()

if __name__ == "__main__":
    main()