import random
import os, sys
import math
from numpy import arange

# specify the directory and file of trainer and predictor
# (each string is the executable path plus its .conf argument; they are run
# through os.system in dose() below)
trainer = "../../svd_feature binaryClassification_null.conf" 
predictor = "../../svd_feature_infer binaryClassification_null.conf"
# external tool that converts a text feature file into a binary feature buffer
binarytrans = "../../tools/make_feature_buffer "
# default dataset file name (overridable via argv[1] in the __main__ block)
dataset_name = "ua.base.example.multi"
# set the parameters of trainer and predictor
# NOTE(review): the buffer/model values carry embedded double quotes,
# presumably so they survive the shell command line assembled in dose();
# dose() prefixes each of them with the partition directory.
trainer_param = {"learning_rate": 0.002, "wd_item": 0.003, "wd_user":0.003,
        "num_item": 7779, "num_user": 27938, "num_global": 0,
        "num_factor": 128, "active_type": 0,
        "test:buffer_feature": '"train.buffer"', "input_type": 0,
        "buffer_feature": '"validate.buffer"', "model_out_folder": '"."'}
# predictor shares the trainer's parameters, then each side gets its own extra
predict_param = trainer_param.copy()
trainer_param["num_round"] = 40  # trainer: number of training rounds
predict_param["pred"]=40  # predictor: which round's model to load
# set prefix of partition name
partition_prefix = "datapartition_"

'''
Partition the dataset into a train set and a validate
set per partition directory.
Parameters:
    filename: file name of the dataset
    ratio: approximate fraction of lines routed to the validate set
'''
def partitionData(filename=dataset_name, ratio = 0.1):
    """Split `filename` into 10 train/validate partitions.

    For each of the 10 partitions, a directory `<partition_prefix><n>` is
    created holding `<filename>.train` and `<filename>.validate`; every
    input line is routed to the validate file with probability `ratio`.
    (The original compared `i % 10 == randint(0, 9)`, i.e. a fixed 1/10
    chance per line that ignored the `ratio` argument; `ratio` is now
    honored, with the default preserving the old behavior.)
    The external make_feature_buffer tool is then invoked to build the
    binary feature buffers consumed by the trainer.

    Parameters:
        filename: file name of the dataset to split
        ratio: approximate fraction of lines sent to the validate set
    """
    with open(filename) as fp:
        for partition_num in range(10):
            fp.seek(0)
            dir_name = partition_prefix + str(partition_num)
            # create directory for every partition
            try:
                if not os.path.exists(dir_name):
                    os.makedirs(dir_name)
            except OSError:
                sys.exit('Fatal: output directory "' + dir_name + 
                    '" does not exist and cannot be created') 
            # create train data file and validate data file respectively;
            # `with` guarantees both are closed (the originals were leaked
            # on any exception)
            train_fpname = dir_name + "/" + filename + ".train"
            validate_fpname = dir_name + "/" + filename + ".validate"
            with open(train_fpname, 'w') as train_fp, \
                    open(validate_fpname, 'w') as validate_fp:
                for line in fp:
                    # route this line to validate with probability `ratio`
                    if random.random() < ratio:
                        validate_fp.write(line)
                    else:
                        train_fp.write(line)

            # generate the binary form of the feature files
            binarytrans_exec_str = binarytrans + " " + train_fpname + " " + \
                    dir_name + "/train.buffer"
            print(binarytrans_exec_str)
            os.system(binarytrans_exec_str)
            binarytrans_exec_str = binarytrans + " " + validate_fpname + " " + \
                    dir_name + "/validate.buffer"
            os.system(binarytrans_exec_str)

'''
partition dataset into 10 train sets and validate sets,
and init the parameters
parameters:
'''
def init(filename = dataset_name):
    """Prepare the experiment by building the 10 train/validate partitions."""
    partitionData(filename)

'''
run once with the provided parameters
'''
def _customize_params(base, feature_dir, active_type, lr, wd_item, wd_user,
        number_factor):
    """Return a copy of `base` with the sweep values and the partition-local
    file paths filled in."""
    params = base.copy()
    params["active_type"] = active_type
    # NOTE(review): the original wrote keys "lr" and "number_factor", which
    # are not the keys declared in trainer_param ("learning_rate" and
    # "num_factor") -- so the sweep values never overrode the defaults and
    # both spellings were passed to the tool. Write the declared keys instead.
    params["learning_rate"] = lr
    params["wd_item"] = wd_item
    params["wd_user"] = wd_user
    params["num_factor"] = number_factor
    # prefix the quoted buffer/model paths with the partition directory
    params["test:buffer_feature"] = feature_dir + "/" + base["test:buffer_feature"]
    params["buffer_feature"] = feature_dir + "/" + base["buffer_feature"]
    params["model_out_folder"] = feature_dir + "/" + base["model_out_folder"]
    return params


def _param_str(params):
    """Assemble a trailing-space-separated "key=value" command-line string."""
    return "".join(k + "=" + str(v) + " " for k, v in params.items())


def dose(feature_dir, active_type = 0, lr = 0.002, wd_item = 0.003,
        wd_user = 0.003, number_factor = 128):
    """Run one train + predict cycle on the partition in `feature_dir`
    with the given hyper-parameters.

    Parameters:
        feature_dir: partition directory holding the feature buffers
        active_type, lr, wd_item, wd_user, number_factor: sweep values
            forwarded to the trainer/predictor command lines
    """
    # adjust the parameters according to current parameters
    temp_trainer_param = _customize_params(trainer_param, feature_dir,
            active_type, lr, wd_item, wd_user, number_factor)
    temp_predict_param = _customize_params(predict_param, feature_dir,
            active_type, lr, wd_item, wd_user, number_factor)
    # the prediction output file encodes the parameter combination in its
    # name; loss_linear() later parses wd_item/wd_user back out of it
    temp_predict_param["name_pred"] = feature_dir + "/pred-" + str(lr) + \
            "-" + str(wd_item) + "-" + str(wd_user) + "-" + \
            str(number_factor) + "-" + str(active_type)

    # invoke the training and predicting program
    os.system(trainer + " " + _param_str(temp_trainer_param))
    os.system(predictor + " " + _param_str(temp_predict_param))

'''
Loss functions: linear, sigmod, no_idea
Parameters:
    groundfile: the file containing the true values
    predictfile: the file containing the predicted values
'''
def loss_linear(groundfile, predictfile):
    """Squared-error loss plus L2 penalty terms over paired lines.

    The regularization weights are recovered from the prediction file
    name, which dose() builds as
    "pred-<lr>-<wd_item>-<wd_user>-<num_factor>-<active_type>".

    Parameters:
        groundfile: file whose first column holds the true values
        predictfile: file whose first column holds the predicted values
    Returns:
        the accumulated loss (float)
    """
    # split the file name and recover the regularization parameters
    parameters = predictfile.split("-")
    wd_item = float(parameters[2])
    wd_user = float(parameters[3])
    loss = 0.0
    # `with` closes both files deterministically (the originals were leaked)
    with open(groundfile) as gfile, open(predictfile) as pfile:
        for gline, pline in zip(gfile, pfile):
            gvalue = float(gline.split()[0])
            pvalue = float(pline.split()[0])
            # NOTE(review): wd_item penalizes the ground value and wd_user
            # the prediction, which looks suspicious but is preserved as-is
            # -- confirm against the model definition.
            loss += (gvalue - pvalue) ** 2 + \
                    wd_item * gvalue ** 2 + \
                    wd_user * pvalue ** 2
    return loss

def loss_sigmod(groundfile, predictfile):
    """Sigmoid-style loss -- placeholder, not implemented yet."""
    pass

def loss_noidea(groundfile, predictfile):
    """Alternative loss -- placeholder, not implemented yet."""
    pass

'''
statistic the fitting results
'''
def statistic():
    """Compute per-partition losses for every prediction file found in
    datapartition_0 and append them to loss.txt.

    Writes one line per prediction file: "<pred-name> [loss0, loss1, ...]".
    """
    # `with` guarantees the output file is flushed and closed (the original
    # never closed it)
    with open("loss.txt", "w") as loss_file:
        # collect the pred-* files produced in datapartition_0
        pred_files = []
        for entry in os.listdir("./datapartition_0"):
            if os.path.isfile("./datapartition_0/" + str(entry)) and \
                    entry.startswith("pred-"):
                print(entry)
                pred_files.append(entry)
        # read every prediction file "fi" across all 10 partitions and
        # calculate the per-partition losses
        for fi in pred_files:
            loss_file.write(fi + " ")
            losses = []
            for i in range(10):
                # check both inputs exist before computing the loss
                grounddata_filename = "datapartition_" + str(i) + "/" + \
                        dataset_name + ".validate"
                predictdata_filename = "datapartition_" + str(i) + "/" + fi
                if os.path.exists(grounddata_filename) and \
                        os.path.exists(predictdata_filename):
                    losses.append(loss_linear(grounddata_filename,
                            predictdata_filename))
                else:
                    # original message ran the words together ("<file>or ...")
                    print(grounddata_filename + " or " + predictdata_filename +
                            " does not exist")
            loss_file.write(str(losses) + "\n")

'''
try all the possible parameters
parameters:
    ith: fit the ith part of all fitting work
    total: the partition number of all the work
    e.g., ith = 1, total = 3, then fit() will 
    start to fit the 1th/3 fitting work
'''
def fit(filename = dataset_name, ith = 1, total = 3):
    """Grid-search every hyper-parameter combination over all 10 partitions.

    Parameters:
        filename: dataset file handed to init()/partitionData()
        ith, total: described above as selecting the ith of `total` work
            shares, but NOTE(review): neither is referenced in the body --
            the full grid is always swept. Confirm the intended sharding
            before relying on these arguments.
    """
    init(filename)
    
    # generate the possible value range of these parameters
    active_type = [0, 2, 5]
    lr = arange(0, 1, 0.001)
    number_factor = range(10, 210)
    wd_item = arange(0, 0.006, 0.0002)
    wd_user = arange(0, 0.006, 0.0002)

    # begin the fitting work
    # NOTE(review): 200 * 3 * 1000 * 30 * 30 combinations, each run on 10
    # partitions -- astronomically large; presumably meant to be narrowed
    # or sharded via ith/total.
    for number_factor_i in number_factor:
        for active_type_i in active_type:
            for lr_i in lr:
                for wd_item_i in wd_item:
                    for wd_user_i in wd_user:
                        # every parameter group runs 10 time in every partition
                        for partition_num in range(10):
                            dose("datapartition_" + str(partition_num),
                                    active_type_i, lr_i, wd_item_i,
                                    wd_user_i, number_factor_i)
                        # execute once for test
                        # return
    # For test
    #dose("datapartition_0/")
    pass

'''
entry of main function
'''
if __name__ == "__main__":
    # an optional first CLI argument overrides the default dataset name
    cli_args = sys.argv[1:]
    if cli_args:
        dataset_name = cli_args[0]
    fit(dataset_name)
    statistic()
