import random
import os, sys
import math
from numpy import arange
import numpy as np

# specify the directory and file of trainer and predictor
# (each string is the executable followed by its .conf file, run via os.system)
trainer = "../../svd_feature binaryClassification_null.conf" 
predictor = "../../svd_feature_infer binaryClassification_null.conf"
# external tool that converts a text feature file into a binary feature buffer
binarytrans = "../../tools/make_feature_buffer "
#dataset_name = "ua.base.example.purified.multi"
dataset_name = "ua.base.example.purified.multi"
# set the parameters of trainer and predictor
# NOTE(review): num_item/num_user presumably match the dataset's dimensions —
# confirm when switching datasets. Quoted values are passed through to the
# command line as quoted strings.
trainer_param = {"learning_rate": 0.002, "wd_item": 0.003, "wd_user":0.003,
        "num_item": 7779, "num_user": 27938, "num_global": 0,
        "num_factor": 128, "active_type": 0,
        "test:buffer_feature": '"validate.buffer"', "input_type": 0,
        "buffer_feature": '"train.buffer"', "model_out_folder": '"."'}
# predictor shares the trainer settings; round counts are overridden below
predict_param = trainer_param.copy()
trainer_param["num_round"] = 50
predict_param["pred"] = 50
# set prefix of partition name
partition_prefix = "datapartition_"
# specify the ndcg path
ndcg_path = "./ndcg"
# module-wide NDCG result log; reopened (truncated) per parameter combination
# in the __main__ sweep loop
ndcg_fout = open(ndcg_path,"w")
'''
Given groundfile and predictfile, 
calculate the ndcg index for the result.
'''
class QRecResult:
    """One prediction record for a (question, user) pair.

    Stores the question id, user id, ground-truth value and predicted value
    exactly as read from the data files (all kept as strings).
    """

    def __init__(self, qid, uid, real_v, pred_v):
        self.qid, self.uid = qid, uid
        self.real_v, self.pred_v = real_v, pred_v

def dcg_at_k(r, k, method=0):
    """Discounted cumulative gain of relevance scores r truncated at rank k.

    Parameters:
        r: relevance scores in ranked order (most relevant first)
        k: cutoff rank; only the first k scores contribute
        method: 0 -> first score undiscounted, later scores divided by
                log2 of their rank; 1 -> every score divided by log2(rank + 1)
    Returns:
        the DCG value as a float; 0. when r is empty or k <= 0
    Raises:
        ValueError: if method is not 0 or 1
    """
    # np.asfarray was removed in NumPy 2.0; asarray with an explicit float
    # dtype is the supported equivalent.
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.

def ndcg_at_k(r, k, method=0):
    """Normalized DCG at rank k: DCG of r divided by the ideal (sorted) DCG.

    Returns 0. when the ideal DCG is zero (all-zero relevance).
    """
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    return dcg_at_k(r, k, method) / ideal if ideal else 0.

def calculteNDCG(groundfile, predictfile):
    """Average NDCG over all questions found in the ground-truth file.

    The two files are read in lockstep: line i of groundfile holds the true
    label (field 0), the user id (field 4, "uid:val") and the question id
    (second-to-last field, "qid:val"); line i of predictfile holds the
    predicted score in field 0. Records are grouped by question, ranked by
    predicted score, and scored with 0.5*NDCG@5 + 0.5*NDCG@10.

    Returns:
        the mean per-question NDCG as a float (0.0 if no questions were read)
    """
    ques_dict = {}
    # close both inputs deterministically (the originals leaked the handles)
    with open(groundfile) as fin1, open(predictfile) as fin2:
        for line1, line2 in zip(fin1, fin2):
            line1_list = line1.strip().split(" ")
            line2_list = line2.strip().split(" ")
            quesid = line1_list[-2].split(":")[0]
            userid = line1_list[4].split(":")[0]
            ques_dict.setdefault(quesid, []).append(
                QRecResult(quesid, userid, line1_list[0], line2_list[0]))
    if not ques_dict:
        return 0.0
    ndcg_v = 0.0
    for ques_list in ques_dict.values():
        # BUGFIX: pred_v is a string, so the original sorted lexicographically
        # (wrong for multi-digit or negative scores); compare numerically.
        ques_list_sorted = sorted(ques_list,
                key=lambda ques: float(ques.pred_v), reverse=True)
        r = [int(p_v.real_v) for p_v in ques_list_sorted]
        ndcg_v += 0.5 * ndcg_at_k(r, 5) + 0.5 * ndcg_at_k(r, 10)
    return ndcg_v / len(ques_dict)
'''
Partition the dataset into train and validate
data sets according to the given ratio.
Parameters:
    filename: file name of the dataset
    ratio: validate_data / all_data (each of the int(1/ratio)
        partitions uses a different slice as its validate set)
'''
def simplified_train_data(train_fpname, validate_fpname):
    """Shrink the train file to rows that share a user or question with the
    validate file, rewriting train_fpname in place.

    Field layout per row: field 4 is "userid:value", the second-to-last field
    is "quesid:value".

    Parameters:
        train_fpname: path of the train file (overwritten)
        validate_fpname: path of the validate file (read only)
    """
    user_set = set()
    ques_set = set()
    with open(validate_fpname) as fin_validate:
        for line in fin_validate:
            fields = line.strip().split(" ")
            user_set.add(fields[4].split(":")[0])
            ques_set.add(fields[-2].split(":")[0])
    kept = []
    with open(fin_train_path(train_fpname)) as fin_train:
        for line in fin_train:
            # strip consistently (the original split the raw, unstripped line)
            fields = line.strip().split(" ")
            if fields[4].split(":")[0] in user_set or \
                    fields[-2].split(":")[0] in ques_set:
                kept.append(line)
    # BUGFIX: the original never closed the output handle, so buffered rows
    # could be lost; `with` guarantees the flush+close.
    with open(train_fpname, "w") as fout_train:
        fout_train.writelines(kept)


def fin_train_path(train_fpname):
    """Identity helper kept for clarity; returns the train file path."""
    return train_fpname

def partitionData(filename=dataset_name, ratio = 0.2):
    """Shuffle the dataset and split it into int(1/ratio) cross-validation folds.

    For each fold a directory "<partition_prefix><n>" is created containing a
    .train and a .validate file — rows [n*len_test, (n+1)*len_test) of the
    shuffled data form that fold's validate set, the rest its train set — and
    the binary feature buffers produced by the external make_feature_buffer
    tool.

    Parameters:
        filename: name of the dataset file to split
        ratio: fraction of rows placed in each validate set
    """
    with open(filename) as fp:
        data_list = fp.readlines()
    random.shuffle(data_list)
    len_test = int(len(data_list) * ratio)
    for partition_num in range(int(1 / ratio)):
        dir_name = partition_prefix + str(partition_num)
        # create directory for every partition
        try:
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
        except OSError:
            sys.exit('Fatal: output directory "' + dir_name + 
                '" does not exist and cannot be created') 
        train_fpname = dir_name + "/" + filename + ".train"
        validate_fpname = dir_name + "/" + filename + ".validate"
        # this fold's validate slice of the shuffled rows
        start = partition_num * len_test
        end = (partition_num + 1) * len_test
        with open(train_fpname, 'w') as train_fp, \
                open(validate_fpname, 'w') as validate_fp:
            # enumerate replaces the hand-maintained counter that was
            # incremented in both branches of the original loop
            for i, line in enumerate(data_list):
                if start <= i < end:
                    validate_fp.write(line)
                else:
                    train_fp.write(line)
        # generate the binary form of the features
        binarytrans_exec_str = binarytrans + " " + train_fpname + " " + \
                dir_name + "/train.buffer"
        print(binarytrans_exec_str)
        os.system(binarytrans_exec_str)
        binarytrans_exec_str = binarytrans + " " + validate_fpname + " " + \
                dir_name + "/validate.buffer"
        os.system(binarytrans_exec_str)

'''
partition dataset into 10 train sets and validate sets,
and init the parameters
parameters:
'''
def init(filename = dataset_name, ritio = 0.2):
    """Prepare the experiment by partitioning *filename* into folds.

    Thin wrapper around partitionData; the misspelled parameter name
    'ritio' is preserved for caller compatibility.
    """
    partitionData(filename, ritio)

'''
run once with the provided parameters
'''
def _override_params(base, feature_dir, active_type, lr, wd_item, wd_user,
        number_factor):
    """Copy *base* and patch in the sweep parameters plus partition-local paths."""
    params = base.copy()
    params["active_type"] = active_type
    params["learning_rate"] = lr
    params["wd_item"] = wd_item
    params["wd_user"] = wd_user
    params["num_factor"] = number_factor
    # point the buffer/model paths into this partition's directory
    for key in ("test:buffer_feature", "buffer_feature", "model_out_folder"):
        params[key] = feature_dir + "/" + base[key]
    return params

def _param_str(params):
    """Render a parameter dict as the 'k=v k=v ... ' command-line suffix."""
    result = ""
    for k, v in params.items():
        result += k + "=" + str(v) + " "
    return result

def dose(feature_dir, active_type = 0, lr = 0.002, wd_item = 0.003,
        wd_user = 0.003, number_factor = 128):
    """Run one train + predict cycle on one partition and log its NDCG.

    Parameters:
        feature_dir: partition directory holding the feature buffers
        active_type, lr, wd_item, wd_user, number_factor: SVDFeature
            hyper-parameters for this run
    Side effects:
        invokes the external trainer and predictor via os.system, then
        appends one "name = ndcg" line to the module-level ndcg_fout.
    """
    # the duplicated dict-patching of the original is factored into a helper
    temp_trainer_param = _override_params(trainer_param, feature_dir,
            active_type, lr, wd_item, wd_user, number_factor)
    temp_predict_param = _override_params(predict_param, feature_dir,
            active_type, lr, wd_item, wd_user, number_factor)
    temp_predict_param["name_pred"] = feature_dir + "/pred-" + str(lr) + \
            "-" + str(wd_item) + "-" + str(wd_user) + "-" + \
            str(number_factor) + "-" + str(active_type)

    # invoke the training and predicting program
    os.system(trainer + " " + _param_str(temp_trainer_param))
    os.system(predictor + " " + _param_str(temp_predict_param))

    # calculate the NDCG against this partition's validate set
    predictfile = temp_predict_param["name_pred"]
    groundfile = feature_dir + "/" + dataset_name + ".validate"
    ndcg = calculteNDCG(groundfile = groundfile, predictfile = predictfile)
    result_line = feature_dir + "-" + str(lr) + \
            "-" + str(wd_item) + "-" + str(wd_user) + "-" + \
            str(number_factor) + "-" + str(active_type) + " = " + str(ndcg) + "\n"
    ndcg_fout.write(result_line)
    print(result_line)

'''
Loss functions: linear, sigmod, no_idea
parameter:
    groundfile: the file contains the true values
    predictfile: the file contains the predict values
'''
def loss_linear(groundfile, predictfile):
    """Squared loss plus L2 penalty terms over paired ground/predict files.

    The prediction filename encodes "...pred-<lr>-<wd_item>-<wd_user>-...",
    so fields 2 and 3 of the '-'-split path supply the regularization
    weights.

    Parameters:
        groundfile: file whose first field per line is the true value
        predictfile: file whose first field per line is the predicted value
    Returns:
        sum over lines of (g - p)^2 + wd_item*g^2 + wd_user*p^2
    """
    # split and get the regularization parameters from the filename
    parameters = predictfile.split("-")
    wd_item = float(parameters[2])
    wd_user = float(parameters[3])
    loss = 0.0
    # `with` closes both handles (the original leaked them)
    with open(groundfile) as gfile, open(predictfile) as pfile:
        for gline, pline in zip(gfile, pfile):
            g = float(gline.split()[0])
            p = float(pline.split()[0])
            loss += (g - p) ** 2 + wd_item * g ** 2 + wd_user * p ** 2
    return loss

def loss_sigmod(groundfile, predictfile):
    """Sigmoid-based loss between ground truth and predictions (unimplemented stub)."""
    pass

def loss_noidea(groundfile, predictfile):
    """Alternative loss function placeholder (unimplemented stub)."""
    pass

'''
statistic the fitting results
'''
def statistic():
    """Collect per-partition linear losses for every prediction file.

    Lists the "pred-*" files in ./datapartition_0, then for each one computes
    loss_linear against the validate/prediction pair of every partition
    0..9 that exists, writing "<pred_file> [losses]" lines to loss.txt.
    """
    # `with` ensures loss.txt is flushed and closed (the original leaked it)
    with open("loss.txt", "w") as loss_file:
        # collect the pred-* files present in datapartition_0
        pred_files = []
        for name in os.listdir("./datapartition_0"):
            if os.path.isfile("./datapartition_0/" + str(name)) and \
                    name.startswith("pred-"):
                print(name)
                pred_files.append(name)
        # compute the loss of each prediction file on every partition
        for fi in pred_files:
            loss_file.write(fi + " ")
            losses = []
            for i in range(10):
                grounddata_filename = "datapartition_" + str(i) + "/" + \
                        dataset_name + ".validate"
                predictdata_filename = "datapartition_" + str(i) + "/" + fi
                if os.path.exists(grounddata_filename) and \
                        os.path.exists(predictdata_filename):
                    losses.append(loss_linear(grounddata_filename,
                            predictdata_filename))
                else:
                    print(grounddata_filename + " or " + predictdata_filename +
                            " does not exist")
            loss_file.write(str(losses) + "\n")

'''
entry of main function
'''
if __name__ == "__main__":
    # optional CLI override of the dataset file name
    if len(sys.argv) >= 2:
        dataset_name = sys.argv[1]
    trainer_param["num_round"] = 100
    predict_param["pred"] = 100
    ritio = 0.1
    init(dataset_name, ritio)
    # parameter grid for the sweep
    #active_type = [0, 2, 5]
    active_type = [0]
    #lr = arange(0, 0.002, 0.005)
    lr = [0.005, 0.0075, 0.01]
    number_factor = range(10, 60, 20)
    wd_item = arange(0.03, 0.04, 0.011)
    wd_user = arange(0.02, 0.03, 0.011)

    # begin the fitting work: for every parameter combination run dose() on
    # each partition, then average the per-partition NDCG into the summary
    for number_factor_i in number_factor:
        for active_type_i in active_type:
            for lr_i in lr:
                for wd_item_i in wd_item:
                    for wd_user_i in wd_user:
                        # fresh per-combination log; dose() appends one
                        # "name = ndcg" line per partition
                        ndcg_fout = open(ndcg_path, "w")
                        for partition_num in range(int(1 / ritio)):
                            dose("datapartition_" + str(partition_num),
                                active_type_i, lr_i, wd_item_i,
                                wd_user_i, number_factor_i)
                        ndcg_fout.close()
                        ndcg = 0.0
                        times = 0
                        with open(ndcg_path) as ndcg_fin:
                            for line in ndcg_fin:
                                ndcg += float(line.strip().split(" ")[-1])
                                times += 1
                        with open(ndcg_path + "_summary", "a+") as summary:
                            # guard against an empty log (the original could
                            # divide by zero here)
                            if times:
                                summary.write("\n" + "avg ndcg = "
                                    + str(ndcg / times)
                                    + " num_factor = " + str(number_factor_i)
                                    + " lr = " + str(lr_i)
                                    + " wd_item = " + str(wd_item_i)
                                    + " wd_user = " + str(wd_user_i))

