# -*- coding:utf-8 -*-
import sys
import numpy as np
import itertools
from collections import OrderedDict, defaultdict

sys.path.append("../")
import lightgbm as lgb
from utils.data_loader import load_pointwise_raw_data
from features.feature_manager import FeatureManager
import pandas as pd


def prepare_lgb_training_data(input_path, saved_path):
    """Extract a feature vector for every (query, candidate) pair and dump
    the result as a tab-separated file with a header row.

    Each output row is: Q<group_id> \t label \t feature values...
    """
    samples, groups = load_pointwise_raw_data(input_path)
    print("number of samples: {}".format(len(samples)))
    print("group lengths: {}".format(len(groups)))

    feature_manager = FeatureManager("./configs/stc.infer.config.json")
    offset = 0
    with open(saved_path, 'w', encoding="utf-8") as fw:
        print("\t".join(feature_manager.features))
        # header row: query id, label, then one column per feature
        fw.write("query\tlabel\t{}\n".format("\t".join(feature_manager.features)))

        for group_id, group_size in enumerate(groups):
            # samples are laid out group after group, so slice by offset
            for label, query, candidate in samples[offset:offset + group_size]:
                vec = feature_manager.predict(query.split(), candidate.split())
                fw.write("Q{}\t{}\t{}\n".format(group_id, label,
                                                "\t".join(map(str, vec))))
            offset += group_size
        print("over")


def load_features(path):
    """Load a tab-separated feature file produced by prepare_lgb_training_data.

    Rows starting with "Q" are samples (query id, label, features...); any
    other row is treated as the header and supplies the feature names.
    Consecutive rows sharing a query id form one ranking group.

    :return: (features, labels, groups, feature_names) where groups lists the
             size of each consecutive query group.
    """
    features, labels, groups = [], [], []
    feature_names = []
    current_qid, run_length = "", 0
    with open(path, 'r', encoding='utf-8') as fr:
        for line in fr:
            if not line.startswith("Q"):
                # header row: everything after the query/label columns
                feature_names = line.rstrip().split('\t')[2:]
                continue
            cols = line.rstrip().split("\t")
            vec = [float(v) for v in cols[2:]]
            assert len(vec) == len(feature_names)
            # query id changed -> close the previous group
            if run_length > 0 and current_qid != cols[0]:
                groups.append(run_length)
                run_length = 0

            run_length += 1
            current_qid = cols[0]

            labels.append(int(cols[1]))
            features.append(vec)

    if run_length > 0:
        # flush the trailing group
        groups.append(run_length)
    assert sum(groups) == len(labels)
    assert len(labels) == len(features)
    return features, labels, groups, feature_names


def train_lgb(params, train_set: lgb.Dataset, valid_set: lgb.Dataset, saved_path=None):
    """Train a LightGBM ranker and report NDCG@{1,3,5} on the valid set.

    :param params: LightGBM parameter dict; must contain 'num_rounds'.
    :param train_set: training lgb.Dataset (grouped, for lambdarank).
    :param valid_set: validation lgb.Dataset used for early stopping.
    :param saved_path: optional path; when given, the best model is saved there.
    :return: (ndcg@1, ndcg@3, ndcg@5) of the best iteration on the valid set.
    """
    # The `verbose_eval` / `early_stopping_rounds` keyword arguments were
    # removed in LightGBM 4.0; the callbacks below are the supported
    # equivalent (log every 10 rounds, stop after 20 rounds w/o improvement).
    bst = lgb.train(params,
                    train_set,
                    num_boost_round=params['num_rounds'],
                    valid_sets=[valid_set],
                    callbacks=[lgb.log_evaluation(period=10),
                               lgb.early_stopping(stopping_rounds=20)])
    ndcg_1 = bst.best_score['valid_0']['ndcg@1']
    ndcg_3 = bst.best_score['valid_0']['ndcg@3']
    ndcg_5 = bst.best_score['valid_0']['ndcg@5']

    print(bst.best_iteration)
    print(ndcg_1)
    if saved_path is not None:
        # persist only up to the best (early-stopped) iteration
        bst.save_model(saved_path, num_iteration=bst.best_iteration)
    return ndcg_1, ndcg_3, ndcg_5


def generate_lgb_grid_params():
    """Build the grid-search space as an OrderedDict of parameter -> candidates.

    The effective search space is the cartesian product of all value lists
    (consumed by _lgb_grid_search via itertools.product).
    """
    param_dict = OrderedDict()
    # fixed ranking setup, not searched
    param_dict['objective'] = ['lambdarank']
    param_dict['metric'] = ['ndcg']
    param_dict['ndcg_eval_at'] = ['1,3,5']
    param_dict['num_rounds'] = [100]

    # searched parameters
    param_dict['num_leaves'] = [15, 31, 63]

    # param_dict['max_depth'] = np.asarray([8])
    # param_dict['min_data_in_leaf'] = np.asarray([20])

    param_dict['learning_rate'] = np.asarray([0.005])
    param_dict['feature_fraction'] = np.linspace(0.1, 0.8, 8)
    param_dict['bagging_fraction'] = np.linspace(0.1, 0.8, 8)
    param_dict['bagging_freq'] = np.asarray([10, 20, 30])
    # NOTE: 'colsample_bytree' and 'subsample' were removed from the grid.
    # They are LightGBM aliases of 'feature_fraction' / 'bagging_fraction',
    # so searching both only triggered alias-conflict warnings and inflated
    # the grid by a redundant factor of 80 (8 x 10 alias combinations).

    return param_dict


def _lgb_grid_search(param_dict, train_set, valid_set, result_saved_path, model_saved_path):
    """Exhaustively evaluate every parameter combination, record the NDCG
    scores per combination, then retrain once with the best combination
    and save that model.
    """
    keys = list(param_dict.keys())
    combos = list(itertools.product(*param_dict.values()))
    columns = keys + ['ndcg@1', 'ndcg@3', 'ndcg@5', 'ndcg_avg']

    best_params, best_score = {}, -1.0
    results = pd.DataFrame(columns=columns)
    print('search space size:{}'.format(len(combos)))
    for row, combo in enumerate(combos):
        params = dict(zip(keys, combo))

        ndcg_1, ndcg_3, ndcg_5 = train_lgb(params, train_set=train_set, valid_set=valid_set)
        # weighted average favouring the top-ranked position
        avg_score = ndcg_1 * 0.5 + ndcg_3 * 0.3 + ndcg_5 * 0.2

        if avg_score > best_score:
            best_params = dict(zip(keys, combo))
            best_score = avg_score
            print('-' * 100)
            print('better score:\tndcg@1: {:.4f}, ndcg@3: {:.4f}, ndcg@5: {:.4f}'
                  .format(ndcg_1, ndcg_3, ndcg_5))

        # one result row per combination
        results.loc[row, keys] = combo
        results.loc[row, 'ndcg@1'] = ndcg_1
        results.loc[row, 'ndcg@3'] = ndcg_3
        results.loc[row, 'ndcg@5'] = ndcg_5
        results.loc[row, 'ndcg_avg'] = avg_score

    if result_saved_path is not None:
        print('save grid search results into {}'.format(result_saved_path))
        results.to_csv(result_saved_path)

    print('-' * 100)
    print('Train LightGBM with best parameter')
    train_lgb(best_params, train_set, valid_set, saved_path=model_saved_path)
    for key, val in best_params.items():
        print('{}:{}'.format(key, val))
    print('best ndcg score on the valid dataset: {:.4f}'.format(best_score))


def split_train_and_valid(input_path, saved_prefix, valid_size=100):
    """Split a feature file into valid/train files by query group.

    Query groups (rows sharing the same "Q..." id) are shuffled as whole
    units; the first `valid_size` groups go to `<saved_prefix>.valid.txt`
    and the rest to `<saved_prefix>.train.txt`. Both files keep the header.

    :raises ValueError: if the input file contains no header line.
    """
    import random

    samples = defaultdict(list)
    head_line = None
    with open(input_path, 'r', encoding='utf-8') as fr:
        for line in fr:
            stripped = line.rstrip()
            if not stripped.startswith("Q"):
                head_line = stripped  # header row (feature names)
                continue
            # group key is the query id in the first column
            qid = stripped.split('\t', 1)[0]
            samples[qid].append(stripped)

    if head_line is None:
        # previously this crashed later with an opaque TypeError on write
        raise ValueError("no header line found in {}".format(input_path))

    g_ids = list(samples.keys())
    random.shuffle(g_ids)

    def _dump(path, group_keys):
        # write header followed by all rows of the selected groups
        with open(path, 'w', encoding='utf-8') as fw:
            fw.write(head_line + '\n')
            for key in group_keys:
                for row in samples[key]:
                    fw.write(row + "\n")

    _dump(saved_prefix + ".valid.txt", g_ids[:valid_size])
    _dump(saved_prefix + ".train.txt", g_ids[valid_size:])


def lgb_grid_search(train_input_path, valid_input_path, model_saved_path, result_saved_path):
    """Load the train/valid feature files, wrap them as grouped lgb.Datasets
    and run the parameter grid search, saving the best model and the CSV of
    per-combination scores.
    """
    train_features, train_labels, train_groups, feat_names = load_features(train_input_path)
    train_set = lgb.Dataset(train_features,
                            label=train_labels,
                            group=train_groups,
                            feature_name=feat_names,
                            free_raw_data=False)
    print("train group: {}".format(len(train_groups)))
    print("train samples: {}".format(len(train_labels)))

    valid_features, valid_labels, valid_groups, feat_names = load_features(valid_input_path)
    valid_set = lgb.Dataset(valid_features,
                            label=valid_labels,
                            group=valid_groups,
                            feature_name=feat_names,
                            free_raw_data=False)
    print("valid group: {}".format(len(valid_groups)))
    print("valid samples: {}".format(len(valid_labels)))

    _lgb_grid_search(generate_lgb_grid_params(), train_set, valid_set,
                     model_saved_path=model_saved_path,
                     result_saved_path=result_saved_path)


if __name__ == '__main__':
    # Pipeline stages; 1) and 2) are left commented out once their output
    # files already exist on disk.
    # 1) extract feature vectors from the tokenized raw data:
    # prepare_lgb_training_data("../data/stc/stc.train.tokens.txt", "../data/stc/stc.train.features.txt")

    #
    # 2) split the feature file into train / valid query groups:
    # split_train_and_valid("../data/stc/stc.train.features.txt", "../data/stc/stc.train.feat")

    # 3) grid-search LightGBM lambdarank parameters; saves the best model and
    #    a CSV with the scores of every parameter combination.
    lgb_grid_search("../data/stc/stc.train.feat.train.txt",
                    "../data/stc/stc.train.feat.valid.txt",
                    "../lib/lgb.model.txt",
                    "../data/stc/stc.lgb.result.csv")
    pass
