"""
author：石沙
date：2020-12-01
content：本模块用执行训练过程
"""

import sys
sys.path.append('..')
from site_packages.utils.job import DataOp
from models.similarity import cosine_similarity, euclidean_distance, edit_distance, lcs
import pandas as pd
import numpy as np
from models.traditional import WordEmbeddingDoc
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression


pd.set_option('display.max_columns', 10)


def train(X, y):
    """Train a LightGBM ranker on a pre-built feature matrix and save it as 'lgbm'.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix.
    y : array-like of shape (n_samples,)
        Relevance labels aligned with X.

    Returns
    -------
    lgb.LGBMRanker
        The fitted model (also persisted via DataOp.save). Returning it is
        backward-compatible: the function previously returned None.
    """
    # Hold out 20% for evaluation, preserving the label distribution.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)

    # Standardize features; fit on the training split only to avoid leakage.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # A single group per split: the whole set is treated as one query list.
    query_train = [X_train.shape[0]]
    query_val = [X_test.shape[0]]
    model = lgb.LGBMRanker(metric='auc', num_leaves=2, reg_alpha=0.0, reg_lambda=1,
                           max_depth=-1, n_estimators=20, subsample=0.7, colsample_bytree=0.7,
                           subsample_freq=1, learning_rate=0.01, min_child_weight=50,
                           random_state=2018, n_jobs=4)
    # NOTE(review): `early_stopping_rounds` as a fit() kwarg was removed in
    # lightgbm>=4; migrate to callbacks=[lgb.early_stopping(10)] when upgrading.
    model.fit(X_train, y_train, group=query_train,
              eval_set=[(X_test, y_test)], eval_group=[query_val],
              eval_at=[5, 10, 20], early_stopping_rounds=10)
    DataOp.save(model, 'lgbm')
    return model


class Ranker:
    """Rank candidate answers against a query using text-similarity features.

    Two model types are supported:
      - 'logistic': sklearn LogisticRegression (binary relevance probability)
      - 'lgbm':     LightGBM LGBMRanker (learning-to-rank scores)
    """

    def __init__(self, training=True, model_type='logistic'):
        """
        Parameters
        ----------
        training : bool
            When False, load a previously saved model (and feature scaler)
            from disk instead of training from scratch.
        model_type : str
            'logistic' or 'lgbm'.
        """
        self.training = training
        self.model_type = model_type
        # Fitted StandardScaler; set by train() or restored from disk.
        self.scaler = None
        if not self.training:
            self.model = DataOp.load_model(self.model_type)
            # Bug fix: features are standardized at training time, so the same
            # fitted scaler must be applied at inference. Older artifacts may
            # have been saved without one — fall back to raw features then.
            try:
                self.scaler = DataOp.load_model('scaler')
            except Exception:
                self.scaler = None
        else:
            self.model = None

    def make_features(self, seq1, seq2):
        """
        Extract pairwise features for two parallel lists of token sequences.

        Available feature functions:
          cosine_similarity:  cosine similarity,
          euclidean_distance: Euclidean distance,
          edit_distance:      edit distance,
          lcs:                longest-common-subsequence length

        Returns a (n_pairs, n_features) numpy array.

        NOTE(review): the word2vec embedding is (re)trained on every call;
        consider caching it if this method is invoked repeatedly.
        """
        embedding_params = {
            'min_count': 2,
            'size': 200,
            'workers': 4,
            'iter': 15,
        }
        embedding = WordEmbeddingDoc(
            model_type='word2vec',
            model_name='word2vec_wld',
            method='mean',
            **embedding_params
        )
        embedding.train()
        embedding_seq1 = embedding.transform(seq1)
        embedding_seq2 = embedding.transform(seq2)
        funcs = [
            cosine_similarity,
            euclidean_distance,
            # edit_distance,
            # lcs
        ]
        feature_list = [
            np.array(list(map(func, embedding_seq1, embedding_seq2))).reshape(-1, 1)
            for func in funcs
        ]
        return np.concatenate(feature_list, axis=1)

    def remove_na(self, X, y=None):
        """
        Drop rows of X that contain NaN.

        With y:    returns (X_clean, y_clean).
        Without y: returns (X_clean, kept_row_indices) where the indices are
                   integer positions into the original X, usable with
                   DataFrame.loc on a default RangeIndex.
        """
        # Append the companion column (labels, or row indices) so it is
        # filtered in lockstep with X.
        extra = y if y is not None else np.arange(X.shape[0])
        array = np.concatenate([X, extra.reshape(-1, 1).astype(float)], axis=1)
        array = array[~np.isnan(array).any(axis=1)]
        if y is not None:
            return array[:, :-1], array[:, -1]
        # Bug fix: the concatenation upcasts the indices to float, which is
        # unreliable for label-based DataFrame indexing — cast back to int.
        return array[:, :-1], array[:, -1].astype(int)

    def train(self, data):
        """
        Train the ranking model and persist it (plus the fitted scaler).

        Parameters
        ----------
        data : pandas.DataFrame
            Must contain 'question1_clean', 'question2_clean' (whitespace-
            tokenized strings) and a numeric 'label' column.
        """
        seq1 = data['question1_clean'].tolist()
        seq2 = data['question2_clean'].tolist()
        X = self.make_features(seq1, seq2)
        y = data['label'].astype(float).values
        X, y = self.remove_na(X, y)
        DataOp.save(X, "features", is_model=False)

        # Hold out 20% for evaluation, preserving the label distribution.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)

        # Standardize; fit on the training split only, and keep/persist the
        # fitted scaler so predict() sees features on the same scale.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)
        self.scaler = scaler
        DataOp.save(scaler, 'scaler')

        # Fit the selected model type.
        if self.model_type == 'lgbm':
            # A single group per split: the whole set is one query list.
            query_train = [X_train.shape[0]]
            query_val = [X_test.shape[0]]
            self.model = lgb.LGBMRanker(metric='auc', num_leaves=2, reg_alpha=0.0, reg_lambda=1,
                        max_depth=-1, n_estimators=20, subsample=0.7, colsample_bytree=0.7, subsample_freq=1,
                        learning_rate=0.01, min_child_weight=50, random_state=2018, n_jobs=4)
            self.model.fit(X_train, y_train, group=query_train,
                 eval_set=[(X_test, y_test)], eval_group=[query_val],
                 eval_at=[5, 10, 20], early_stopping_rounds=10)
        elif self.model_type == 'logistic':
            self.model = LogisticRegression()
            self.model.fit(X_train, y_train)
            y_pred = self.model.predict(X_test)
            print('测试报告如下：')
            print(classification_report(y_test, y_pred))

        DataOp.save(self.model, self.model_type)

    def predict(self, query, raw_ranking_result):
        """
        Score candidate answers against a query.

        Parameters
        ----------
        query : str
            Whitespace-tokenized query string.
        raw_ranking_result : list[str]
            Whitespace-tokenized candidate strings.

        Returns
        -------
        pandas.DataFrame with columns ['query', 'raw', 'prob'], sorted by
        'prob' descending; None for an unknown model_type.
        """
        df = pd.DataFrame(raw_ranking_result, columns=['raw'])
        df['query'] = query
        df['raw_list'] = df['raw'].str.split()
        df['query_list'] = df['query'].str.split()
        features = self.make_features(df['query_list'].tolist(), df['raw_list'].tolist())
        features, ids = self.remove_na(features)
        # Bug fix: apply the training-time standardization before scoring —
        # the model was fitted on scaled features.
        if self.scaler is not None:
            features = self.scaler.transform(features)

        # Keep only rows whose features survived NaN removal.
        df = df.loc[ids, :].copy()
        if self.model_type == 'lgbm':
            df['prob'] = self.model.predict(features)
        elif self.model_type == 'logistic':
            df['prob'] = self.model.predict_proba(features)[:, 1]
        else:
            return None
        return df[['query', 'raw', 'prob']].sort_values(by='prob', ascending=False)
    
    
if __name__ == '__main__':
    # Smoke-test the pipeline end to end: train a logistic ranker on the
    # prepared data, then score a handful of candidate replies for one query.
    training_data = DataOp.load_data('data_train')
    test_query = '想 换 一个 数据线'
    candidates = [
        "你好 我 想 退 两双 鞋子",
        "想 重新 买",
        "选 申请 退款 吗",
        "我 要 申请 退货",
        "马上 退 吗",
    ]

    ranker = Ranker(training=True, model_type='logistic')
    ranker.train(training_data)
    ranking = ranker.predict(test_query, candidates)
    print(ranking)

    
