#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : tfidf.py
# @Author: Richard Chiming Xu
# @Date  : 2022/1/11
# @Desc  : Build tf-idf / word-overlap features for text-pair matching

import numpy as np
import pandas as pd
import jieba
import distance
from tqdm import tqdm
from collections import Counter
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
import warnings

warnings.filterwarnings('ignore')


# 获取百度停用词表
# Load the Baidu stopword list.
def get_stopwords():
    """Read the Baidu stopword file and return one word per line."""
    with open('dataprocess/baidu_stopwords.txt', 'r', encoding='utf-8') as f:
        return [line.replace('\n', '') for line in f]


def cut(content):
    """Segment ``content`` with jieba in full mode.

    On AttributeError (e.g. a non-string slipped through cleaning) the
    offending value is printed before the exception is re-raised.
    """
    try:
        return jieba.lcut(content, cut_all=True)
    except AttributeError:
        print(content)
        raise


def rate(words_1, words_2):
    """Fraction of the distinct words of ``words_1`` also found in ``words_2``.

    Fix: the original divided by ``len(set(words_1))`` unconditionally and
    raised ZeroDivisionError when ``words_1`` was empty (e.g. an empty text
    after cleaning); an empty left side now yields 0.0.
    """
    unique_1 = set(words_1)
    if not unique_1:
        return 0.0
    return len(unique_1 & set(words_2)) / len(unique_1)


def edit_distance(s1, s2):
    """Levenshtein (unit-cost edit) distance between two strings.

    Pure-Python Wagner-Fischer implementation with two rolling rows,
    equivalent to ``distance.levenshtein`` for the default unit costs,
    so this helper no longer depends on the third-party package.
    """
    # Iterate over the longer string so the rolling row is the shorter one.
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1, 1):
        current = [i]
        for j, c2 in enumerate(s2, 1):
            current.append(min(
                previous[j] + 1,                  # deletion
                current[j - 1] + 1,               # insertion
                previous[j - 1] + (c1 != c2),     # substitution (free on match)
            ))
        previous = current
    return previous[-1]


def data_anaysis(df):
    """Add basic similarity features to ``df`` (modified in place and returned).

    New columns: character edit distance, jieba token lists, character and
    token counts, and the directional word-overlap ratios rate_a / rate_b.
    """
    # Character-level Levenshtein distance between the two texts.
    df['edit_dist'] = df.apply(
        lambda r: distance.levenshtein(r['text_a'], r['text_b']), axis=1)

    # Tokenize both sides.
    df['words_a'] = df['text_a'].apply(cut)
    df['words_b'] = df['text_b'].apply(cut)

    # Character counts.
    df['text_a_len'] = df['text_a'].apply(len)
    df['text_b_len'] = df['text_b'].apply(len)

    # Token counts.
    df['words_a_len'] = df['words_a'].apply(len)
    df['words_b_len'] = df['words_b'].apply(len)

    # Directional overlap ratios (a vs b and b vs a).
    df['rate_a'] = df.apply(lambda r: rate(r['words_a'], r['words_b']), axis=1)
    df['rate_b'] = df.apply(lambda r: rate(r['words_b'], r['words_a']), axis=1)

    return df


#  统计词汇列表距离
def word_match_share(row, stops):

    q1words = {}
    q2words = {}
    for word in row['words_a']:
        if word not in stops:
            q1words[word] = 1
    for word in row['words_b']:
        if word not in stops:
            q2words[word] = 1
    if len(q1words) == 0 or len(q2words) == 0:
        # The computer-generated chaff includes a few questions that are nothing but stopwords
        return 0
    shared_words_in_q1 = [w for w in q1words.keys() if w in q2words]
    shared_words_in_q2 = [w for w in q2words.keys() if w in q1words]
    R = (len(shared_words_in_q1) + len(shared_words_in_q2)) / (len(q1words) + len(q2words))
    return R


# 计算权重
def get_weight(count, eps=10000, min_count=2):
    if count < min_count:
        return 0
    else:
        return 1 / (count + eps)


# 折叠所有语句
corpus = []


def count_corpus(df, stops):
    """Return one space-joined, stopword-filtered token string per row of ``df``.

    Bug fix: the original appended into the module-level ``corpus`` list and
    returned that global, so documents leaked across calls — in ``algo`` the
    train rows were duplicated (train + train + test) when building the
    word-frequency Counter, and entries from earlier datasets leaked into
    later ones. A fresh local list is used instead; the return value for a
    single call is unchanged.
    """
    texts = []
    for i in tqdm(range(len(df))):
        row = df.iloc[i]
        kept = [w for w in row['words_a'] if w not in stops]
        kept += [w for w in row['words_b'] if w not in stops]
        texts.append(' '.join(kept))
    return texts


# 统计tfidf距离
def tfidf_word_match_share(row, stops, weights):
    q1words = {}
    q2words = {}
    for word in row['words_a']:
        if word not in stops:
            q1words[word] = 1
    for word in row['words_b']:
        if word not in stops:
            q2words[word] = 1
    if len(q1words) == 0 or len(q2words) == 0:
        return 0
    shared_weights = [weights.get(w, 0) for w in q1words.keys() if w in q2words] + [weights.get(w, 0) for w in
                                                                                    q2words.keys() if w in q1words]
    total_weights = [weights.get(w, 0) for w in q1words] + [weights.get(w, 0) for w in q2words]
    R = np.sum(shared_weights) / np.sum(total_weights)
    return R


def algo(dataset, stop_words):
    """End-to-end pipeline for one dataset.

    Loads data/<dataset>/{train,test}.tsv, builds hand-crafted similarity
    features, trains a 5-fold LightGBM classifier and writes the averaged
    test predictions to ./result/<dataset>.tsv.

    Assumes tab-separated files with text_a, text_b[, label] columns —
    TODO confirm the file layout for each dataset.
    """
    train = pd.read_csv('data/' + dataset + '/train.tsv', sep='\t', names=['text_a', 'text_b', 'label'])
    test = pd.read_csv('data/' + dataset + '/test.tsv', sep='\t', names=['text_a', 'text_b', 'label'])


    # NOTE(review): labels are compared as the strings '0'/'1', which only
    # works when the column was read as object dtype (i.e. it contains
    # non-numeric junk values) — confirm against the raw files.
    if len(set(train['label'])) >2:
        train = train[train['label'].isin(['0', '1'])]
        train['label'] = train['label'].astype('int')

    # Test labels are unknown; use -1 as a placeholder.
    test['label'] = -1
    train = train.dropna()
    test = test.dropna()
    # Character counts, segmentation, token lengths and overlap ratios.
    train = data_anaysis(train)
    test = data_anaysis(test)
    # Word-overlap similarity.
    train['word_match'] = train.apply(lambda row: word_match_share(row, stop_words), axis=1)
    test['word_match'] = test.apply(lambda row: word_match_share(row, stop_words), axis=1)
    # TF-IDF-weighted similarity. NOTE(review): count_corpus appends to a
    # module-level list, so train entries may be duplicated in the weight
    # counts here — verify.
    corpus = []
    corpus += count_corpus(train, stop_words)
    corpus += count_corpus(test, stop_words)
    words = (" ".join(corpus)).lower().split()
    counts = Counter(words)
    weights = {word: get_weight(count) for word, count in counts.items()}
    train['tfidf_word_match'] = train.apply(lambda row: tfidf_word_match_share(row, stop_words, weights), axis=1)
    test['tfidf_word_match'] = test.apply(lambda row: tfidf_word_match_share(row, stop_words, weights), axis=1)

    # Final derived features: absolute length differences.
    train['text_len_diff'] = abs(train['text_a_len'] - train['text_b_len'])
    train['word_len_diff'] = abs(train['words_a_len'] - train['words_b_len'])

    test['text_len_diff'] = abs(test['text_a_len'] - test['text_b_len'])
    test['word_len_diff'] = abs(test['words_a_len'] - test['words_b_len'])

    # Modelling.
    fretures = ['text_len_diff', 'word_len_diff', 'word_match', 'tfidf_word_match']
    X = train[fretures]
    y = train['label']
    test_features = test[fretures]

    model = lgb.LGBMClassifier(num_leaves=128,
                               max_depth=10,
                               learning_rate=0.01,
                               n_estimators=2000,
                               subsample=0.8,
                               feature_fraction=0.8,
                               reg_alpha=0.5,
                               reg_lambda=0.5,
                               random_state=2022,
                               metric='auc',
                               boosting_type='gbdt',
                               subsample_freq=1,
                               bagging_fraction=0.8)
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2022)
    prob = []
    for k, (train_index, test_index) in enumerate(skf.split(X, y)):
        X_train, X_val = X.iloc[train_index], X.iloc[test_index]
        y_train, y_val = y.iloc[train_index], y.iloc[test_index]
        # Train this fold (the same estimator object is refit each time).
        model = model.fit(X_train,
                          y_train,
                          eval_set=[(X_val, y_val)],
                          eval_metric='auc',
                          verbose=True)
        # Predict class probabilities on the real test set.
        test_y_pred = model.predict_proba(test_features)
        prob.append(test_y_pred)

    # Average the 5 folds' probabilities and threshold at 0.5
    # (column 0 is the probability of class 0).
    new_result = (prob[0] + prob[1] + prob[2] + prob[3] + prob[4]) / 5
    final_result = []
    for i in new_result:
        if i[0] > 0.5:
            final_result.append(0)
        else:
            final_result.append(1)

    res = pd.DataFrame()
    res['prediction'] = final_result
    res['index'] = res.index
    res.to_csv('./result/' + dataset + '.tsv', index=False, sep='\t')


if __name__ == '__main__':
    # Run the full pipeline once per dataset, sharing one stopword list.
    stop_words = get_stopwords()
    for dataset in ['bq_corpus', 'lcqmc', 'paws-x-zh']:
        algo(dataset, stop_words)
