# -*- coding: utf-8 -*-
import pickle
from data_preprocessing import return_data_frame
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')

def return_top_k(df_after, mid_data, question_id_1, id_all, filter_model, model, k=10):
    """Return the top-k candidate questions most similar to ``question_id_1``.

    Parameters
    ----------
    df_after : pandas.DataFrame
        Feature matrix with an 'id' column and one row per question.
    mid_data : pandas.DataFrame
        Labeled pairs with columns 'questionID_1', 'questionID_2', 'label'.
        Pairs already labeled 1 are forced to probability 1.
    question_id_1 : hashable
        Id of the target question.
    id_all : iterable
        Ids of all candidate questions (the target itself is skipped).
    filter_model : object
        Unused here; kept for interface compatibility with callers.
    model : object
        Classifier exposing ``predict_proba``; class 1 = "duplicate pair".
    k : int, optional
        Number of results to return (default 10, the original behavior).

    Returns
    -------
    list of (question_id, probability) tuples, sorted by probability descending.
    """
    # Feature vector of the target question (drop the id column).
    x_1 = df_after.loc[df_after['id'] == question_id_1].copy()
    x_1.drop('id', axis=1, inplace=True)
    x_1 = x_1.values

    # Loop-invariant (was recomputed every iteration): ids already labeled
    # as duplicates of question_id_1. A set gives O(1) membership tests.
    df_tmp = mid_data.loc[mid_data['questionID_1'] == question_id_1]
    question_id_already = set(df_tmp.loc[df_tmp['label'] == 1]['questionID_2'].values)

    proba_dict = dict()
    for question_id_2 in id_all:
        if question_id_1 == question_id_2:
            continue
        if question_id_2 in question_id_already:
            # Known duplicate: certainty 1, no need to run the model.
            proba_dict[question_id_2] = 1
            continue
        # Feature vector of the candidate question.
        x_2 = df_after.loc[df_after['id'] == question_id_2].copy()
        x_2.drop('id', axis=1, inplace=True)
        x_2 = x_2.values
        # Element-wise absolute difference is the pair feature.
        x = np.abs(x_1 - x_2)
        # Probability that the two questions are duplicates (class 1).
        # (The original also called model.predict(x) and discarded the result.)
        proba_dict[question_id_2] = model.predict_proba(x)[0][1]

    return sorted(proba_dict.items(), key=lambda item: item[1], reverse=True)[:k]

    
    
if __name__ == '__main__':
    # Load the trained similarity classifier.
    # NOTE(review): pickle.load assumes these artifacts are trusted local files;
    # never point these paths at untrusted input.
    path = r'../附件/model.pkl'
    with open(path, 'rb') as f:
        model = pickle.load(f)

    # Load the feature-filtering model.
    filter_path = r'../附件/filter_model.pkl'
    with open(filter_path, 'rb') as f:
        filter_model = pickle.load(f)

    path_label = r'../附件/附件2.csv'
    path_text = r'../附件/附件1.csv'

    _, id_labels, duplicates = return_data_frame(path_label,
                                                 path_text,
                                                 if_other=False)

    with open(r'../附件/label_text.pkl', 'rb') as f:
        df_text = pickle.load(f)
    id_all = list(df_text.id.values)

    # BUG FIX: was pd.read_csv(filter_path) — filter_path is a pickle file,
    # not a CSV; the label CSV (path_label) was clearly intended.
    df_data_label = pd.read_csv(path_label)

    # NOTE(review): update_features is not defined or imported in this file —
    # presumably it lives in data_preprocessing; confirm and add the import.
    df_after = update_features(df_text, filter_model)

    path = r'../附件/mid_data.csv'
    mid_data = pd.read_csv(path)

    question_id = 90834
    # BUG FIX: the original call dropped the first two positional arguments
    # (df_after, mid_data), so the arguments did not match return_top_k's
    # signature (question_id landed in df_after's slot, etc.).
    top_k = return_top_k(df_after, mid_data, question_id, id_all,
                         filter_model, model)

    print(top_k)
            
            
