# -*- coding: utf-8 -*-
"""
Created on Thu May 23 22:00:12 2019

@author: gby
"""

import pandas as pd
import re
import math
import numpy as np

# ============================================binary feature(same feature)=====================
def get_city(region):
    """Return the last comma-separated token of *region* (the city part)."""
    return region.rsplit(',', 1)[-1]

def binary_feature(users_att_base,user_pairs,atts,print_title=True): # roughly 0.1s per pair
    """
    Build one "IS-<att>-SAME" 0/1 column per attribute in *atts* for every
    user pair: 1 when the two users' attribute values are equal, else 0.

    Parameters
    ----------
    users_att_base : DataFrame with a 'user_id' column plus the columns in *atts*.
    user_pairs : DataFrame with columns 'id1', 'id2'.
    atts : list of attribute column names to compare for equality.
    print_title : print a progress line before extracting.

    Returns
    -------
    DataFrame of int16 0/1 values, columns named "IS-<att>-SAME".
    """
    if print_title:
        print("Extracting binary_features......")

    same_cols = ["IS-" + att + "-SAME" for att in atts]

    is_same_rows = []
    for row in user_pairs.iterrows():
        id1,id2 = row[1]['id1'],row[1]['id2']
        # Pull each user's whole attribute vector at once and compare the two
        # vectors element-wise with numpy's broadcast ==.
        id1_att_vec = users_att_base[users_att_base.user_id==id1][atts]
        id2_att_vec = users_att_base[users_att_base.user_id==id2][atts]
        is_same_vec = np.array(id1_att_vec) == np.array(id2_att_vec)
        try:
            # Empty selection (unknown user id) yields a 0-row array; [0] raises.
            is_same_rows.append(is_same_vec[0])
        except Exception as e:
            print('binary_feature ERROR for user-pair:%d,%d'%(id1,id2))
            print(e)
            is_same_rows.append(np.array([False]*len(atts)))
    # FIX: pass the flat list directly — the original wrote columns=[SAME_atts],
    # wrapping it in an extra list and producing MultiIndex column labels.
    return pd.DataFrame(np.array(is_same_rows).astype(np.int16), columns=same_cols)

# ==============================================gap feature =================================

def get_height_weight(body):
    """
    Parse height (cm) and weight (kg) out of a free-text body description.

    Returns [height, weight] as floats; either value is -1.0 when it cannot
    be found, when *body* is NaN, or when parsing raises.
    """
    height, weight = -1., -1.
    try:
        if body == body:  # NaN != NaN — skip missing values
            h_match = re.findall(r"(\d+\.?\d*) cm", body)
            w_match = re.findall(r"(\d+\.?\d*) kg", body)
            height = float(h_match[0]) if h_match else height
            weight = float(w_match[0]) if w_match else weight
    except Exception as e:
        print('ERRORRRRRRRRRRRRRRRRR:', e)
        print('**', body)
    return [height, weight]

def get_age(age):
    """Return *age* unchanged, or -1 when it is NaN (NaN != NaN)."""
    return age if age == age else -1

def gap_feature(users_att_base,user_pairs,print_title=True):
    """
    Absolute height / weight / age gaps for every user pair.

    Parameters
    ----------
    users_att_base : DataFrame with 'user_id', 'body' (free text with
        "<n> cm" / "<n> kg") and 'AGE' columns.
    user_pairs : DataFrame with columns 'id1', 'id2'.
    print_title : print a progress line before extracting.

    Returns
    -------
    DataFrame with columns 'height-GAP', 'weight-GAP', 'age-GAP';
    -1 marks a gap that could not be computed (missing/unparsable value).
    """
    if print_title:
        print("Extracting gap_features......")
    gap_feature_dict = {'height-GAP': [], 'weight-GAP': [], 'age-GAP': []}
    for i, row in enumerate(user_pairs.iterrows()):
        try:
            id1,id2 = row[1]['id1'],row[1]['id2']
            body1 = list(users_att_base[users_att_base.user_id==id1].body)[0]
            body2 = list(users_att_base[users_att_base.user_id==id2].body)[0]
            age1 = list(users_att_base[users_att_base.user_id==id1].AGE)[0]
            age2 = list(users_att_base[users_att_base.user_id==id2].AGE)[0]
            [h1,w1] = get_height_weight(body1)
            [h2,w2] = get_height_weight(body2)
            a1 = get_age(age1)
            a2 = get_age(age2)
            # -1 is the sentinel for "unknown": propagate it instead of a bogus gap.
            gap_feature_dict['height-GAP'].append(-1 if h1 == -1 or h2 == -1 else abs(h1-h2))
            gap_feature_dict['weight-GAP'].append(-1 if w1 == -1 or w2 == -1 else abs(w1-w2))
            gap_feature_dict['age-GAP'].append(-1 if a1 == -1 or a2 == -1 else abs(a1-a2))
        except Exception as e:
            print('gap_feature ERROR for user-pair:%d,%d'%(id1,id2))
            print(e)
            # FIX: pad EVERY column to the current row count — the original
            # appended only to 'age-GAP', desyncing the list lengths and
            # crashing the final pd.DataFrame(...) call.
            for col in gap_feature_dict.values():
                while len(col) < i + 1:
                    col.append(-1)
    return pd.DataFrame(gap_feature_dict)

# ===========================================similarity feature:=====================================

def jaccard_similarity(text1,text2):
    """
    Jaccard similarity of two texts: |intersection| / |union| of their
    space-split token sets, after stripping common punctuation.
    """
    tokens1 = set(re.sub('[,.:;`\'\"?!]','',text1).split(' '))
    tokens2 = set(re.sub('[,.:;`\'\"?!]','',text2).split(' '))
    # ''.split(' ') == [''] so the union is never empty — no ZeroDivisionError.
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)

def similarity_feature(users_att_base,user_pairs,atts,print_title=True):
    """
    One "<att>-Similarity" column per attribute in *atts*, holding the
    Jaccard text similarity of the two users' attribute values for each
    user pair. Missing (NaN) values are treated as empty strings; any
    per-pair error yields 0 for that cell.
    """
    if print_title:
        print("Extracting similarity_feature......")
    features = {att + "-Similarity": [] for att in atts}

    for row in user_pairs.iterrows():
        id1,id2 = row[1]['id1'],row[1]['id2']
        for att in atts:
            col = att + "-Similarity"
            try:
                # similarity attributes are expected to be plain strings
                text1 = list(users_att_base[users_att_base.user_id==id1][att])[0]
                text2 = list(users_att_base[users_att_base.user_id==id2][att])[0]
                # NaN != NaN — substitute an empty string for missing text
                if text1 != text1:
                    text1 = ''
                if text2 != text2:
                    text2 = ''
                features[col].append(jaccard_similarity(text1,text2))
            except Exception as e:
                print('similarity_feature ERROR for user-pair:%d,%d'%(id1,id2))
                print(e)
                features[col].append(0)
    return pd.DataFrame(features)



# ===========================================Latent feature: (包含 num_common_atts)====================

def latent_feature(users_att_base,friends_dict,user_pairs,alpha=0.05,beta=0.05):
    """
    Latent-link score plus the number of common attributes for each pair.

    The latent feature is directional: the FIRST user of every pair is
    treated as the "new user". For pair (id1, id2) we compare id1's
    attribute row against each of id2's friends' rows; matching cells count
    as latent links, mismatches as disconnections, combined via
    1 / (1 + exp(-beta * (links - alpha * disconnections))).

    num_common_features is simply the count of equal cells between id1's
    and id2's own attribute rows.

    Parameters
    ----------
    users_att_base : DataFrame with a 'user_id' column plus attribute columns.
    friends_dict : dict mapping user_id -> list of friend ids (never mutated).
    user_pairs : DataFrame with columns 'id1', 'id2'.
    alpha, beta : parameters of the latent-score formula.

    Returns
    -------
    DataFrame with columns 'latent_score' and 'num_common_features'.
    """
    feature_dict = {'latent_score':[],'num_common_features':[]}
    if user_pairs.empty:  # guard: pd.concat([]) below would raise
        return pd.DataFrame(feature_dict)

    id1_rows = []
    id2_rows = []
    for row in user_pairs.iterrows():
        id1,id2 = row[1]['id1'],row[1]['id2']
        id1_atts = users_att_base[users_att_base.user_id==id1]
        id1_rows.append(id1_atts)
        id2_rows.append(users_att_base[users_att_base.user_id==id2])
        # FIX: build a filtered COPY of id2's friend list — the original
        # called friends_dict[id2].remove(id1), permanently mutating the
        # caller's friendship data on every call.
        id2_friends = [f for f in friends_dict[id2] if f != id1]
        length = len(id2_friends)

        if length == 0: # id2 has no (other) friends: latent score defaults to 0
            feature_dict['latent_score'].append(0)
        else:
            # Stack id1's row `length` times against all friend rows and
            # compare element-wise in one shot; equal cells are latent links.
            id1_matrix = np.array(pd.concat([id1_atts]*length))
            friends_atts = [users_att_base[users_att_base.user_id==f] for f in id2_friends]
            friends_matrix = np.array(pd.concat(friends_atts))
            same_matrix = id1_matrix == friends_matrix
            num_latent_links = np.sum(same_matrix==1)
            num_disconnections = np.sum(same_matrix==0)
            latent_score = 1/(1+(math.exp(-beta*(num_latent_links-alpha*num_disconnections))))
            feature_dict['latent_score'].append(latent_score)

    # num_common_features: equal cells between id1's and id2's own rows.
    nca_matrix = np.array(pd.concat(id1_rows)) == np.array(pd.concat(id2_rows))
    for row in nca_matrix:
        feature_dict['num_common_features'].append(np.sum(row==1))

    return pd.DataFrame(feature_dict)



# ========================多线程，重写函数，使得可以获取目标函数输出：==================
from threading import Thread
class MyThread(Thread):
    """
    Thread subclass that captures the target callable's return value so it
    can be retrieved with get_result() after join().
    """
    def run(self):
        # default in case the target raises (or there is no target)
        self.result = None
        try:
            if self._target:
                # FIX: call the target exactly ONCE, forwarding kwargs too —
                # the original invoked it twice and dropped **kwargs on the
                # call whose result it kept.
                self.result = self._target(*self._args, **self._kwargs)
        finally:
            # mirror threading.Thread.run: drop references to avoid leaks
            del self._target, self._args, self._kwargs

    def get_result(self):
        """Return the target's return value (None if it raised / never ran)."""
        return self.result


#========================================Final================================================
#========================================整合所有特征抽取函数================================================

# Default attribute lists for get_all_features:
# attributes compared for exact equality (binary "is-same" features)
binary_atts = ['gender','region','spoken_languages']
# attributes compared via Jaccard similarity of their text values
sim_atts = ['hobbies','I_like_movies', 'I_like_music', 'I_like_books', 'I_mostly_like_listening_to_music']

def get_all_features(users_att_base,friends_dict,user_pairs,binary_atts=binary_atts,sim_atts=sim_atts,alpha=0.05,beta=0.05,print_title=False):
    """
    Extract every feature family for the given user pairs, running the four
    extractors concurrently on worker threads.

    Required parameters
    -------------------
    users_att_base : DataFrame of users and their attributes (must contain
        the agreed attribute columns, including 'user_id', 'body', 'AGE').
    friends_dict : dict mapping user_id -> list of friend ids.
    user_pairs : DataFrame of pairs to featurize, columns 'id1', 'id2'.

    Optional parameters
    -------------------
    binary_atts : attributes compared for equality
        (default ['gender','region','spoken_languages']).
    sim_atts : attributes compared by Jaccard similarity (default
        ['hobbies','I_like_movies','I_like_music','I_like_books',
         'I_mostly_like_listening_to_music']).
    alpha, beta : latent-score formula parameters (default 0.05 each).
    print_title : print progress while extracting (default False).

    Returns
    -------
    DataFrame with 'id1', 'id2' and all extracted feature columns
    (13 features in total).
    """
    # Reset indexes so the final axis=1 concat aligns rows positionally.
    users_att_base = users_att_base.reset_index(drop=True)
    user_pairs = user_pairs.reset_index(drop=True)
    # One worker thread per feature family:
    thd1 = MyThread(target=binary_feature, args=(users_att_base,user_pairs,binary_atts,print_title))
    thd2 = MyThread(target=gap_feature, args=(users_att_base,user_pairs,print_title))
    thd3 = MyThread(target=similarity_feature, args=(users_att_base,user_pairs,sim_atts,print_title))
    # FIX: forward alpha/beta — they were hard-coded to 0.05 before, so the
    # function's alpha/beta parameters were silently ignored.
    thd4 = MyThread(target=latent_feature, args=(users_att_base,friends_dict,user_pairs,alpha,beta))
    threads = [thd1, thd2, thd3, thd4]
    for thd in threads:
        thd.start()
    for thd in threads:
        thd.join()

    frames = [thd.get_result() for thd in threads]
    return pd.concat([user_pairs] + frames, axis=1)
