# -*- coding: utf-8 -*-
"""
Created on Fri May 31 16:27:41 2019

@author: gby
"""

import pandas as pd
import numpy as np
from keras.models import load_model

# Pretrained matching neural network, loaded once at import time.
# Used as the default scorer in seed_generation below.
M = load_model('../match_models/nn_large.h5')



def seed_generation(newuser_candidates_features,match_model=None,extend_base_size=0.3,seed_size=0.1):
    """
    Draw a random "extend base" from a new user's candidates, score each
    sampled candidate with the match model, and promote the top-ranked
    ids to seeds.

    Parameters
    ----------
    newuser_candidates_features : pandas.DataFrame
        Standardized feature table produced by the featureExtraction step.
        Columns are 'id1', 'id2' plus 13 feature columns; 'id1' is the new
        user and holds a single value across the whole table.
    match_model : object, optional
        Matching model exposing ``predict``; its input is the (N, 13)
        feature table with 'id1'/'id2' dropped. Defaults to the pretrained
        module-level network ``M`` (resolved lazily so supplying a custom
        model never touches the default).
    extend_base_size : float
        Fraction of all candidates sampled into the extend base (default 30%).
    seed_size : float
        Fraction of the extend base promoted to seeds (default 10%).

    Returns
    -------
    list
        ``[extend_base_rank, seeds]`` where ``extend_base_rank`` is a
        DataFrame with columns 'id2' and 'score' sorted by score descending,
        and ``seeds`` is the 'id2' Series of its top-ranked rows.
    """
    if match_model is None:
        # Lazy default: a signature default of M would be evaluated at
        # import time even when the caller supplies a custom model.
        match_model = M

    # Sample the extend base WITHOUT replacement. The previous
    # np.random.randint sampled with replacement, letting the same
    # candidate appear multiple times and shrinking the effective base.
    num_candidates = len(newuser_candidates_features)
    base_size = int(extend_base_size * num_candidates)
    random_indexs = np.random.choice(num_candidates, size=base_size, replace=False)
    extend_base_features = newuser_candidates_features.take(random_indexs)

    # Score every extend-base candidate and rank them best-first.
    id2s = extend_base_features.id2.reset_index(drop=True)
    scores = match_model.predict(extend_base_features.drop(['id1','id2'],axis=1))
    scores = pd.DataFrame(scores,columns=['score']).reset_index(drop=True)
    extend_base_rank = pd.concat([id2s,scores],axis=1).sort_values(by='score',ascending=False)

    # Derive the seed count from the ACTUAL extend-base size instead of
    # re-multiplying the two fractions, so rounding stays consistent with
    # the sample that was really drawn.
    num_seeds = int(seed_size * base_size)
    seeds = extend_base_rank[:num_seeds].id2
    return [extend_base_rank,seeds]



def get_results(candidates_No,new_user_id):
    """
    Load the extracted feature table for one (candidates batch, new user)
    pair, derive its extend base and seeds, and persist both to CSV.

    Parameters
    ----------
    candidates_No : int
        Batch number of the candidates file.
    new_user_id : int
        Id of the new user.

    Returns
    -------
    list
        ``[extend_base_rank, seeds]`` as produced by ``seed_generation``.
    """
    feature_path = '../data/extracted_features/Candidates_%d-NewId_%d.csv' % (candidates_No, new_user_id)
    features = pd.read_csv(feature_path, index_col=0)

    extend_base_rank, seeds = seed_generation(features)

    out_prefix = '../data/extracted_features/extendBase_seeds/Candidates_%d-NewId_%d' % (candidates_No, new_user_id)
    extend_base_rank.to_csv(out_prefix + '_extend_base.csv')
    pd.DataFrame(seeds).to_csv(out_prefix + '_seeds.csv')

    return [extend_base_rank, seeds]


# ===================== Extract extend_base and seeds for the candidate batches whose features are already extracted.
"""
Candidates batches with completed feature extraction so far: 0,1,2,3,4
Do NOT re-run batches whose extend_base and seeds were already extracted!!
Every run samples randomly, so rerunning overwrites results with different ones!!!
------------log--------------
6.1 Finished extraction for batches 0,1,2.
6.4 Finished extraction for batches 3,4.
6.5 Finished extraction for batches 5,6.
6.9 Finished extraction for batches 7,8,9.
"""

# Take the first 100 users from the 100+-friends list as the "new users".
new_users = pd.read_csv('../data/users_with_100_friends.csv')[:100].id

# Extract extend base + seeds for every (candidates batch, new user) pair.
# NOTE(review): only batches 0 and 1 are processed here, while the log above
# says batches up to 9 are done — confirm the batch list before re-running,
# since sampling is random and a rerun overwrites previous outputs.
for canNo in [0,1]:
    print('candidates_%d'%canNo)
    for new_user in new_users:
        extend_base_rank,seeds = get_results(canNo,new_user)













