from util import *
from urllib import urlencode
from rankLib import trainModel, testOne
import time
import json
import os

def getInfo(query):
    '''
        Collect person, paper and conference information for a query.
        Queries the remote search service (through Connector) three
        times -- experts, publications, conferences -- keeping only the
        attributes listed in Settings.txt.  Papers are kept only when at
        least one of their authors is among the retrieved experts.
        Input:
            query: query string
        Output: [person_dict, paper_dict, conf_dict]; each maps an id to
            a dict of attribute -> value (missing attributes -> None).
    '''
    person_dict = {}
    paper_dict = {}
    conf_dict = {}

    settings = loadSettings()
    c = Connector()

    '''
    get person info
    '''
    if 'person_attri' in settings:
        person_attri = settings['person_attri']
        if type(person_attri) != type([]):
            # Bug fix: the original wrapped a single attribute name in a
            # SET literal ({x}); a list was clearly intended, and a set
            # raises TypeError for unhashable settings values.
            person_attri = [person_attri]
        param_dict = {'u': 'vivo', 'q': query,
                      'start': settings.get('person_start', 0),
                      'num': settings.get('person_num', 10)}
        ret = c.search('search-expert', param_dict)
        if 'Results' in ret:
            for result in ret['Results']:
                person_id = result['Id']
                value = {}
                for attri in person_attri:
                    value[attri] = result.get(attri)  # missing -> None
                person_dict[person_id] = value

    '''
    get paper info
    '''
    if 'paper_attri' in settings:
        paper_attri = settings['paper_attri']
        if type(paper_attri) != type([]):
            paper_attri = [paper_attri]  # bug fix: was a set literal
        param_dict = {'u': 'vivo', 'q': query,
                      'start': settings.get('paper_start', 0),
                      'num': settings.get('paper_num', 10)}
        ret = c.search('search-publication', param_dict)
        if 'Results' in ret:
            for result in ret['Results']:
                # Keep the paper only when at least one author was
                # retrieved as an expert above.
                authors = result.get('AuthorIds', [])
                if not any(author in person_dict for author in authors):
                    continue
                value = {}
                for attri in paper_attri:
                    value[attri] = result.get(attri)  # missing -> None
                paper_dict[result['Id']] = value

    '''
    get conference info
    '''
    if 'conf_attri' in settings:
        conf_attri = settings['conf_attri']
        if type(conf_attri) != type([]):
            conf_attri = [conf_attri]  # bug fix: was a set literal
        param_dict = {'u': 'vivo', 'q': query,
                      'start': settings.get('conf_start', 0),
                      'num': settings.get('conf_num', 10)}
        ret = c.search('search-conference', param_dict)
        if 'Results' in ret:
            for result in ret['Results']:
                conf_id = result['Id']
                value = {}
                # Fetch the detailed conference record; the service
                # returns a one-element list.
                detail_ret = c.search('jconf/%d' % conf_id, {'u': 'vivo'})[0]
                for attri in conf_attri:
                    value[attri] = detail_ret.get(attri)  # missing -> None
                '''
                    get papers of conference
                '''
                if settings.get('conf_paper_include') == 1:
                    paper_param = {
                        'u': 'vivo',
                        'startYear': settings.get('conf_paper_start_year', 2010),
                        'endYear': settings.get('conf_paper_end_year', 2013),
                        'limStart': settings.get('conf_paper_start', 0),
                        'limEnd': settings.get('conf_paper_end', 10),
                    }
                    value['paper'] = c.search('publication/jconf/%d' % conf_id,
                                              paper_param)
                conf_dict[conf_id] = value

    '''
        Do something to get all useful data.
        You could add parameters in Settings.txt
    '''
    return [person_dict, paper_dict, conf_dict]
    
def extractFeature(query, field, person_dict, paper_dict, conf_dict):
    '''
        Given the information of people, papers and conferences,
        evaluate feature vector for each expert.
        Input:
            query: query string
            field: field name chosen for the query (key into MS data)
            person_dict, paper_dict, conf_dict: returned by getInfo()
        Output: feature dict, key is expert id and value is a list of
            float numbers; slot 0 is the label, filled in by train().
    '''

    feature_dict = {}

    all_cite_count = {}
    first_cite_count = {}
    paper_count = {}

    for person_id in person_dict:
        feature_dict[person_id] = [0.0]
        all_cite_count[person_id] = 0.0
        first_cite_count[person_id] = 0.0
        paper_count[person_id] = 0.0

    # Aggregate per-person citation statistics over all papers.
    for paper_id, paper_attr in paper_dict.items():
        author_list = paper_attr.get('AuthorIds')
        # Bug fix: getInfo() stores None for a missing 'Citedby', which
        # previously raised a TypeError on the += below.  Treat it as 0.
        cite_num = paper_attr.get('Citedby') or 0
        if author_list:
            for pid in author_list:
                if pid in person_dict:
                    paper_count[pid] += 1
                    all_cite_count[pid] += cite_num
            # First author gets extra credit for the paper's citations.
            if author_list[0] in person_dict:
                first_cite_count[author_list[0]] += cite_num

    direct_attr = ['publication', 'citation', 'field_pub', 'field_citation']#, 'field_rate']
    special_attr = ['field_rate_order', 'field_citation_order']
    single_attr_list = loadSingleAttrList()

    ms_data = loadMsData()
    ms_field = ms_data.get(field)
    order_bound = 3000  # sentinel rank for experts missing an order attribute

    am_attr = ['a', 'c', 'd', 'g', 'h', 'p', 's', 'r']
    am_data = loadAminerData()

    for pid, attrs in feature_dict.items():
        attrs.append(paper_count[pid])
        attrs.append(first_cite_count[pid])
        attrs.append(all_cite_count[pid])
        profile = person_dict.get(pid)
        if ms_field is not None and str(pid) in ms_field:
            ms_prof = ms_field[str(pid)]
            for da in direct_attr:
                attrs.append(float(ms_prof[da]) if da in ms_prof else 0.0)
            for sa in special_attr:
                attrs.append(float(profile[sa]) if sa in profile
                             else float(order_bound))
        else:
            attrs.extend([0.0] * len(direct_attr))
            attrs.extend([float(order_bound)] * len(special_attr))

        if single_attr_list is not None:
            for single_attr in single_attr_list:
                if str(pid) in single_attr:
                    attrs.append(float(single_attr[str(pid)]))
                else:
                    attrs.append(0.0)
        # Bug fix: the original else-branch referenced the undefined name
        # `single_attr` (NameError) when single_attr_list is None.  With
        # no tables loaded there is nothing to append, and every expert
        # gets the same (shorter) vector, so lengths stay consistent.

        for ama in am_attr:
            rec = am_data.get(str(pid))
            if rec is not None and ama in rec:
                attrs.append(float(rec[ama]))
            else:
                attrs.append(0.0)

    return feature_dict

def linearNormalizeByDim(feature_dict, dim):
    '''
        Divide component `dim` of every feature vector by the maximum
        value of that component across all vectors, in place.
        Only a positive value can become the divisor: if the maximum
        found is 0.0 (all values <= 0) the dict is returned unchanged.
        Input:
            feature_dict: id -> list of floats (all longer than dim)
            dim: index of the component to normalize
        Output: the same (mutated) feature_dict.
    '''
    # Renamed locals: the original shadowed the builtins max, id, list.
    peak = 0.0
    for vec in feature_dict.values():
        if peak < vec[dim]:
            peak = vec[dim]
    if peak == 0.0:
        return feature_dict

    for vec in feature_dict.values():
        # Lists are mutated in place; no re-assignment into the dict is
        # needed (the original's feature_dict[id] = list was a no-op).
        vec[dim] /= peak

    return feature_dict

def normalize(feature_dict):
    '''
        Linearly normalize every dimension of every feature vector
        (per-dimension max scaling) via linearNormalizeByDim().
        Bug fixes: an empty dict no longer raises IndexError, and the
        dimension count is read without the Python-2-only .items()[0].
        Input:  feature_dict: id -> list of floats (equal lengths)
        Output: the normalized feature_dict.
    '''
    if not feature_dict:
        return feature_dict
    dim = len(next(iter(feature_dict.values())))
    for i in range(dim):
        feature_dict = linearNormalizeByDim(feature_dict, i)
    return feature_dict

def rerank(candidate):
    '''
        Input:  the original list, with several id of experts.
        Output: the result list, 100 experts at most
    '''
    # Debug output kept from the original; parenthesizing the single
    # argument prints identically on Python 2 and is valid Python 3.
    print(candidate)

    if len(candidate) > 100:
        return candidate[:100]
    return candidate

def evaluate_feature(query, add_dict = None):
    stamp = time.time()
    [person_dict, paper_dict, conf_dict] = getInfo(query)
    #print type(person_dict.keys()[0])
    #print 'information retrival done in %.3fs' % (time.time() - stamp)
    stamp = time.time()

    qv = []
    for paper_id, paper_info in paper_dict.items():
        qv.append(str(paper_id))

    fvd = loadFieldVector()
    #print 'len(fvd) = ', len(fvd)
    #print 'len(qv) = ', len(qv)
    max = 0.0
    field = None
    for f, v in fvd.items():
        cos = binCosSim(v, qv)
        if cos > max:
            max = cos
            field = f
    if field == 'scientific computing' and 'high' in query and 'performance' in query:
        field = 'distributed & parallel computing'
    print query, ' -- ', field, cos

    #print 'step 1 number = %d' % len(person_dict)
    '''
    if query != field:
        [ps, pp, cf] = getInfo(field)
        for key, value in ps.items():
            person_dict[key] = value
        for key, value in pp.items():
            paper_dict[key] = value
        for key, value in cf.items():
            conf_dict[key] = value
    '''
    if query != field:
        [person_dict, paper_dict, conf_dict] = getInfo(field)

    if add_dict != None:
        for id in add_dict.keys():
            if not id in person_dict:
                person_dict[id] = {}
    

    print 'step 2 number = %d' % len(person_dict)
    
    ms_data = loadMsData()
    settings = loadSettings()
    ms_max_field_order = 100
    ms_max_cite_order = 100

    if 'ms_max_field_order' in settings:
        ms_max_field_order = int(settings['ms_max_field_order'])
    if 'ms_max_cite_order' in settings:
        ms_max_cite_order = int(settings['ms_max_cite_order'])
        
    if field in ms_data:
        field_dict = ms_data.get(field)
        for pid, profile in field_dict.items():
            field_rate_order = 3000
            field_cite_order = 3000
            if 'field_rate_order' in profile and profile['field_rate_order'] > 0:
                field_rate_order = profile['field_rate_order']
            if 'field_citation_order' in profile and profile['field_citation_order'] > 0:
                field_cite_order = profile['field_citation_order']
            if field_rate_order > ms_max_field_order and field_cite_order > ms_max_cite_order:
                continue
            #print field_rate_order, field_cite_order
            if not pid in person_dict:
                person_dict[pid] = {}
            for key, value in profile.items():
                person_dict[pid][key] = value
                
    
    print 'step 3 number = %d' % len(person_dict)
    '''
    print len(person_dict)
    print len(paper_dict)
    print len(conf_dict)
    '''
    #print json.dumps(paper_dict, indent = 2)
    feature_dict = extractFeature(query, field, person_dict, paper_dict, conf_dict)
    #print 'feature extraction done in %.3fs' % (time.time() - stamp)
    stamp = time.time()
    #feature_dict = normalize(feature_dict)
    #print 'normalization done in %.3fs' % (time.time() - stamp)
    return feature_dict

def rank(query):
    '''
        Rank experts for a query with the trained model.
        Input:  query: query string
        Output: list of expert ids, 100 at most.
    '''
    # NOTE(review): the original created an unused Connector() and
    # loaded settings it never read; both removed as dead locals.
    feature_dict = evaluate_feature(query)

    result = testOne(feature_dict)
    if len(result) > 100:
        result = result[:100]
    return result

def train():
    settings = loadSettings()
    dir = settings.get('train_dir')
    if dir == None:
        dir = 'train_dir'
    queryList = os.listdir(dir)
    train_data = []
    for q in queryList:
        '''
        if q != 'high_performance_computing':
            print 'miss'
            continue
        '''
        f = open(dir + os.sep + q, 'r')
        pool = {}
        for line in f.readlines():
            seg = line.strip().split('\t')
            if len(seg) < 3:
                continue
            person_id = int(seg[1])
            pool[person_id] = float(seg[0])
                
        query = q.replace('_', ' ')
        feature_dict = evaluate_feature(query, pool)
        #feature_dict = evaluate_feature(query)
        
        hit = 0
        for person_id, value in pool.items():
            if person_id in feature_dict:
                feature_dict[person_id][0] = float(seg[0])
                hit += 1
            
        print 'recall = ', 1.0 * hit / len(pool)
        print 'candidate number = %d' % len(feature_dict)
        train_data.append(feature_dict)
    genArffFile(train_data, 'train_data.arff')
    trainModel(train_data, True)
    

if __name__ == '__main__':
    begin = time.time()
    #print rank('data mining')
    train()
    end = time.time()
    print 'overall time consumption: %.3fs' % (end - begin)
