# -*- coding: utf-8 -*-
import urllib2
import json, time, threading
import math, operator
from urllib import urlencode

# Module-level caches, lazily populated by the load* functions below.
settings = None          # dict parsed from Settings.txt (see loadSettings)
ms_data = None           # JSON object loaded from the 'ms_data' file
field_vec = None         # field name -> list of paper-id strings (from 'paper_vector')
single_attr_list = None  # list of JSON objects from the 'single_attr' files
am_data = None           # JSON object loaded from the 'am_data' file

# Arnetminer research field names; the list index is used as the field id
# elsewhere (e.g. in the commented-out paper_dist.txt generation code).
field_map = [
    'algorithms & theory',
    'security & privacy',
    'hardware & architecture',
    'software engineering',
    'artificial intelligence',
    'machine learning & pattern recognition',
    'data mining',
    'information retrieval',
    'natural language & speech',
    'graphics',
    'computer vision',
    'human-computer interaction',
    'multimedia',
    'networks & communications',
    'world wide web',
    'distributed & parallel computing',
    'operating systems',
    'databases',
    'real-time & embedded systems',
    'simulation',
    'bioinformatics & computational biology',
    'scientific computing',
    'computer education',
    'programming languages',
]

class CrawlThread(threading.Thread):
    '''
        Worker thread used by Connector.search_multithread.
        Repeatedly pops one work item off a shared parameter list,
        performs the search through the parent Connector, and appends
        the result to a shared result list. All shared-state access is
        guarded by the lock passed in at construction time.
    '''
    connector = None    # parent Connector that performs the actual HTTP search
    lock = None         # shared lock guarding param_list and result
    id = None           # numeric worker id (used in progress messages only)
    param_list = None   # shared work queue: list of (search_id, search_type, param_dict)
    result = None       # shared output: list of (search_id, parsed_json)
    def __init__(self, given_id, parent_connector, global_lock, pl, rs):
        threading.Thread.__init__(self)
        self.id = given_id
        self.connector = parent_connector        
        self.lock = global_lock
        self.param_list = pl
        self.result = rs
        
    def run(self):
        # Drain the shared queue until it is empty, then exit.
        while True:
            self.lock.acquire()
            if len(self.param_list) == 0:
                self.lock.release()
                break
            param_tuple = self.param_list[0]
            del self.param_list[0]
            #print 'thread %d is processing ' % self.id, param_tuple
            self.lock.release()
            # The network request runs OUTSIDE the lock so workers overlap I/O.
            content = self.connector.search(param_tuple[1], param_tuple[2])
            self.lock.acquire()
            self.result.append((param_tuple[0], content))
            print 'thread %d has done ' % self.id, param_tuple
            self.lock.release()
        self.lock.acquire()
        print 'thread %d is end' % self.id
        self.lock.release()

class Connector(object):
    root_url = 'http://arnetminer.org/services/'
    max_thread_num = 5

    def __init__(self):
        settings = loadSettings()
        if 'max_thread' in settings:
            self.max_thread_num = int(settings['max_thread'])
    
    def request_url(self, url):
        '''
            Get data from a given URL. If connection failures occur, try 3 times at most.
            Input:  the request string of url
            Output: the result string (for http request, return the html code)
            by Yan WANG
        '''
        fails = 0
        content = ''
        log('access', url)
        while fails < 3:
            try:
                req = urllib2.Request(url)
                resp = urllib2.urlopen(req, timeout = 10)
                content = resp.read()
                if len(content) > 1:
                    break
                else:
                    fails += 1
            except Exception as e:
                fails += 1
                print 'Failed for %d time(s)' % fails, e
        log('info', '%d characters are received' % len(content))
        return content

    def search(self, search_type, param_dict, root = None):
        '''
            Submit query and return the result in python dict.
            Input:  search_type: follow the given API, could be 'search-expert' or else.
                    param_dict: a dict including the parameters, e.g.:
                        {'u':'clockwise', 'q':'data mining', 'start': 10, 'num':3}
            Output: the dict or result, parsed from return json object
            by Yan WANG
        '''
        if root == None:
            root = 'http://arnetminer.org/services/'
        if not root.endswith('/'):
            root += '/'
        root += search_type + '?' + urlencode(param_dict).replace('+', '%20')
        #print 'accessing ' + root
        try:
            ret = self.request_url(root)
        except Exception as e:
            log('error', 'failed to access url ' + root)
            return None
        
        try:
            ret = json.loads(ret)
            return ret
        except Exception as e:
            log('error', 'failed to load json')

        try:
            ret = ret.decode('utf-8', 'ignore')
            ret = json.loads(ret)
            return ret
        except Exception as e:
            log('error', 'failed to decode and load json')
        

    def search_multithread(self, input_list):
        '''
            A multi-thread version of search.
            Input:  input_list is a list of tuples with format
                (search_id, search_type, param_dict)
            Output: a list of tuples with format
                (search_id, result_json)
        '''
        result = []
        lock = threading.RLock()
        pool = []
        for i in range(0, self.max_thread_num):
            ct = CrawlThread(i, self, lock, input_list, result)
            pool.append(ct)
        for i in range(0, self.max_thread_num):
            pool[i].start()
        for i in range(0, self.max_thread_num):
            pool[i].join()
        lock.acquire()
        print '%d result has found' % len(result)
        return result

def log(label, content):
    '''
        Record something into log file, format:
            (time) [label] content
        For instance:
            2013-10-12 20:12:29 [info] query:"data mining"
        notice: all '\n' in content and label will be replaced by space (' ')
    '''
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # replace() is a no-op when '\n' is absent, so no membership check needed
    content = content.replace('\n', ' ')
    label = label.replace('\n', ' ')
    f = open('soa.log', 'a')
    try:
        # encode so unicode content cannot raise while being written
        f.write('%s [%s] %s\n' % (timestamp, label, content.encode('utf-8', 'ignore')))
    finally:
        f.close()   # close even when write() raises (the original leaked here)

def loadSettings():
    '''
        This function loads Settings.txt and return the dict.
        Ignore lines that start with '#'.
        Two columns are separated by '\t'.
        A key that appears on several lines collects its values into a list.
        The result is cached in the module-level ``settings`` variable.
        
        Output: a dict defined in Settings.txt
    '''
    global settings
    if settings != None:
        return settings
    settings = {}
    f = open('Settings.txt', 'r')
    try:
        for line in f:
            # lines read from a file are never '', so no empty-line check needed
            if line.startswith('#'):
                continue
            seg = line.strip().split('\t')
            if len(seg) != 2 or len(seg[0]) == 0 or len(seg[1]) == 0:
                continue
            key, value = seg
            if key in settings:
                # promote a repeated key's value to a list of values
                if type(settings[key]) != type([]):
                    settings[key] = [settings[key]]
                settings[key].append(value)
            else:
                settings[key] = value
    finally:
        f.close()   # close even on a parse error (the original leaked then)
    return settings

def loadFieldVector():
    '''
        Load the per-field paper-id vectors from the file named by
        settings['paper_vector'] (one field per tab-separated line:
        field name, then ids). Cached in module-level ``field_vec``.
        Output: dict mapping field name -> list of id strings,
                or None when the setting is missing.
    '''
    global field_vec
    settings = loadSettings()
    if field_vec == None and settings.get('paper_vector') != None:
        field_vec = {}
        f = open(settings['paper_vector'], 'r')
        try:
            # iterate the file directly instead of materializing readlines()
            for line in f:
                seg = line.strip().split('\t')
                if len(seg) < 2:
                    continue
                field_vec[seg[0]] = seg[1:]
        finally:
            f.close()   # the original never closed this handle
    return field_vec

def loadMsData():
    '''
        Load and parse the JSON file named by settings['ms_data'].
        Cached in module-level ``ms_data``.
        Output: the parsed JSON object, or None when the setting is missing.
    '''
    global ms_data
    if ms_data != None:
        return ms_data
    settings = loadSettings()
    ms_data_file = settings.get('ms_data')
    if ms_data_file == None:
        return None
    # the original opened the file TWICE and closed neither handle
    f = open(ms_data_file, 'r')
    try:
        ms_data = json.loads(f.read())
    finally:
        f.close()
    return ms_data

def loadAminerData():
    '''
        Load and parse the JSON file named by settings['am_data'].
        Cached in module-level ``am_data``.
        Output: the parsed JSON object, or None when the setting is missing.
    '''
    global am_data
    if am_data != None:
        return am_data
    settings = loadSettings()
    am_data_file = settings.get('am_data')
    if am_data_file == None:
        return None
    f = open(am_data_file, 'r')
    try:
        # read() replaces ''.join(readlines()); the handle is now closed too
        am_data = json.loads(f.read())
    finally:
        f.close()
    return am_data
    

def loadSingleAttrList():
    '''
        Load and parse every JSON file listed under settings['single_attr'].
        Cached in module-level ``single_attr_list``.
        Output: a list of parsed JSON objects (one per file),
                or None when the setting is missing.
    '''
    global single_attr_list
    if single_attr_list != None:
        return single_attr_list
    settings = loadSettings()
    single_attr_file_list = settings.get('single_attr')
    if single_attr_file_list == None:
        return None
    # loadSettings returns a plain string when the key appears only once;
    # iterating that string would treat each CHARACTER as a filename.
    if not isinstance(single_attr_file_list, list):
        single_attr_file_list = [single_attr_file_list]
    single_attr_list = []
    for single_attr_file in single_attr_file_list:
        f = open(single_attr_file, 'r')
        try:
            single_attr_list.append(json.loads(f.read()))
        finally:
            f.close()   # the original never closed these handles
    return single_attr_list

def binCosSim(list1, list2):
    '''
        Binary cosine similarity between two id lists:
            |intersection| / sqrt(|list1| * |list2|)
        computed via inclusion-exclusion on the number of distinct items.
        Returns 0.0 when either list is empty.
    '''
    if not list1 or not list2:
        return 0.0
    distinct = set(list1) | set(list2)
    overlap = len(list1) + len(list2) - len(distinct)
    return 1.0 * overlap / math.sqrt(len(list1) * len(list2))
    
def genArffFile(feature_dict_list, filename):
    '''
        Write a Weka ARFF file from a list of {id: feature_list} dicts.
        Element 0 of each feature list is the score (labelled 'high' when
        > 0.0, otherwise 'low'); elements 1.. are the numeric attributes.
        Input:  feature_dict_list -- non-empty list of dicts id -> features
                filename -- path of the ARFF file to write
    '''
    # take any one feature list to determine the attribute count
    sample = next(iter(feature_dict_list[0].values()))
    f = open(filename, 'w')
    try:
        f.write('@RELATION BIGYANGGOD\n')
        for i in range(1, len(sample)):
            f.write('@ATTRIBUTE dim_%d NUMERIC\n' % i)
        f.write('@ATTRIBUTE score {high, low}\n')
        f.write('@DATA\n')
        for fd in feature_dict_list:
            # renamed loop vars: the originals shadowed builtins 'id' and 'list'
            for key, features in fd.items():
                for i in range(1, len(features)):
                    f.write('%f,' % features[i])
                f.write('high' if features[0] > 0.0 else 'low')
                f.write('\n')
    finally:
        f.close()   # close even when a write fails (the original leaked then)

def init():
    '''Warm every module-level cache in one call (results are discarded;
    the loaders store them in the module globals).'''
    # the original bound each result to an unused local; just call them
    loadSingleAttrList()
    loadMsData()
    loadFieldVector()
    loadSettings()
    
if __name__ == '__main__':
    #print json.dumps(loadSettings(), indent = 4)
    
    # Ad-hoc experiment: print the pairwise binary-cosine-similarity matrix
    # of the cached per-field paper-id vectors.
    query = 'high performance computer'
    fvd = loadFieldVector()
    buf = ''
    for ni, vi in fvd.items():
        for nj, vj in fvd.items():
            buf += '%.4f ' % binCosSim(vi, vj)
        buf += '\n'
    print buf
    
    # Live query: fetch up to 10000 publications for the query string and
    # rank each field by the similarity of its vector to the result-id set.
    # NOTE(review): result is used without a None check — a failed request
    # would raise TypeError here.
    param_dict = {'u':'vivo', 'q':query, 'start':0, 'num':10000}
    c = Connector()
    result = c.search('search-publication', param_dict)
    qv = []
    for p in result['Results']:
        qv.append(str(p['Id']))
    r = []
    for fn, fv in fvd.items():
        r.append((fn, binCosSim(fv, qv)))
    # most similar field first
    for pair in sorted(r, key = operator.itemgetter(1), reverse = True):
        print pair
    
    
    # The string below is dead code kept around as a no-op string literal:
    # it generated paper_dist.txt (one line per field: name + paper ids).
    '''
    search_type = 'search-publication'
    
    f = open('paper_dist.txt', 'w')
    c = Connector()
    param_tuple = []
    for i in range(0, len(field_map)):
        param_dict = {'u':'vivo', 'q':field_map[i], 'start':0, 'num':10000}
        param_tuple.append((i, search_type, param_dict))
    result_tuple = c.search_multithread(param_tuple)
    for triple in result_tuple:
        buf = field_map[triple[0]]
        result = triple[1]
        for item in result['Results']:
            buf += '\t' + str(item['Id'])
        f.write(buf + '\n')
    f.close()
    '''
    
    

'''
    c = Connector()
    pl = []
    for i in range(0, 20):
        param_dict = {'u':'vivo', 'q':'algorithm & theory', 'start':i * 50, 'num':50}
        pl.append((i, 'search-expert', param_dict))
        
    nrl = []
    begin2 = time.time()
    for i in range(0, 20):
        print pl[i]
        #nr = c.search(pl[i][1], pl[i][2])
        #nrl.append(nr)
    end2 = time.time()
    
    
    begin1 = time.time()
    result = c.search_multithread(pl)
    end1 = time.time()
    
    
    print len(nrl), len(result)
    for i in range(0, 20):
        r1 = json.dumps(result[i][1]['Results'])
        r2 = json.dumps(nrl[i]['Results'])
        print len(r1), len(r2)
        print r1[:50]
        print r2[:50]
    print 'Single thread: %.3f' % (end2 - begin2)
    print 'Multi-thread:  %.3f' % (end1 - begin1)
'''
'''
    c = Connector()

    search_type = 'search-expert'
    param_dict = {'u':'clockwise', 'q':'data mining', 'start':10, 'num':3}
    
    print json.dumps(c.search(search_type, param_dict), sort_keys = True, indent = 2)
'''
