import string 
from whoosh.index import create_in  
from whoosh.fields import *  
import jieba.analyse
from whoosh.index import open_dir
from whoosh.scoring import *
from whoosh.matching import *
from whoosh.qparser import QueryParser
import utils
import jieba.analyse
from multiprocessing import Pool
import heapq
from itertools import izip_longest
import collections
from math import exp
import numpy as np
# Module-level setup: build the word -> row-index map from the dictionary file.
analyzer = jieba.analyse.ChineseAnalyzer()
config = utils.get_config()
dictMap = {}
# First tab-separated field of each line is the word; its line number is its index.
with open(config.get('rmls', 'dictionary'), 'r') as d:
    for index, line in enumerate(d):
        dictMap[line.strip('\n').split('\t')[0].decode('utf-8')] = index
MAX = len(dictMap)
print(MAX)

def findIndex(s):
    if dictMap.has_key(s):
        return dictMap[s];
    else:
        print 'WRONG', s;
        return -1;

def grouper(n, iterable, padvalue=None):
    """Collect items of iterable into fixed-length tuples of size n,
    padding the final incomplete group with padvalue."""
    # The same iterator repeated n times advances in lockstep.
    slots = [iter(iterable)] * n
    return izip_longest(*slots, fillvalue=padvalue)

def processDict(syn_dict, top=50000):
    """Flatten the nested synonym dict into (score, word1, word2) triples
    and return the ``top`` highest-scoring ones.

    syn_dict maps word1 -> {word2: score}.  ``top`` generalizes the
    previously hard-coded 50000 cutoff (default unchanged).
    """
    triples = [(score, w1, w2)
               for w1, inner in syn_dict.items()
               for w2, score in inner.items()]
    return heapq.nlargest(top, triples)


def update(d, u):
    """Recursively merge counter-dict ``u`` into ``d`` and return ``d``.

    Nested dicts are merged key by key; numeric leaves with the same key
    are summed.  ``d`` is mutated in place.
    """
    for k, v in u.items():
        # All nested values in this program are plain dict literals, so
        # isinstance(v, dict) replaces the removed collections.Mapping check.
        if isinstance(v, dict):
            d[k] = update(d.get(k, {}), v)
        else:
            # was `u[k]`, which is just `v` — sum the counts.
            d[k] = d.get(k, 0) + v
    return d
        

def computeSimilarity(map1, map2):
    """Histogram-intersection similarity of two count dicts:
    sum over shared keys of min(count1, count2)."""
    # `k in map2` replaces the deprecated dict.has_key().
    return sum(min(v, map2[k]) for k, v in map1.items() if k in map2)

def getSyn(s_map):
    di = {};
    word_dict = {};
    for queryString, count in s_map.items():
        queryString = queryString.decode('utf-8');
        tli = analyzer(queryString);
        for t in tli:
            term = t.text;
            context = queryString.replace(term, ':');
            if word_dict.has_key(term):
                if word_dict[term].has_key(context):
                    word_dict[term][context] += count;
                else:
                    word_dict[term][context] = count;
            else:
                word_dict[term]={};
                word_dict[term][context] = count;
        #if not word_dict.has_key(queryString):
        #    word_dict[queryString] = {};
        #    word_dict[queryString][':']= count;

    print 'word_dict length', len(word_dict);
    keys = word_dict.keys();
    for i in range(len(keys)):
        for j in range(i+1, len(keys)):
            sim = computeSimilarity(word_dict[keys[i]], word_dict[keys[j]]);
            if sim > 0:
                a = sorted([keys[i], keys[j]]);
                if di.has_key(a[0]):
                    di[a[0]][a[1]] = sim;
                else:
                    di[a[0]] = {};
                    di[a[0]][a[1]] = sim;
    return di;

def processChunk(line):
    """Parse one tab-separated log line and build its synonym dict.

    Expects 5 tab-separated fields; the 5th is a comma-separated list of
    "query:count" entries.  Returns getSyn's nested score dict, or None for
    a missing/malformed line (grouper pads chunks with None).
    """
    # `is None` replaces the `== None` comparison (PEP 8 E711).
    if line is None:
        return None
    fields = line.strip().split('\t')
    if len(fields) != 5:
        return None
    s_map = {}
    for entry in fields[4].strip().split(','):
        parts = entry.split(':')
        # Skip entries that are not exactly "query:count".
        if len(parts) != 2:
            continue
        s_map[parts[0]] = int(parts[1])
    return getSyn(s_map)
  

def testing(appinfo):
    """Mine synonym pairs from the app-info file at path ``appinfo``.

    Reads the file in chunks of K lines, scores each line in a K-worker
    pool, merges the per-line score dicts, and returns the top triples
    from processDict.
    """
    syn_dict = {}
    K = 30
    pool = Pool(processes=K)
    try:
        # `with` guarantees the input file is closed (it was leaked before).
        with open(appinfo, 'r') as f:
            for chunk in grouper(K, f):
                for result in pool.map(processChunk, chunk):
                    if result:
                        update(syn_dict, result)
    finally:
        # The pool was never shut down before; close/join releases workers.
        pool.close()
        pool.join()
    return processDict(syn_dict)

def getSigmoidScore(x):
    """Logistic sigmoid: squash x into the open interval (0, 1)."""
    return 1.0 / (1.0 + exp(-x))

if __name__ == '__main__':
    appinfo = '/home/wangshuxin/data.onemonth/app/part-00000'
    syns = testing(appinfo)

    # Z-score normalization stats for the raw similarity scores.
    scores = np.array([x[0] for x in syns])
    m = np.mean(scores)
    s = np.std(scores)

    # syn_S: human-readable pairs with raw score.
    # syn_I: dictionary indices with the z-scored sigmoid score.
    # `with` ensures both outputs are flushed and closed (they were leaked before).
    with open('syn_S', 'w') as result1, open('syn_I', 'w') as result2:
        for score, w1, w2 in syns:
            result1.write('%s\t%s\t%d\n' % (w1.encode('utf-8'), w2.encode('utf-8'), score))
            # Second placeholder fixed from %s to %d: findIndex returns an int,
            # so the written output is unchanged.
            result2.write('%d\t%d\t%f\n' % (findIndex(w1), findIndex(w2), getSigmoidScore((score - m) / s)))

