#!/usr/local/bin/python
# -*- encoding:utf-8 -*-

import numpy as np;
import scipy.sparse as sp
from scipy import linalg
import time
import threading
import os
from scipy.sparse.linalg import eigsh,inv
from multiprocessing import Process, Manager
import sys
import utils
from whoosh.index import open_dir
from whoosh.fields import *
from whoosh.scoring import *
from whoosh.matching import *
import jieba.analyse
from whoosh.qparser import QueryParser


'''This file implements a query feedback algorithms to get synonyms for querys using Rocchio algorithm'''
config = utils.get_config()

# Whoosh setup: jieba-backed Chinese analyzer and a schema whose 'content'
# field stores per-document term-frequency vectors (required for feedback).
analyzer = jieba.analyse.ChineseAnalyzer()
f = formats.Frequency()
schema = Schema(appid=ID(stored=True),
                content=TEXT(stored=True, analyzer=analyzer, vector=f))
ix = open_dir(config.get('bm25', 'BackupIndex'))
reader = ix.reader()
searcher = ix.searcher()

parser = QueryParser('content', schema)
weightingM = BM25F()

# dictMap: term -> dense integer id. Each dictionary line is "<term>\t...";
# ids follow file order. 'with' guarantees the file is closed on error.
dictMap = {}
with open(config.get('rmls', 'dictionary'), 'r') as d:
    for index, line in enumerate(d):
        dictMap[line.strip('\n').split('\t')[0].decode('utf-8')] = index
MAX = len(dictMap)
print(MAX)

def getDocumentFeature(appID):
    """Return {term: BM25 score} built from the stored term vector of the
    document whose appid is appID.

    Returns None when the appid is not in the index or the document has no
    stored 'content' term vector.
    """
    docnum = searcher.document_number(appid=appID)
    if not docnum:
        return None
    if not reader.has_vector(docnum, 'content'):
        return None
    features = {}
    for entry in reader.vector_as('frequency', docnum, 'content'):
        term, tf = entry[0], int(entry[1])
        features[term] = weightingM.score(searcher, 'content', term, docnum, tf)
    return features

def format(di):
    """Filter a {term: score} dict down to terms present in dictMap.

    Returns None for a None or empty input (matching the original contract);
    otherwise returns a dict containing only in-vocabulary terms (possibly
    empty when every term is out of vocabulary).

    NOTE(review): the name shadows the builtin format(); kept unchanged so
    existing callers keep working.
    """
    if not di:
        return None
    # Build a new dict instead of popping keys while iterating the input —
    # the original pop-during-items() pattern is fragile (errors on py3).
    return {term: score for term, score in di.items() if term in dictMap}
def updateQuery(value, documentFeature, k):
    """Accumulate one document's {term: score} features into the query model.

    value           -- running {term: summed score} dict, mutated in place.
    documentFeature -- one feedback document's features, or None/empty.
    k               -- number of documents merged so far.

    Returns k + 1 when features were merged, k otherwise. The original code
    incremented a *local* k, which was invisible to callers; returning the
    count makes it usable while staying backward compatible (existing callers
    may simply ignore the return value).
    """
    if not documentFeature:
        return k
    for term, score in documentFeature.items():
        value[term] = value.get(term, 0) + score
    return k + 1

def findIndex(s):
    if dictMap.has_key(s):
        return dictMap[s];
    else:
        print 'WRONG', s;
        return -1;

def findPhraseIndex(s):
    """Tokenize phrase s with the jieba analyzer and return the dictMap ids
    of its distinct in-vocabulary tokens."""
    counts = {}
    for token in analyzer(s.decode('utf-8')):
        word = token.text
        counts[word] = counts.get(word, 0) + 1
    # Only the distinct tokens matter for the lookup; counts are unused.
    return [dictMap[word] for word in counts if word in dictMap]

def processTestSet(testset, result1, result2):
    """Run Rocchio-style query feedback for every query in the test set.

    Input line format: "query|pos:appID:label|pos:appID:label|...". Each
    referenced document's BM25-weighted term vector is merged into a single
    feedback model for the query.

    Writes:
      result1 -- "query\tterm" lines: the TOPK highest-scoring feedback terms.
      result2 -- "queryTermId\tfeedbackTermId\tnormalizedScore" triples for
                 each query token found in dictMap.
    """
    TOPK = 10
    # 'with' guarantees all three files are closed even on error.
    with open(testset, 'r') as testSetFile, \
         open(result1, 'w') as resultFile1, \
         open(result2, 'w') as resultFile2:
        for line in testSetFile:
            li = line.strip().split('|')
            key = li[0]
            value = {}
            k = 0
            for item in li[1:]:
                # item is "pos:appID:label"; only the appID is used here.
                appID = item.split(':')[1]
                documentFeature = format(getDocumentFeature(appID))
                if documentFeature:
                    # Fixed: the increment inside updateQuery was a no-op
                    # local, so k always printed as 0.
                    k += 1
                updateQuery(value, documentFeature, k)
            print(k)
            # Highest scores first. The original ascending sort picked the
            # *lowest*-weighted terms as "top K", contradicting the intent.
            valueList = sorted(((score, term) for term, score in value.items()),
                               reverse=True)
            print(valueList)
            top = valueList[:TOPK]
            s = 0.0
            for score, term in top:
                resultFile1.write('%s\t%s\n' % (key, term.encode('utf-8')))
                s += score
            if s != 0.0:  # guard: empty/zero model would divide by zero
                for i in findPhraseIndex(key):
                    for score, term in top:
                        resultFile2.write('%d\t%d\t%f\n' %
                                          (i, findIndex(term), score / s))

if __name__ == '__main__':
    # Validate argv before use instead of crashing with IndexError.
    if len(sys.argv) != 4:
        sys.stderr.write('usage: %s <testset> <result1> <result2>\n'
                         % sys.argv[0])
        sys.exit(1)
    processTestSet(sys.argv[1], sys.argv[2], sys.argv[3])
