#!/usr/bin/env python
# -*- coding: utf-8 -*-
from whoosh.index import create_in  
from whoosh.fields import *  
from chinesetokenizer import ChineseAnalyzer
from  itertools import izip_longest
from whoosh.index import open_dir
from whoosh.scoring import *
from whoosh.qparser import QueryParser
import os
from multiprocessing import Process, Pool
import utils

config = utils.get_config(); 
ix = open_dir(config.get('bm25', 'MulFieldsIndex'));
reader = ix.reader();
searcher = ix.searcher();
analyzer = ChineseAnalyzer();
analyzer('init');
parser = QueryParser('content', ix);
weightingM = BM25F();

dictMap = {};
d = open(config.get('rmls', 'dictionary'), 'r');
index  = 0;
for line in d:
    index += 1;
    dictMap[(line.strip('\n').split('\t')[0].decode('utf-8'))] = index;

d.close();
MAX = len(dictMap);
print MAX;

idNotInIndex = 0;
idNotHasVector = 0;
def getQueryFeature(queryString):
    """Tokenize a raw utf-8 encoded query string and return a dict
    mapping each term (unicode) to its frequency within the query.

    queryString -- utf-8 encoded byte string.
    Returns {} when the analyzer produces no tokens.
    """
    di = {}
    for t in analyzer(queryString.decode('utf-8')):
        term = t.text
        # dict.get replaces the deprecated has_key() and does one lookup
        # instead of two
        di[term] = di.get(term, 0) + 1
    return di

def getDocumentFeature(appID):
    """Return a dict mapping each term of the document's 'content' field
    to its BM25F score, or an empty dict when the document is missing
    from the index or has no stored term vector.

    Side effects: increments the module-level idNotInIndex /
    idNotHasVector counters on the corresponding failure paths.
    """
    di = {}
    global idNotInIndex
    global idNotHasVector
    docnum = searcher.document_number(appid=appID)
    # BUG FIX: document_number() returns None on a miss, but document
    # number 0 is a valid hit -- the original truthiness test `not docnum`
    # wrongly counted doc 0 as "not in index" and skipped it.
    if docnum is None:
        idNotInIndex += 1
        return di
    if not reader.has_vector(docnum, 'content'):
        idNotHasVector += 1
        return di
    for v in reader.vector_as('frequency', docnum, 'content'):
        term = v[0]
        tf = int(v[1])
        di[term] = weightingM.score(searcher, 'content', term, docnum, tf)
    return di
        
def format(di, click):
    """Serialize feature dict di as libsvm-style ' index:value' pairs.

    Only terms present in the global dictMap receive an index; terms not
    in dictMap are silently dropped (so the result may be '' even for a
    non-empty di).  Returns None for an empty feature dict.  click is
    currently unused -- the label column emission is commented out.

    NOTE: the name shadows the builtin format(); kept for the callers.
    """
    if not di:
        return None
    # join instead of repeated '%s...' % (formatString, ...) accumulation,
    # which was quadratic; each piece keeps its leading space so the
    # output is byte-identical to the original format.
    parts = []
    for key, value in di.items():
        if key in dictMap:
            parts.append(' %d:%f' % (dictMap[key], value))
    #parts.append(' %d:%f' % (MAX + 1, click))
    return ''.join(parts)

# Accumulates every regular-file path found under the log directory.
logFileList = []

def listAllLogFile(logDir):
    """Recursively walk logDir, appending each regular file's path to
    the module-level logFileList.  Non-file, non-directory entries
    (broken symlinks, sockets, ...) are ignored."""
    if os.path.isfile(logDir):
        logFileList.append(logDir)
        return
    if not os.path.isdir(logDir):
        return
    for child in os.listdir(logDir):
        listAllLogFile(os.path.join(logDir, child))

def getTimestamp(f):
    """Return the first 10 characters of f's basename (the timestamp
    prefix of a log file name); shorter names are returned whole."""
    name = os.path.basename(f)
    # slicing already caps at len(name), so no explicit length check needed
    return name[:10]


def grouper(n, iterable, padvalue=None):
    """Collect iterable into fixed-length n-tuples, padding the last
    tuple with padvalue when the length is not a multiple of n."""
    # n references to the same iterator advance it in lockstep,
    # producing consecutive n-sized groups
    slots = [iter(iterable)] * n
    return izip_longest(*slots, fillvalue=padvalue)


def extract_chunk(line):
    if line == None:
        return None;
    li = line.strip().split('\t');
    if len(li) != 3:
        print line;
        return None;
    appID = li[0]
    opertype = li[1];
    queryString = li[2];

    queryFeature = format(getQueryFeature(queryString),0);
    documentFeature = format(getDocumentFeature(appID),0);
    res  = 1;
    if queryFeature and documentFeature:
        return '%s\t%s\t%s\n'% (res, queryFeature, documentFeature );
    else:
        return None;
K = 50;
def extract(f, destFile, timestamp):
    print 'processing %s' % (f,);
    logf = open(f, 'rU');
    
    pool = Pool(processes=K);
    for chunk in grouper(K, logf):
        results = pool.map(extract_chunk, chunk);
        for r in results:
            if r:
                destFile.write(r);

    pool.close();
    logf.close();


if __name__ == '__main__':
    logDir = '/home/wangshuxin/operlog/'
    destDir = '/home/wangshuxin/destLog/'
    listAllLogFile(logDir)
    for f in logFileList:
        timestamp = getTimestamp(f)
        head, tail = os.path.split(f)
        # with-statement guarantees the output file is flushed and
        # closed even if extract() raises
        with open(os.path.join(destDir, tail), 'w') as destFile:
            # header line: '*' marker followed by the file's timestamp
            destFile.write('*\t%s\n' % timestamp)
            extract(f, destFile, timestamp)

