#!/usr/local/bin/python
#-*- encoding:utf-8 -*-

from whoosh.index import create_in  
from whoosh.fields import *
import jieba  
import jieba.analyse
from whoosh.index import open_dir
from whoosh.scoring import *
from whoosh.qparser import QueryParser
import utils
import argparse

'''This script transforms the click log into training data.'''

# --- Global setup: load config, open the Whoosh index, build search helpers ---
# NOTE(review): this is Python 2 code (print statements, str.decode, has_key).
config = utils.get_config();

# Open the backup index directory named in the [bm25] section of the config.
ix = open_dir(config.get('bm25', 'BackupIndex'));
reader = ix.reader();
searcher = ix.searcher();
# jieba's Chinese tokenizer; used to split query strings into terms.
analyzer = jieba.analyse.ChineseAnalyzer();

# Query parser over the 'content' field (appears unused in this chunk).
parser = QueryParser('content', ix);
# BM25F scorer used to weight document terms in getDocumentFeature().
weightingM = BM25F();

dictMap = {};
d = open(config.get('rmls', 'dictionary'), 'r');
index  = 0;
for line in d:
    dictMap[(line.strip('\n').split('\t')[0].decode('utf-8'))] = index;
    index += 1;
d.close();
MAX = len(dictMap);
print MAX;

idNotInIndex = 0;
idNotHasVector = 0;
def getQueryFeature(queryString):
    """Tokenize a utf-8 byte-string query with jieba and return a
    term -> raw-frequency dict.

    Returns an empty dict when the analyzer produces no tokens.
    """
    di = {};
    tli = analyzer(queryString.decode('utf-8'));
    for t in tli:
        term = t.text;
        # dict.get() replaces the deprecated has_key() test-then-set branch.
        di[term] = di.get(term, 0) + 1;
    return di;

def getDocumentFeature(appID):
    """Return a term -> BM25F-score dict for the indexed document whose
    'appid' field equals appID.

    Returns an empty dict (and bumps the matching global counter) when the
    appid is not in the index, or when the document has no stored term
    vector for the 'content' field.
    """
    di = {};
    global idNotInIndex;
    global idNotHasVector;
    docnum = searcher.document_number(appid = appID);
    # BUG FIX: document_number() returns None when nothing matches, but 0 is
    # a perfectly valid document number.  'if not docnum' wrongly treated
    # document 0 as missing; test explicitly against None instead.
    if docnum is None:
        idNotInIndex += 1;
        return di;
    if not reader.has_vector(docnum, 'content'):
        idNotHasVector += 1;
        return di;
    # Stored term vector as (term, frequency) pairs for this document.
    vec = reader.vector_as('frequency', docnum, 'content');
    for v in vec:
        term = v[0];
        tf = int(v[1]);
        # BM25F score of this term for this document given its term freq.
        score = weightingM.score(searcher, 'content', term, docnum, tf);
        di[term] = score;
    return di;
        
def format(di, click):
    """Render a feature dict as ' <id>:<value>' pairs (libsvm-like).

    Only terms present in dictMap (term -> feature-id table) are kept.
    Returns None when no term survives the filter so callers can skip the
    sample entirely.  The 'click' parameter is currently unused (see the
    commented-out line).  NOTE(review): this shadows the builtin format();
    the name is kept so existing callers keep working.
    """
    formatString = '';
    for key, value in di.items():
        # 'in' replaces the deprecated dict.has_key().
        if key in dictMap:
            k = dictMap[key];
            formatString = '%s %s:%f'%(formatString, k, value);
    #formatString = '%s %d:%f'%(formatString, MAX+1, click);
    if formatString.strip() == '':
        return None;
    else:
        return formatString;

#reading logFile to a dict
logFile = open('/home/wangshuxin/operlog.onemonth', 'r');

logDict = {};
appDict = {}
for line in logFile:
    li = line.strip().split('\t');
    if len(li) != 3:
        print line;
        continue;
    appID = li[0];
    click_count = int(li[1]);
    queryString = li[2];

    if logDict.has_key(queryString):
        if logDict[queryString].has_key(appID):
            logDict[queryString][appID] += click_count;
        else:
            logDict[queryString][appID] = click_count;
    else:

        logDict[queryString] = {};
        logDict[queryString][appID] = click_count;
    
    if appDict.has_key(appID):
        appDict[appID] += click_count;
    else:
        appDict[appID] = click_count;

logFile.close();

def getValidLen(app_map):
    """Count how many appIDs in app_map actually exist in the index."""
    c = 0;
    for key in app_map.keys():
        docnum = searcher.document_number(appid=key);
        # BUG FIX: docnum 0 is a valid document number; only None means
        # "not found", so test against None instead of truthiness.
        if docnum is not None:
            c += 1;
    return c;
#wring trainFile
count = 0;
trainingFile = open('Train/train.month.set', 'w');

#trainingFile.write('*\t%d\t%d\n'%(MAX, MAX));
logLen = len(logDict);
for queryString, app_map in logDict.items():
    l = getValidLen(app_map);
    clickQ = sum(app_map.values());
    queryFeature = format(getQueryFeature(queryString),clickQ);
    if l == 0 or queryFeature == None:
        continue;
    count += 1;
    #print '%d/%d' % (count, logLen);
    trainingFile.write('#\t%d\t%d\n'%(count, l));
    for appID, res in app_map.items():
        clickD = appDict[appID];
        documentFeature = format(getDocumentFeature(appID),clickD);
        if documentFeature == None:
            continue;
        trainingFile.write('%s\t%s\t%s\n' %(res, queryFeature, documentFeature ))


print 'idNotInIndex:%d, idNotHasVector:%d' %(idNotInIndex, idNotHasVector);
trainingFile.close();
