#!/usr/bin/env python
#-*- encoding:utf-8 -*- 
import string 
from whoosh.index import create_in  
from whoosh.fields import *  
from chinesetokenizer import ChineseAnalyzer
#from whoosh.analysis import RegexAnalyzer  
#analyzer = RegexAnalyzer(ur"([\u4e00-\u9fa5])|(\w+(\.?\w+)*)")

from whoosh.index import open_dir
from whoosh.scoring import *
from whoosh.matching import *
from whoosh.qparser import QueryParser

# --- Index / dictionary setup (runs at import time) -------------------------
# Builds the Chinese analyzer and schema, opens the pre-built "titleindex"
# whoosh index, and loads the term -> feature-index mapping from 'dictionary'.
analyzer = ChineseAnalyzer();

# Frequency format so the content field stores per-document term counts.
f = formats.Frequency();
schema = Schema(appid = ID(stored=True), content=TEXT(stored=True, analyzer= analyzer, vector = f));
ix = open_dir("titleindex");
reader = ix.reader();
searcher = ix.searcher();

parser = QueryParser('content', schema);
weightingM = BM25F();
# dictMap: unicode term -> 1-based feature index.
# 'dictionary' is tab-separated; only the first column (the term) is used.
dictMap = {};
d = open('dictionary', 'r');
index = 0;
for line in d:
    index += 1;
    dictMap[line.strip('\n').split('\t')[0].decode('utf-8')] = index;

d.close();
# MAX is the feature-space dimensionality, written into the output header.
MAX = len(dictMap);
print MAX;

def getQueryFeature(queryString):
    """Tokenize a raw utf-8 byte query string and return a term-frequency dict.

    Args:
        queryString: utf-8 encoded byte string (Python 2 str).

    Returns:
        dict mapping each token text (unicode) to its occurrence count.
        Empty dict when the analyzer produces no tokens.
    """
    counts = {}
    for token in analyzer(queryString.decode('utf-8')):
        term = token.text
        # dict.get replaces the deprecated has_key() and avoids the
        # double lookup of the original check-then-increment pattern.
        counts[term] = counts.get(term, 0) + 1
    return counts

def format(di):
    """Serialize a {term: count} dict into a libsvm-style feature string.

    NOTE: intentionally shadows the builtin format(); name kept for the
    callers below.

    Args:
        di: dict mapping unicode term -> count, or None.

    Returns:
        None when di is None or empty; otherwise a string of
        " index:count" pairs (leading space preserved from the original
        implementation) for terms present in dictMap. May be '' when no
        term is in dictMap.
    """
    # Truthiness covers both the original `di == None` and the
    # `len(di.items()) == 0` checks.
    if not di:
        return None
    # ''.join is O(n); the original repeated '%s %d:%f' concatenation
    # was quadratic. Each piece carries the original leading space.
    return ''.join(' %d:%f' % (dictMap[key], value)
                   for key, value in di.items()
                   if key in dictMap)


# --- Query -> feature-file conversion ---------------------------------------
# Reads one query per line from 'all_query' and writes 'all_query.set':
#   header line:  *\t<MAX>\t<MAX>
#   data lines:   <queryID>\t<feature string>\t<raw query>
# `with` guarantees both files are closed even if a line raises
# (the original open()/close() pair leaked handles on error).
with open('all_query', 'r') as results, open('all_query.set', 'w') as testingFile:
    testingFile.write('*\t%d\t%d\n' % (MAX, MAX))
    queryID = 0
    for line in results:
        queryString = line.strip()
        queryID += 1
        # queryFeature may be None (no tokens or no dictionary hits);
        # %s then writes the literal "None", matching prior behavior.
        queryFeature = format(getQueryFeature(queryString))
        testingFile.write('%d\t%s\t%s\n' % (queryID, queryFeature, queryString))
