#-*- coding:utf-8 -*- 
import codecs 
from whoosh.index import create_in  
from whoosh.fields import *  
from chinesetokenizer import ChineseAnalyzer
#from whoosh.analysis import RegexAnalyzer  
#analyzer = RegexAnalyzer(ur"([\u4e00-\u9fa5])|(\w+(\.?\w+)*)")

from whoosh.index import open_dir

from whoosh.qparser import QueryParser

analyzer = ChineseAnalyzer()

def computeDis(queryString, appString):
    """Return True when the query and app title share at least one token.

    Both inputs are UTF-8 encoded byte strings (Python 2 ``str``); they are
    decoded and tokenized with the module-level ChineseAnalyzer, then compared
    by Jaccard similarity (|intersection| / |union|).

    Returns:
        bool: True if the similarity ratio is >= 1e-7 (i.e. the token sets
        overlap at all in practice), False otherwise — including when both
        strings tokenize to nothing.
    """
    qset = set(tok.text for tok in analyzer(queryString.decode('utf-8')))
    aset = set(tok.text for tok in analyzer(appString.decode('utf-8')))
    #print 'queryString %s, appString %s\n' % (queryString, appString)
    #print 'qset %s, aset %s\n' % (qset, aset)

    union_size = len(qset | aset)
    if union_size == 0:
        # Both token sets empty: the original code would raise
        # ZeroDivisionError here. Treat as "no match" instead.
        return False
    ratio = len(qset & aset) / float(union_size)
    return ratio >= 0.0000001

ix = open_dir("IndexDir/titleIndex");
parser = QueryParser('content',ix.schema);

filter = open('finalunmatchLog', 'w');
with open('unmatchLog', 'r') as f:
    with ix.searcher() as searcher:
        count = 0;
        total  = 0;
        uncount = 0;
        for line in f:
            li = line.strip().split('\t');
            queryString = li[1];
            appString = li[0];
            appID = li[2];
            dis = computeDis(queryString, appString);
            if dis == False :
                filter.write('%s\t%s\t%s\n' %(appString, appID, queryString));
                uncount += 1;
            else:
                count += 1;
            total += 1;

filter.close();
print 'total:%d, count:%d, uncount:%d' % (total, count, uncount);
