# Time-stamp: <2011-06-08 Wed 00:46:31>

from whoosh.qparser import MultifieldParser, QueryParser
from whoosh.analysis import *
from whoosh.query import *
from whoosh.qparser import *
from whoosh.scoring import *
import whoosh.index as index
from whoosh.spelling import SpellChecker
from datetime import datetime
import os,subprocess,sys,getopt,shutil

# --- configuration ---
PDFVIEWER="evince"              # for mac user, please use PDFVIEWER="open"
INDEXDIR="index"                # directory holding the two whoosh indexes ("normal" and "math")
RESULTDIR="result"              # scratch directory in which the LaTeX result page is built
SYMBSEARCH=False                 # enable search path like: mgf->M_(x)->result, highly experimental
querytext=""                    # the raw query; filled in by main() from the command line
# Usage banner printed when the script is started without a query.
helptext=r"""

,----[ TSE ]
| please give a query.
|   * python searcher.py gaussian distribution
|   * python searcher.py tex:int
`----

get latest TSE by:
svn checkout https://tse-a-theorem-search-engine.googlecode.com/svn/trunk/ tse-a-theorem-search-engine

"""

def searchquery(qtext,symbsearch=False):
    """Run *qtext* against the main index and return the whoosh results.

    The query is parsed over the "content" and "tags" fields (content
    weighted higher), with "tex"/"latex" accepted as aliases for the
    "mathcontent" field.  When *symbsearch* is true, the plain-content
    terms are additionally run against the math index; if that hits, the
    top math hit's "symb" field is searched back in the main index and
    up to three of those hits are promoted into the returned results.
    """
    qp = MultifieldParser(["content","tags"], ix.schema, {"content":1.0,"tags":0.3})
    qp.add_plugin(FieldAliasPlugin({"mathcontent": ["tex", "latex"]}))
    parsed = qp.parse(qtext)
    hits = searcher.search(parsed)
    if not symbsearch:
        return hits
    # collect the distinct terms the parser assigned to the "content" field
    content_terms = set(term for (field, term) in parsed.all_terms() if field == "content")
    mathquery = QueryParser("context", ix_math.schema).parse(" ".join(content_terms))
    mathhits = searcher_math.search(mathquery)
    if len(mathhits) > 0:
        # AND-search the top math hit's symbols back in the main index
        symbquery = QueryParser("mathcontent", ix.schema, group=AndGroup).parse(mathhits[0]["symb"])
        symbhits = searcher.search(symbquery, limit=3)
        if len(symbhits) > 0:
            hits.upgrade_and_extend(symbhits)
    return hits

def correctquery(orgquery):
    result=""
    for v in my_analyzer2(orgquery):
        if "tex:" in v.text or "latex:" in v.text:
            return None              # do not check spelling on tex/latex symbol
        suggestions = speller.suggest(v.text, number=1)
        if suggestions:
            result+=suggestions[0]+" "
    if len(result.strip())>0:
        print "--- did you mean \"%s\"?"%(result.strip())
        return None
    return unicode(result.strip())

def main(argv=None):
    global querytext
    if argv is None:
        argv = sys.argv
    if len(argv)==1:
        print helptext
        sys.exit(0)
    elif len(argv)>1:
        querytext=unicode(' '.join(str(v) for v in argv[1:]))

if __name__ == "__main__":
    main()



# prepare the directory for results
if not os.path.exists(RESULTDIR):
    os.mkdir(RESULTDIR)
else:
    # a previous run left the scratch directory behind: wipe and recreate it
    print "cleaning results..."
    shutil.rmtree(RESULTDIR)
    os.mkdir(RESULTDIR)

# LaTeX snippets used to typeset the result page:
rankicon=182                    # base \ding symbol number; the hit's rank is added to it
startnote=r"\ding{@}"           # rank icon; "@" is replaced by rankicon+rank
marginnote=r"\marginpar{\P@}"   # margin note; "@" is replaced by the hit's page number
sectiontext=r"\mysection{%s}{%s results}"   # section header: (title, hit count)
bartext=r"\mybox{%.2f}{%.2f}{%.2f}"         # score-plot bar: (x start, x end, height)
# analyzer used for spell-check tokenization (my_analyzer appears unused here
# — presumably shared with the indexer; confirm before removing)
my_analyzer2= SpaceSeparatedTokenizer() | LowercaseFilter() | StopFilter()
my_analyzer = RegexTokenizer() | LowercaseFilter() | StopFilter() | StemFilter()

# open the two whoosh indexes: "normal" holds the document text,
# "math" the symbol-search data (used only when SYMBSEARCH is on)
ix = index.open_dir(INDEXDIR, indexname="normal")
reader = ix.reader()
ix_math=index.open_dir(INDEXDIR,indexname="math")
reader_math=ix_math.reader()
# Load a spelling dictionary stored in the same directory
# as the main index
speller = SpellChecker(ix.storage)
searcher=ix.searcher()
searcher_math=ix_math.searcher()
c_querytext=querytext           # query actually used (may become the corrected one)
# run the query; if nothing matches, try a spell-corrected query before
# giving up, cleaning the scratch directory and exiting
results=searchquery(querytext,SYMBSEARCH)

if (len(results)==0):
    c_querytext=correctquery(querytext)
    if c_querytext:
        results=searchquery(c_querytext,SYMBSEARCH)
    if (len(results)==0):
        print '\"%s\" returns no result!'%(c_querytext)
        print "cleaning temp files..."
        shutil.rmtree(RESULTDIR)
        print "closed."
        sys.exit(0)


# one bucket per result type, filled in the loop below
resthm=[]                       # theorems
resrem=[]                       # remarks
reslem=[]                       # lemmas
resdef=[]                       # definitions
rescor=[]                       # corollaries
# (type, best score) pairs; -1 means "no hit of this type yet"
displayorder=[("mthm",-1),("mrem",-1),("mlem",-1),("mdef",-1),("mcor",-1)]

# with more than 10 hits, drop everything scoring at or below the mean score
removenum=0
if len(results)>10:
    THRESHOLD=sum([v.score for v in results])/len(results)
    oldresultlen=len(results)
    results=[h for h in results if h.score>THRESHOLD]
    removenum=oldresultlen-len(results)
print '\"%s\" returns %d results...'%(c_querytext,len(results))
if removenum>0:
    print "--- %d results with low score have been omitted."%(removenum)

# bucket each hit's formatted LaTeX by its type, and remember the best
# score seen per type so the sections can be emitted best-first
for h in results:
    ricon=str(rankicon+h.rank)          # \ding symbol number for this hit's rank
    pnum=str(h['pagenum'])
    mymargin=marginnote.replace("@",pnum)
    mysicon=startnote.replace("@",ricon)
    # rank icon + page margin note + the hit's original LaTeX content
    fcontent="%s %s %s\n\n%s"%(mysicon,mymargin,h['orgcontent'],r"\vspace{0.2cm}")
    typ=h['typ']
    if typ==u"mthm":
        resthm.append(fcontent)
        if displayorder[0][1]<h.score:
            displayorder[0]=("mthm",h.score)
    elif typ==u"mrem":
        resrem.append(fcontent)
        if displayorder[1][1]<h.score:
            displayorder[1]=("mrem",h.score)
    elif typ==u"mlem":
        reslem.append(fcontent)
        if displayorder[2][1]<h.score:
            displayorder[2]=("mlem",h.score)
    elif typ==u"mdef":
        resdef.append(fcontent)
        if displayorder[3][1]<h.score:
            displayorder[3]=("mdef",h.score)
    elif typ==u"mcor":
        rescor.append(fcontent)
        if displayorder[4][1]<h.score:
            displayorder[4]=("mcor",h.score)

# order the sections by their best hit's score, highest first
displayorder=sorted(displayorder, key=lambda d: d[1], reverse=True)

print "generating results..."

# readtemplate
fp=open("template.tex")
template=fp.read()
fp.close()

allcontent=""
# write to a tex file: one \mysection per non-empty type, best score first
for (u,v) in displayorder:
    if v>0:
        if u=="mthm":
            mysecttext=sectiontext%("Theorems",str(len(resthm)))+"\n"
            allcontent+=mysecttext+'\n\n'.join([s for s in resthm])+'\n\n'
        elif u=="mrem":
            mysecttext=sectiontext%("Remarks",str(len(resrem)))+"\n"
            allcontent+=mysecttext+'\n\n'.join([s for s in resrem])+'\n\n'
        elif u=="mdef":
            mysecttext=sectiontext%("Definitions",str(len(resdef)))+"\n"
            allcontent+=mysecttext+'\n\n'.join([s for s in resdef])+'\n\n'           
        elif u=="mlem":
            mysecttext=sectiontext%("Lemmas",str(len(reslem)))+"\n"
            allcontent+=mysecttext+'\n\n'.join([s for s in reslem])+'\n\n'
        elif u=="mcor":
            mysecttext=sectiontext%("Corollaries",str(len(rescor)))+"\n"
            allcontent+=mysecttext+'\n\n'.join([s for s in rescor])+'\n\n'

# number of hits reported in the score plot, capped at 10
tnum=10
if len(results)<10:
    tnum= len(results)


# one \mybox bar per hit, height scaled to the top score
# NOTE(review): bars are emitted for *every* hit even though tnum caps at
# 10, so the plot width below may not match the bar count — confirm intended
drawtext=""
maxscore=results[0].score
for h in results:
    drawtext+=bartext%(h.rank*0.4,h.rank*0.4+0.3,h.score/maxscore*5)+"\n"

# plot geometry: width scales with the number of reported hits
xlen=4.2/10*tnum
xshift=13.8+(4.2-xlen)-0.2

# substitute query, timestamp, sections, plot and geometry into the template
maxscore="%.1f"%(maxscore)
finaltex=template.replace("@query",c_querytext).replace("@nowtime",str(datetime.now()).split('.')[0]).replace("@content",allcontent).replace("@totalnum",str(tnum)).replace("@drawplot",drawtext).replace("@maxscore",maxscore).replace("@xlen",str(xlen)).replace("@xshift",str(xshift))

# write result
fp=open(RESULTDIR+"/result.tex","w")
fp.write(finaltex)
fp.close()


os.chdir("./"+RESULTDIR)

with open(os.devnull, 'wb') as devnull:
    subprocess.check_call(['latex','result.tex'], stdout=devnull, stderr=subprocess.STDOUT)
    subprocess.check_call(['dvipdf','result.dvi', 'result.pdf'], stdout=devnull, stderr=subprocess.STDOUT)

print "showing the result page..."
subprocess.check_call([PDFVIEWER, "result.pdf"])

print "cleaning temp files..."
os.chdir("../")
shutil.rmtree(RESULTDIR)

searcher.close()

print "closed."
