# Time-stamp: <2011-06-06 Mon 08:55:18>

import re, os, shutil
from whoosh.index import create_in
from whoosh import index
from whoosh.fields import *
from whoosh.spelling import SpellChecker
from whoosh.analysis import *
from datetime import datetime

INDEXDIR="index"    # directory holding the whoosh indices

# re to extract theorems from TEX file:
#   \begin{<type>}[<page>]<contents>\end{<type>}
re_thm = r"""\\begin{(?P<type>\w*)}\[(?P<page>\d*?)\](?P<contents>.*?)\\end{(\1)}"""
# matches any TeX/math markup: commands (except emph/textbf/begin/end),
# $...$, \[...\], eqnarray environments -- used to strip math from text
re_math = r"""(?:\\(?!emph|textbf|begin|end)\w*?(({.*?})+|\s))|(?:\\\[.*?\\\])|(?:\$.*?\$)|(?:\\begin{eqnarray\**}.*?\\end{eqnarray\**})|(?:\\emph|textbf)"""
# display math only: \[...\] and eqnarray environments
re_math_concl=r"""(?:\\\[.*?\\\])|(?:\\begin{eqnarray\**}.*?\\end{eqnarray\**})"""
# captures bare math contents (m1: \[...\], m2: $...$, m3: eqnarray)
re_only_math=r"""(\\\[(?P<m1>.*?)\\\])|(\$(?P<m2>.*?)\$)|(?:\\begin{eqnarray\**}(?P<m3>.*?)\\end{eqnarray\**})"""
# inline math only: the contents of $...$
re_simple_math=r"""\$(?P<m1>.*?)\$"""
# sub/superscripted symbols such as x_{i} or x^{j}
re_symb1=r"""\w+[_\^]\{\D*?\}"""
# function-application symbols such as f(x)
re_symb2=r"""\w\(\w\)"""
re_tab=re.compile(r"""\t+""")   # collapses tab runs in the keymapping file
symbcontext_dict={}             # math symbol -> accumulated context words

def UpdateSymbContext(symb, sent, rev=False):
    """Accumulate boosted context words for every math symbol in symb.

    symb -- a raw math snippet (e.g. the contents of a $...$ group)
    sent -- the surrounding sentence supplying the context words
    rev  -- when True, process the words of sent nearest-first
            (used for text to the LEFT of the symbol)

    Side effect: appends weighted word strings to the module-level
    symbcontext_dict, keyed by the lowercased sub-symbols of symb.
    """
    global symbcontext_dict
    context = [tok.text for tok in my_analyzer(sent)]
    if rev:
        # left-hand context: start from the word closest to the symbol
        context.reverse()
    # extract sub-symbols: x_{i} / x^{j} style and f(x) style
    raw_symbols = compile_re_symb1.findall(symb)
    raw_symbols += compile_re_symb2.findall(symb)
    if re.match(r"""\A\\\w+$""", symb):
        # a lone TeX command (e.g. \alpha) counts as a symbol too
        raw_symbols.append(symb)
    # lowercase BEFORE deduplicating so case-variants of the same
    # symbol are not double-counted (the original dedup'd first)
    allsymb = set(s.lower() for s in raw_symbols if s.strip())
    if not allsymb:
        return
    # boost is 5,3,1 for the first three words and <=0 afterwards,
    # so only the first three context words can ever contribute --
    # no need to scan the rest of the sentence
    for i, word in enumerate(context[:3]):
        this_boost = 5 - 2 * i  # encourage short-dist words
        this_string = ("%s " % word) * this_boost
        for tsymb in allsymb:
            symbcontext_dict[tsymb] = symbcontext_dict.get(tsymb, "") + this_string

def UpdateSymbDict(sent):
    """Scan sent for inline math ($...$) and record surrounding words.

    For each $...$ occurrence, the text to its left is passed to
    UpdateSymbContext with rev=True (nearest word first) and the text
    up to the next occurrence (or end of string) as right context.
    Note the text between two symbols serves as right context for the
    first AND left context for the second.
    """
    c=compile_re_simple_math.search(sent)
    lastend=0
    if c:
        lastsymb=c.group(1)  # contents of the first $...$ group
    while(c):
        # text between the previous match (or start) and this match
        left_sent=sent[lastend:c.start()]
        # process last word score: from left to right decrease score # w1<3> w2<2> w3<1>
        UpdateSymbContext(lastsymb,left_sent, True)
        lastend=c.end()
        c=compile_re_simple_math.search(sent,lastend)
        if c:
            right_sent=sent[lastend:c.start()]
            UpdateSymbContext(lastsymb,right_sent)
        else:
            # no further math: the remainder is the right context
            right_sent=sent[lastend:]
            UpdateSymbContext(lastsymb,right_sent)
            break
        lastsymb=c.group(1)     # make this as lastsymb
        

# Analyzers for the text content: both lowercase and drop stopwords;
# my_analyzer additionally stems, my_analyzer2 keeps the surface form
# (used to collect the spell-check vocabulary).
my_analyzer = RegexTokenizer() | LowercaseFilter() | StopFilter() | StemFilter()
my_analyzer2 = RegexTokenizer() | LowercaseFilter() | StopFilter()

# schema used in our data base
schema = Schema(typ=KEYWORD(lowercase=True, stored=True),  # def,thm,remark,...
                tags=TEXT(analyzer=my_analyzer),  # keywords extracted from math symbol
                content=TEXT(analyzer=my_analyzer, stored=True),  # major text content
                orgcontent=STORED,  # untouched TeX source of the statement
                mathcontent=NGRAMWORDS(minsize=3, maxsize=7, tokenizer=SpaceSeparatedTokenizer()),
                pagenum=NUMERIC(signed=False, stored=True),  # page number
                date=DATETIME  # indexing timestamp
                )

# schema mapping a math symbol to the words seen around it
schema_math = Schema(context=TEXT(phrase=False, stored=False),  # context words
                     symb=STORED  # the symbol itself
                     )

# prepare the directory for indexing: whoosh's create_in requires an
# existing directory (and overwrites any previous index inside it)
if not os.path.exists(INDEXDIR):
    os.mkdir(INDEXDIR)

ix = create_in(INDEXDIR, schema, indexname="normal")
writer = ix.writer()
ix_math = create_in(INDEXDIR, schema_math, indexname="math")
writer_math = ix_math.writer()

# read all theorems from database
with open('theorem.tex') as fp:
    rawtxt = fp.read()

# keymapping: tab-separated lines of <regex>\t<keyword>; runs of tabs
# are collapsed first so sloppy alignment still splits into two columns
with open('keymapping') as fp:
    allkeys = [re_tab.sub("\t", v).split('\t') for v in fp.readlines() if len(v.strip()) > 0]

def GetKeyword(content):
    """Collect keyword labels whose pattern matches a string in content.

    Each chunk of content is tested against every (pattern, label) row
    of the module-level allkeys table with re.search; the labels of all
    matching rows are joined with single spaces and returned as unicode.
    """
    labels = []
    for chunk in content:
        for row in allkeys:
            if re.search(row[0].strip(), chunk):
                labels.append(row[1].strip())
    return unicode(" ".join(labels))
            

# Pre-compile all patterns once (IGNORECASE + DOTALL so statement
# bodies may span several lines).
compile_re_thm = re.compile(re_thm,  re.IGNORECASE| re.DOTALL)
compile_re_math = re.compile(re_math,  re.IGNORECASE| re.DOTALL)
compile_re_math_concl=re.compile(re_math_concl, re.IGNORECASE| re.DOTALL)
compile_re_only_math=re.compile(re_only_math,re.IGNORECASE| re.DOTALL)
compile_re_simple_math=re.compile(re_simple_math, re.IGNORECASE|re.DOTALL)
compile_re_symb1=re.compile(re_symb1, re.IGNORECASE|re.DOTALL)
compile_re_symb2=re.compile(re_symb2, re.IGNORECASE|re.DOTALL)
# each match is a tuple (type, page, contents, closing-type)
match_thm = compile_re_thm.findall(rawtxt)


print "find", len(match_thm), "statements."

print "indexing..."
allwords=set()          # vocabulary collected for the spell checker

for mat in match_thm:
    this_date=datetime.utcnow()
    this_pnum=int(mat[1])       # page number from the [...] option
    # plain-text content: all math/TeX markup stripped before indexing
    this_content=unicode(compile_re_math.sub("",mat[2]))
    for token in my_analyzer2(this_content):
        allwords.add(token.text)
    this_org=unicode(mat[2].strip())  # original TeX source, stored verbatim
    # learn symbol contexts from the body with display math removed
    UpdateSymbDict(unicode(compile_re_math_concl.sub("",this_org)))
    this_tags=u""
    like_concl=compile_re_math_concl.findall(mat[2])
    if len(like_concl)>0:       # may have a conclusion
        this_tags=GetKeyword(like_concl)
        for token in this_tags.split(' '):
            allwords.add(token)
    # msym[3] is the m2 group of re_only_math, i.e. the $...$ contents;
    # display-math groups are skipped here -- presumably intentional,
    # TODO confirm
    only_math=compile_re_only_math.findall(mat[2])
    this_onlymath=unicode(" ".join([msym[3].strip() for msym in only_math]))
    this_typ=unicode(mat[0])
    writer.add_document(typ=this_typ, tags=this_tags, content=this_content, pagenum=this_pnum, date=this_date, orgcontent=this_org,mathcontent=this_onlymath)

writer.commit(optimize=True)

# flush the symbol -> context mapping collected by UpdateSymbDict
for u,v in symbcontext_dict.items():
    writer_math.add_document(symb=unicode(u),context=v)
writer_math.commit(optimize=True)


print "building dictionary..."
# create the spelling-check dictionary from the collected vocabulary
ix = index.open_dir("index")
speller = SpellChecker(ix.storage)
speller.add_words(allwords)
print "done."
print "you are ready to search."
