#!/usr/bin/python

import sys
import re

class TOKEN_TYPE(object):
    """Token-type "enum": each name maps to its index in `values`.

    NOTE(review): the nested __metaclass__ is a Python 2 idiom; under
    Python 3 it would be inert and TOKEN_TYPE.HEADING etc. would fail.
    """
    # Order matters: the range checks below and the nesting-reconstruction
    # pass both rely on these positions (block types first, TEXT last).
    values = ["HEADING","P","PRE","BLOCKQUOTE","ATTRIBUTION","DIVIDER","EMPH","STRONG","INS","DEL","HL","TEXT"]
    class __metaclass__(type):
        # Py2 metaclass hook: unknown class-attribute lookups resolve to
        # the name's position in `values` (e.g. TOKEN_TYPE.P == 1).
        def __getattr__(self, name):
            return TOKEN_TYPE.values.index(name)
    @staticmethod
    def nameOf(i):
        # Reverse mapping: integer type -> symbolic name.
        return TOKEN_TYPE.values[i]
    @staticmethod
    def isBlock(i):
        # Block-level types: HEADING..DIVIDER (indices 0-5).
        return i<6
    @staticmethod
    def isInline(i):
        # Inline types: EMPH..HL (indices 6-10).
        return (i>=6 and i<11)
    @staticmethod
    def isBlockSpanning(i):
        # Inline types allowed to span block boundaries: INS, DEL, HL (8-10).
        return (i>=8 and i<11)


class Token:
    """A single lexical token produced by the tokenizer.

    Attributes:
        type:      integer TOKEN_TYPE index.
        data:      per-token payload (heading level, text, HL options, ...).
        is_start:  True when this token opens its element.
        is_end:    True when this token closes its element.
        userindex: index shared by the start/end tokens of one block element.
    """

    def __init__(self):
        # BUGFIX: these were previously class attributes, so the mutable
        # `data` list was shared by every Token instance; instance
        # attributes give each token its own payload.
        self.type = TOKEN_TYPE.TEXT
        self.data = []
        self.is_start = True
        self.is_end = False
        self.userindex = 0



class proseupTokenizer:
    """Tokenizes "proseup" markup into a flat list of Token objects.

    Line-level syntax is dispatched by tokenizeFile(); inline markup
    (emphasis, strong, ins, del, highlight) is handled by handle_inline().
    """

    # Splits text on unescaped inline markers: "{options%" (highlight open
    # carrying options), "*" (emph), "~" (strong), "{+"/"+}" (ins),
    # "{-"/"-}" (del), and "{%"/"%}" (plain highlight open/close).
    inlinepattern = re.compile(r"(?<!\\)(\{[^\+\-%][^%]*%|\*|~|\{\+|\+\}|\{\-|\-\}|\{%|%\})")

    def _make_token(self, toktype, is_start, is_end, data=None):
        # Factory helper: build a Token with the given flags; `data` is only
        # assigned when supplied so tokens without payloads keep the default.
        tok = Token()
        tok.type = toktype
        tok.is_start = is_start
        tok.is_end = is_end
        if data is not None:
            tok.data = data
        return tok

    def handle_inline(self, text):
        """Convert a text span into TEXT tokens interleaved with inline
        markup tokens; emph/strong left open at the end are auto-closed."""
        emph_on = False
        strong_on = False
        # True when emph was opened more recently than strong; selects the
        # closing order when both are still open at the end.
        lastemph = False
        retlist = []

        for part in self.inlinepattern.split(text):
            if part == "*":
                if not emph_on:
                    lastemph = True
                retlist.append(self._make_token(TOKEN_TYPE.EMPH, not emph_on, emph_on))
                emph_on = not emph_on
            elif part == "~":
                if not strong_on:
                    lastemph = False
                retlist.append(self._make_token(TOKEN_TYPE.STRONG, not strong_on, strong_on))
                strong_on = not strong_on
            elif part == "{-":
                retlist.append(self._make_token(TOKEN_TYPE.DEL, True, False))
            elif part == "-}":
                retlist.append(self._make_token(TOKEN_TYPE.DEL, False, True))
            elif part == "{+":
                retlist.append(self._make_token(TOKEN_TYPE.INS, True, False))
            elif part == "+}":
                retlist.append(self._make_token(TOKEN_TYPE.INS, False, True))
            elif part[:1] == "{" and part[-1:] == "%":
                # Highlight open; the marker may carry "color;title" options.
                newtag = self._make_token(TOKEN_TYPE.HL, True, False, [])
                tupledata = part[1:-1].split(";", 1)
                if len(tupledata) == 2:
                    if len(tupledata[1]) > 0:
                        newtag.data = tupledata
                    elif len(tupledata[0]) > 0:
                        # BUGFIX: original had a typo ("tupedata") and a bare
                        # subscript that never assigned anything.
                        newtag.data = [tupledata[0]]
                elif len(tupledata) == 1 and len(tupledata[0]) > 0:
                    newtag.data = tupledata
                retlist.append(newtag)
            elif part == "%}":
                retlist.append(self._make_token(TOKEN_TYPE.HL, False, True))
            else:
                retlist.append(self._make_token(TOKEN_TYPE.TEXT, False, False, [part]))

        # Auto-close whatever is still open, most recently opened first.
        if emph_on and strong_on:
            emphclose = self._make_token(TOKEN_TYPE.EMPH, False, True)
            # BUGFIX: the original built two EMPH closers although the other
            # still-open element is STRONG.
            strongclose = self._make_token(TOKEN_TYPE.STRONG, False, True)
            if lastemph:
                retlist += [emphclose, strongclose]
            else:
                retlist += [strongclose, emphclose]
        elif emph_on:
            retlist.append(self._make_token(TOKEN_TYPE.EMPH, False, True))
        elif strong_on:
            retlist.append(self._make_token(TOKEN_TYPE.STRONG, False, True))

        return retlist

    def handle_header(self, text):
        """Tokenize a '#'-prefixed heading; the '#' count is the level."""
        count = 0
        while text.startswith("#" * (count + 1)):
            count += 1
        starttag = self._make_token(TOKEN_TYPE.HEADING, True, False, [count])
        endtag = self._make_token(TOKEN_TYPE.HEADING, False, True, [count])
        return [starttag] + self.handle_inline(text.strip().strip('#').strip()) + [endtag]

    def handle_divider(self, text):
        """Tokenize an '@'-prefixed divider; the '@' count picks the CSS class."""
        count = 0
        while text.startswith("@" * (count + 1)):
            count += 1
        return [self._make_token(TOKEN_TYPE.DIVIDER, True, True, [count])]

    def handle_paragraph(self, text):
        """Wrap an ordinary line in paragraph tokens."""
        starttag = self._make_token(TOKEN_TYPE.P, True, False)
        endtag = self._make_token(TOKEN_TYPE.P, False, True)
        return [starttag] + self.handle_inline(text.strip()) + [endtag]

    def handle_pre(self, textarr):
        """Wrap preformatted lines in PRE tokens.

        NOTE(review): the joined text starts with a newline (preserved from
        the original implementation) and still runs through handle_inline.
        """
        starttag = self._make_token(TOKEN_TYPE.PRE, True, False)
        endtag = self._make_token(TOKEN_TYPE.PRE, False, True)
        joinstr = "\n"
        for line in textarr:
            joinstr += line + "\n"
        return [starttag] + self.handle_inline(joinstr) + [endtag]

    def handle_blockquote(self, text):
        """Tokenize a '<'-prefixed blockquote.

        A trailing '>' simply closes the quote; otherwise a '>-' separator
        splits the quote body from an attribution.
        """
        starttag = self._make_token(TOKEN_TYPE.BLOCKQUOTE, True, False)
        endtag = self._make_token(TOKEN_TYPE.BLOCKQUOTE, False, True)

        outtext = text[1:].strip()
        # endswith also guards the empty string (the original indexed
        # outtext[-1] and crashed on a bare "<" line).
        if outtext.endswith(">"):
            return [starttag] + self.handle_inline(outtext[:-1]) + [endtag]
        part = outtext.rpartition(">-")
        if part[1] != ">-":
            # No attribution separator present.
            return [starttag] + self.handle_inline(part[2]) + [endtag]
        attstarttag = self._make_token(TOKEN_TYPE.ATTRIBUTION, True, False)
        attendtag = self._make_token(TOKEN_TYPE.ATTRIBUTION, False, True)
        return ([starttag] + self.handle_inline(part[0].strip()) + [endtag, attstarttag]
                + self.handle_inline(part[2].strip()) + [attendtag])

    def tokenizeFile(self, f):
        """Tokenize an iterable of lines (e.g. an open file).

        '----' lines toggle preformatted mode; '#', '<' and '@' prefixes
        select heading, blockquote and divider handling; other non-blank
        lines become paragraphs.
        """
        tokenlist = []
        in_pre = False
        linearr = []
        for line in f:
            # Drop a single trailing newline, if any.
            if line.endswith("\n"):
                line = line[:-1]

            if line.startswith("----"):
                if in_pre:
                    # Closing fence: flush the accumulated pre lines.
                    tokenlist += self.handle_pre(linearr)
                    linearr = []
                    in_pre = False
                else:
                    in_pre = True
            elif in_pre:
                linearr.append(line)
            elif line.startswith('#'):
                tokenlist += self.handle_header(line)
            elif line.startswith('<'):
                tokenlist += self.handle_blockquote(line)
            elif line.startswith('@'):
                tokenlist += self.handle_divider(line)
            elif line.strip():
                tokenlist += self.handle_paragraph(line)
        return tokenlist

if __name__ == "__main__":
    usagestr="""
    Usage:
    proseup.py [file]
    If file is absent, accepts input from stdin
    """
    # Read from the named file when one is given, otherwise from stdin.
    usefile = len(sys.argv) > 1
    if usefile:
        try:
            infile = open(sys.argv[1], 'r')
        except IOError:
            print 'Error: Can\'t open file.\n'+usagestr
            sys.exit(0)
    else:
        infile = sys.stdin
    tokenizer = proseupTokenizer()
    mytokenlist = tokenizer.tokenizeFile(infile)
    # Only close what we opened; never close stdin.
    if usefile:
        infile.close()

    # now, to convert to html
    def getOnTokenAtIndexForType(tokenlist, index, type):
        """Walk backwards from `index`; return the still-open start token
        of the given type, or None when that type is closed or absent."""
        if index <= 0 or index >= len(tokenlist):
            return None
        for pos in range(index - 1, -1, -1):
            candidate = tokenlist[pos]
            if candidate.type != type:
                continue
            if candidate.is_start and not candidate.is_end:
                # Found an unclosed opener: the type is "on" at `index`.
                return candidate
            if candidate.is_end and not candidate.is_start:
                # Hit a closer first: the type is not open here.
                return None
        return None


    def isTypeOn(tokenlist, index, type):
        """True when an element of `type` is still open at `index`."""
        # Idiom fix: compare with `is not None` (the original used ==None
        # and an if/True/False ladder) and return the bool directly.
        return getOnTokenAtIndexForType(tokenlist, index, type) is not None
    def removeRepeatsAndEmpties(tokenlist):
        """Drop empty TEXT tokens and redundant INS/DEL/HL tokens (an open
        while already open, or a close while not open).

        Mutates and returns `tokenlist`.
        """
        i = len(tokenlist) - 1
        # Iterate backwards so pops never shift unvisited indices.
        while i >= 0:
            toke = tokenlist[i]
            if toke.type == TOKEN_TYPE.TEXT and len(toke.data[0]) == 0:
                # BUGFIX: the original fell through after this pop and
                # re-indexed tokenlist[i], raising IndexError when the
                # popped token was the last element (and otherwise
                # re-examining the wrong, shifted token).
                tokenlist.pop(i)
            elif toke.type in (TOKEN_TYPE.DEL, TOKEN_TYPE.INS, TOKEN_TYPE.HL):
                ison = isTypeOn(tokenlist, i, toke.type)
                if ison and toke.is_start:
                    tokenlist.pop(i)
                elif not ison and toke.is_end:
                    tokenlist.pop(i)
            i -= 1
        return tokenlist



    mytokenlist=removeRepeatsAndEmpties(mytokenlist)

    #give each block level element a unique index
    # Start and end tokens of the same block share a userindex (the counter
    # only advances after an end token); doCancellation matches on it later.
    uindex=1
    for i,toke in enumerate(mytokenlist):
        if(TOKEN_TYPE.isBlock(toke.type)):
            mytokenlist[i].userindex=uindex
            if mytokenlist[i].is_end==True:
                uindex+=1

    # Rebuild a well-nested stream: each TEXT token is wrapped in open/close
    # pairs for every element still "on" at its position. The inner loop
    # walks type indices 10..0 (everything but TEXT), wrapping
    # innermost-first so lower type indices (block elements) end outermost.
    newtokenlist=[]
    for i, toke in enumerate(mytokenlist):
        if toke.type==TOKEN_TYPE.TEXT:
            sublist=[toke]
            for j,type in enumerate(reversed(TOKEN_TYPE.values[:-1])):
                # len(values[:-1])-j-1 converts j into the type index.
                ontoke=getOnTokenAtIndexForType(mytokenlist,i,len(TOKEN_TYPE.values[:-1])-j-1)
                if ontoke!=None:
                    # Synthesize a close token matching the open one.
                    newclose=Token()
                    newclose.type=ontoke.type
                    newclose.is_start=False
                    newclose.is_end=True
                    newclose.userindex=ontoke.userindex
                    sublist=[ontoke]+sublist+[newclose]
            newtokenlist+=sublist
        elif toke.type==TOKEN_TYPE.DIVIDER:
            # Dividers pass through unchanged; other non-TEXT tokens are
            # dropped here and re-created around TEXT above.
            newtokenlist+=[toke]

    def doCancellation(tokenlist):
        """Remove adjacent open/close pairs of the same type and userindex
        (i.e. empty elements), repeating until no adjacent pair cancels.

        Mutates and returns `tokenlist`.
        """
        pos = 0
        while pos < len(tokenlist) - 1:
            first = tokenlist[pos]
            second = tokenlist[pos + 1]
            cancels = (first.type == second.type
                       and first.is_start != second.is_start
                       and first.is_end != second.is_end
                       and first.userindex == second.userindex)
            if cancels:
                # Delete the pair, then step back one so a newly adjacent
                # pair can cancel as well.
                del tokenlist[pos:pos + 2]
                if pos > 0:
                    pos -= 1
            else:
                pos += 1
        return tokenlist

    newtokenlist=doCancellation(newtokenlist)
    # Serialize the token stream to HTML. lastheadnum remembers the heading
    # level so the closing tag matches the opening one.
    outstr=""
    lastheadnum=0
    for toke in newtokenlist:
        if toke.type==TOKEN_TYPE.HEADING and toke.is_start==True:
            lastheadnum=toke.data[0]
            outstr+="<h"+str(toke.data[0])+">"
        elif toke.type==TOKEN_TYPE.HEADING and toke.is_end==True:
            outstr+="</h"+str(lastheadnum)+">\n"
        elif toke.type==TOKEN_TYPE.P and toke.is_start==True:
            outstr+="<p>"
        elif toke.type==TOKEN_TYPE.P and toke.is_end==True:
            outstr+="</p>\n"
        elif toke.type==TOKEN_TYPE.PRE and toke.is_start==True:
            outstr+="<pre>"
        elif toke.type==TOKEN_TYPE.PRE and toke.is_end==True:
            outstr+="</pre>\n"
        elif toke.type==TOKEN_TYPE.BLOCKQUOTE and toke.is_start==True:
            outstr+="<blockquote>"
        elif toke.type==TOKEN_TYPE.BLOCKQUOTE and toke.is_end==True:
            outstr+="</blockquote>\n"
        elif toke.type==TOKEN_TYPE.ATTRIBUTION and toke.is_start==True:
            outstr+="<div class=\"attribution\">"
        elif toke.type==TOKEN_TYPE.ATTRIBUTION and toke.is_end==True:
            outstr+="</div>\n"
        elif toke.type==TOKEN_TYPE.DIVIDER:
            # data[0] is the '@' count from the source line.
            outstr+="<hr class=\"divider_"+str(toke.data[0])+"\" />\n"
        elif toke.type==TOKEN_TYPE.EMPH and toke.is_start==True:
            outstr+="<i>"
        elif toke.type==TOKEN_TYPE.EMPH and toke.is_end==True:
            outstr+="</i>"
        elif toke.type==TOKEN_TYPE.STRONG and toke.is_start==True:
            outstr+="<b>"
        elif toke.type==TOKEN_TYPE.STRONG and toke.is_end==True:
            outstr+="</b>"
        elif toke.type==TOKEN_TYPE.INS and toke.is_start==True:
            outstr+="<ins>"
        elif toke.type==TOKEN_TYPE.INS and toke.is_end==True:
            outstr+="</ins>"
        elif toke.type==TOKEN_TYPE.DEL and toke.is_start==True:
            outstr+="<del>"
        elif toke.type==TOKEN_TYPE.DEL and toke.is_end==True:
            outstr+="</del>"
        elif toke.type==TOKEN_TYPE.HL and toke.is_start==True:
            # Highlight open: data is [] (all defaults), [color] or
            # [color, title]; an empty color falls back to lightyellow.
            outstr+="<span"
            if len(toke.data)==2:
                if len(toke.data[0])>0:
                    outstr+=" style=\"background:"+toke.data[0]+"\""
                else:
                    outstr+=" style=\"background:lightyellow\""
                if len(toke.data[1])>0:
                    outstr+=" title=\""+toke.data[1]+"\""
            elif len(toke.data)==1 and len(toke.data[0])>0:
                outstr+=" style=\"background:"+toke.data[0]+"\""
            else:
                outstr+=" style=\"background:lightyellow\""
            outstr+=">"
        elif toke.type==TOKEN_TYPE.HL and toke.is_end==True:
            outstr+="</span>"
        elif toke.type==TOKEN_TYPE.TEXT:
            # NOTE(review): text and HL attribute values are emitted without
            # HTML escaping; input containing markup passes through verbatim.
            outstr+=toke.data[0]
    print outstr
