#!/usr/bin/python
# -*- encoding:utf-8 -*-

import os,sys,re
import locale
import urllib,urllib2
import urlparse

# json entered the standard library in Python 2.6; on older interpreters fall
# back to the API-compatible third-party simplejson package.
try:
    import json
except ImportError:
    try:
        import simplejson
        json = simplejson
    except ImportError:
        raise Exception("Python >= 2.6 is needed, or you can install simplejson")

def is_indexpage(page):
    """Return True when *page* looks like a book index page.

    Currently a stub: every page is treated as an index page, so the
    redirect-to-index path in download_book() is never taken.
    """
    return True

def get_indexpage(page):
    """Follow a chapter page's back-to-index link and return the index page.

    Currently a stub that hands *page* back unchanged; because
    is_indexpage() always answers True, this is never reached from
    download_book().
    """
    return page

def get_ch_urls(page):
    """Extract (relative_url, link_text) chapter links from an index page.

    Tries three heuristics in turn and accepts one as soon as more than
    3/4 of all <a> links on the page match it:
      1. the url stem (filename without .htm/.html) is numeric: 001.html, ...
      2. the link text is numeric: 1, 2, 3, ...
      3. the link text contains Chinese numerals: 一, 二, 三, ...

    Raises Exception when no heuristic reaches the threshold.
    """
    ms = re.findall(r'<a href="?(?P<url>(?P<ptn>.+?)\.html?)"?.*?>(?P<txt>.+?)</a>', page, re.I)
    # A heuristic wins when it matches strictly more than 3/4 of the links.
    threshold = len(ms) * 3.0 / 4

    # Heuristic 1: is the url stem in pattern 001 002 003 ... ?
    l = list()
    for m in ms:
        try:
            int(m[1])
        except ValueError:
            continue
        l.append((m[0], m[2]))
    if len(l) > threshold:
        return l

    # Heuristic 2: is the link text in pattern 001 002 003 ... ?
    l1 = list()
    for m in ms:
        try:
            int(m[2])
        except ValueError:
            continue
        l1.append((m[0], m[2]))
    if len(l1) > threshold:
        return l1
    if len(l) > 0 and len(l) == len(l1):
        return l1

    # Heuristic 3: does the link text contain Chinese numerals 一 二 三 ... ?
    l2 = list()
    for m in ms:
        if re.search(u"[一二三四五六七八九十百佰千仟]+", m[2].decode("utf8")):
            l2.append((m[0], m[2]))
    if len(l2) > threshold:
        # BUG FIX: previously returned `l` here, discarding the Chinese-numeral
        # matches this branch just collected and thresholded.
        return l2
    if len(l) > 0 and len(l) == len(l2):
        return l

    raise Exception("cannot find chapter url pattern")

def html2txt(page):
    """Strip HTML markup from *page* and return plain text.

    Keeps the content between <body>...</body> when present (or from <body>
    to the end when </body> is missing), converts <br> to newlines, drops
    script/style/anchor elements and all remaining tags, unescapes common
    entities, and finally removes newlines not followed by whitespace (a
    paragraph heuristic: continuation lines in the source are rejoined,
    while indented paragraph starts keep their break).
    """
    if not re.search(r"<body.*?>", page, re.I | re.DOTALL):
        txt = page
    elif not re.search(r"</body>", page, re.I | re.DOTALL):
        # No closing tag: take everything from <body> to end of input.
        txt = re.search(re.compile(r'<body.*?>.+', re.I | re.DOTALL), page).group()
    else:
        txt = re.search(re.compile(r'<body.*?>.+</body>', re.I | re.DOTALL), page).group()
    txt = re.sub(re.compile(r"<br.*?>", re.I), "\n", txt)
    txt = re.sub(re.compile(r"<script.*?>.*?</script>", re.I | re.DOTALL), "", txt)
    txt = re.sub(re.compile(r"<style.*?>.*?</style>", re.I | re.DOTALL), "", txt)
    # Anchors are dropped entirely: on these pages they are navigation links.
    txt = re.sub(re.compile(r"<a.*?>.*?</a>", re.I | re.DOTALL), "", txt)
    txt = re.sub(r"<.+?>", "", txt)
    # Unescape entities in a FIXED order; iterating a dict (as before) has
    # arbitrary order in Python 2, and replacing "&amp;" before "&lt;"/"&gt;"
    # double-unescapes e.g. "&amp;lt;" into "<" instead of "&lt;".
    # "&amp;" must therefore come last.
    escape_seq = (("\r\n", "\n"),
                  ("&nbsp;", " "),
                  ("&gt;", ">"),
                  ("&lt;", "<"),
                  ("&quot;", "\""),
                  ("&amp;", "&"),
                  )
    for entity, plain in escape_seq:
        txt = txt.replace(entity, plain)
    return re.sub(r"\n(?![\s　])", "", txt)

def usage():
    print u"""Usage:
%s 书名 [目录页网址]
        从tianyabook.com下载，并转换为TXT电子书，如果提供了[目录页网址]，则将其作为目录页
%s -c 书名 HTML文件1 [HTML文件2] ...
        将[HTML文件1] [HTML文件2] ...转换为TXT电子书，文件名为[书名.txt]
""" % (sys.argv[0], sys.argv[0])

def main():
    """Dispatch to convert or download mode based on argv; return exit status."""
    argc = len(sys.argv)
    # No arguments at all: nothing to do.
    if argc < 2:
        usage()
        return 1
    if sys.argv[1] == "-c":
        # Convert mode needs at least a book name and one HTML file.
        if argc < 4:
            usage()
            return 1
        return convert_book()
    return download_book()

def convert_book():
    """Convert the HTML files in sys.argv[3:] into a single <bookname>.txt.

    sys.argv[2] is the book name; each following argument is an HTML file
    appended (as plain text) to the output in order. Returns 0.
    """
    bn = sys.argv[2]
    # `with` guarantees the handles are closed/flushed (the originals leaked).
    with open(bn + ".txt", "wb") as out:
        for fn in sys.argv[3:]:
            with open(fn) as src:
                out.write(html2txt(src.read()))
    return 0

def download_book():
    bn = sys.argv[1]
    tyindexurl = None
    if len(sys.argv) > 2:
        tyindexurl = sys.argv[2]

    if not tyindexurl:
        #search google
        d=json.load(urllib2.urlopen('http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s+site%%3Awww.tianyabook.com'%urllib.quote_plus(bn.decode(locale.getpreferredencoding()).encode('utf8'))))
        if len(d["responseData"]["results"]) == 0:
            print u"没有找到这本书"
            return 2
        tyindexurl=d["responseData"]["results"][0]["url"].encode("utf8")
        
    
    try:
        os.mkdir(bn)
    except:
        pass
    os.chdir(bn)
    
    #try to download index page
    indexpage=urllib2.urlopen(tyindexurl).read().decode("gb18030").encode("utf8")
    #if 回目录 link found, we are not in index, follow that link to index page
    if not is_indexpage(indexpage):
        tyindexurl, indexpage = get_indexpage(indexpage)
    #else we are in index page
    print "index page url:",tyindexurl
    f = open(bn+".txt", "wb")
    for item in get_ch_urls(indexpage):
        f.write("\n" + "\n".join(["-"*20, item[1], "-"*20]) + "\n")
        churl=urlparse.urljoin(tyindexurl, item[0])
        garbage, path = urllib.splittype(churl)
        garbage, path = urllib.splithost(path or "")
        path, garbage = urllib.splitquery(path or "")
        path, garbage = urllib.splitattr(path or "")
        fn=os.path.basename(path)
        if not os.path.exists(fn):
            open(fn, "wb").write(urllib2.urlopen(churl).read().decode("gb18030").encode("utf8"))
        f.write(html2txt(open(fn).read()))
        
    return 0
        

# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())
    
