#! /usr/bin/env python
#coding=utf-8
import urllib2, gzip, StringIO, time, sqlite3, os, thread
from lxml import etree, html
from BeautifulSoup import UnicodeDammit
#from lxml.html.soupparser import fromstring

PATH = os.path.dirname(os.path.abspath(__file__))

def openzipurl(url):
    """Fetch *url* (advertising gzip support) and return its lxml DOM tree.

    The payload is gunzipped only when the server actually compressed it,
    then decoded as GB2312 (errors ignored) and parsed with lxml.html.
    """
    request = urllib2.Request(url.encode('utf8', 'ignore'))
    request.add_header('Accept-encoding', 'gzip')
    request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.904.0 Safari/535.7')
    opener = urllib2.build_opener()
    response = opener.open(request)
    try:
        data = response.read()
        # Servers are free to ignore Accept-encoding; feeding a plain body
        # to GzipFile would raise IOError, so only gunzip when the reply
        # really is compressed.
        if response.info().get('Content-Encoding') == 'gzip':
            data = gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
    finally:
        # Always release the socket, even if read/decompress fails.
        response.close()
    return html.document_fromstring(data.decode('GB2312', 'ignore'))

def baidufristhrefcatcher(chaptername):
    """Search Baidu for *chaptername* and return the first result's href."""
    query = urllib2.quote(chaptername.encode('utf8', 'ignore'))
    tree = openzipurl('http://www.baidu.com/s?wd=' + query)
    hrefs = tree.xpath("//*[name()='table'][@class='result']//a/@href")
    return hrefs[0]

def bingcatcher(chaptername):
    """input a keyword and return the first search result"""
    url = u"http://cn.bing.com/search?q=" + urllib2.quote(chaptername.encode("utf8"))
    print url.encode('utf8','ignore')
    root = openzipurl(url)
    return root.xpath("//div[@class='sb_tlst']//a/@href")[0]

def qidianchaptercatcher(url):
    """Scrape a www.qidian.com page at *url*; return its chapter titles as a list."""
    page = openzipurl(url)
    titles = page.xpath("//div[@class = 'list']//a/text()")
    return titles

def googlecatcher(chaptername):
    """Search Google (co.jp, zh-CN UI) for *chaptername*; return the first hit's href."""
    query = urllib2.quote(chaptername.encode("utf8"))
    url = (u"http://www.google.co.jp/search?hl=zh-CN&source=hp&biw=&bih=&q="
           + query + "&btnG=Google+%E6%90%9C%E7%B4%A2")
    return openzipurl(url).xpath("//h3//a/@href")[0]

def fic_handler():
    print 'start fiction!'
    con = sqlite3.connect(os.path.join(PATH, 'db.db'))
    cur = con.cursor()
    fiction_list = cur.execute('select * from Fiction_Fiction').fetchall()
    for fiction in fiction_list:
        print fiction[1].encode('utf8','ignore')
        db_chapter_list = cur.execute('select * from Fiction_Chapter where fiction_id = ?',(fiction[0],)).fetchall()
        new_chapter_list = qidianchaptercatcher(fiction[2])
        db_len = len(db_chapter_list)
        if db_len < len(new_chapter_list):
            cur1 = con.cursor()
            for chapter in new_chapter_list[db_len:]:
                cur1.execute('insert into Fiction_Chapter(fiction_id,chapter,url) values(?,?,?) ',(fiction[0], chapter,''))#IO,编码
            cur1.close()
        con.commit()
        print time.ctime()
        time.sleep(10)
    cur.close()
    con.close()
    print 'fiction done!'

def oldchap_handler():
    print 'update old chapter!'
    con = sqlite3.connect(os.path.join(PATH, 'db.db'))
    cur = con.cursor()
    cur1 = con.cursor()
    list = []
    fiction_list = cur.execute('select * from Fiction_Fiction').fetchall()
    for fiction in fiction_list:
        list.extend(cur1.execute('select * from Fiction_Chapter where fiction_id = ?',(fiction[0],)).fetchall()[-2:])
    print len(list)
    #print list
    for chapter in list:
        print chapter
        print time.ctime()
        print chapter[2].encode('utf8','ignore')
        url = googlecatcher(chapter[2])
        #url = baidufristhrefcatcher(chapter[2])
        #print url
        cur2 = con.cursor()
        cur2.execute('update Fiction_Chapter set url = ? where id = ?',(url,chapter[0]))
        con.commit()
        cur2.close()
        time.sleep(20)
    cur.close()
    cur1.close()
    con.close()
    print 'update old chapter done!'

def newchap_handler():
    print 'start new chapter!'
    con = sqlite3.connect(os.path.join(PATH, 'db.db'))
    cur = con.cursor()
    chapter_list = cur.execute('select * from Fiction_Chapter where url = ?',('',))
    for chapter in chapter_list.fetchall():
        print time.ctime()
        print chapter[2].encode('utf8','ignore')
        cur2 = con.cursor()
        url = googlecatcher(chapter[2])
        #url = baidufristhrefcatcher(chapter[2])
        #print url
        cur2.execute('update Fiction_Chapter set url = ? where id = ?',(url,chapter[0]))
        con.commit()
        cur2.close()
        time.sleep(20)
    cur.close()
    con.close()
    print 'new chapter done!'

def main():
    """Endlessly cycle the three handlers, launching each on its own thread
    and waiting five minutes before starting the next one."""
    handlers = (fic_handler, oldchap_handler, newchap_handler)
    while True:
        for handler in handlers:
            thread.start_new_thread(handler, ())
            time.sleep(300)

if __name__ == '__main__':
    main()
