# -*- coding: utf-8 -*-
'''
Created on 09-04-2013

@author: LONG HOANG GIANG
'''
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from urlparse import urljoin
import traceback
import re
import pyLib
import json
import logging
import workerpool
import datetime



logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d/%m/%Y %H:%M:%S')


class TungHoanh():
    
    __config_path = "/longhoanggiang/tunghoanh/v2/"
    __config_cookie = ""
    
    def __init__(self):
        if not os.path.exists(self.__config_path):
            os.makedirs(self.__config_path, 0777)
    
    def setCookie(self, cookie):
        if cookie.strip() != '':
            self.__config_cookie = cookie
        
    def getCookie(self):
        return self.__config_cookie

    def getCat(self):
        http = pyLib.loadWeb('http://tunghoanh.com')
        self.setCookie(http.getcookie())
        tree = http.build_tree()
        data = []
        stored = []
        for node in tree.xpath("//div[@id='sidebar']/div[@class='cat_border']//ul[@id='cat']/li[position()>2]/a"):
            name = pyLib.stringify(node).replace(' ¤', '').strip()
            link = node.get('href')
            if link == '': continue
            link = urljoin('http://tunghoanh.com', link)
            path = pyLib.toAscii(name).replace(' ', '-').lower()
            data.append({'path': path, 'name': name, 'url': link})
            stored.append({'path': path, 'name': name})
        pyLib.gzip(self.__config_path + "data", pyLib.encryptCipher(json.dumps(stored)))
        return data
        
    def getListStoryByCat(self, item):
        
        url = item['url']
        
        path = self.__config_path + item['path'] + "/"
        if not os.path.exists(path): os.makedirs(path, 0777)
        
        data = []
        save_data = []
        while True:
            http = pyLib.loadWeb(url, cookiestr=self.getCookie())
            self.setCookie(http.getcookie())
            tree = http.build_tree()
            for node in tree.xpath("//div[@class='list_content']/ul/li"):
                name = pyLib.stringify(node.xpath("./p[@class='td2']/a")[0])
                link = node.xpath("./p[@class='td2']/a")[0].get('href')
                if link == '': continue
                link = urljoin(url, link)
                subpath = pyLib.extractText(r"/([^/]+)\.html", link, 1)
                if subpath == '': continue
                author = pyLib.stringify(node.xpath("./p[@class='td3']")[0])
                lastchapter = pyLib.stringify(node.xpath("./p[@class='td4']")[0])
                data.append({'name': name, 'path': subpath, 'author': author, 'url': link, 'absolutepath': path + subpath + "/"})
                save_data.append({'name': name, 'path': subpath, 'author': author, 'lastchapter': lastchapter})
            nextNode = tree.xpath("//div[@class='paging']/a[@class='pagecurrent']/following-sibling::a[1]")
            if len(nextNode) == 0: break
            nextUrl = nextNode[0].get('href')
            if nextUrl == '': break
            url = urljoin(url, nextUrl)
        pyLib.gzip(path + "data", pyLib.encryptCipher(json.dumps(save_data)))
        return data
    
    def getListChapter(self, item):
        logging.debug("getListChapter {0}".format(item['url']))
        url = item['url']
        data = []
        save_data = []
        http = pyLib.loadWeb(url, cookiestr=self.getCookie())
        self.setCookie(http.getcookie())
        tree = http.build_tree()
        for node in tree.xpath("//div[@class='story_chapter']//div[@class='chapter']/a"):
            href = node.get('href')
            if url == '': continue
            href = urljoin(url, href)
            tree = pyLib.loadWeb(href).build_tree()
            for inode in tree.xpath("//div[@class='list_chapters'][1]//select/option"):
                href = inode.get('value', '')
                if href == '': continue
                href = urljoin(url, href)
                name = pyLib.stringify(inode)
                fileId = pyLib.crc32unsigned(href)
                data.append({'name': name, 'url': href, 'path': fileId, 'absolutepath': item['absolutepath'] + 'files/{0}'.format(fileId)})
                save_data.append({'name': name, 'path': fileId})
            break
        pyLib.gzip(item['absolutepath'] + "data", pyLib.encryptCipher(json.dumps(save_data)))
        return data
        
    def getDetailContent(self, item):
        logging.info("getDetailContent: {0}, url: {1}".format(item['name'], item['url']))
        try:
            url = item['url']
            chapid = pyLib.extractText(r"/([^/]+)\.html", url, 1)
            content = self.post(chapid, url)
            print content
            content = """<b>{0}</b><br /><hr /><br />{1}""".format(item['name'], content)
            content = re.sub(r"\n", "<br />", content).strip()
            if len(content) == 0:
                message = "ERROR: NULL Content with URL: {0}".format(item['url'])
                raise Exception, message
            pyLib.gzip(item['absolutepath'], json.dumps(content))
        except:
            traceback.print_exc()
            return False
        return True
        
    def post(self, chapId, referer):
        url = 'http://tunghoanh.com/chapter/{0}.html'.format(chapId)
#        html = pyLib.loadWeb(url, cookiestr=self.getCookie(), referer=referer, sendajax=True).gethtml()
        tree = pyLib.loadWeb(url, cookiestr=self.getCookie(), referer=referer, sendajax=True).build_tree()
        for node in tree.xpath("//span[contains(@style, 'letter-spacing')]"):
            
            attrib = node.get('style', '')
            if re.search(r"letter-spacing: -[\d]+px", attrib):
                pyLib.Etree.clean(node) 
        html = pyLib.Etree.tostring(tree)
#        print html
        html = pyLib.cleanHTMLEntities(html)
        raw = pyLib.cleanHTML(html)
        
        raw = re.sub(r"http://[^ ]+", "", raw)
        
        return raw.strip()
    
    def startCrawl(self):
        categories = self.getCat()
        for cat in categories:
            stories = self.getListStoryByCat(cat)
            for story in stories:
                logging.info("start crawler story {0}".format(story['name']))
                chapters = self.getListChapter(story)
                pool = workerpool.WorkerPool(size=5)
                pool.map(self.getDetailContent, chapters)
                pool.shutdown()
                pool.wait()
                logging.info("finished story {0}".format(story['name']))
        return
        
    def startCrawlOne(self, url):
        if type(url).__name__ == 'list':
            for link in url:
                self.startCrawlOne(link)
        else:
            subpath = pyLib.extractText(r"/([^/]+)\.html", url, 1)
            if subpath == '': subpath = "untitled-{0}".format(datetime.datetime.now())
            path = "/longhoanggiang/truyentext/" + subpath
            if not path.endswith("/"): path += "/"
            pyLib.createIfNotExistsPath(path)
            chapters = self.getListChapter({'url': url, 'absolutepath': path, 'path': subpath})
            pool = workerpool.WorkerPool(size=10)
            pool.map(self.getDetailContent, chapters)
            pool.shutdown()
            pool.wait()
        return
        
    def startCrawlerCat(self, url):
        self.__config_path = "/longhoanggiang/tunghoanh/cat/"
        if not url.endswith('/'): url += '/'
        path = pyLib.extractText(r"tunghoanh\.com/([^/]+)/", url, 1).lower()
        stories = self.getListStoryByCat({'name': '', 'path': path, 'url': url})
        for story in stories:
            logging.info("start crawler story {0}".format(story['name']))
            chapters = self.getListChapter(story)
            pool = workerpool.WorkerPool(size=5)
            pool.map(self.getDetailContent, chapters)
            pool.shutdown()
            pool.wait()
    
    def getStoryListName(self, url):
        while True:
            http = pyLib.loadWeb(url, cookiestr=self.getCookie())
            self.setCookie(http.getcookie())
            tree = http.build_tree()
            for node in tree.xpath("//div[@class='list_content']/ul/li"):
                name = pyLib.stringify(node.xpath("./p[@class='td2']/a")[0])
                link = node.xpath("./p[@class='td2']/a")[0].get('href')
                if link == '': continue
                link = urljoin(url, link)
                
                print pyLib.toAscii(name).lower()
            nextNode = tree.xpath("//div[@class='paging']/a[@class='pagecurrent']/following-sibling::a[1]")
            if len(nextNode) == 0: break
            nextUrl = nextNode[0].get('href')
            if nextUrl == '': break
            url = urljoin(url, nextUrl)
        
    
if __name__ == '__main__':
    # Interactive entry point: prompt for a single story URL and crawl it.
    url = raw_input("URL: ")
    if url.startswith("http"):
        t = TungHoanh()
        t.startCrawlOne(url)
    else:
        logging.error("url must start with http")
    logging.info("finished")
    # BUG FIX: the original exited with status 1 even on success.
    # Hard-exit (os._exit) so lingering worker-pool threads cannot keep
    # the process alive; status 0 signals a normal run.
    os._exit(0)
#    
    
    
        
        