# -*- coding: utf-8 -*-
'''
Created on Aug 24, 2012

@author: LONG HOANG GIANG
'''
import os
import sys
sys.path.append(os.path.expanduser('/home5/vietcntt/longhoanggiang/python'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from CrawlerLib import commonlib, html2text
from Model.storymodelv2 import StoryModel
from lxml import etree
from urlparse import urljoin
import datetime
import mechanize
import re
import time
import traceback
import workerpool


# Destination directory where getDetail() stores images downloaded from articles.
OUTPUT = '/longhoanggiang/khoahoc/'

def getDetail(url):
    """Download one article page and return its body as HTML text.

    Fetches *url*, extracts the ``div#divContent`` element, converts it to
    text with html2text (downloading embedded images into OUTPUT), and turns
    newlines into ``<br />`` tags so the text renders with line breaks.

    Returns the formatted detail string, or None when the page could not be
    loaded or has no content div.
    """
    tree = commonlib.loadweb(url).build_tree()
    if tree is None:  # identity check; `== None` is unreliable for lxml proxies
        return None
    content = tree.xpath("//div[@id='divContent']")
    # xpath() always returns a list (never None); empty means no content div.
    if not content:
        return None
    html = commonlib.Etree.tostring(content)
    detail = html2text.html2text(html, baseurl=url, download=commonlib.downloadImage, path=OUTPUT, prefix='', w100=True)
    # Preserve line breaks when the plain text is later rendered as HTML.
    detail = re.sub(r"\n", "<br />\n", detail)
    return detail
    
def process(kurl, maxpage=50):
    smodel = StoryModel(DBNAME)
    smodel.open(True)
    for page in range(1,maxpage + 1):
        cc = 0
        while cc<3:
            try:
                print '> processing page {0}'.format(page)
                url = 'http://www.khoahoc.com.vn/{0}/index{1}.aspx'.format(kurl, page)
                tree = commonlib.loadweb(url).build_tree()
                if tree == None: break
                links = []
                for xp in ["//div[@id='QTM_ctrlCategory_pnlTopArticles']//a[@class='title1']", "//div[@class='catArticle']/table//td/a", "//div[@class='catArticle']/ul[@class='spl_links2']/li/a"]:
                    for item in tree.xpath(xp):
                        href = commonlib.normalize_str(item.get('href'))
                        if href == '': continue
                        href = urljoin(url, href)
                        links.append({'name': commonlib.stringify(item), 'url': href})
                
                def loadURL(item):
                    commonlib.loadweb(item['url'])
                
                pool = workerpool.WorkerPool(size=10)
                pool.map(loadURL, links)
                pool.shutdown()
                pool.wait()
                
                for item in links:
                    ci = 0
                    while ci<3:
                        try:
                            detail = getDetail(item['url'])
                            if detail == None: raise
                            item['name'] = re.sub(r"'", "\'", item['name'])
                            name = commonlib.toUpper(item['name'])
                            detail = '''<b>{0}</b><br /><hr /><br />{1}'''.format(item['name'], detail)
                            smodel.add_story(name, detail, 0)
                            print '> OK, saved {0}'.format(commonlib.normalize_str(item['name']))
                            break
                        except:
                            ci += 1
                            traceback.print_exc()
                            print '>> wait 5s for retry'
                            time.sleep(5)
                break
            except:
                cc += 1
                traceback.print_exc()
                print '>> wait 5s for retry'
                time.sleep(5)
    smodel.close()
    

if __name__ == '__main__':
    
    BIAN1001 = '1001bian'
    KHOAHOCVUTRU = 'khoahocvutru'
    YHOCDSONG = 'yhocdsong'
    
    categories = {BIAN1001: 'khampha/1001-bi-an', KHOAHOCVUTRU: 'khampha/vu-tru', YHOCDSONG: 'doisong/yhoc'}
    DBNAME = "/longhoanggiang/database/{0}".format(BIAN1001)
    process(categories[BIAN1001], 45)
    
#    getDetail('http://www.khoahoc.com.vn/khampha/1001-bi-an/42237_Hinh-ve-ki-la-4000-nam-tuoi-o-Nga.aspx')
#    print getDetail('http://www.khoahoc.com.vn/khampha/sinh-vat-hoc/thuc-vat/41812_Cay-gai-dau-ngan-ung-thu-vu-di-can.aspx')
    
    print '> Finished at {0}'.format(datetime.datetime.now())
    os._exit(1)
    