# -*- coding: utf-8 -*-
import workerpool
import lxml.html
import cStringIO as StringIO
from lxml import etree
from mechanize import Browser
from pymongo import Connection
from urlparse import urljoin
import re 
import hashlib
import os
import sys
from urllib import urlretrieve
import time
import datetime
import logging
from mongolog.handlers import MongoHandler

# Reusable XPath expression that flattens a node into its full text content.
stringify = etree.XPath("string()")
# Entry page listing market events ("lich su kien" = event calendar) on cafef.vn.
url = 'http://cafef.vn/lich-su-kien.chn'
# Root directory where downloaded article images are stored locally.
localFilepath = '/home/hoangnamhai/HarvestedData/cafef/'

# MongoDB: scraped events are stored in the cafef.lichthitruong collection.
connection = Connection('localhost', 27017)
db = connection["cafef"]
collection = db["lichthitruong"]

warning_if_less = 100   # log a warning if an article has fewer than 100 words

# Logger that writes records into MongoDB ('mongolog' database, 'log' collection).
log = logging.getLogger('cafef')
log.setLevel(logging.DEBUG)
log.addHandler(MongoHandler.to('mongolog', 'log'))

def getElementText(elem):
    """Return the stripped plain-text content of an lxml element, or '' for None.

    The element is serialized and re-parsed as HTML so nested markup is
    flattened into a single text string.
    """
    # 'is None' instead of '== None': rich comparison on lxml elements is
    # deprecated and can behave unexpectedly; identity check is the fix.
    if elem is None:
        return ''
    t = lxml.html.fromstring(etree.tostring(elem))
    return t.text_content().strip()

def buildTree(url):
    """Fetch *url* with mechanize and return it parsed as an lxml HTML tree."""
    browser = Browser()
    page_source = browser.open(url).read()
    html_parser = etree.HTMLParser(encoding='utf-8')
    return etree.parse(StringIO.StringIO(page_source), html_parser)

def wordCount(text):
    """Count whitespace-separated words in *text*.

    Fixes two defects of the original: the parameter no longer shadows the
    builtin ``str``, and empty lines / runs of whitespace no longer produce
    phantom empty tokens (``''.split(' ')`` yields ``['']`` which counted
    as one word).
    """
    total = 0
    for line in text.strip().split('\n'):
        # split() with no argument collapses any whitespace and drops
        # empty tokens, so no manual re.sub normalization is needed.
        total += len(line.split())
    return total

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Build a 3-level directory prefix ('a/b/c/') from the MD5 of the input."""
    digest = getMD5Hash(stringToHash)
    return '{0}/{1}/{2}/'.format(*digest[:3])

def getMd5FileName(stringToHash):
    """Build a hashed relative file path: 'a/b/c/<remaining 29 hex digits>'."""
    digest = getMD5Hash(stringToHash)
    return '/'.join([digest[0], digest[1], digest[2], digest[3:]])
    
def saveImage(url, maCK):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    try:
        path = localFilepath + maCK + "/"
        localFilename = '{0}{1}.jpg'.format(path, getMd5FileName(url))
        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(path + getMd5Path(url)):
                os.makedirs(path + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        err_str = "saveImage error: {0} --> url: {1}".format(sys.exc_info()[1], url)
        print err_str
        log.error(err_str)
        pass
    return ''

def getThongTinSuKien(sukienLink, maCK):
    log.debug("Start getThongTinSuKien, pram: {0}:{1}".format(sukienLink, maCK))
    data = []
    wordsNum = 0
    try:
        tree = buildTree(sukienLink)
        print "URL: ", sukienLink
        images = tree.xpath("//td[contains(@style, 'padding-top:10px')]/table/tr[2]/td[1]//img")
        if len(images)>0:
            images = urljoin(sukienLink, images[0].get('src'))
            data.append({'type': 'image', 'data': images})
            saveImage(images, maCK)
        for item in tree.xpath("//div[@class='KenhF_Content_News3']/descendant::*"):
            if item.tag == 'a':
                link = urljoin(sukienLink, item.get('href'))
                data.append({'type': 'link', 'data': link, 'title': getElementText(item).strip()})
            else:
                if item.text is not None:
                    myText = re.sub(r"&#13;&lt;!--.*?--&gt;&#13;", '', re.sub(r"[\r\n\t]", '', getElementText(item).strip()))
                    if myText != '': 
                        data.append({'type': 'text', 'data': myText})
                        wordsNum += wordCount(myText)
                if item.tail is not None:
                    myText = item.tail.strip()
                    if myText != '': 
                        data.append({'type': 'text', 'data': myText})
                        wordsNum += wordCount(myText)
    except:
        err_str = "{0} --> url: {1}".format(sys.exc_info()[1], sukienLink)
        print err_str
        log.error(err_str)
        pass
    if wordsNum < warning_if_less:
        log.warn("So tu cua bai viet < {0}, url: {1}".format(warning_if_less, sukienLink))
    return data
     
iCount = 0  
def getLich(html):
    log.debug("Start getLich, param: {0}".format(html))
    global collection
    try:
        print 'getLich'
        parser = etree.HTMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser)
        data = {}
        global iCount
        for rowData in tree.xpath("//*[@id='ctl00_ContentPlaceHolder1_LichSuKien1_grvLichSuKien']//tr[position()>1]"):
            data['ngayGD'] = getElementText(rowData.xpath('./td[1]')[0])
            data['ngayDK'] = getElementText(rowData.xpath('./td[2]')[0])
            data['ngayTH'] = getElementText(rowData.xpath('./td[3]')[0])
            data['maCK'] = getElementText(rowData.xpath('./td[4]//a')[0])
            data['sukien'] = getElementText(rowData.xpath('./td[6]')[0])
            data['sukienLink'] = urljoin(url, rowData.xpath('./td[6]//a')[0].get('href'))
            data['_id'] = getMD5Hash(data['sukienLink'])
            if collection.find_one({'_id': data['_id']}):
                print "\nThis event already exist in database!"
                continue
            data['noidungSuKien'] = getThongTinSuKien(data['sukienLink'], data['maCK'])
            data['lastupdate'] = str(datetime.datetime.now())
            data['timeStamp'] = str(time.time())
            iCount += 1
            collection.save(data)
    except:
        err_str = "{0}".format(sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass

# Crawl the first page, then discover the pagination submit controls in the
# ASP.NET form so the remaining pages can be fetched by posting the form.
br = Browser()
getLich(br.open(url).read())
br.select_form(name='aspnetForm')
pages = []
for control in br.form.controls:
    if control.type == 'submit':
        # Page 1 was already crawled above.
        if control.value == '1':
            continue
        pages.append({control.name: control.value})
log.debug("Total page of lich thi truong cafef: {0}".format(len(pages)))

# Allow programmatic modification of read-only form fields (pager state).
br.form.set_all_readonly(False)

def process(iPage):
    """Submit the pager control described by *iPage* ({control_name: value})
    and crawl the resulting calendar page.

    NOTE(review): all 5 worker threads share the single module-level ``br``
    Browser and its selected form; mechanize's Browser is not documented as
    thread-safe, so concurrent submits may interleave state — confirm.
    """
    log.debug("Start process, param: {0}".format(iPage))
    br.form[iPage.keys()[0]] = str(iPage.values()[0])
    submit_response = br.submit()
    getLich(submit_response.read())

    
log.debug("Start crawler cafef lich thi truong")
pool = workerpool.WorkerPool(size=5)
pool.map(process, pages)
pool.shutdown()
pool.wait()

log.info("Tong so lich thi truong lay duoc {0}".format(iCount))    
log.debug("crawler cafef lich thi truong finished")
print "\nFinished !"
sys.exit()

