# -*- coding: utf-8 -*-
import workerpool
import hashlib
import datetime
import time
import os
import re
import cStringIO as StringIO
import sys
import cookielib
import logging
from mongolog.handlers import MongoHandler
from mechanize import Browser
from urllib import urlretrieve
from lxml import etree
from pymongo import Connection
from urlparse import urljoin


localFilepath = '/home/hoangnamhai/HarvestedData/cafef/'  # local root directory for downloaded images
stringify = etree.XPath("string()")  # compiled XPath: full recursive text content of an element

# Crawler log goes to MongoDB (database 'mongolog', collection 'log').
log = logging.getLogger('cafef')
log.setLevel(logging.DEBUG)
log.addHandler(MongoHandler.to('mongolog', 'log'))

warning_if_less = 100   # warn when an article body has fewer than this many words
max_so_bai_lay = 300    # maximum number of articles fetched per crawler run
duplicate_bai_viet = 30 # max already-seen articles tolerated on one page before giving up on it
current_bai_da_lay = 0  # running count of articles fetched so far (shared by worker threads)

# Harvested articles are stored in cafef.tinthitruongCK on the local MongoDB.
connection = Connection('localhost', 27017)
db = connection["cafef"]
collection = db["tinthitruongCK"]

### INCLUDE FUNCTION ###

def getElementText(elem):
    """Return the stripped full text content of *elem*, or '' when the
    element is missing or has no direct text.

    Fix: use identity tests instead of ``== None`` -- equality comparison
    against None on lxml elements is deprecated (FutureWarning) and its
    meaning is scheduled to change.

    Note: ``elem.text`` is only the text before the first child, so an
    element whose text lives entirely in children still returns ''.
    """
    if elem is None:
        return ''
    if elem.text is None:
        return ''
    return stringify(elem).strip()

def buildTree(url, outputHTML=False):
#    result = urllib.urlopen(url)
#    html = result.read()
    br = Browser()
    br.open(url)
    html = br.response().read()
    if outputHTML: print html
    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(StringIO.StringIO(html), parser)
    return tree

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Build a three-level directory prefix ('a/b/c/') from the first
    three hex digits of the MD5 of *stringToHash*."""
    digest = getMD5Hash(stringToHash)
    return '{0}/{1}/{2}/'.format(*digest[:3])

def getMd5FileName(stringToHash):
    """Build a hashed relative file path 'a/b/c/rest-of-digest' from the
    MD5 of *stringToHash* (same 3-level fan-out as getMd5Path)."""
    digest = getMD5Hash(stringToHash)
    return '/'.join([digest[0], digest[1], digest[2], digest[3:]])

def saveImage(url):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    try:
        localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
        #print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(localFilepath + getMd5Path(url)):
                os.makedirs(localFilepath + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    return ''

def wordCount(text):
    """Count whitespace-separated words in *text*.

    Fixes an off-by-one in the original: an empty or whitespace-only
    string (and every blank line) counted as 1 word, because
    ``''.split(' ')`` yields ``['']``. ``str.split()`` with no argument
    collapses all whitespace and returns [] for blank input, which makes
    the per-line loop and the tab/space normalisation unnecessary.

    (Parameter renamed from ``str`` to stop shadowing the builtin; the
    call sites in this file pass it positionally.)
    """
    return len(text.split())

def getAttributeText(elem, attrib):
    """Return attribute *attrib* of *elem*, or '' when the element is None,
    the attribute name is empty, or the attribute is absent.

    Fix: the original returned None for an absent attribute (lxml's
    ``.get`` default), breaking callers that pass the result straight to
    urljoin(); '' keeps the always-a-string contract the guards imply.
    Also ``is None`` instead of the deprecated ``== None`` element test.
    """
    if elem is None or attrib == '':
        return ''
    return elem.get(attrib) or ''

def extractWithRegEx(pat, matchStr, matchIdx):
    """Run regex *pat* against *matchStr* and return group *matchIdx*,
    or '' when there is no match, the group index is invalid, or the
    pattern does not compile.

    Fix: the original tested ``m != ''``, which is True even when
    ``re.search`` returns None -- the no-match case only "worked" by
    raising AttributeError into a bare except. Test for None explicitly
    and catch only the errors this code can actually raise.
    """
    try:
        m = re.search(pat, matchStr)
        if m is not None:
            return m.group(matchIdx)
    except (re.error, IndexError):
        # bad pattern or nonexistent group: same '' fallback as before
        pass
    return ''

def strToASCII(str):
    """Replace Vietnamese accented letters with their plain-ASCII base
    letter (e.g. 'á' -> 'a', 'Đ' -> 'D') for console-safe printing.

    NOTE(review): Python 2 only. ``.encode('utf-8', 'replace')`` on a
    byte string implicitly decodes with the default codec first, and the
    alternation patterns below are utf-8 byte strings from this file's
    source encoding -- under Python 3 this would mix str patterns with a
    bytes subject and fail. Parameter name shadows the ``str`` builtin;
    call sites pass it positionally.
    """
    if str == '': return ''
    # One alternation pattern per target ASCII letter, paired index-wise
    # with the replacement list 'rep' below.
    listPattern = [r"á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", r"Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   r"đ", r"Đ", r"í|ì|ỉ|ị|ĩ", r"Í|Ì|Ỉ|Ị|Ĩ", r"é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", r"É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   r"ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", r"Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   r"ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", r"Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", r"ý|ỳ|ỷ|ỵ|ỹ", r"Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    str = str.encode('utf-8', 'replace')
    for idx in range(len(listPattern)):
        str = re.sub(listPattern[idx], rep[idx], str)
    return str

### END INCLUDE FUNCTION ###

def getChiTiet(url):
    log.debug("Start getChiTiet: param {0}".format(url))
    results = []
    try:
        tree = buildTree(url)
        wordsNum = 0
        for item in tree.xpath("//div[@class='contentDetail']/following::*[1]/descendant::*"):
            if item.tag == 'img':
                #print "Anh: ", item.get('src')
                imageURL = urljoin(url, item.get('src'))
                results.append({'type': 'image', 'data': imageURL})
                saveImage(imageURL)
            else:
                if item.text is not None and item.tag != 'style':
                    myText = re.sub(r"[\r\n\t]", '', item.text.strip())
                    if myText != '': results.append({'type': 'text', 'data': myText})
                    wordsNum += wordCount(myText)
                if item.tail is not None and item.tag != 'style':
                    myText = re.sub(r"[\r\n\t]", '', item.tail.strip())
                    if myText != '': results.append({'type': 'text', 'data': myText})
                    wordsNum += wordCount(myText)
        print "\n-->Total words: ", wordsNum
        if wordsNum < warning_if_less:  log.info("Warning: so luong tu nho hon {0} ->> url: {1}".format(warning_if_less, url))
    except:
        err_str = "{0} --> url: {1}".format(sys.exc_info()[1], url)
        print err_str
        log.error(err_str)
        pass
    return results
        
def getListBaiViet(url):
    log.debug("Start getListBaiViet: param {0}".format(url))
    global collection, current_bai_da_lay, duplicate_bai_viet
    if current_bai_da_lay > max_so_bai_lay:
        return
    try:
        tree = buildTree(url)
        data = {}
        current_dupplicate = 0
        for item in tree.xpath("//span[contains(@id, 'ListByCategoryDIv1_dtlNew')]/span"):
            data['detailLink'] = urljoin(url, getAttributeText(item.xpath("./div/div/div/a")[0], 'href'))
            data['_id'] = getMD5Hash(data['detailLink'])
            if collection.find_one({'_id': data['_id']}):
                if current_dupplicate > duplicate_bai_viet:
                    return
                current_dupplicate += 1
                continue
            data['thumbnail'] = ''
            for iThumb in item.xpath("./div/div/div/a/img"):
                data['thumbnail'] = urljoin(url, iThumb.get('src'))
            
            data['lastupdate'] = getElementText(item.xpath(".//div[@class='dxncItemDate_news']")[0])
            data['title'] = getElementText(item.xpath(".//a[@class='dxncItemHeadernews']")[0])
            data['description'] = getElementText(item.xpath(".//a[@class='dxncItemContent_news']")[0])
            data['timeStamp'] = str(time.time())
            data['tag'] = 'cafef'
            print "Title: ", strToASCII(data['title'])
            print "Description: ", strToASCII(data['description'])
            data['detail'] = getChiTiet(data['detailLink'])
            current_bai_da_lay += 1
            collection.save(data)
            
    except:
        err_str = "{0} --> url: {1}".format(sys.exc_info()[1], url)
        print err_str
        log.error(err_str)
        pass
    
def getListPage(url = "http://cafef.vn/thi-truong-chung-khoan/trang-1.chn"):
    log.debug("start getListPage: param {0}".format(url))
    listPage = []
    try:    
        tree = buildTree(url)
        lastPage = int(extractWithRegEx(r"-(\d+).", getAttributeText(tree.xpath("//table[@class='CafeF_Paging']//td[contains(., 'Cuối')]/a".decode('utf-8'))[0], "href"), 1))
        for iPage in range(1, lastPage + 1):
            listPage.append("http://cafef.vn/thi-truong-chung-khoan/trang-{0}.chn".format(iPage))
    except:
        err_str = "{0} --> url: {1}".format(sys.exc_info()[1], url)
        print err_str
        log.critical(err_str)
        raise 
    return listPage
    
def process(url):
    log.debug("Start process page: {0}".format(url))
    print "\n ==== CURRENT PAGE ====\n", url
    global current_bai_da_lay, max_so_bai_lay
    getListBaiViet(url)
    
# --- Main driver: enumerate listing pages, crawl them with 10 workers. ---
log.debug("Start crawler cafef tin thi truong chung khoan")
listPages = getListPage()
pool = workerpool.WorkerPool(size=10)
# NOTE(review): workers mutate the global current_bai_da_lay without a
# lock, so the budget check and the final count may race -- confirm
# whether approximate counts are acceptable here.
pool.map(process, listPages)
pool.shutdown()
pool.wait()

log.info("Total Product Crawled: {0}".format(current_bai_da_lay))
print "\n -+- Total news crawled: ", current_bai_da_lay    
print "\nFinished"
log.debug("crawler cafef tin thi truong chung khoan finished")

sys.exit()


