#-*- coding: utf-8 -*-
#Lấy tin tức thị trường chứng khoán từ cafeF.vn (fetch stock-market news from cafeF.vn)

import urllib
from urllib import urlretrieve
import re, os
import lxml.html
import hashlib
import cStringIO as StringIO
from lxml import etree
import workerpool
from pymongo import Connection
from urlparse import urljoin
import time, datetime
import traceback
from termcolor import cprint
import html2text
from mechanize import Browser
import mechanize
import commonlib

stringfy = etree.XPath('string()')  # reusable XPath: full text content of a node
baseUrl = 'http://cafef.vn'  # root of the site being scraped
localFilePath = '/home/hoangnamhai/HarvestedData/cafeF/'  # local root for downloaded images (MD5-sharded)

conn = Connection('localhost', 27017)  # MongoDB connection (legacy pymongo API, module-level side effect)
db = conn['cafeF']
coll = db['tinthitruong']  # collection holding scraped market-news documents

def buildTree(url, outputHTML=False, isXML=False):
    try:
        html = urllib.urlopen(url).read()
        if not isXML:
            parser = etree.HTMLParser(encoding='utf-8')
        else:
            parser = etree.XMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser)
        if outputHTML == True: print html
        return tree
    except:
        traceback.print_exc()

def buildTree1(html):
    """Parse an in-memory HTML string into an lxml ElementTree.

    Returns None (after printing the traceback) when parsing fails.
    """
    try:
        return etree.parse(StringIO.StringIO(html),
                           etree.HTMLParser(encoding='utf-8'))
    except:
        traceback.print_exc()

def getAttributeText(elem, attrib):
    """Return attribute *attrib* of element *elem*, or '' when unavailable.

    '' is returned when elem is None, when attrib is empty, and (fix) when
    the element simply lacks the attribute -- the old code returned None in
    that case, breaking callers that expect a string.
    """
    if elem is None or attrib == '':  # fix: identity test for None (PEP 8 E711)
        return ''
    return elem.get(attrib, '')

def extractWithRegEx(pat, matchStr, matchIdx):
    """Search *matchStr* with regex *pat* and return group *matchIdx*, or ''.

    Fix: re.search returns None (never '') on a miss, so the old check
    `m != ''` was always true and every miss raised AttributeError that was
    silently printed by the except.  Test for None instead.
    Returns '' on no match, bad pattern, or bad group index.
    """
    try:
        rexp = re.compile(pat)
        m = rexp.search(matchStr)
        if m is not None:
            return m.group(matchIdx)
    except Exception:
        traceback.print_exc()
    return ''

def getElementText(element):
    """Return the whitespace-stripped text content of an lxml element.

    Returns '' for a None element, and None if serialisation/re-parsing
    fails (traceback printed, per the module's best-effort style).
    """
    try:
        if element is None:  # fix: identity test, not '== None' (PEP 8 E711)
            return ''
        eText = lxml.html.fromstring(etree.tostring(element))
        return eText.text_content().strip()
    except Exception:
        traceback.print_exc()

def getMd5Hash(textToHash):
    try:
        return hashlib.md5(textToHash).hexdigest()
    except:
        traceback.print_exc()

def getMd5Path(stringToHash):
    """Return 'a/b/c/d': the first four hex chars of the input's MD5,
    joined as a four-level directory path (shards cached files on disk)."""
    try:
        digest = getMd5Hash(stringToHash)
        return '/'.join(digest[:4])
    except:
        traceback.print_exc()

def getMd5FileName(stringToHash):
    """Return 'a/b/c/d/rest-of-digest': the MD5 hex digest of the input with
    its first four characters split off as directory levels, matching the
    shard layout that getMd5Path creates on disk.

    Fix: the old code sliced getMd5Path()'s result -- the already
    slash-joined 4-char path 'a/b/c/d' -- instead of the 32-char digest,
    producing 'a///b///c/d'.  That kept only four hash characters of
    uniqueness, so distinct URLs collided on the same cached file name.
    """
    try:
        s = getMd5Hash(stringToHash)
        return '{0}/{1}/{2}/{3}/{4}'.format(s[0], s[1], s[2], s[3], s[4:])
    except:
        traceback.print_exc()

def saveImage(url):
    try:
        #path = localFilePath + "/"
        localFileName = "{0}{1}.jpg".format(localFilePath, getMd5FileName(url))
        print 'saveImage: {0} -> {1}'.format(url, localFileName)
        if not os.path.isfile(localFileName):
            if not os.path.exists(localFilePath + getMd5Path(url)):
                os.makedirs(localFilePath + getMd5Path(url))
            urlretrieve(url, localFileName)
        return localFileName
    except:
        traceback.print_exc()
    return ''

def getListTinTuc(url):
    """Scrape one listing page of market news and store each article in Mongo.

    Each stored document carries: _id (MD5 of the detail URL), title,
    detailLink, thumnail (key spelling kept for DB compatibility),
    lastupdate, description, timeStamp, and the scraped article body
    under 'detail'.  Returns '' when the page cannot be fetched.

    Fixes over the original:
    * a fresh `data` dict per item -- previously fields leaked from the
      prior item whenever a selector matched nothing;
    * the already-exists check now actually skips the item -- the old
      `continue` only advanced the inner link loop, so known articles
      were re-fetched and re-saved on every run.
    """
    try:
        tree = buildTree(url)
        if tree is None: return ''
        contentNode = tree.xpath("//div[@class='cc-list']/div[@class='item clearfix']")
        for node in contentNode:
            data = {}
            for iLink in node.xpath(".//div[@class='cclinfo']/h4/a"):
                data['detailLink'] = urljoin('http://cafef.vn', iLink.get('href'))
                data['title'] = iLink.text
                data['_id'] = getMd5Hash(data['detailLink'])
            if 'detailLink' not in data:
                continue  # item without a detail link: nothing to store
            if coll.find_one({'_id': data['_id']}):
                cprint('Tin tức đã tồn tại !', 'yellow')
                continue  # article already harvested
            for iThumb in node.xpath(".//div[@class='cclimg']/a/img[@class='imgBorder']"):
                lThumb = commonlib.getAttribText(iThumb, 'src')
                data['thumnail'] = urljoin('http://cafef.vn', lThumb)
            for iDate in node.xpath(".//div[@class='date']"):
                data['lastupdate'] = iDate.text
            data['timeStamp'] = str(time.time())
            for iDes in node.xpath(".//div[@class='cclinfo']//p"):
                data['description'] = iDes.text
            data['detail'] = getChiTiet(data['detailLink'])
            coll.save(data)
    except Exception:
        traceback.print_exc()

def getChiTiet(url):
    try:
        result = []
        tree = buildTree(url)
        if tree == None: return ''
        for item in tree.xpath("//div[@id='mainDetail']//div[@id='ctl00_contentMain_ucNewsContent1_divImage']/a/img"):
            urlImage = commonlib.getAttribText(item, 'src')
            urlImage = urljoin('http://cafef.vn', urlImage)
            print urlImage
            result.append({'type':'image','data':urlImage})
            saveImage(urlImage)
        content = tree.xpath("//div[@id='mainDetail']//div[@class='KenhF_Content_News3']")
        if len(content)>0:
            primaryNode = content[0]
            html = etree.tounicode(primaryNode)
            text = html2text.html2text(html)
            result.append({'type':'text','data':text})
            #text = re.sub(r'\s+', ' ', text)
            #print text
        return result
    except:
        traceback.print_exc()

def getListPages():
    """Discover how many listing pages exist and return their URLs.

    Reads the 5th paging link on page 1 (the 'last page' link), extracts
    its page number with a regex, and builds the URL of every page from 1
    through that number.  Returns '' when page 1 cannot be fetched, None
    when the paging markup is missing (traceback printed).

    Fixes: the regex dot is escaped (it previously matched any character),
    and a pointless .decode('utf-8') on an ASCII XPath literal was removed.
    """
    try:
        listPage = []
        tree = buildTree('http://cafef.vn/thi-truong-chung-khoan/trang-1.chn')
        if tree is None: return ''
        lastHref = getAttributeText(tree.xpath("//div[@class='paging']/a[5]")[0], "href")
        lastPage = int(extractWithRegEx(r"-(\d+)\.", lastHref, 1))
        for iPage in range(1, lastPage + 1):
            listPage.append("http://cafef.vn/thi-truong-chung-khoan/trang-{0}.chn".format(iPage))
        return listPage
    except Exception:
        traceback.print_exc()

def process(url):
    """Worker-pool task: announce the page being handled, then scrape it."""
    try:
        message = 'Trang: {0} '.format(url)
        cprint(message, 'green')
        getListTinTuc(url)
    except:
        traceback.print_exc()

if __name__ == '__main__':
    #getListPages()
    # Enumerate every listing page, then scrape them with 10 parallel workers.
    listPage = getListPages()
    pool = workerpool.WorkerPool(size=10)
    pool.map(process, listPage)
    pool.shutdown()
    pool.wait()  # block until all queued jobs have finished
    #getChiTiet('http://cafef.vn/20120105012720609CA31/tai-cau-truc-ttck-dam-bao-loi-ich-co-dong-ndt.chn')
    #getListTinTuc('http://cafef.vn/thi-truong-chung-khoan/trang-1.chn')
    #os._exit(1)
