#-*- coding: utf-8 -*-
#Fetch market information - from CafeF.vn

import urllib
import re, os
import lxml.html
import hashlib
import cStringIO as StringIO
from lxml import etree
import workerpool
from pymongo import Connection
from urlparse import urljoin
import time, datetime
import traceback
from termcolor import cprint
import html2text
from mechanize import Browser
import mechanize
import commonlib

# XPath helper that concatenates all text content under a node.
stringfy = etree.XPath('string()')
# Entry page: the cafef.vn event-history calendar listing.
link = 'http://cafef.vn/lich-su-kien.chn'
# Root directory where downloaded images are cached, one subdir per ticker.
localFilePath = '/home/hoangnamhai/HarvestedData/cafeF/'
# NOTE(review): defined but never read anywhere in this file -- presumably
# the crawl was meant to stop after this many already-seen events; confirm.
MAX_DUPLICATED = 2

# MongoDB: scraped events are stored in cafeF.lichthitruong on localhost.
conn = Connection('localhost', 27017)
db = conn['cafeF']
coll = db['lichthitruong']

def buildTree(url, outputHTML=False, isXML=False):
    try:
        html = urllib.urlopen(url).read()
        if not isXML:
            parser = etree.HTMLParser(encoding='utf-8')
        else:
            parser = etree.XMLParser(encoding='utf-8')
        tree = etree.parse(StringIO.StringIO(html), parser)
        if outputHTML == True: print html
        return tree
    except:
        traceback.print_exc()

def buildTree1(html):
    """Parse an already-fetched HTML string into an lxml tree.

    Returns an lxml ElementTree, or None on parse failure (traceback printed).
    """
    try:
        parser = etree.HTMLParser(encoding='utf-8')
        return etree.parse(StringIO.StringIO(html), parser)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        traceback.print_exc()


def getElementText(element):
    """Return the stripped text content of an lxml element.

    Returns '' for None input; returns None if extraction itself fails
    (traceback printed).
    """
    try:
        # `is None`, not `== None`: lxml elements overload comparison.
        if element is None:
            return ''
        eText = lxml.html.fromstring(etree.tostring(element))
        return eText.text_content().strip()
    except Exception:
        traceback.print_exc()

def getMd5Hash(textToHash):
    """Return the hex MD5 digest of *textToHash*.

    Returns None (with a printed traceback) if hashing fails.
    """
    try:
        return hashlib.md5(textToHash).hexdigest()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        traceback.print_exc()

def getMd5Path(stringToHash):
    """Return the first four hex digits of md5(stringToHash) joined as a
    directory path, e.g. 'a/b/c/d' (used to shard cached images on disk).

    Bug fix: the parameter was spelled `stringTohash` while the body read
    `stringToHash`, so this function always raised NameError and returned
    None.
    """
    try:
        s = getMd5Hash(stringToHash)
        return '{0}/{1}/{2}/{3}'.format(s[0], s[1], s[2], s[3])
    except Exception:
        traceback.print_exc()

def getMd5FileName(stringToHash):
    """Return 'a/b/c/d/<rest-of-hash>' for md5(stringToHash): the same
    four-level shard prefix produced by getMd5Path, followed by the
    remaining hash digits as the file name.

    Bug fixes: the body referenced the misspelled `strignToHash`
    (NameError), and it indexed into getMd5Path's result ('a/b/c/d'),
    whose characters 1 and 3 are '/', yielding a garbage path.  The
    indices must apply to the raw hash so the prefix matches the
    directories saveImage creates via getMd5Path.
    """
    try:
        s = getMd5Hash(stringToHash)
        return '{0}/{1}/{2}/{3}/{4}'.format(s[0], s[1], s[2], s[3], s[4:])
    except Exception:
        traceback.print_exc()

def saveImage(url, maCK):
    try:
        path = localFilePath + maCK + "/"
        localFileName = "{0}{1}{2}.jpg".format(path, getMd5FileName(url))
        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(path + getMd5Path(url)):
                os.makedirs(path + getMd5Path(url))
            urlretrieve(url, localFileName)
        return localFilename
    except:
        traceback.print_exc()
    return ''

# Shared LWP cookie jar.  NOTE(review): created here but never attached to a
# Browser or otherwise referenced in this file -- confirm before removing.
cj = mechanize.LWPCookieJar()

def browser(url, data=None):
    """Open *url* with a fresh mechanize Browser.

    data -- optional dict of form fields; when non-empty it is urlencoded
            and POSTed, otherwise a plain GET is issued.  (Default changed
            from the mutable `{}` to None; an empty/None dict behaves
            exactly as before.)
    Returns (browser, response), or None on failure (traceback printed).
    """
    try:
        br = Browser()
        if data:
            response = br.open(url, urllib.urlencode(data))
        else:
            response = br.open(url)
        return br, response
    except Exception:
        traceback.print_exc()

def getInfoEvent(eventLink, maCK):
    """Scrape the body of one event page at *eventLink*.

    Returns a list of fragments, each {'type': 'image'|'link'|'text', ...};
    link fragments also carry a 'title'.  The first image (if any) is
    downloaded to the local cache for ticker *maCK*.  Returns an empty list
    when the page cannot be fetched.

    Bug fixes: the image URL was joined against the undefined `sukienLink`
    (now eventLink), and saveImage was called with the undefined `url`
    (now the resolved image URL); the tree-is-None path returned '' where
    every other path returns a list.
    """
    data = []
    try:
        tree = buildTree(eventLink)
        if tree is None:
            return []
        images = tree.xpath("//td[contains(@style, 'padding-top:10px')]/table/tr[2]/td[1]//img")
        if len(images) > 0:
            imageUrl = urljoin(eventLink, images[0].get('src'))
            data = [{'type': 'image', 'data': imageUrl}]
            saveImage(imageUrl, maCK)
        content = tree.xpath("//div[@class='KenhF_Content_News3']/descendant::*")
        for item in content:
            if item.tag == 'a':
                # `itemLink` (was `link`) avoids shadowing the module global.
                itemLink = urljoin(eventLink, item.get('href'))
                data.append({'type': 'link', 'data': itemLink, 'title': getElementText(item).strip()})
            else:
                if item.text is not None:
                    # Strip embedded newlines/tabs and leftover HTML comments.
                    myText = re.sub(r"&#13;&lt;!--.*?--&gt;&#13;", '', re.sub(r"[\r\n\t]", '', getElementText(item).strip()))
                    if myText != '':
                        data.append({'type': 'text', 'data': myText})
                if item.tail is not None:
                    myText = item.tail.strip()
                    if myText != '':
                        data.append({'type': 'text', 'data': myText})
    except Exception:
        traceback.print_exc()
    return data
    
def getLichSuKien(url):
    try:
        data = {}
        d = {}
        curentDuplicated = 0
        while 1:
            br, res = browser(url, d)
            br.select_form(name='aspnetForm')
            br.form.set_all_readonly(False)
            html = res.read()
            tree = buildTree1(html)
            if tree == None: break
            content = tree.xpath("//table[@id='ctl00_ContentPlaceHolder1_LichSuKien2_grvLichSuKien']//tr[position()>1]")
            #print len(content)
            if len(content)>0:
                fbreak = False
                for rowData in content:
                    data['ngayGD_KHQ'] = getElementText(rowData.xpath("./td[1]")[0])
                    #print data['ngayGD_KHQ']
                    data['ngayDKC'] = getElementText(rowData.xpath("./td[2]")[0])
                    data['ngayTH'] = getElementText(rowData.xpath("./td[3]")[0])
                    data['maCK'] = getElementText(rowData.xpath("./td[4]//a")[0])
                    data['sanGD'] = getElementText(rowData.xpath("./td[5]")[0])
                    data['sukien'] = getElementText(rowData.xpath("./td[6]")[0])
                    #print rowData.xpath("./td[6]//a")[0].get('href')
                    data['sukienLink'] = urljoin('http://cafef.vn', rowData.xpath("./td[6]//a")[0].get('href'))
                    data['_id'] = getMd5Hash(data['sukienLink'])
                    if coll.find_one({'_id':data['_id']}):
                        cprint('Sự kiện đã tồn tại !', 'red')
                        continue
                    data['noidung_sukien'] = getInfoEvent(data['sukienLink'], data['maCK'])
                    data['lastUpdate'] = str(datetime.datetime.now())
                    data['timestamp'] = str(time.time())
                    coll.save(data)
               
                nextNode = tree.xpath("//table[@class='CafeF_Paging']//td/input[@class='btn_Search_Selected']/../following-sibling::*[1]/input")
                if len(nextNode) == 0: break
                target = commonlib.getAttribText(nextNode, 'name')
                value = commonlib.getAttribText(nextNode, 'value')

                d = {'__EVENTTARGET':'',
                    '__EVENTARGUMENT': '',
                    '__VIEWSTATE': '''{0}'''.format(br['__VIEWSTATE']),
                    'ctl00$ContentPlaceHolder1$LichSuKien2$hdfStatus':br['ctl00$ContentPlaceHolder1$LichSuKien2$hdfStatus'],
                    'ctl00$ContentPlaceHolder1$LichSuKien2$hdfPageIndex':'',
                    'ctl00$ContentPlaceHolder1$LichSuKien2$hdfSymbol':'{0}'.format(br['ctl00$ContentPlaceHolder1$LichSuKien2$hdfSymbol']),
                    'ctl00$ContentPlaceHolder1$LichSuKien2$hdfDate1':br['ctl00$ContentPlaceHolder1$LichSuKien2$hdfDate1'],
                    'ctl00$ContentPlaceHolder1$LichSuKien2$hdfDate2':br['ctl00$ContentPlaceHolder1$LichSuKien2$hdfDate2'],
                    'ctl00$ContentPlaceHolder1$LichSuKien2$dlType': '0',
                    'ctl00$ContentPlaceHolder1$LichSuKien2$txtKeyword': '',
                    'ctl00$UcFooter2$hdIP':'''{0}'''.format(br['ctl00$UcFooter2$hdIP']),
                    'ctl00$ContentPlaceHolder1$LichSuKien2$dpkTradeDate1$txtDatePicker': '',
                    'ctl00$ContentPlaceHolder1$LichSuKien2$dpkTradeDate2$txtDatePicker': '',
                    target: value
                    }
                print '********************************************'

                print 'DEBUG: Next page: {0}'.format(value)
                print '********************************************'
                #print d
                coll.save(data)
    except:
        traceback.print_exc()
        
if __name__ == '__main__':

    # Crawl the full event calendar starting from the listing page.
    getLichSuKien(link)
    # NOTE(review): os._exit(1) reports failure status even on success and
    # skips interpreter cleanup (atexit, buffer flushes, Mongo connection
    # close) -- confirm this is intentional.
    os._exit(1)





