# -*- coding: utf-8 -*-
'''
Created on Oct 17, 2010

Lấy dữ liệu từ laisuat.vn
Tham khảo Mongo: http://www.rkblog.rk.edu.pl/w/p/mongodb-data-management-python/

@author: HoangNamHai
'''
import hashlib
import sys
import urllib
import cStringIO as StringIO
from lxml import etree
import lxml.html
from pymongo import Connection
from urlparse import urljoin
import datetime
import logging
from mongolog.handlers import MongoHandler
import workerpool

# Initialisation
# XPath helper returning the full string value of an element subtree.
# NOTE(review): `stringify` is not used anywhere else in this file —
# presumably kept for interactive/debug use; confirm before removing.
stringify = etree.XPath("string()")

# Module logger at DEBUG level; records are persisted to the
# 'mongolog.log' MongoDB collection through MongoHandler.
log = logging.getLogger('LaisuatVn')
log.setLevel(logging.DEBUG)
log.addHandler(MongoHandler.to('mongolog', 'log'))

def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*.

    Accepts bytes or text; text is UTF-8 encoded before hashing, so
    non-ASCII input (e.g. Vietnamese bank names in links) no longer
    raises UnicodeEncodeError. ``None`` hashes as the empty string —
    the original crashed with TypeError on its own default argument.
    """
    if textToHash is None:
        textToHash = b''
    if not isinstance(textToHash, bytes):
        # Python 2 unicode / Python 3 str must be encoded before hashing.
        textToHash = textToHash.encode('utf-8')
    return hashlib.md5(textToHash).hexdigest()

def getElementText(elem):
    """Return the concatenated, stripped text content of an lxml element.

    Returns ``''`` when *elem* is None so callers need no guard.
    """
    if elem is None:  # was `== None`; identity test is the correct idiom
        return ''
    # Round-trip through lxml.html so text_content() flattens the whole
    # subtree (the etree API has no direct equivalent).
    t = lxml.html.fromstring(etree.tostring(elem))
    return t.text_content().strip()

def extractDataTable(url, type):
    print 'extractData({0}, {1})'.format(url, type)
    log.debug("Start extractDataTable({0}, {1})".format(url, type))
    kq = []
    try:
        result = urllib.urlopen(url)
        html = result.read()
        parser = etree.HTMLParser(encoding='utf-8')
        tree   = etree.parse(StringIO.StringIO(html), parser)
        for bankRow in tree.xpath("//tr[@class='rows']"):
            data = {}
            data['logo'] = urljoin(url, bankRow.xpath(".//a[@class='logobank']//img")[0].get("src").strip())
            data['link'] = urljoin(url, bankRow.xpath(".//a[@class='logobank']")[0].get("href").strip())
            data['_id'] = getMD5Hash(data['link']+type)
            cellCount = 0
            cells = {}
            for bankRate in bankRow.xpath(".//td[@class='logoItems']/following-sibling::*"):
                cellCount = cellCount + 1
                cells["td" + str(cellCount)] = unicode(getElementText(bankRate)).strip()
                if cells["td" + str(cellCount)] == 'None': cells["td" + str(cellCount)] = ''
            data['data'] = cells  
            data['type'] = type  
            data['lastupdate'] = str(datetime.datetime.now()) 
            kq.append(data)
    except:
        error = "{0} >> {1}, url={2}".format(sys.exc_info()[0], sys.exc_info()[1], url)
        print error
        log.error(error)
        pass
    if len(kq) < 1:
            log.warn("Khong lay duoc truong du lieu nao extractDataTable({0}, {1})".format(url, type))
    return kq

def process(arr):
    url, colName = arr[0], arr[1]
    global db
    collection = db[colName]  
    log.debug("Start process {0} >> {1}".format(url, colName))
    print "Process {0}>>{1}".format(url, colName)
    try:
        types = ['usr=CN', 'usr=DN']
        for type in types:
            link = url + '&' + type
            if type == 'usr=CN':
                type = 'CaNhan'
            else:
                type = 'DoanhNghiep'
            for c in extractDataTable(link, type):
#                print c
                collection.save(c)
    except:
        error = "{0} >> {1}, url={2}".format(sys.exc_info()[0], sys.exc_info()[1], url)
        print error
        log.error(error)
        pass
        
# --- Script entry: crawl every rate page into the 'laisuat' database ---
connection = Connection('localhost', 27017)
db = connection['laisuat']  # read as a global by process()

# [page URL, target collection name] pairs: deposit rates per currency
# (VND / USD / gold) and loan rates per purpose.
listLink = []
listLink.append(["http://laisuat.vn/Pages/DepositRates.aspx?cur=VND", "guitien"])
listLink.append(["http://laisuat.vn/Pages/DepositRates.aspx?cur=USD", "guiusd"])
listLink.append(["http://laisuat.vn/Pages/DepositRates.aspx?cur=VAN", "guivang"])
listLink.append(["http://laisuat.vn/Pages/LoanInterestRates.aspx?cur=TIEUDUNG", "vaytieudung"])
listLink.append(["http://laisuat.vn/Pages/LoanInterestRates.aspx?cur=MUANHA", "vaymuanha"])
listLink.append(["http://laisuat.vn/Pages/LoanInterestRates.aspx?cur=MUAXE", "vaymuaxe"])

log.debug("Start crawler laisuatvn")
# Crawl the six pages concurrently; I/O bound, so 6 worker threads.
pool = workerpool.WorkerPool(size=6)
pool.map(process, listLink)
pool.shutdown()  # stop accepting jobs...
pool.wait()      # ...then block until queued jobs finish.
log.debug("Crawler laisuatvn finished")


print "Đã xong", datetime.datetime.now()