# -*- coding: utf-8 -*-
import urllib
from urllib import urlretrieve
from lxml import etree
from pymongo import Connection
import cStringIO as StringIO
import hashlib
import datetime
import workerpool
import sys
import os
import re
from urlparse import urljoin
import time
import logging
from mongolog.handlers import MongoHandler

# Module-level setup: MongoDB-backed logging, the target collection, and
# crawl configuration. Runs at import time (side effects: opens a Mongo
# connection and registers a log handler).
log = logging.getLogger('groupon')
log.setLevel(logging.DEBUG)
# Ship log records into the 'mongolog' database, 'log' collection.
log.addHandler(MongoHandler.to('mongolog', 'log'))

warning_if_less = 200   # Warn when an article body has fewer than 200 words
total_product_get = 0   # Count of newly crawled products; reported at shutdown

# NOTE(review): pymongo's Connection is the legacy (pre-MongoClient) API.
connection = Connection('localhost', 27017)
db = connection["groupon"]
collection = db["groupon"]


stringify = etree.XPath("string()")  # XPath helper: full text content of a node
localFilepath = "/home/hoangnamhai/HarvestedData/groupon/"  # image cache root
cityArr = ['ha-noi', 'ho-chi-minh']  # city slugs crawled by the worker pool
cityName = {"ha-noi": "Hà Nội", "ho-chi-minh": "TP Hồ Chí Minh"}  # slug -> display name


################# FUNCTION INC SECTION ###################
def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of *textToHash*."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Return a 3-level directory prefix (e.g. '9/0/0/') built from the
    first three characters of the MD5 hex digest of *stringToHash*."""
    digest = hashlib.md5(stringToHash).hexdigest()
    return '{0}/{1}/{2}/'.format(digest[0], digest[1], digest[2])

def getMd5FileName(stringToHash):
    """Return the MD5 digest of *stringToHash* formatted as 'a/b/c/rest',
    i.e. the cache-relative path (3-level directory prefix + file name)."""
    digest = hashlib.md5(stringToHash).hexdigest()
    return '{0}/{1}/{2}/{3}'.format(digest[0], digest[1], digest[2], digest[3:])

def wordCount(text):
    """Return the number of whitespace-separated words in *text*.

    Bug fix: the original split each line on single spaces after collapsing
    whitespace, so an empty line contributed one phantom word
    (''.split(' ') == ['']) and an empty string counted as 1 word.
    str.split() with no argument handles all whitespace runs and yields
    nothing for blank input. (Also stops shadowing the builtin `str`.)
    """
    return len(text.split())

def saveImage(url):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    try:
        localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(localFilepath + getMd5Path(url)):
                os.makedirs(localFilepath + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    return ''             

def buildTree(url, outputHTML=False):
    result = urllib.urlopen(url)
    html = result.read()
    #print html
    if outputHTML: print html
    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(StringIO.StringIO(html), parser)
    return tree

def strToASCII(str):
    """Transliterate Vietnamese accented characters to plain ASCII.

    Operates on the UTF-8 byte form (Python 2 semantics): each alternation
    pattern of accented glyphs is collapsed to one ASCII letter.
    Returns '' for empty input.
    """
    if str == '':
        return ''
    accentPatterns = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                      "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                      "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                      "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    asciiLetters = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    converted = str.encode('utf-8', 'replace')
    for pattern, letter in zip(accentPatterns, asciiLetters):
        converted = re.sub(pattern, letter, converted)
    return converted


def extractNumber(str):
    """Return only the digit characters of *str* (e.g. for price parsing).
    Returns '' for None or empty input."""
    if str is None or str == '':
        return ''
    encoded = str.encode('utf-8', 'replace')
    return re.sub("[^0-9]", '', encoded)

def extractWithRegEx(pat, matchStr, matchIdx):
    """Run regex *pat* against *matchStr* and return group *matchIdx*.

    Returns '' when there is no match, or when the pattern / group index
    is invalid.

    Bug fix: the original tested `m != ''`, which is always true because
    re.search returns a Match object or None -- it only worked because the
    bare except swallowed the AttributeError raised by `None.group(...)`.
    """
    try:
        m = re.compile(pat).search(matchStr)
        if m is not None:
            return m.group(matchIdx)
        return ''
    except Exception:  # invalid pattern or group index: keep '' contract
        return ''

def standardlizeTimeValue(timeStr):
    """Convert a countdown string like '3 days 4 hours 5 minutes' into an
    absolute datetime: now minus the remaining span.

    Falls back to now - 2 days when *timeStr* does not match the expected
    'number ... number ... number ...' shape.
    """
    now = datetime.datetime.now()
    match = re.search("([0-9]+).+\s([0-9]+).+\s([0-9]+).+", timeStr)
    if match is None:
        return now - datetime.timedelta(days=2)
    days, hours, minutes = (int(g) for g in match.groups())
    return now - datetime.timedelta(days=days, hours=hours, minutes=minutes)

    
################# END FUNCTION INC SECTION ###################
def extractTextFromNode(node, isGetAttrib=False, attrib=''):    
    ret = ""
    try:
        if node is not None:
            if isGetAttrib:
                ret = node[0].get(attrib).strip()
            elif node is not None and len(node)>0:
                if node[0].text is not None:
                    ret = node[0].text.strip()
                elif node[0].tail is not None and node[0].tail.strip()!="":
                    ret = node[0].tail.strip()
                else:
                    ret = stringify(node[0]).strip()
    except:
        print "ERORR: ", sys.exc_info()[0], sys.exc_info()[1]
    return ret


# XPath scraping configuration for runhau.vn. 'content' selects each deal
# item on a listing page; the other entries are item-relative selectors for
# individual fields ('detailXPath' is page-level, used on the detail page).
runhau = {"url": "http://runhau.vn/",
          "content": "//div[@class='view-content']/div[@class='item-list']//li",
          "title": ".//div[@class='views-field-title']/span", 
          "detailLink": ".//div[@class='views-field-nid']/span[@class='field-content']/a", 
          "expired": ".//div[@class='views-field-field-date-finish-value']//div[@id='countdowntimer']",
          "currentBuyer": ".//div[@class='views-field-phpcode-1']/span", 
          "minBuyer": ".//div[@class='views-field-field-toi-thieu-value']/span", 
          "basePrice": ".//div[@class='views-field-field-giatri-value']//span",
          "saleoffPrice": "./div[@class='views-field-sell-price']//span[contains(@class, 'uc-price-product')]", 
          "detailXPath": "//div[@id='detail_content']/p"
          }


def processDetail(url):
    log.debug("Start processDetail, [params: {0}".format(url))
    global warning_if_less
    results = []
    wordsNum = 0
    try:
        tree = buildTree(url)
        for item in tree.xpath("//div[@id='detail_content']/p"):
            if item.text is None:
                isIMG = False
                for iChild in item.xpath("./descendant-or-self::*"):
                    if iChild.tag == 'img':
                        isIMG = True
                        imageURL = iChild.get('src')
                        imageURL = urljoin(url, imageURL)
                        saveImage(imageURL)
                        results.append({'type': 'text', 'data': imageURL})
                        break
                if isIMG: pass
                myText = stringify(item).strip()
                myText = re.sub(r"[\r\n\t]", '', re.sub(r"\s+", " ", myText))
                if myText != '':
                    results.append({'type': 'text', 'data': myText})
            else:
                myText = stringify(item)
                myText = myText.strip()
                if myText != '':
                    results.append({'type': 'text', 'data': myText}) 
                    wordsNum += wordCount(myText)
    except:                     
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    if wordsNum < warning_if_less: log.warn("Bai viet co so tu < {0}, [url={1}]".format(warning_if_less, url))
    return results                
        

#processDetail("http://runhau.vn/hanoi-giam-45-lau-tu-chon-tai-nha-hang-nhat-ban-kimono")


def processSite(icity):
    log.debug("Start processSite, [params = {0}".format(icity))
    global total_product_get, warning_if_less, collection,  runhau
    data = {}
    url = urljoin(runhau['url'], icity)
    try: 
        tree = buildTree(url)
        for item in tree.xpath(runhau['content']):
            detailLinkNode = item.xpath(runhau['detailLink'])
            detailLink = urljoin(url, extractTextFromNode(detailLinkNode, True, 'href'))
            data['_id'] = getMD5Hash(detailLink)
            isExist = collection.find_one({'_id': data['_id']})
            if not isExist:
                total_product_get += 1
                titleNode = item.xpath(runhau['title'])
                data['title'] = extractTextFromNode(titleNode)
                data['detailLink'] = detailLink
                basePriceNode = item.xpath(runhau['basePrice'])
                data['basePrice'] = extractNumber(extractTextFromNode(basePriceNode))
                saleoffPriceNode = item.xpath(runhau['saleoffPrice'])
                data['saleoffPrice'] = extractNumber(extractTextFromNode(saleoffPriceNode))
                minBuyerNode = item.xpath(runhau['minBuyer'])
                data['minBuyer'] = extractTextFromNode(minBuyerNode)
                currentBuyerNode = item.xpath(runhau['currentBuyer'])
                data['currentBuyer'] = extractTextFromNode(currentBuyerNode)
                expiredNode = item.xpath(runhau['expired'])
                expiredText = extractTextFromNode(expiredNode)
                data['expiredDate'] = str(standardlizeTimeValue(expiredText))
                
                thmbnailNode = tree.xpath("//span[@class='field-content']//img[contains(@class, 'imagecache imagecache-product_full imagecache-default')]")
                if len(thmbnailNode)>0:
                    thmbImg = thmbnailNode[0].get('src')
                    thmbImg = urljoin(url, thmbImg)
                    data['thumbnail'] = thmbImg
                    saveImage(thmbImg)
                data['tag'] = "runhau.vn"
                data['totalProduct'] = ''
                data['start'] = ''
                data['detail'] = processDetail(detailLink)
                data['city'] = cityName[icity]
                data['lastupdate'] = str(datetime.datetime.now())
                data['timeStamp'] = str(time.time())
                print "\nProcessing: ", strToASCII(data['title'])
                print "\nURL: ", data['detailLink']
                
                collection.save(data)
    except:
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
            
            
#processSite("ha-noi")
# Script entry point: crawl both cities in parallel on a 2-worker thread
# pool, then report totals and exit.
log.debug("Start crawler runhau")
pool = workerpool.WorkerPool(size=2)
# Each worker runs processSite for one city slug from cityArr.
pool.map(processSite, cityArr)
pool.shutdown()
pool.wait()
log.debug("Crawler runhau finished")
# NOTE(review): total_product_get is incremented from worker threads without
# a lock -- increments could theoretically be lost; confirm or guard with
# threading.Lock.
log.info("Total product crawled: {0}".format(total_product_get))
print "\nFinished", datetime.datetime.now()
sys.exit()