# -*- coding: utf-8 -*-
import urllib
from urllib import urlretrieve
from lxml import etree
from pymongo import Connection
import cStringIO as StringIO
import hashlib
import datetime
import sys
import os
import re
import time
from urlparse import urljoin
from mongolog.handlers import MongoHandler
import logging


# XPath helper that returns the concatenated text content of an element subtree.
stringify = etree.XPath("string()")
# Root directory of the local image cache (hashed sub-paths are created below it).
localFilepath = "/home/hoangnamhai/HarvestedData/groupon/"
# Maps URL city slugs to their Vietnamese display names.
cityName = {"ha-noi": "Hà Nội", "tp-ho-chi-minh": "TP Hồ Chí Minh", "toan-quoc": "Toàn Quốc"}

# Crawler logger; records are stored in MongoDB ('mongolog' db, 'log' collection).
log = logging.getLogger('groupon')
log.setLevel(logging.DEBUG)
log.addHandler(MongoHandler.to('mongolog', 'log'))

warning_if_less = 200   # Warn when an article has fewer than 200 words
total_product_get = 0   # Count of newly crawled products (incremented in processASite)

# MongoDB store for crawled deals: database "groupon", collection "groupon".
connection = Connection('localhost', 27017)
db = connection["groupon"]
collection = db["groupon"]

################# FUNCTION INC SECTION ###################
def getMD5Hash(textToHash=None):
    """Return the hexadecimal MD5 digest of the given string."""
    digest = hashlib.md5(textToHash)
    return digest.hexdigest()

def getMd5Path(stringToHash):
    """Return a 3-level directory prefix 'h0/h1/h2/' from the MD5 of the input.

    Used to shard the local image cache so no single directory grows too large.
    """
    digest = getMD5Hash(stringToHash)
    return '{0}/{1}/{2}/'.format(digest[0], digest[1], digest[2])

def getMd5FileName(stringToHash):
    """Return the cache-relative file path 'h0/h1/h2/rest-of-digest' for the input's MD5."""
    digest = getMD5Hash(stringToHash)
    return '{0}/{1}/{2}/{3}'.format(digest[0], digest[1], digest[2], digest[3:])

def wordCount(str):
    """Return the number of whitespace-separated words in *str*.

    Fixes an off-by-one family of bugs in the original: an empty string,
    a blank line, or a line with trailing spaces each contributed one
    phantom word, because ''.split(' ') yields ['']. str.split() with no
    argument splits on runs of any whitespace and drops empty tokens, so
    wordCount('') == 0 and newlines/extra spaces are handled uniformly.
    """
    return len(str.split())

def saveImage(url):
    ''' Lưu ảnh xuống local với tên dựa file local dựa vào hash Md5. Nếu local đã có file rồi thì 0 load nữa. '''
    try:
        localFilename = '{0}{1}.jpg'.format(localFilepath, getMd5FileName(url))
        print 'saveImage: {0} -> {1}'.format(url, localFilename)
        if not os.path.isfile(localFilename):
            if not os.path.exists(localFilepath + getMd5Path(url)):
                os.makedirs(localFilepath + getMd5Path(url))
            urlretrieve(url, localFilename)
        return localFilename
    except:
        err_str = 'saveImage error: {0} >> {1}'.format(url, sys.exc_info()[1])
        print err_str
        log.error(err_str)
        pass
    return ''             

def buildTree(url, outputHTML=False):
    result = urllib.urlopen(url)
    html = result.read()
    #print html
    if outputHTML: print html
    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(StringIO.StringIO(html), parser)
    return tree

def strToASCII(str):
    """Replace Vietnamese accented letters in *str* with their plain ASCII base letters."""
    if str == '': return ''
    listPattern = ["á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", "Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                   "đ", "Đ", "í|ì|ỉ|ị|ĩ", "Í|Ì|Ỉ|Ị|Ĩ", "é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", "É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                   "ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", "Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                   "ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", "Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", "ý|ỳ|ỷ|ỵ|ỹ", "Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
    rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
    str = str.encode('utf-8', 'replace')
    # Apply each accent-group pattern with its ASCII replacement in turn.
    for pattern, plain in zip(listPattern, rep):
        str = re.sub(pattern, plain, str)
    return str


def extractNumber(str):
    """Return only the digit characters of *str*; '' for None or empty input."""
    if str is None or str == '':
        return ''
    encoded = str.encode('utf-8', 'replace')
    return re.sub("[^0-9]", '', encoded)

def extractWithRegEx(pat, matchStr, matchIdx):
    """Search *matchStr* with regex *pat*; return group *matchIdx* (whole match when 0).

    Returns '' when the pattern does not match, the pattern is invalid, or
    the group index is out of range. The original compared the match object
    to '' (always true, since re.search returns a match or None) and relied
    on a bare except to swallow the resulting AttributeError on no-match;
    here the None case is tested explicitly and the except is narrowed.
    """
    try:
        m = re.search(pat, matchStr)
        if m is None:
            return ''
        if matchIdx == 0:
            return m.group()
        return m.group(matchIdx)
    except (re.error, IndexError):
        # re.error: bad pattern; IndexError: matchIdx beyond the group count.
        return ''
    
################# END FUNCTION INC SECTION ###################

# XPath configuration for crawling muachung.vn: 'url' is the listing page,
# 'content' selects each product box on it, and the remaining entries are
# XPaths evaluated relative to a product box — except 'detailXPath', which
# is applied to the product's own detail page (see processDetail).
muachung = {
          "url": "http://muachung.vn/",
          "content": "//div[contains(@id,'productItem')]/div[@class='contentBox']",
          "title": "./div[@class='titleBox']/a", 
          "detailLink": "./div[@class='titleBox']/a", 
          "expired": "./parent::*/following-sibling::*[1]",
          "currentBuyer": ".//div[@class='buyerStatus']//b", 
          "minBuyer": ".//div[@class='productStatus']/div[contains(@class, 'cBarText3')]", 
          "basePrice": ".//div[@class='mTop5']//strike",
          "saleoffPrice": ".//div[@class='topTextPrice']/div[@class='numPrice mTop5']", 
          "detailXPath": "//div[@class='blueTitleDetail']//div[@class='contentBox']/*"
          }


def getElementText(node, isGetAttrib=False, attrib=''):
    """Return the stripped text (or attribute value) of the first element in *node*.

    *node* is an lxml XPath result list. Returns '' for None or an empty
    list — the original indexed node[0] unconditionally and raised
    IndexError on empty XPath results. When isGetAttrib is set with a
    non-empty attrib, returns that attribute's value ('' when the attribute
    is missing; the original crashed on None.strip()). Otherwise returns
    the element's direct text when present, else its stringified subtree.
    """
    if not node:
        return ''
    first = node[0]
    if isGetAttrib and attrib != '':
        value = first.get(attrib)
        # Element.get() returns None when the attribute is absent.
        return '' if value is None else value.strip()
    if first.text is not None:
        return first.text.strip()
    return stringify(first).strip()

def getTextBySplit(str, spl, sidx, idx=None):
    """Split *str* on *spl* and return segment *sidx*, or character *idx* of it when given."""
    segment = str.split(spl)[sidx]
    return segment if idx is None else segment[idx]
    
def extractDateFromString(str):
    """Parse a quoted "'HHhMM ... DD/MM/YYYY'" timestamp out of *str*.

    Returns the five captured numbers as ints: [hour, minute, day, month, year].
    Raises AttributeError when the pattern is not found (search returns None).
    """
    preg = re.compile(r"'([0-9]+)h([0-9]+)\s.+\s([0-9]+)/([0-9]+)/([0-9]+)'")
    m = preg.search(str)
    return [int(part) for part in m.groups()]

def processDetail(url, contentXPath):
    """Fetch a deal's detail page and extract its content blocks in page order.

    Returns a list of dicts, each {'type': 'image', 'data': absolute_url}
    or {'type': 'text', 'data': collapsed_text}. Referenced images are also
    downloaded to the local cache as a side effect. Errors are logged and
    whatever was collected so far is returned.
    """
    log.debug("Start processDetail, param: {0}, {1}".format(url, contentXPath))
    contents = []
    wordsNum = 0
    try:
        tree = buildTree(url)
        contentNodes = tree.xpath(contentXPath)
        for item in contentNodes:
            # flag: this node contributed image entries, so skip its text.
            flag = False
            for iChild in item.xpath("./descendant::*"):
                if iChild.tag == 'img':
                    imageURL = iChild.get('src')
                    imageURL = urljoin(url, imageURL)  # resolve relative image URLs
                    saveImage(imageURL)
                    contents.append({'type': 'image', 'data': imageURL})
                    flag = True
            if not flag:                 
                # Collapse all whitespace runs to single spaces before storing.
                myText = re.sub("\s+", " ", stringify(item)).strip()   
                if myText != '': contents.append({'type': 'text', 'data': myText})
                wordsNum += wordCount(myText)    
    except:
        # Best-effort: log the error and fall through with a partial result.
        err_str = "{0} --> url: {1}".format(sys.exc_info()[1], url)
        print err_str
        log.error(err_str)        
        pass
    
    # Warn when the extracted article body looks suspiciously short.
    if wordsNum < warning_if_less: log.warn("Bai viet nho hon {0} tu [{1}]".format(warning_if_less, url))
    return contents
        
       
def processASite(listXPath):
    """Crawl one listing page described by *listXPath* (see the `muachung` dict).

    For each product box on the listing page, builds a document keyed by the
    MD5 of its detail-page URL and saves it to the MongoDB collection,
    skipping products that are already stored. Increments the global
    total_product_get counter for each newly saved product. Errors are
    logged, never raised.
    """
    log.debug("Start processASite, param: {0}".format(listXPath))
    global total_product_get, collection
    data = {}
    url = listXPath['url']
    print url
    try:
        tree = buildTree(url)
        contentNodes = tree.xpath(listXPath['content'])
        if contentNodes is not None and len(contentNodes)>0:
            for iContent in contentNodes:
                linkDetailNode = iContent.xpath(listXPath['detailLink'])
                if len(linkDetailNode)>0:
                    # The detail-page URL doubles as the product's identity.
                    linkDetail = urljoin(url, getElementText(linkDetailNode, True, 'href'))
                    data['_id'] = getMD5Hash(linkDetail)
                    isExist = collection.find_one({'_id': data['_id']})
                    
                    if not isExist:
                        total_product_get += 1
                        # extract title and link to detail page
                        titleNode = iContent.xpath(listXPath['title'])
                        data['title'] = getElementText(titleNode)
                        data['detailLink'] = linkDetail
                        # City label ends with ':' in the markup; strip it.
                        data['city'] = getElementText(iContent.xpath("./div[@class='titleBox']/span")).rstrip(':')
                        data['thumbnail'] = ''
                        # Thumbnail URL is embedded in the box's inline style attribute.
                        if len(iContent.xpath(".//div[@class='mainBoxBorder']")) > 0:
                            data['thumbnail'] = urljoin(url, extractWithRegEx(r"'(http.+)'", iContent.xpath(".//div[@class='mainBoxBorder']")[0].get('style'), 1))
                            saveImage(data['thumbnail'])
                        # extract price and sale off price
                        basePriceNode = iContent.xpath(listXPath['basePrice'])
                        data['basePrice'] = extractNumber(getElementText(basePriceNode))
                        saleoffPriceNode = iContent.xpath(listXPath['saleoffPrice'])
                        data['saleoffPrice'] = extractNumber(getElementText(saleoffPriceNode))
                        
                        # date until which the discounted deal stays valid
                        expiredNode = iContent.xpath(listXPath['expired'])
                        expiredDate = getElementText(expiredNode).split(';')[1]
                        
                        data['start'] = getTextBySplit(expiredDate, ',', 1)
                        # Parsed as [hour, minute, day, month, year]; reorder for datetime().
                        dateArr = extractDateFromString(expiredDate)
                        dateObj = datetime.datetime(dateArr[4], dateArr[3], dateArr[2], dateArr[0], dateArr[1])
                        data['expiredDate'] = str(dateObj)
                        
                        # minimum buyer and current buyer
                        data['minBuyer'] = getTextBySplit(expiredDate, ',', 4)
                        data['currentBuyer'] = getElementText(iContent.xpath(listXPath['currentBuyer']))
                        data['detail'] = processDetail(linkDetail, listXPath['detailXPath'])
                        data['tag'] = "muachung.vn"
                        data['lastupdate'] = str(datetime.datetime.now())
                        data['timeStamp'] = str(time.time())
                        data['totalProduct'] = ''
                        collection.save(data)
                        
                        # print to test
                        print "\n-----------------------------------------"
                        print strToASCII(data['title'])
                        print data['detailLink']
                        print data['basePrice']
                        print data['saleoffPrice']
    except:
        # Best-effort: any per-page failure is logged and the run continues.
        err_str = "{0} --> url: {1}".format(sys.exc_info()[1], url)
        print err_str
        log.error(err_str)
        pass

# Script entry point: crawl the muachung listing once, report the total, exit.
log.debug("Start crawler muachung")
processASite(muachung)
log.debug("Crawler muachung finished")
log.info("Total product crawled: {0}".format(total_product_get))

print "Đã xong", datetime.datetime.now()
sys.exit()