# -*- coding: utf-8 -*-
import workerpool
import mechanize
from mechanize import Browser
import re
import cStringIO as StringIO
import lxml.html
from lxml import etree
from pymongo import Connection
from urllib import urlencode
import zlib
import datetime
import sys
import logging
from mongolog.handlers import MongoHandler

# ---- basic helper functions ----
def getCRC32Unsign(textToHash=None):
    """Return the unsigned CRC-32 of *textToHash* as a decimal string.

    zlib.crc32 can return a signed value (always does on Python 2 for
    high checksums); masking with 0xffffffff normalizes it to the
    unsigned 32-bit range so it is a stable MongoDB document _id.

    NOTE(review): the default of None would make zlib.crc32 raise
    TypeError; callers always pass a string — confirm before relying on
    the default.
    """
    # 0xffffffff without the Python-2-only 'L' suffix behaves identically
    # on Py2 and keeps the expression valid on Py3.
    return str(zlib.crc32(textToHash) & 0xffffffff)

def buildTree(url):
    """Fetch *url* pretending to be a Nokia 6300 (mobile results page)
    and parse the response into an lxml element tree."""
    mobile_ua = "Nokia6300/2.0 (04.20) Profile/MIDP-2.0 Configuration/CLDC-1.1 UNTRUSTED/1.0"
    browser = Browser()
    browser.addheaders = [("User-Agent", mobile_ua)]
    browser.open(url)
    page = browser.response().read()
    html_parser = etree.HTMLParser(encoding='utf-8')
    return etree.parse(StringIO.StringIO(page), html_parser)

def getElementText(elem):
    """Return the concatenated, stripped text content of an lxml
    element, or '' when *elem* is None."""
    if elem is None:
        return ''
    serialized = etree.tostring(elem)
    return lxml.html.fromstring(serialized).text_content().strip()

def getAttributeText(node, attrb):
    """Return attribute *attrb* of *node*, or '' when unavailable.

    Fix: the original returned None for a missing attribute (the
    default of Element.get), breaking this module's convention that
    text helpers always yield a string. '' is now returned in every
    unavailable case.
    """
    if node is None or attrb == '':
        return ''
    value = node.get(attrb)
    # Element.get / dict.get return None for an absent key.
    return value if value is not None else ''

def extractWithRegEx(pat, matchStr, matchIdx):
    """Run regex *pat* over *matchStr* and return group *matchIdx*.

    Returns '' when there is no match, the group index is invalid, or
    the pattern fails to compile.

    Bug fixed: the original tested ``m != ''``, which is true even when
    ``search`` returns None; the no-match case only "worked" because the
    bare except swallowed the resulting AttributeError.
    """
    try:
        m = re.search(pat, matchStr)
        if m is not None:
            return m.group(matchIdx)
        return ''
    except Exception:
        # Invalid group index / bad pattern: keep the ''-on-failure contract.
        return ''

def strToASCII(str):
    """Strip Vietnamese diacritics, returning a plain-ASCII byte string.

    Encodes the input as UTF-8 and regex-replaces each accented-letter
    group with its unaccented base letter.

    Bug fixed: on any failure the original fell through ``except: pass``
    and returned None; it now returns the ORIGINAL input unchanged, so
    callers that print or concatenate the result keep working.
    """
    if str == '': return ''
    try:
        listPattern = [r"á|à|ả|ạ|ã|â|ấ|ầ|ẩ|ậ|ẫ|ă|ắ|ằ|ẳ|ặ|ẵ", r"Á|À|Ả|Ạ|Ã|Â|Ấ|Ầ|Ẩ|Ậ|Ẫ|Ă|Ắ|Ằ|Ẳ|Ặ|Ẵ",
                       r"đ", r"Đ", r"í|ì|ỉ|ị|ĩ", r"Í|Ì|Ỉ|Ị|Ĩ", r"é|è|ẻ|ẹ|ẽ|ê|ế|ề|ể|ệ|ễ", r"É|È|Ẻ|Ẹ|Ẽ|Ê|Ế|Ề|Ể|Ệ|Ễ",
                       r"ó|ò|ỏ|ọ|õ|ô|ố|ồ|ổ|ộ|ỗ|ơ|ớ|ờ|ở|ợ|ỡ", r"Ó|Ò|Ỏ|Ọ|Õ|Ô|Ố|Ồ|Ổ|Ộ|Ỗ|Ơ|Ớ|Ờ|Ở|Ợ|Ỡ",
                       r"ú|ù|ủ|ụ|ũ|ư|ứ|ừ|ử|ự|ữ", r"Ú|Ù|Ủ|Ụ|Ũ|Ư|Ứ|Ừ|Ử|Ự|Ữ", r"ý|ỳ|ỷ|ỵ|ỹ", r"Ý|Ỳ|Ỷ|Ỵ|Ỹ"]
        rep = ['a', 'A', 'd', 'D', 'i', 'I', 'e', 'E', 'o', 'O', 'u', 'U', 'y', 'Y']
        result = str.encode('utf-8', 'replace')
        for pat, plain in zip(listPattern, rep):
            result = re.sub(pat, plain, result)
        return result
    except Exception:
        # NOTE(review): the substitutions assume Python 2 byte-string
        # regexes; on failure return the input untouched instead of None.
        return str

# ---- end basic helper functions ----

def compareURL(curURL, nextURL):
    curStart = extractWithRegEx("start=(\d+)", curURL, 1)
    nextStart = extractWithRegEx("start=(\d+)", nextURL, 1)
    print "\n\tcurstart {0} -- nextstart {1}".format(curStart, nextStart)
    if curStart == nextStart:   return True
    return False

def checkName(str, acceptW, rejectW):
    """Return True when the place name passes the word filters: it must
    not match *rejectW* and must match *acceptW* (empty pattern = filter
    disabled)."""
    normalized = str.lower().encode('utf-8', 'ignore')
    # Guard clauses replace the original if/elif pyramid; reject-check
    # first, matching the original's evaluation order.
    if rejectW != '' and re.search(rejectW, normalized):
        return False
    if acceptW != '' and not re.search(acceptW, normalized):
        return False
    return True

def getLatLon(url):
    """Extract microdegree coordinates from a ``latlng=<lat>,<lng>``
    URL parameter and return them as decimal-degree strings.

    Returns ``{'lat': '', 'lon': ''}`` when the parameter is absent or
    malformed.

    Generalized: the pattern now accepts negative coordinates
    (southern/western hemispheres), which the original ``\d+`` rejected;
    positive inputs behave exactly as before.
    """
    try:
        m = re.search(r"latlng=(-?\d+),(-?\d+)", url)
        # m is None when the parameter is missing -> AttributeError below.
        lat = str(float(m.group(1)) / 1000000.0)
        lng = str(float(m.group(2)) / 1000000.0)
        return {'lat': lat, 'lon': lng}
    except Exception:
        return {'lat': '', 'lon': ''}

def categoriesProcessT1(icat):
    try:
        for idistrict in district:
            getLocation(icat, idistrict)
    except:
        print sys.exc_info()
        pass

def categoriesProcessT2(icat):
    """Crawl one category once for the whole Hanoi area instead of
    per-district."""
    getLocation(icat, "Hà Nội, Hanoi, Việt nam")
    
def getLocation(cat, idistrict):
    """Crawl Google Maps mobile results for one category in one district.

    Builds a query from cat['keyword'] plus *idistrict*, then pages
    through the mobile result pages, saving every accepted place into
    the MongoDB collection named cat['cat'] (db 'diadiem' on localhost).
    Documents are keyed by a CRC32 of name+lat+lon, so re-crawls
    overwrite rather than duplicate. Relies on the module-global `log`.
    """
    log.debug("Start getLocation({0}, {1})".format(cat, idistrict))
    kw = cat['keyword'] + " " + idistrict
    print ".....Loading..... >> start getLocation({0}) ...".format(kw)
    url = "http://maps.google.com/m?{0}".format(urlencode({'q': kw}))
    # -- connect to mongodb --
    try:
        connection = Connection('localhost', 27017)
        db = connection['diadiem']
        collection = db[cat['cat']]
        while 1:
            tree = buildTree(url)
            # Each result entry lives in a div.bf; none means an empty page.
            if len(tree.xpath("//div[@class='bf']")) < 1:
                print "!!! Non content :("
                break
            for item in tree.xpath("//div[@class='bf']"):
                addr = getElementText(item.xpath(".//div[@class='k2e4se']")[0])
                name = getElementText(item.xpath(".//a[@class='uf']")[0])
                # Skip entries whose name fails the accept/reject word filter.
                if cat['acceptWords'] is not None and cat['rejectWords'] is not None:
                    if not checkName(name, cat['acceptWords'], cat['rejectWords']): continue
                link = getAttributeText(item.xpath(".//a[@class='uf']")[0], 'href')
                latlon = getLatLon(link)
                tel = ''
                # Dedup key: ASCII-folded name + microdegree coordinates.
                id = name.encode('ascii', 'ignore') + latlon['lat'] + latlon['lon']
                if len(item.xpath(".//a[@class='j4nalh']")) > 0:
                    tel = getElementText(item.xpath(".//a[@class='j4nalh']")[0])
                keyword = cat['keyword']
                
                # -- progress output for the operator --
                print "\tName: {0} {1} {2}".format(strToASCII(name), latlon['lat'], latlon['lon'])
                print "\tAdress: ", strToASCII(addr)
                print "\tKeyword: ", strToASCII(keyword)
                print "\tTel: ", tel, "\n\t------------------------"
                # -- end progress output --
                collection.save({'_id': getCRC32Unsign(id), 'name': name, 'address': addr, 'tel': tel, 'latlon': latlon, 'keyword': keyword, 'district': idistrict})
                
            # Follow the "Trang tiep" (= next page) link until it stops
            # advancing (same start= offset) or disappears.
            tiepNode = tree.xpath("//a[contains(., 'Trang tiếp')]".decode('utf-8'))
            if len(tiepNode) < 1:   break
            next = mechanize.urljoin(url, getAttributeText(tiepNode[0], 'href'))
            if compareURL(url, next): break
            print ">>> goto next page: {0}".format(strToASCII(next))
            url = next
    except:
        # Log and swallow: one failed (cat, district) pair must not stop
        # the rest of the crawl.
        err = "{0} - {1} >> {2}".format(cat, idistrict, sys.exc_info()[0])
        print err
        log.error(err)
        pass

if __name__ == '__main__':
    
    # Words that disqualify a hit for the 'nhahang' (restaurant) category.
    rjWords = {'nhahang': 'công ty|cty|ảnh viện|studio|bia hơi|cao đẳng|đại học|trung tâm|showroom|cửa hàng|đại lý|cà phê|karaoke|cafe'}
    
    # Tier-1 categories: crawled once per district (see `district` below).
    categoryT1 = [{'cat': 'hotel', 'keyword': '"khach san" OR hotel', 'acceptWords':'', 'rejectWords': ''}, 
                  {'cat': 'nhanghi', 'keyword': '"nha nghi" OR "nha khach"', 'acceptWords':'', 'rejectWords': ''}, 
                  {'cat': 'atm', 'keyword': '+atm', 'acceptWords': 'atm', 'rejectWords': ''},
                  {'cat': 'doxe', 'keyword': 'Garage Cho Thuê Bãi Đậu Xe -hotel', 'acceptWords':'xe|gara|ô tô|garage|vận tải', 'rejectWords': 'hotel|khách sạn|địa ốc'}, 
                  {'cat': 'bank', 'keyword': '"ngan hang" OR bank -atm', 'acceptWords':'ngân hàng|bank', 'rejectWords': 'học viện'}, 
                  {'cat': 'nhahang', 'keyword': '"nha hang" OR Restaurant -hotel', 'acceptWords':'', 'rejectWords': rjWords['nhahang']}, 
                  {'cat': 'tramxangdau', 'keyword': 'Trạm OR "dai ly" OR "cua hang" +"Xăng Dầu"', 'acceptWords':'xăng|dầu', 'rejectWords': ''}, 
                  {'cat': 'quanbia', 'keyword': '"nha hang" OR quan +bia', 'acceptWords': 'bia|beer', 'rejectWords': 'cửa hàng|công ty|cty|đại lý'},
                  {'cat': 'sieuthi', 'keyword': 'sieu thi OR plaza OR "market"', 'acceptWords':'siêu thị|plaza|market', 'rejectWords': ''}
                  ]
                    
    # Tier-2 categories: crawled once for the whole Hanoi area.
    categoryT2 = [{'cat': 'benhvien', 'keyword': '+"bệnh viện"', 'acceptWords': 'benh vien|bệnh viện|y tế|viện|y te|cấp cứu', 
                   'rejectWords': 'laptop|thẩm mỹ|triết|nông nghiệp|khoa học'},
                   {'cat': 'nhahang', 'keyword': 'nha hang near thanh nien tay ho', 'acceptWords':'nhà hàng', 'rejectWords': ''}]
    
    # Hanoi districts used by the per-district (tier-1) crawl.
    district = ["Q.Hà Đông", "Q.Cầu Giấy", "Q.Hoàn Kiếm", "Q.Hai Bà Trưng", "Q.Hoàng Mai", "Q.Thanh Xuân", "Q.Tây Hồ", "Q.Ba Đình", "Q.Long Biên", "Q.Đống Đa"]
    # -- test one category
#    categoriesProcessT1(categoryT1[4])
    # `log` is a module-global read by getLocation; logs go to the
    # 'mongolog' Mongo database via MongoHandler.
    log = logging.getLogger('mapsgoogle')
    log.setLevel(logging.DEBUG)
    log.addHandler(MongoHandler.to('mongolog', 'log'))
    
    log.debug("Start crawler google map")
    # Five worker threads per pool; shutdown() queues the stop request,
    # wait() blocks until the queue drains.
    pool = workerpool.WorkerPool(size=5)
    pool.map(categoriesProcessT2, categoryT2)       
    pool.shutdown()
    pool.wait()
    
    pool = workerpool.WorkerPool(size=5)
    # NOTE(review): categoryT2 is fed to the per-district processor here,
    # and categoryT1 is crawled again in the pool below — this looks like
    # deliberate re-runs to fill gaps (the CRC32 _id makes saves
    # idempotent), but confirm it is not an accidental duplicate.
    pool.map(categoriesProcessT1, categoryT2)
    pool.map(categoriesProcessT1, categoryT1)
    pool.shutdown()
    pool.wait()
    
    pool = workerpool.WorkerPool(size=5)
    pool.map(categoriesProcessT1, categoryT1)
    pool.shutdown()
    pool.wait()
    
    log.debug("crawler google map finished")
    print "Finished at {0}".format(str(datetime.datetime.now()))
    sys.exit()


