# -*- coding: utf-8 -*-

import urllib
from urlparse import urljoin 
import cStringIO as StringIO
import traceback
import gzip
import re, os
from lxml import etree
from termcolor import cprint
import commonlib
import hashlib
from pymongo import Connection
#from mongoDbLog import mongoDbLog

# --- MongoDB handles and crawl-wide constants ---------------------------------
dbname = 'diachiso'
# NOTE(review): pymongo's Connection class is deprecated in favour of
# MongoClient -- confirm the pinned pymongo version before upgrading.
conn = Connection('localhost', 27017)
db = conn[dbname]
diachiso_category = db['diachiso_category']        # service categories: {_id, name}
diachiso_listdiadiem = db['diachiso_listdiadiem']  # scraped places: {hashUrl, url, tendiadiem}
diachiso_region = db['region']                     # regions (never written in the code visible here)
# Pre-compiled XPath: full text content of a node (tags stripped).
stringify = etree.XPath("string()")
baseLink = 'http://diachiso.vn/'  # site root (unused in this chunk -- kept for callers)

def getHTML(url, outputHTML=False):
    try:    
        if type(url).__name__ == 'unicode': url = url.encode('utf-8')
        response = urllib.urlopen(url)
        html = response.read()
        content_type = response.info().getheader('Content-Encoding', '')
        if content_type == 'gzip':
            gzipper = gzip.GzipFile(fileobj=StringIO.StringIO(html))
            html = gzipper.read()
        if outputHTML: print html
        return html 
    except:
        traceback.format_exc()

def buildTree(html, isXML=False):
    """Parse *html* (a byte string) into an lxml element tree.

    An HTML parser is used when isXML == False, an XML parser otherwise.
    Returns None (and prints the traceback) when parsing fails.
    """
    try:
        chosen = (etree.HTMLParser(encoding='utf-8')
                  if isXML == False
                  else etree.XMLParser(encoding='utf-8'))
        return etree.parse(StringIO.StringIO(html), chosen)
    except:
        traceback.print_exc()

def regexString(pattern, text):
    """Search *text* with regex *pattern*; return the match object or None.

    None/empty pattern or text short-circuit to None. Compilation or search
    errors are printed and also yield None.
    """
    try:
        if pattern in (None, '') or text in (None, ''):
            return
        match = re.compile(pattern).search(text)
        if match:
            return match
    except:
        traceback.print_exc()

def checkDuplicate(link, adr):
    try:
        if link==None or link=='': return None, None
        if type(link).__name__ == 'unicode': link = link.encode('utf-8')
        hashUrl = hashlib.md5(link).hexdigest()
        '''
        reId =regexString('/(\w+)-(\d+)/', link)
        if reId: aId = '{0}-{1}'.format(reId.group(1), reId.group(2)) 
        else: aId = commonlib.getMD5Hash(link)   
        '''
        result = adr.find_one({'hashUrl' : hashUrl})
        if result!=None:  
            print "Dữ liệu đã tồn tại !"
            return 1, hashUrl
        return 0, hashUrl   
    except:
        traceback.print_exc()
        return None, None


def getParentLink():
    """Scrape the Hanoi services index page.

    Returns {category_name: [category_link]}; {} when the page cannot be
    fetched or parsed, None on unexpected error (traceback printed).
    Only 'dich-vu-ha-noi' is crawled -- the HCMC variant existed only as a
    dead local dict and was never used.
    """
    try:
        data = {}
        link = 'http://diachiso.vn/{0}'.format('dich-vu-ha-noi')
        html = getHTML(link)
        tree = buildTree(html)
        if tree == None:
            # BUG fix: was `return ''` -- every caller immediately calls
            # .items() on the result, which raised AttributeError on a str.
            return {}
        contentNode = tree.xpath(".//div[@class='wrap']//div[@id='divChildServices']/table[@id='10_DataList1']")
        for itemNode in contentNode:
            for anchor in itemNode.xpath(".//tr/td//li/a"):
                # Collapse runs of whitespace in the anchor's text content.
                adName = re.sub(r'\s+', " ", stringify(anchor))
                data[adName] = [anchor.get('href')]
        return data
    except:
        traceback.print_exc()

def getChildLink():
    """Scrape the child service categories of every parent category.

    Returns {parent_name: [{child_name: [child_link]}]} -- the
    single-element list-wrapping-a-dict shape is kept for backward
    compatibility with existing callers indexing value[0].
    """
    try:
        childData = {}
        for parentName, links in getParentLink().items():
            html = getHTML('{0}'.format(links[0]))
            tree = buildTree(html)
            if tree == None:
                # BUG fix: was `return ''` -- callers call .items() on the
                # result (AttributeError on str), and one bad page threw away
                # every category already collected. Skip the bad page instead.
                continue
            data = {}
            for tableNode in tree.xpath(".//div[@class='wrap']//div[@id='divChildServices']/table[@id='10_DataList2']"):
                for anchor in tableNode.xpath(".//tr/td//li/a"):
                    # Normalise the label: collapse whitespace, drop the
                    # trailing "(<count>)" the site appends to each name.
                    name = re.sub(r'\s+', " ", stringify(anchor))
                    name = re.sub(r'\d+', ' ', name).replace("( )", '')
                    data[name] = [anchor.get('href')]
            childData[parentName] = [data]
        return childData
    except:
        traceback.print_exc()
    
def getCategory():
    try:
        
        for k, v in getChildLink().items():
            #check_exist, hashUrl = checkDuplicate(k)
            #if  check_exist == 1: continue
            print k
            cprint(v[0],'green')
            #data = {}
            #temp = ''
            for i, t in v[0].items():
                #item
                print i
                print t[0]
                check_exists, hashU = checkDuplicate(t[0], diachiso_category)
                if check_exists==1: continue
                html = getHTML(t[0])
                tree = buildTree(html)
                if tree == None: return ''
                contentNode = tree.xpath("//div[@class='main clearfix']//table[@id='10_DataList1']")
                for itemNode in contentNode:
                    for item in itemNode.xpath(".//tr/td//li/a"):
                        serviceId = item.get('id')
                        serviceId = serviceId[7:]
                        title = item.text
                        title = re.sub(r'\s+',' ', title)
                        title = re.sub(r'\d+', '', title)
                        title = title.replace('()', '')
                        doc =({'_id': serviceId,
                                'name': title,
                       #'name': t[0]
                               })
                        diachiso_category.save(doc)
           
    except:
        traceback.print_exc()

def getListAdrress():
    try:
        for key, value in getChildLink().items():
            print key
            #print value
            for k, v in value[0].items():
                print k
                #print v
                link = v[0]
                cprint(link, 'red')
                html = getHTML(link)
                tree = buildTree(html)
                if tree == None: return ''
                serviceId = getId(link)
                for x in range(0,3):
                    listLink = 'http://diachiso.vn/Service/LoadShopServiceByRoad?serviceId={0}&districtId=0&roadId=0&orderby=Name&page={1}&currentPage={2}'.format(serviceId, x+1, x)
                    processLink(listLink)
    except:
        traceback.print_exc()

def processLink(link):
    """Fetch one paginated listing page and store every new place found.

    Each result anchor becomes a {hashUrl, url, tendiadiem} document in
    diachiso_listdiadiem; entries whose URL hash already exists are skipped.
    """
    try:
        cprint(link, 'green')
        tree = buildTree(getHTML(link))
        if tree == None:
            return ''
        anchors = tree.xpath("//div[@id='tab_1']//div[@class='article-main']/h4/a")
        for anchor in anchors:
            placeName = anchor.text
            url = anchor.get('href')
            exists, digest = checkDuplicate(url, diachiso_listdiadiem)
            if exists == 1:
                continue
            diachiso_listdiadiem.save({'hashUrl': digest,
                                       'url': url,
                                       'tendiadiem': placeName})
    except:
        traceback.print_exc()

def getDetailAdrress():
    try:
        for key, value in getChildLink().items():
            print key
            print value[0]
    except:
        traceback.print_exc()

def getId(link):
    """Extract the JavaScript `serviceId = '...'` value from the page at *link*.

    Returns the id string, '' when the page cannot be parsed, or None when
    the pattern is absent or fetching fails.
    """
    try:
        html = getHTML(link)
        tree = buildTree(html)
        if tree == None: return ''
        m = re.search(r'serviceId = \'(.+)\';', html)
        # BUG fix: on a non-match, `serviceId` was referenced while unbound,
        # raising NameError (silently swallowed by the bare except). Make the
        # no-match path explicit instead.
        if m:
            return m.group(1)
        return None
    except:
        traceback.print_exc()

if __name__ == '__main__':
    # Entry point: crawl the paginated address lists. The other crawl stages
    # are kept commented out for manual one-off runs.
    #print getChildLink()
    #getCategory()
    getListAdrress()
    #getDetailAdrress()
    
    #test()       
    # NOTE(review): os._exit(1) hard-kills the interpreter (skips atexit and
    # buffer flushing) and reports exit status 1 even on success -- presumably
    # to stop lingering pymongo threads; confirm before replacing with
    # sys.exit().
    os._exit(1)



