# -*- coding: utf-8 -*-

import urllib2
from lxml import etree
import json
import re
import requests

import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from dianp_item import DianpItem
from common.dianp_common import refindall

class DianpParse(object):
    """Parsers for dianping.com pages.

    Every method is a @staticmethod taking the raw HTML page (or a JSON
    response body) and returning plain Python structures.  HTML parsing
    relies on lxml.etree; embedded JS variables are scraped with the
    project helper ``refindall``.
    """

    def __init__(self):
        pass

    @staticmethod
    def check_page_404(page):
        """Return True when the page looks like a 404 / error page.

        Heuristics: the <title> contains '404', or the body is
        suspiciously short (< 1500 bytes).
        """
        html = etree.HTML(page)
        for title in html.xpath('//title/text()'):
            if '404' in title:
                return True
        return len(page) < 1500

    @staticmethod
    def get_tag_list(page):
        """Extract the photo-tag navigation list from a picture page.

        Returns a list of ``{'tag': name, 'url': href}``; the trailing
        photo count such as '(120)' is stripped from the tag name.
        """
        tagS = []
        html = etree.HTML(page)
        for node in html.xpath('//*[@id="photoNav"]/ul/li'):
            names = node.xpath('.//a/text()')
            hrefs = node.xpath('.//a/@href')
            if names:
                name = re.sub(r'\(\d+\)', '', names[0])
                tagS.append({'tag': name, 'url': hrefs[0]})
        return tagS

    @staticmethod
    def get_picid_list(page):
        """Return the max page number of a tag's picture listing.

        BUG FIX: the original body was ``return self.get_max_pagenum(page)``
        inside a @staticmethod, which raised NameError on every call;
        delegate through the class instead.
        """
        return DianpParse.get_max_pagenum(page)

    @staticmethod
    def get_pic_tag(page):
        """Return the tag hrefs from the photo navigation bar."""
        html = etree.HTML(page)
        return html.xpath('//*[@id="photoNav"]//a/@href')

    @staticmethod
    def get_max_pagenum(page):
        """Return the maximum page number of a paginated listing.

        Defaults to 1 when no PageLink anchors are present.
        """
        html = etree.HTML(page)
        titles = html.xpath('//a[@class="PageLink"]/@title')
        if not titles:
            return 1
        return max(int(t) for t in titles)

    @staticmethod
    def get_rest_list(page):
        """Return the restaurant-id list from a search/listing page.

        Ids are derived from the pic-anchor hrefs by stripping the
        '/shop/' prefix.
        """
        html = etree.HTML(page)
        hrefs = html.xpath('//*[@id="shop-all-list"]//div[@class="pic"]/a/@href')
        return [href.replace('/shop/', '') for href in hrefs]

    @staticmethod
    def get_food_tag(page):
        """Return the picture tags joined by '|', or '' when absent."""
        html = etree.HTML(page)
        tagarr = html.xpath('//div[@class="picture-tag"]/a/text()')
        return '|'.join(tagarr) if tagarr else ''

    @staticmethod
    def get_restinfo(page):
        """Parse a restaurant detail page into a ``DianpItem.restInfo()``.

        Fields come from two sources: JS variables embedded in the page
        (via ``refindall``) and the HTML itself.  Raises IndexError when
        a mandatory element (shop name, review count, address, ...) is
        missing; callers should pre-filter with check_page_404.

        Returns ``{'restinfo': item}``.
        """
        item = DianpItem.restInfo()
        html = etree.HTML(page)
        shopName = html.xpath('//h1[@class="shop-name"]/text()')
        item['shopName'] = re.sub(r'\s|""', '', shopName[0])
        # fields scraped out of the inlined JS config object
        item['city'] = refindall(r'cityName:(.*?),', page, '', r'\s|\n|"')
        item['cityId'] = refindall(r'cityId:(.*?),', page, '', r'\s|\n|"')
        item['shopId'] = refindall(r'shopId:(.*?),', page, '', r'\s|\n|"')
        item['shopCityId'] = refindall(r'shopCityId:(.*?),', page, '', r'\s|\n|"')
        item['citylats'] = refindall(r'cityGlat:(.*?),', page, '', r'\s|\n|"')
        item['citylngs'] = refindall(r'cityGlng:(.*?),', page, '', r'\s|\n|"')
        item['power'] = refindall(r'power:(.*?),', page, '', r'\s|\n|"')
        item['shopType'] = refindall(r'shopType:(.*?),', page, '', r'\s|\n|"')
        item['categoryName'] = refindall(r'categoryName:(.*?),', page, '', r'\s|\n|"')
        item['cityEnName'] = refindall(r'cityEnName:(.*?),', page, '', r'\s|\n|"')

        item['star'] = refindall(r'shopPower:(.*?),', page, 0, r'\s|\n|"')
        # first run of digits in the review-count text, e.g. "(1024)"
        item['reviewCount'] = re.findall(r'\d+', html.xpath('//*[@id="reviewCount"]/text()')[0])[0]
        item['avgPriceTitle'] = html.xpath('//*[@id="avgPriceTitle"]/text()')[0].replace('人均：', '').replace('元', '').replace('-', '0')
        comment_score = html.xpath('//*[@id="comment_score"]/span/text()')
        item['taste'] = comment_score[0].replace('口味：', '')
        item['environment'] = comment_score[1].replace('环境：', '')
        item['service'] = comment_score[2].replace('服务：', '')
        address = html.xpath('//*[@itemprop="street-address"]/@title')[0]
        if '(' in address:
            # "address(cross road)" -> split into address / crossRoad
            addarr = re.findall(r'(.*?)\((.*?)\)', address)
            item['address'] = addarr[0][0]
            item['crossRoad'] = addarr[0][1]
        else:
            item['address'] = address
        # join handles 0, 1 or many phone numbers identically to the
        # original three-way branch ('' / tels[0] / space-joined)
        tels = html.xpath('//*[@itemprop="tel"]/text()')
        item['tel'] = ' '.join(tels)

        item['lats'] = refindall(r'shopGlat:(.*?),', page, '', r'\s|"')
        item['lngs'] = refindall(r'shopGlng:(.*?),', page, '', r'\s|"')
        item['mainRegionId'] = refindall(r'mainRegionId:(.*?),', page, 0, r'\s')
        item['mainCategoryId'] = refindall(r'mainCategoryId:(.*?),', page, 0, r'\s')
        # BUG FIX: dropped a stray bare `item['picTotal']` expression
        # statement that could raise KeyError before the assignment below.
        picTotal = html.xpath('//*[@id="pic-count"]/text()')
        if not picTotal:
            item['picTotal'] = '0'
        else:
            item['picTotal'] = re.sub(r'\n|\s', '', picTotal[0])

        # chain/branch info: a shop with a group id but no visible
        # "other N branches" text is treated as not chained
        item['chainId'] = refindall(r'shopGroupId:(.*?),', page, 0, r'\s')
        if item['chainId'] != 0:
            item['branchNums'] = refindall('其它(\d+)家分店', page, 0, r'\s')
            if item['branchNums'] == 0:
                item['chainId'] = 0

        # other info blocks: alias, restaurant intro, opening hours
        for itemp in html.xpath('//p[@class="info info-indent"]'):
            tag = itemp.xpath('./span[@class="info-name"]/text()')
            if not tag:
                continue
            info = itemp.xpath('./span[@class="item"]/text()')
            if re.search(u'别.*名', tag[0]) is not None:
                item['altName'] = info[0]
            if re.search(u'餐厅简介', tag[0]) is not None:
                item['restIntro'] = re.sub(r'\n|\s', '', itemp.xpath('string(.)'))
            if re.search(u'营业时间', tag[0]) is not None:
                item['openTime'] = re.sub(r'\s|\n', '', info[0])

        # breadcrumb hrefs carry category ids like ".../r123/g110";
        # only entries containing both 'r' and 'g' yield a cuisine id
        rgCates = html.xpath('//div[@class="breadcrumb"]/a/@href')
        rgCateDict = dict(zip(['cusineId', 'secondCusineId'], rgCates[2:]))
        for key, href in rgCateDict.items():
            cate = href.split('/')[-1]
            if 'r' in cate and 'g' in cate:
                item[key] = re.findall(r'(g\d+)', cate)[0]
        # guard: the loop above may never assign secondCusineId
        if 'secondCusineId' in item and item['secondCusineId'] != 0:
            # strip the leading 'g' and keep the numeric id
            item['secondCusineId'] = int(item['secondCusineId'][1:])

        return {'restinfo': item}

    @staticmethod
    def get_restpic(page):
        """Extract picture ids and captions from a photo-list page.

        Returns a list of ``{'id': picid, 'intro': caption}``; the id is
        the digits of the picture anchor href, the caption '' if absent.
        """
        picList = []
        html = etree.HTML(page)
        for node in html.xpath('//*[@class="J_list"]'):
            picid = node.xpath('./div[@class="img"]/a/@href')[0]
            picid = re.sub(r'\D', '', picid)
            intros = node.xpath('./div[@class="picture-info"]/*[@class="name"]//a/text()')
            intro = intros[0] if intros else ''
            picList.append({'id': picid, 'intro': intro})
        return picList

    @staticmethod
    def get_recommend_params(restInfo):
        """Build the request params for the recommend-dish API from a
        parsed restinfo dict."""
        keys = ('shopId', 'mainCategoryId', 'shopType', 'shopCityId',
                'power', 'cityId', 'shopName')
        return dict((k, restInfo[k]) for k in keys)

    @staticmethod
    def get_billboad_params(restInfo):
        """Build the request params for the billboard API from a parsed
        restinfo dict.

        BUG FIX: the shopName param read ``restInfo['name']``, a key
        that get_restinfo never sets (it sets 'shopName') -> KeyError.
        """
        params = {}
        params['shopId'] = restInfo['shopId']
        params['cityId'] = restInfo['cityId']
        params['shopName'] = restInfo['shopName']
        params['power'] = restInfo['power']
        params['mainCategoryId'] = restInfo['mainCategoryId']
        params['shopType'] = restInfo['shopType']
        params['mainRegionId'] = restInfo['mainRegionId']
        params['shopGlat'] = restInfo['lats']
        params['shopGlng'] = restInfo['lngs']
        params['cityGlat'] = restInfo['citylats']
        params['cityGlng'] = restInfo['citylngs']
        params['cityEnName'] = restInfo['cityEnName']
        params['categoryName'] = restInfo['categoryName']
        params['shopCityId'] = restInfo['shopCityId']
        return params

    @staticmethod
    def get_recommend_food(foodJson):
        """Parse the recommend-dish JSON into a list of food dicts.

        ``finalPrice`` is passed through unchanged (may be None).
        """
        foods = json.loads(foodJson)
        return [{'name': dish['dishTagName'],
                 'price': dish['finalPrice'],
                 'count': dish['tagCount'],
                 'foodLabel': ''}
                for dish in foods['allDishes']]

    @staticmethod
    def get_rest_billboad(billboadJson):
        """Parse the billboard JSON into a list of billboard dicts."""
        billboads = json.loads(billboadJson)
        return [{'listId': bean['listId'],
                 'title': bean['title'],
                 'shopCount': bean['shopCount'],
                 'viewCount': bean['viewCount']}
                for bean in billboads['asideModel']['myListBeans']]

    @staticmethod
    def get_big_pic(page):
        """Return the big-picture URL from a photo detail page, '' when
        no image is found.

        When the character 7 positions from the end is 'W' it is lowered
        to 'w' (presumably a size marker in the CDN URL -- TODO confirm
        against live URLs).
        """
        html = etree.HTML(page)
        bigPic = html.xpath('//*[@id="J_pic-wrap"]//img/@src')
        if not bigPic:
            return ''
        url = bigPic[0]
        if url[-7] == 'W':
            return url[:-7] + 'w' + url[-6:]
        return url

    @staticmethod
    def get_thread(page):
        # placeholder kept for interface compatibility; never implemented
        pass

    @staticmethod
    def is_closed_rest(page):
        """Return True when the page marks the restaurant as closed."""
        return 'shop-closed' in page

    @staticmethod
    def get_user_think(page):
        """Extract the 'good'/'bad' review summary tags.

        Returns a list of ``{'tag': text, 'count': n}`` parsed from
        strings shaped like 'tag(count)'.
        """
        thinkList = []
        html = etree.HTML(page)
        summaries = html.xpath('//span[@class="good J-summary"]/a/text()')
        summaries = summaries + html.xpath('//span[@class="bad J-summary"]/a/text()')
        for text in summaries:
            res = re.findall(r'(.*?)\((.*?)\)', text)
            if not res:
                continue
            thinkList.append({'tag': res[0][0], 'count': res[0][1]})
        return thinkList

    @staticmethod
    def get_res_billboard(page):
        """Unfinished scraper for the restaurant billboard section.

        BUG FIX: the original called ``html.xpath()`` with no expression
        (TypeError on every call) and fell off the end.  Return an empty
        list until the selector is actually written.
        TODO: implement the billboard XPath.
        """
        return []

    @staticmethod
    def get_res_districtid(page):
        """Return the district id ('r\\d+') from the breadcrumb link, ''
        when absent."""
        html = etree.HTML(page)
        districts = html.xpath('//a[@data-ga-index="2"]/@href')
        if not districts:
            return ''
        return re.findall(r'(r\d+)', districts[0])[0]

    @staticmethod
    def get_review_mainpage(page):
        """Parse the reviews embedded on the restaurant main page.

        Returns a list of review dicts with id, userName, time, text,
        star, per-aspect ratings and a JSON-encoded 'others' blob.
        """
        reviewList = []
        html = etree.HTML(page)
        for node in html.xpath('//*[@class="comment-item"]'):
            reviewinfo = {}
            reviewinfo['id'] = node.xpath('./@data-id')[0]
            times = node.xpath('.//span[@class="time"]/text()')
            # BUG FIX: the user XPath was absolute ('//p[...]'), which
            # lxml evaluates against the whole document, so every review
            # got the first user name on the page; made it relative.
            user = node.xpath('.//p[@class="user-info"]/a[@class="name"]/text()')
            reviewinfo['userName'] = user[0]
            reviewinfo['time'] = times[0]
            # prefer the hidden full text; fall back to the short desc
            commentInfo = node.xpath('./div[@class="content"]//div[@class="info J-info-all Hide"]/p')
            if commentInfo:
                reviewinfo['commentInfo'] = commentInfo[0].xpath('string(.)')
            else:
                commentInfo = node.xpath('./div[@class="content"]//p[@class="desc"]')
                reviewinfo['commentInfo'] = commentInfo[0].xpath('string(.)')
            # the star rating is encoded in the last two chars of the
            # css class, e.g. "sml-rank-stars sml-str40" -> "40"
            star = node.xpath('.//p[@class="shop-info"]/span[1]/@class')[0]
            reviewinfo['star'] = star[-2:]
            for rating in node.xpath('.//p[@class="shop-info"]/span[@class="item"]/text()'):
                if '口味' in rating:
                    reviewinfo['taste'] = rating.replace('口味：', '')
                if '环境' in rating:
                    reviewinfo['environment'] = rating.replace('环境：', '')
                if '服务' in rating:
                    reviewinfo['service'] = rating.replace('服务：', '')
            # recommend blocks: <dt>label</dt><dd>values...</dd>
            dts = node.xpath('.//div[@class="content"]//dl[@class="recommend-info clearfix"]/dt')
            dds = node.xpath('.//div[@class="content"]//dl[@class="recommend-info clearfix"]/dd')
            otherInfo = []
            for dt, dd in zip(dts, dds):
                label = dt.xpath('string(.)').replace('：', '')
                otherInfo.append({'tag': label, 'info': dd.xpath('./*/text()')})
            reviewinfo['others'] = json.dumps(otherInfo)

            reviewList.append(reviewinfo)
        return reviewList

    @staticmethod
    def get_review(page):
        """Parse a dedicated review-list page.

        Returns a list of review dicts; raises IndexError when a review
        node is missing a mandatory child (id, text, user, time, star).
        """
        reviewList = []
        html = etree.HTML(page)
        for node in html.xpath('//*[@class="comment-list"]/ul/li'):
            reviewinfo = {}
            review_id = node.xpath('@data-id')[0]
            commentInfo = node.xpath('./div[@class="content"]//div[@class="J_brief-cont"]/text()')[0]
            reviewinfo['UserName'] = node.xpath('.//p[@class="name"]/a/text()')[0]
            reviewinfo['commentDate'] = node.xpath('.//span[@class="time"]/text()')[0]
            # star rating encoded in the last two chars of the css class
            star = node.xpath('.//div[@class="user-info"]/span/@class')[0]
            reviewinfo['star'] = star[-2:]
            for rating in node.xpath('.//span[@class="rst"]/text()'):
                if '口味' in rating:
                    reviewinfo['taste'] = rating.replace('口味', '')
                if '环境' in rating:
                    reviewinfo['environment'] = rating.replace('环境', '')
                if '服务' in rating:
                    reviewinfo['service'] = rating.replace('服务', '')
            reviewinfo['id'] = review_id
            reviewinfo['commentInfo'] = re.sub(r'\s|\n', '', commentInfo)
            otherInfo = {}
            for comment in node.xpath('div[@class="content"]//div[@class="comment-recommend"]'):
                label = re.sub('\n| ', '', comment.xpath('./text()')[0])
                otherInfo[label] = comment.xpath('./a/text()')[0]
            reviewinfo['otherInfo'] = otherInfo
            reviewList.append(reviewinfo)
        return reviewList
        
#import requests
#res = requests.get('http://www.dianping.com/shop/14743854')
#print DianpParse.get_big_pic(res.content)
#print DianpParse.get_res_districtid(res.content)
#data = open('500001.txt', 'r').read()
#print DianpParse.get_restinfo(res.content)
