import requests
import base64
import re
from lxml import etree
import time
from fontTools.ttLib import TTFont
import pandas as pd
# 反爬手段：
# 1.一次跳转后即识别爬虫，源码会出验证，必须加上完整请求头
# 2.字体反爬(数字简单映射，汉字矢量坐标渲染)
class DP:
    """Scraper for dianping.com (大众点评) shop listings and comments.

    Anti-scraping countermeasures dealt with here:
    1. After a single redirect the site fingerprints bots and serves a
       verification page unless full browser-like headers (including a
       valid Cookie) accompany every request.
    2. Font obfuscation: digits are remapped through a custom WOFF font,
       while Chinese characters are rendered from vector outlines.
    """

    def __init__(self, url, comment_dic):
        # url: listing page to start crawling from.
        # comment_dic: obfuscation-code -> hanzi mapping built offline by
        #   the caller (OCR of the font glyphs); kept for later decoding.
        self.url = url
        self.s = requests.session()
        self.s.proxies = {'https': '125.65.79.60:3311'}  # hard-coded proxy
        self.s.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/80.0.3987.116 Safari/537.36',
            'Cookie': '必填',  # placeholder: a real logged-in cookie must be supplied
            'Referer': 'http://www.dianping.com/hangzhou/ch30/g141'
        })
        self.ls = []  # accumulated per-shop result dicts
        self.comment_dic = comment_dic
        self._fonts = {}  # font path -> parsed TTFont, so each file is read once

    def choose_area(self):
        """Fetch the start page and walk every sub-district link on it."""
        response = self.s.get(self.url)
        xml = etree.HTML(response.text)
        area_list = xml.xpath('//div[@id="region-nav-sub"]/a')
        for area in area_list[1:]:  # skip the first ("all") entry
            area_name = area.xpath('.//text()')[0]
            area_href = area.xpath('./@href')[0]
            self.split_area(area_name, area_href)

    def split_area(self, name, href):
        """Fetch one sub-district listing page and visit every shop on it."""
        self.s.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/80.0.3987.116 Safari/537.36',
            'Cookie': '必填',  # placeholder: a real logged-in cookie must be supplied
            'Referer': 'http://www.dianping.com/hangzhou/ch30/g141r58'
        })
        res = self.s.get(href)
        xml = etree.HTML(res.text)
        shop_list = xml.xpath('//div[@id="shop-all-list"]/ul/li')
        for sh in shop_list:  # one <li> per shop
            shop_href = sh.xpath('.//div[@class="tit"]/a/@href')[0]
            shop_aver = sh.xpath('.//div[@class="comment"]/a[@class="mean-price"]//text()')
            shop_tuangou = sh.xpath('.//a[@data-click-name="shop_info_groupdeal_click"]/@title')
            self.enter_shop(name, href, shop_href, shop_aver, shop_tuangou)

    def enter_shop(self, name, block_href, shop_href, shop_aver, tuangou):
        """Fetch one shop page, decode obfuscated prices and collect comments.

        name:       sub-district name (stored under the '分区' key).
        block_href: sub-district listing URL, sent as the Referer.
        shop_href:  the shop's detail-page URL.
        shop_aver:  raw (font-obfuscated) average-price text nodes.
        tuangou:    group-deal titles scraped from the listing page.
        Appends a result dict to self.ls when there is anything to keep.
        """
        self.s.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/80.0.3987.116 Safari/537.36',
            'Cookie': '必填',  # placeholder: a real logged-in cookie must be supplied
            'Referer': block_href,
            'Host': 'www.dianping.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive'
        })
        # An empty comment section usually means a bot-verification page;
        # retry up to 5 extra times before giving up on this shop.
        count = 0
        while True:
            time.sleep(2.5)
            res = self.s.get(shop_href)
            html = res.text
            xml = etree.HTML(html)
            infos = xml.xpath('//div[@id="comment"]/ul/li')
            if infos or count == 5:
                break
            count += 1
            print('额外第%d次请求' % count, shop_href)
        # Decode the shop-level average price exactly once.  (It used to be
        # re-decoded on every comment iteration, and stayed ENCODED in the
        # saved dict when a shop had no comments at all.)
        shop_aver = ''.join(
            self.strConvertBySelfFont(i, 'shopNum.woff') for i in shop_aver
        ).replace(' ', '')
        li = []
        for info in infos:
            # Comment text; some characters remain font-obfuscated.
            comment = ''.join(info.xpath('.//p[@class="desc"]//text()')).replace(' ', '')
            aver = info.xpath('.//span[@class="average"]/text()')  # per-capita spend
            vote = ''.join(info.xpath('.//div[@class="actions"]/a[1]/text()'))  # "likes" count
            aver = ''.join(self.strConvertBySelfFont(i, 'shopNum.woff') for i in aver)
            if comment:
                li.append((comment, aver, vote))
            else:
                # No comment body extracted: dump the HTML to make the failure visible.
                print(html)
        if tuangou or li:
            dic = {'分区': name, '团购': tuangou, '人均消费': shop_aver, '评论/个人消费/赞': li}
            self.ls.append(dic)
            print(len(self.ls), dic)
        time.sleep(1)

    def save_font(self, font, font_path):
        """Decode a base64-encoded font blob and write it to font_path."""
        with open(font_path, 'wb') as f:
            f.write(base64.b64decode(font))

    def isZnChar(self, c):
        """Return True when the single character c occupies 3 UTF-8 bytes
        (the range in which the obfuscation code points live)."""
        assert len(c) == 1
        # 3 bytes <=> the original's "hex string has length 6" check.
        return len(c.encode(encoding='utf8')) == 3

    def charToUnicode(self, c):
        """Return the hex code point of a 3-byte UTF-8 character,
        e.g. '中' -> '0x4e2d'.

        Equivalent to the original's manual bit-twiddling UTF-8 decode,
        delegated to ord().
        """
        assert self.isZnChar(c)
        return hex(ord(c))

    def strConvertBySelfFont(self, pre_string, font_path):
        """Decode a string obfuscated through the site's custom number font.

        For each 3-byte character present in the font's cmap, the glyph ID
        minus 1 is emitted (empirically the plaintext digit for
        shopNum.woff — TODO confirm for other font files); every other
        character passes through unchanged.
        """
        font = self._fonts.get(font_path)
        if font is None:  # parse each font file from disk only once
            font = self._fonts[font_path] = TTFont(font_path)
        int_to_name = font.getBestCmap()
        name_to_id = font.getReverseGlyphMap()
        out = []
        for ch in pre_string:
            if self.isZnChar(ch):
                code_point = int(self.charToUnicode(ch), 16)
                if code_point in int_to_name:
                    out.append(str(name_to_id[int_to_name[code_point]] - 1))
                else:
                    out.append(ch)
            else:
                out.append(ch)
        return ''.join(out)

if __name__ == '__main__':
    url = 'http://www.dianping.com/hangzhou/ch30/g141r58'  # Hangzhou / Shangcheng district / massage & foot spa
    # Approach: scrape pages locally first and download the custom font file.
    # Digits can be matched and decoded directly; hanzi are drawn from their
    # vector outlines, binarized to strip shadows, tiled into a large image,
    # converted to PDF and OCR'd.  After a quick manual correction pass, the
    # listed codes and characters are zipped into a dictionary used to decode
    # the locally saved data.
    df = pd.read_table('大众点评评论字典.txt', sep=',', encoding='utf-8', engine='python')
    comment_dic = dict(zip(df.index, df['编码']))
    dp = DP(url, comment_dic)
    dp.choose_area()
    pd.DataFrame(dp.ls).to_excel('上城区.xlsx', index=False)
