# encoding: utf-8
from pipes import quote
import requests
from fake_useragent import UserAgent
from fontTools.ttLib import TTFont
import sys
import io
from lxml import html
import re

# Re-wrap stdout so Chinese text prints correctly on GB18030 consoles
# (e.g. a Chinese-locale Windows cmd window).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')
sys.setrecursionlimit(1000000)  # NOTE(review): very high limit; nothing visible here recurses — confirm it is needed
etree = html.etree  # lxml.html re-exports lxml.etree; alias used for HTML parsing below
ua = UserAgent()  # random User-Agent source for the request headers


class Dianping(object):
    """Scrape dianping.com shop listings and decode the site's woff-font
    obfuscation: obfuscated glyphs in the page are mapped back to real
    characters via the glyph order of the downloaded .woff fonts.
    """

    # One shared glyph table: position i in a font's glyph order (after the
    # two leading control glyphs) corresponds to character WORDS[i].
    # Previously this string was duplicated in parse_ziti() and err().
    WORDS = '1234567890店中美家馆小车大市公酒行国品发电金心业商司超生装园场食有新限天面工服海华水房饰城乐汽香部利子老艺花专东肉菜学福饭人百餐茶务通味所山区门药银农龙停尚安广鑫一容动南具源兴鲜记时机烤文康信果阳理锅宝达地儿衣特产西批坊州牛佳化五米修爱北养卖建材三会鸡室红站德王光名丽油院堂烧江社合星货型村自科快便日民营和活童明器烟育宾精屋经居庄石顺林尔县手厅销用好客火雅盛体旅之鞋辣作粉包楼校鱼平彩上吧保永万物教吃设医正造丰健点汤网庆技斯洗料配汇木缘加麻联卫川泰色世方寓风幼羊烫来高厂兰阿贝皮全女拉成云维贸道术运都口博河瑞宏京际路祥青镇厨培力惠连马鸿钢训影甲助窗布富牌头四多妆吉苑沙恒隆春干饼氏里二管诚制售嘉长轩杂副清计黄讯太鸭号街交与叉附近层旁对巷栋环省桥湖段乡厦府铺内侧元购前幢滨处向座下臬凤港开关景泉塘放昌线湾政步宁解白田町溪十八古双胜本单同九迎第台玉锦底后七斜期武岭松角纪朝峰六振珠局岗洲横边济井办汉代临弄团外塔杨铁浦字年岛陵原梅进荣友虹央桂沿事津凯莲丁秀柳集紫旗张谷的是不了很还个也这我就在以可到错没去过感次要比觉看得说常真们但最喜哈么别位能较境非为欢然他挺着价那意种想出员两推做排实分间甜度起满给热完格荐喝等其再几只现朋候样直而买于般豆量选奶打每评少算又因情找些份置适什蛋师气你姐棒试总定啊足级整带虾如态且尝主话强当更板知己无酸让入啦式笑赞片酱差像提队走嫩才刚午接重串回晚微周值费性桌拍跟块调糕'

    def __init__(self):
        # CSS file that declares the obfuscation fonts and their woff URLs.
        self.url = 'https://s3plus.meituan.net/v1/mss_0a06a471f9514fc79c981b5466f56b91/svgtextcss/cdcfac6e2178c4aced1bd6f393a2e784.css'
        # Search-result listing page; {} is the url-quoted keyword.
        self.start_url = 'https://www.dianping.com/search/keyword/7/0_{}'
        # Shop detail page; {} is the shop id.
        self.xq_url = 'https://www.dianping.com/shop/{}'
        # Request headers; the cookie carries a logged-in session.
        self.kv = {
            'user-agent': ua.random,
            "Connection": 'close',
            'Referer': 'https://www.dianping.com/',
            'cookie': 'fspop=test; cy=7; cye=shenzhen; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=17963780453c8-0f717895c20b43-31614c0c-1fa400-17963780453c8; _lxsdk=17963780453c8-0f717895c20b43-31614c0c-1fa400-17963780453c8; _hc.v=c4e87342-e951-bdb2-c773-dd37f0a21b51.1620871481; s_ViewType=10; thirdtoken=f6328241-b04f-4874-98a5-c3bfd87fe635; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1620871481,1620876634; _dp.ac.v=e6d174d1-19f7-43d5-8986-cc95ab5475f9; dplet=58fe61ae17690dd63ce538cc4748934a; dper=a6f4bb9dd3b52a2b4e6b8cb02ee0a4b68a0cc394b992a9c3a81148f257af429fbe824ff9292c19a6e80dac4123ea414939c2e4d0414db80a6c91bc8cdb3f910da2d569a965fdbaf7a8a0a6aa808fdba4d5ff5267d853d987774904ae3c3554c8; ll=7fd06e815b796be3df069dec7836c3df; ua=dpuser_9025757366; ctu=176b698d6eb22f57f4ac645ddbe1804dfd71c619c9b60e18720d2eac2f2044b8; uamo=17346978542; _lxsdk_s=17963c675d3-f74-f2-893%7C%7C76; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1620877316'
        }

    def get_ziti(self):
        """Download the obfuscation .woff font files referenced by the CSS
        at ``self.url`` into the current working directory."""
        res = requests.get(self.url)
        font = re.findall(
            r'font-family: "(.*?)";src.*?(//s3plus\.meituan\.net/v1/mss_73a511b8f91f43d0bdae92584ea6330b/font/\w+.woff)',
            res.text, re.S)
        font_list = ['https:' + x[1] for x in font]
        for font_url in font_list:
            result = requests.get(font_url)
            file_name = font_url.split('/')[-1]  # keep the server's hash name; parse_ziti() hard-codes them
            with open(file_name, 'wb') as f:
                f.write(result.content)

    def _glyph_map(self, woff_name):
        """Build and return {glyph name -> real character} for a local woff
        file, skipping the two leading control glyphs in the glyph order."""
        font_data = TTFont(woff_name)
        return dict(zip(font_data.getGlyphOrder()[2:], self.WORDS))

    def parse_ziti(self, class_name, datas):
        """Decode a list of tokens (``'uniXXXX'`` glyph names mixed with plain
        text) into readable text, using the font selected by *class_name*.

        class_name -- CSS class of the obfuscated tag: 'num' (review count,
        average spend, scores), 'address' (category / business district),
        anything else (concrete street address).
        """
        if class_name == 'num':
            woff_name = '03d60e54.woff'
        elif class_name == 'address':
            woff_name = '1b5c460f.woff'
        else:
            woff_name = '3635bec2.woff'
        mapping = self._glyph_map(woff_name)
        rel = ''
        for token in datas:
            if token.startswith('u'):
                try:
                    rel += mapping[token]
                except KeyError:
                    # Glyph missing from the chosen font — fall back to the
                    # numeric font, which is the common superset.
                    rel += self.err(token)
            else:
                rel += token
        return rel

    def err(self, da):
        """Fallback lookup of glyph name *da* in the '03d60e54.woff' font;
        raises KeyError if it is not there either."""
        return self._glyph_map('03d60e54.woff')[da]

    def get_page_info(self):
        """Parse the locally saved shop detail page ('dazhong.html'),
        de-obfuscate name / address / phone, and print them."""
        with open('dazhong.html', 'r', encoding='utf-8') as f:
            html_ = f.read()
        # BUGFIX: str.strip() returns a new string; the original discarded it.
        html_ = html_.strip()
        html_ = re.sub(r"(&nbsp;*)", r"", html_)
        # Turn every HTML entity &#xABCD; into '*ABCD' so the hex code
        # survives lxml parsing as plain text.
        html_ = re.sub(r"&#x(\w+?);", r"*\1", html_)
        html1 = etree.HTML(html_)
        # CSS class names select which woff font decodes each field.
        num_name = html1.xpath('//p[@class="expand-info tel"]/d/@class')[0]
        addr_name = html1.xpath('//h1[@class="shop-name"]/e/@class')[0]
        # '*ABCD' tokens become 'uniABCD' glyph names; plain text passes through.
        comment_phone = html1.xpath('//p[@class="expand-info tel"]//text()')  # phone
        comment_phone_list = ['uni' + i.strip('*') if i.startswith('*') else i for i in comment_phone]
        phone = self.parse_ziti(num_name, comment_phone_list)

        comment_name = html1.xpath('//h1[@class="shop-name"]//text()')  # shop name
        comment_name_list = ['uni' + i.strip('*') if i.startswith('*') else i for i in comment_name]
        name = self.parse_ziti(addr_name, comment_name_list)

        comment_addr = html1.xpath("//div[@id='J_map-show']/span//text()")  # address
        comment_addr_list = ['uni' + i.strip('*') if i.startswith('*') else i for i in comment_addr]
        addr = self.parse_ziti(addr_name, comment_addr_list)
        # Keep only the space-separated chunks of the phone field that look
        # like actual numbers.
        t1 = [chunk for chunk in phone.split(' ')
              if re.match(r'[0-9]\w+', chunk) is not None]
        print(name[:name.find('手机')].strip(), addr.strip(), t1)

    def main(self):
        """Fetch the search-result page, then fetch and parse every shop
        detail page it links to."""
        # BUGFIX: pipes.quote is POSIX *shell* quoting and mangles the
        # keyword; URLs need percent-encoding from urllib.parse.
        from urllib.parse import quote as url_quote
        r = requests.get(self.start_url.format(url_quote('火锅')), headers=self.kv)
        r.encoding = 'utf-8'
        rp = etree.HTML(r.text)
        url = rp.xpath("//div[@class='pic']/a/@href")
        name = rp.xpath('//div[@class="tit"]//h4/text()')
        print(name)
        # BUGFIX: the original fetched only url[0] but parsed that same saved
        # file once per result; fetch and parse each shop page in turn.
        for shop_url in url:
            r1 = requests.get(shop_url, headers=self.kv)
            r1.encoding = 'utf-8'
            # Saved to disk on purpose: re-parsing locally avoids re-fetching
            # and getting the IP banned.
            with open('dazhong.html', 'w', encoding='utf-8') as f:
                f.write(r1.text)
            self.get_page_info()


if __name__ == '__main__':
    a = Dianping()
    # BUGFIX: download the woff fonts FIRST — main() -> get_page_info() ->
    # parse_ziti() opens those files from the working directory, so the
    # original order (main before get_ziti) failed on a clean checkout.
    # The trailing extra get_page_info() call was dropped: it only re-parsed
    # the already-processed saved page a second time.
    a.get_ziti()
    a.main()
