#coding:utf8
import requests
import re
from lxml import etree
import random
import parsel


# 发起请求
def post_request(url=None):
    """GET a dianping.com url and return its body text, gbk-sanitised.

    Args:
        url: target url. When None (the default) the hard-coded review-list
            page is fetched with the full logged-in header set (Cookie
            included); any other url (css/svg assets) is fetched with a
            minimal UA-only header.

    Returns:
        Response text with characters not representable in gbk dropped
        (round-trip through gbk avoids console/encode errors downstream).
    """
    # Bug fix: identity comparison with None (`is None`, not `== None`).
    if url is None:
        # Review-list page url
        url = 'http://www.dianping.com/shop/H9FNPpCqj1Tu98oD/review_all'
        # Full headers for the logged-in review page
        headers = {
            'Connection': 'keep-alive',
            'Host': 'www.dianping.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
            'Cookie': '_lxsdk_cuid=173ec1d725e80-001e5367555543-7e647c65-1fa400-173ec1d725fc8; _lxsdk=173ec1d725e80-001e5367555543-7e647c65-1fa400-173ec1d725fc8; _hc.v=9a0b663c-e5a8-7e01-077a-b8d4d7220afa.1597394220; s_ViewType=10; fspop=test; ctu=d2a46ed772193566de2cd33b3ff8ea407fbae2e64a575314195403e2ec7f168a; cy=2; cye=beijing; cityid=1409; default_ab=shopreviewlist%3AA%3A1; dper=7c9cd1b645ff07fe1fd65310a52d0ecaec1f4776c565f51f814f8977ae4311a9719b4557762c500fc807ad29a87dcda62247f919ae2adf53d941baa50615a28cda1f22b5008b8abf1510119d5e50d2e11f5c1a492fdfd6496e1b216fc68fe44c; ua=%E5%A4%9C%E6%96%97%E5%B0%8F%E7%A5%9E%E7%A4%BE; ll=7fd06e815b796be3df069dec7836c3df; dplet=55671571d3c0655ebe744db1dd4f12c2; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1612327052,1612413958,1612492875,1612503690; aburl=1; Hm_lvt_dbeeb675516927da776beeb1d9802bd4=1612503694; Hm_lpvt_dbeeb675516927da776beeb1d9802bd4=1612503694; wed_user_path=163|0; _lx_utm=utm_source%3Dwww.sogou%26utm_medium%3Dorganic; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1612504818; _lxsdk_s=17770b5aa74-01d-4b4-f85%7C%7C152'
        }
    else:
        # Minimal headers used when fetching css and svg assets
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
        }

    # Robustness fix: a timeout so a blocked request cannot hang forever.
    # r = requests.get(url=url, headers=headers, proxies=ip)
    r = requests.get(url=url, headers=headers, timeout=15)
    # print(f'能用的ip地址：{ip}')
    print("访问成功")
    # Strip characters that gbk cannot represent (double "ignore" round-trip).
    text = r.text.encode("gbk", "ignore").decode("gbk", "ignore")

    return text


# 获取css背景信息, 并返回映射字典内容url以及背景类字典坐标信息
def css_info():
    """Locate and parse the obfuscation css linked from the review page.

    Returns:
        (total_svg_url, back_dict): the absolute url of the svg glyph sheet,
        and a dict mapping css class name -> [x, y] background offsets (str).

    Raises:
        ValueError: when the meituan css link is missing from the page (e.g.
            the request was blocked) — clearer than the original TypeError
            from subscripting a None match.
        IndexError: when the css contains no svg background-image url.
    """
    text = post_request()
    match = re.search('<link rel="stylesheet" type="text/css" href="(//s3plus.meituan.*?)"', text)
    if match is None:
        # Bug fix: the original did re.search(...)[1] and crashed with an
        # opaque TypeError when the page did not contain the css link.
        raise ValueError('obfuscation css link not found in review page')
    # Make the protocol-relative url absolute
    total_css_url = 'http:' + match[1]
    text1 = post_request(total_css_url)
    # class name -> [x offset, y offset] for every obfuscated glyph class
    back_dict = {
        name: [x, y]
        for name, x, y in re.findall(r'\.(.*?){background:-(.*?).0px -(.*?)\.0px;}', text1)
    }

    # Url of the svg sheet holding the actual glyph characters
    svg_url = re.findall(r'svgmtsi\[class\^="euk"\].*?background-image: url\((.*?)\)', text1)
    total_svg_url = 'http:' + svg_url[0]
    # Return the two pieces the decoder needs
    return total_svg_url, back_dict


# 映射文本获取
def mapping_text(flag):
    """Download the svg glyph sheet and build a y-offset -> line-text mapping.

    Args:
        flag: '2' for sheets built from <text> lines; '1' for sheets built
            from <path>/<textPath> pairs.

    Returns:
        Dict mapping a line's y coordinate (str) to that line's character
        string, or None when flag is neither '1' nor '2' (original behaviour,
        now explicit).
    """
    # Url of the svg glyph sheet
    url = css_info()[0]
    print(url)
    # Raw svg markup
    text2 = post_request(url)
    # Hoisted: both branches parsed the same text into a Selector.
    select = parsel.Selector(text2)

    # First sheet format: <text y="..."> lines
    if flag == '2':
        # y attribute is the key, the node's text content the value
        return {
            content.css("text::attr(y)").get(): content.css("text::text").get()
            for content in select.css('text')
        }

    # Second sheet format: <path d="M0 y ..."> supplies y, <textPath> the text
    if flag == '1':
        path_list = select.css('path')
        print("第二种svg格式")
        # Second token of the path's d attribute is the line height y
        y_list = [path.css("path::attr(d)").get().split(' ')[1]
                  for path in path_list]
        line_text_list = [content.css("textPath::text").get()
                          for content in select.css('textPath')]
        return dict(zip(y_list, line_text_list))

    # Unknown flag: preserve the original implicit-None contract explicitly.
    return None


# 获取svg字体信息
def svg_character(name_class_list, y_text_dict, back_dict=None):
    """Map svgmtsi css class names to the characters they encode.

    Args:
        name_class_list: class names taken from <svgmtsi class="..."> tags.
        y_text_dict: mapping of svg line height (y, as str) -> line text,
            as produced by mapping_text().
        back_dict: mapping of class name -> [x, y] offsets (str). When None
            (the default, keeping the old two-argument call compatible) the
            module-level ``back_dict`` built in ``__main__`` is used, exactly
            as the original implicit-global version did.

    Returns:
        List of decoded characters in input order; class names with no
        offset entry are skipped.
    """
    if back_dict is None:
        # Backward-compatible fallback to the global populated in __main__.
        back_dict = globals()['back_dict']

    character_list = []
    for name_class in name_class_list:
        try:
            # [x offset, y offset] for this glyph class
            x, y = back_dict[name_class]
        except KeyError:
            # Bug fix: the original bare `except` printed and then FELL
            # THROUGH, reusing the previous iteration's x/y (or raising
            # NameError on the first miss). Skip the class instead.
            print('error')
            continue

        # Find the first svg text line whose y is strictly greater than the
        # glyph's y offset — that line contains the glyph.
        for index in y_text_dict.keys():
            if int(index) > int(y):
                # 14px is the font size, so x / 14 is the column index.
                position = int(int(x) / 14)
                find_it = y_text_dict[index][position]
                character_list.append(find_it)
                # Stop at the first matching line
                break
    return character_list


# 获取评论
def search_comment(background_info, r_text):
    """Fetch the review page and decode every svg-obfuscated comment.

    Args:
        background_info: class name -> [x, y] dict from css_info(). Kept for
            interface compatibility; note the lookup actually happens inside
            svg_character via the module-level ``back_dict``.
        r_text: y -> line-text mapping produced by mapping_text().

    Returns:
        List of plain-text comments with the svg glyphs substituted back in
        (possibly partial if an error interrupted the loop).
    """
    # Review-list page html
    text = post_request()
    # Parse into an lxml tree
    tree = etree.HTML(text)
    # All svgmtsi class names inside the truncated review bodies.
    # NOTE(review): currently unused — looks like a leftover debug aid.
    svg_info_list = tree.xpath(r'.//div[@class="main-review"]/div[@class="review-truncated-words"]/svgmtsi/@class')
    # Raw html of each truncated review body
    comment_list = re.findall(r'<div class="review-truncated-words">(.*?)<div class="more-words">', text, re.S)
    new_comment_list = []
    # Decode each comment fragment
    try:
        for comment in comment_list:
            # class attributes of the obfuscated glyph placeholders
            class_info = re.findall(r'<svgmtsi class="(.*?)"></svgmtsi>', comment, re.S)
            # Map placeholder classes to real characters
            character_list = svg_character(class_info, r_text)
            # Substitute each <svgmtsi ...></svgmtsi> tag with its character
            for character, name in zip(character_list, class_info):
                comment = comment.replace(f'<svgmtsi class="{name}"></svgmtsi>', character).strip().replace('&#x20;', ' ')\
                    .replace('&#x0A;', ' ')
                # Drop emoji <img> noise
                comment = re.sub(r'<img class="emoji-img" src=".*?" alt=""/>', '', comment)
            new_comment_list.append(comment)
    except Exception:
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception, same best-effort behaviour.
        print("error")
    print(new_comment_list)
    return new_comment_list


def save_comment(info_list):
    """Persist decoded comments to 大众点评信息.txt, one comment per line."""
    # Assemble the whole payload first, then write it in a single call.
    payload = "".join(f"{entry}\n" for entry in info_list)
    with open('大众点评信息.txt', 'w', encoding='utf8') as out:
        out.write(payload)


if __name__ == '__main__':
    # # proxy pool (disabled): rotate proxies into requests.get via `proxies=ip`
    # ip_list = [
    #     {'http': 'http://8.135.103.42:80'},
    #     {'http': 'http://112.80.248.75:80'},
    #     {'http': 'http://113.214.13.1:1080'},
    # ]
    #
    # # pick a random proxy
    # ip = ip_list.pop(random.randint(0, len(ip_list) - 1))
    # print(f'正在使用的ip地址：{ip}')
    # Background offsets (class name -> [x, y]).
    # NOTE(review): this GLOBAL name `back_dict` is read directly inside
    # svg_character(); renaming it would break the glyph lookup.
    back_dict = css_info()[1]
    # Pass '1' when the svg sheet uses textLength/<textPath> tags, '2' otherwise.
    # y-offset -> line-text mapping for the glyph sheet
    r_text_dict = mapping_text('2')
    # print(back_dict)
    # print(r_text_dict)
    # Fetch and decode all review texts
    comment_list = search_comment(back_dict, r_text_dict)
    # Write the results to 大众点评信息.txt
    save_comment(comment_list)