# coding:utf8
import requests
import re
from lxml import etree
import random
import parsel
import pprint

# 这个代码小夜斗新添加了ip代理池（其实就是随便复制粘贴了几个能用的代理放到列表里面，
# 然后随机取出其中一个，如果真滴访问量大的小伙伴建议和小夜斗一样这么做哈！）
# 然后在将其保存到字典中来！
# 列表中的字典
# ip_list = [
#     {'http': 'http://61.153.251.150:22222'},
#     {'http': 'http://117.157.197.18:3128'},
# ]

# TODO: the comment section is not rendered into the page without valid
# request headers — the Cookie below is session-specific (each login/session
# produces a different one) and must be refreshed by hand when it expires.
#
# Request headers for the review page itself. Host + Cookie are required:
# without a logged-in cookie dianping serves a verification page instead.
headers = {
    'Connection': 'keep-alive',
    'Host': 'www.dianping.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    # Bug fix: the original value started with the literal text "Cookie: ",
    # duplicating the header name inside the header value. The value must be
    # the raw cookie string only.
    'Cookie': '_lxsdk_cuid=173ec1d725e80-001e5367555543-7e647c65-1fa400-173ec1d725fc8; _lxsdk=173ec1d725e80-001e5367555543-7e647c65-1fa400-173ec1d725fc8; _hc.v=9a0b663c-e5a8-7e01-077a-b8d4d7220afa.1597394220; s_ViewType=10; fspop=test; ua=dpuser_5191363077; ctu=d2a46ed772193566de2cd33b3ff8ea407fbae2e64a575314195403e2ec7f168a; cy=2; cye=beijing; cityid=1409; default_ab=shopreviewlist%3AA%3A1; _lx_utm=utm_source%3Dwww.sogou%26utm_medium%3Dorganic; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1612158059,1612158139,1612166936,1612327052; thirdtoken=db6b0a0e-c710-4b3f-a083-b9fed37ad4fd; dper=7c9cd1b645ff07fe1fd65310a52d0eca3f820f2c408bac5cbe1065d16321ba289d73446448580d71587a5ef790a7f2171b96ef8e556a5adbae82a31a5ce5d975815bfa8720f6cad4c9ebca38ab510dfc842471b9d4044afbb67beed0a2748fe3; ll=7fd06e815b796be3df069dec7836c3df; ctu=e8d18ba96a5973c4241e98ea92e6ef18f2b6806cefea83baee63e0dc52d6474b79afc8e7246462cc70d0c6c9c0b01d74; dplet=186d217716bb58d41809c4132e4b1188; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1612327415; _lxsdk_s=177662e6072-ce-65b-a68%7C%7C212'
}

# Minimal headers for the stylesheet request: the original TODO (below, at the
# css fetch) notes that the CSS host returned 404 when the full cookie-bearing
# header set was used, so only a User-Agent is sent here.
headers2 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
}

# Headers for the SVG glyph-sheet request — same idea, but with a newer
# Chrome User-Agent string.
headers3 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
}
# Target: the "all reviews" page of one shop on dianping.com.
url = 'http://www.dianping.com/shop/H9FNPpCqj1Tu98oD/review_all'

# Fetch the review page. (To rotate proxies, pass
# proxies=random.choice(ip_list) here — see the commented-out list above.)
r = requests.get(url=url, headers=headers)
# 200 means the page was served; anything else is treated as anti-scraping.
if r.status_code == 200:
    print("访问成功")
    # gbk round-trip with errors="ignore" drops characters a gbk console
    # cannot represent, avoiding UnicodeEncodeError when printing.
    text = r.text.encode("gbk", "ignore").decode("gbk", "ignore")

    # The review glyphs are obfuscated via a CSS + SVG mapping. Step 1: find
    # the stylesheet on s3plus.meituan that holds the class -> glyph mapping.
    # Robustness fix: re.search may return None (layout change / anti-bot
    # page); the original `[1]` subscript would then raise TypeError.
    css_match = re.search(
        '<link rel="stylesheet" type="text/css" href="(//s3plus.meituan.*?)"',
        text,
    )
    if css_match is None:
        print("未找到css样式表链接，页面结构可能已变化或被反爬拦截")
    else:
        # The href is protocol-relative, so prepend the scheme.
        total_css_url = 'http:' + css_match[1]
        # TODO: this request 404s with the cookie-bearing headers, hence the
        # minimal headers2 set.
        r1 = requests.get(url=total_css_url, headers=headers2)
        print(r1.status_code)
        text1 = r1.text.encode("gbk", "ignore").decode("gbk", "ignore")

        # Step 2: pull the background SVG URL for glyph classes starting
        # with "qfs" out of the stylesheet.
        # Robustness fix: guard against an empty findall result; the
        # original `svg_url[0]` would raise IndexError.
        svg_urls = re.findall(
            r'svgmtsi\[class\^="qfs"\].*?background-image: url\((.*?)\)',
            text1,
        )
        if not svg_urls:
            print("未在css中找到svg链接")
        else:
            total_svg_url = 'http:' + svg_urls[0]
            print(total_svg_url)
            # Step 3: download the SVG sheet that contains every obfuscated
            # character.
            r2 = requests.get(url=total_svg_url, headers=headers3)
            text2 = r2.text.encode("gbk", "ignore").decode("gbk", "ignore")

            select = parsel.Selector(text2)
            # Each <text> element carries one row of characters at a given
            # y offset; map y (key) -> row text (value) so glyph positions
            # from the CSS can later be resolved to characters.
            y_text_dict = {
                node.css("text::attr(y)").get(): node.css("text::text").get()
                for node in select.css('text')
            }
            pprint.pprint(y_text_dict)
else:
    print("被反爬了！小夜斗赶紧跑路！")
