# -*- coding: utf-8 -*-


from lxml import etree
from fake_useragent import UserAgent
import time
import random
import os
import requests
import shutil
import base64
import re
import json
import uuid
from _datetime import datetime


# Proxy server (Abuyun dynamic HTTP tunnel endpoint)
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# Proxy tunnel credentials
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables or a config file before sharing this script.
proxyUser = "H01I644F9J45ODID"
proxyPass = "C61D2EF7EE18A03A"
# Browser-like request headers (including a captured session Cookie) used
# when fetching shop detail pages; presumably needed to pass the site's
# anti-scraping checks — confirm before removing the Cookie.
headers = {
    # "User-Agent": ua.random
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'uuid=3c8bbb38-5ca0-4986-b41b-3dd29e3e2f46; _lxsdk_cuid=1709e07a1fb1c-019a9d43b728c5-4313f6b-144000-1709e07a1fcc8; _lxsdk=1709e07a1fb1c-019a9d43b728c5-4313f6b-144000-1709e07a1fcc8; client-id=55f1a938-7ec9-48bd-b5d7-4467e611ef68; _hc.v=7f74887c-61ef-e889-8e46-5c9f998dbb0a.1583199270; lat=31.157242; lng=121.50137; _lxsdk_s=1709e07a1fd-cc-307-3aa%7C%7C6',
    'DNT': '1',
    'Host': 'www.meituan.com',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}

# Crawl the listing pages and scrape shop + comment data.
def get_url():
    """Crawl Meituan Shanghai food listings (pages 1-9) and scrape each shop.

    For every shop found on a listing page this fetches the detail page
    (phone / opening hours) and the first page of comments, collecting
    everything into one record per shop.

    Returns:
        list[dict]: one record per shop; empty if nothing could be parsed.
        (Previously the data was computed and discarded; returning it is
        backward-compatible since the old return value, None, was unused.)
    """
    results = []
    for page in range(1, 10):
        print("======当前页:" + str(page) + "==========")
        url = f"https://sh.meituan.com/meishi/pn{page}/"
        # url = "http://test.abuyun.com/"

        html = get_request(url)
        print("=======已获取首页========")
        # The shop list is embedded in the HTML as a JSON fragment.
        data = re.findall(r'"poiInfos":(.*?)},"comHeader"', html.text, flags=re.DOTALL)
        if not data:
            # Likely an anti-scraping/captcha page or a layout change;
            # skip instead of crashing on json.loads("").
            continue
        datajson = json.loads("".join(data))
        for d in datajson:
            poiId = d['poiId']
            record = {
                'title': d['title'],                  # shop name
                'poiId': poiId,
                'frontImg': d['frontImg'],            # cover image URL
                'avgScore': d['avgScore'],            # rating
                'allCommentNum': d['allCommentNum'],  # number of comments
                'address': d['address'],
                'avgPrice': d['avgPrice'],            # average price
            }
            # Second-level page: shop detail (phone / opening hours).
            urlChild = f"https://www.meituan.com/meishi/{poiId}/"
            # NOTE(review): deliberately uses the module-level cookie headers
            # rather than get_request(); presumably required by the site's
            # anti-scraping checks — confirm before unifying the two paths.
            html1 = requests.get(urlChild, headers=headers, verify=False)
            dataC = re.findall(r'"detailInfo":(.*?),"photos"', html1.text, flags=re.DOTALL)
            # re.findall(r'"photos":(.*?),"recommended"', html1.text, flags=re.DOTALL) # recommended-dish images
            # re.findall(r'"recommended":(.*?),"crumbNav"', html1.text, flags=re.DOTALL) # recommended-dish names
            if dataC:
                datajsonC = json.loads("".join(dataC))
                record['phone'] = datajsonC['phone']
                record['openTime'] = datajsonC['openTime']
            # First page of comments from the merchant-comment API.
            comments = json.loads(get_request_comments(poiId, urlChild).text)
            record['comments'] = []
            for c in comments['data']['comments']:
                # commentTime is an epoch timestamp in milliseconds.
                dateArray = datetime.fromtimestamp(int(c["commentTime"]) / 1000)
                record['comments'].append({
                    'userName': c['userName'],
                    'comment': c['comment'],
                    'merchantComment': c['merchantComment'],  # shop's reply
                    'commentTime': dateArray.strftime("%Y-%m-%d %H:%M:%S"),
                })
            # Per-aspect rating tags (tag text + vote count).
            record['tags'] = [{'tag': a['tag'], 'count': a['count']}
                              for a in comments['data']['tags']]
            results.append(record)
        # time.sleep(random.randint(1, 2))
    return results


def get_request(url, max_retries=5):
    """GET *url* with a fresh random User-Agent, retrying on failure.

    Args:
        url: the page to fetch.
        max_retries: attempts before giving up. The original retried
            forever via unbounded recursion, which eventually dies with
            RecursionError when the target is persistently unreachable.

    Returns:
        requests.Response of the first successful attempt.

    Raises:
        Exception: the last error once all attempts are exhausted.
    """
    last_error = None
    for attempt in range(max_retries):
        try:
            ua = UserAgent(use_cache_server=False)  # disable the UA cache server
            request_headers = {
                "User-Agent": ua.random,
            }
            # Abuyun tunnel proxy, kept ready for the commented-out call below.
            proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
                "host": proxyHost,
                "port": proxyPort,
                "user": proxyUser,
                "pass": proxyPass,
            }
            proxies = {
                "http": proxyMeta,
                "https": proxyMeta,
            }
            # NOTE(review): verify=False disables TLS verification; a timeout
            # is set so a hung connection can no longer block forever.
            return requests.get(url, headers=request_headers, verify=False, timeout=30)
            # return requests.get(url, proxies=proxies, headers=request_headers, timeout=30)
        except Exception as ex:
            print("-------------访问错误------------")
            print(ex)
            last_error = ex
            time.sleep(random.uniform(0.5, 1.5))  # brief backoff between retries
    raise last_error

def get_request_comments(id, originUrl, max_retries=5):
    """Fetch the first page of comments for a shop from the comment API.

    Args:
        id: the shop's poiId. (Parameter name kept for caller compatibility
            even though it shadows the builtin ``id``.)
        originUrl: the shop's detail-page URL, echoed to the API.
        max_retries: attempts before giving up. The original retried
            forever via unbounded recursion (RecursionError on persistent
            failure).

    Returns:
        requests.Response whose body is the JSON comment payload.

    Raises:
        Exception: the last error once all attempts are exhausted.
    """
    url = "https://www.meituan.com/meishi/api/poi/getMerchantComment"
    params = {
        "uuid": str(uuid.uuid4()),
        "platform": "1",
        "partner": "126",
        "originUrl": originUrl,
        "riskLevel": "1",
        "optimusCode": "10",
        "id": str(id),
        "userId": "",
        "offset": "0",       # first page only
        "pageSize": "10",
        "sortType": "1",
    }
    last_error = None
    for attempt in range(max_retries):
        try:
            ua = UserAgent(use_cache_server=False)  # disable the UA cache server
            request_headers = {
                "User-Agent": ua.random,
            }
            # Abuyun tunnel proxy, kept ready for the commented-out call below.
            proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
                "host": proxyHost,
                "port": proxyPort,
                "user": proxyUser,
                "pass": proxyPass,
            }
            proxies = {
                "http": proxyMeta,
                "https": proxyMeta,
            }
            # NOTE(review): verify=False disables TLS verification; a timeout
            # is set so a hung connection can no longer block forever.
            return requests.get(url, params=params, headers=request_headers,
                                verify=False, timeout=30)
            # return requests.get(url, params=params, proxies=proxies,
            #                     headers=request_headers, timeout=30)
        except Exception as ex:
            print("-------------访问错误------------")
            print(ex)
            last_error = ex
            time.sleep(random.uniform(0.5, 1.5))  # brief backoff between retries
    raise last_error


# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    get_url()
