# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
from urllib import request
from fontTools.ttLib import TTFont
import os
from lxml import etree
import json
import parse_font_css
import re
from pymongo import MongoClient
import bs4
from fake_useragent import UserAgent
import time
from DaZhongDianPing.parse_css import *

"""
大众点评
首页、二级页面为woff字体
全部评论页面为css字体
"""



# Default request headers for www.dianping.com (listing / detail pages).
# NOTE(review): contains a hard-coded, personal session Cookie — it expires and
# must be refreshed manually; avoid committing live credentials.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'DNT': '1',
    'Host': 'www.dianping.com',
    'Referer': 'http://www.dianping.com/shanghai/ch10',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36',
    'Cookie':'fspop=test; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=178e7d96e36c8-0aa6df2fb8e3dd-3f356b-144000-178e7d96e36c8; _lxsdk=178e7d96e36c8-0aa6df2fb8e3dd-3f356b-144000-178e7d96e36c8; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1618797490; _hc.v=885ad584-5539-cf4d-d385-4ffdc373cde2.1618797490; dplet=bb4b6641f33943c966eb8eaacc1e59ed; dper=a0ad0d12bf0a275f569194a92332fbf9ef51183a185cdce3fcd2a9945bb39d3632d0fa485bcbf5eb9c6238fb461b57c06546a48d86a5aa722a7c3a82c380cc9f7337049a05a3c2dee367582b3ab9133e167fe0a86de6654a359bcbf3e75cd82d; ll=7fd06e815b796be3df069dec7836c3df; ua=dpuser_97303534367; ctu=cba2688231bdcbd2c63ab7bd78fdb1819ad252bd06d63ed9f414a2f6cccd61ca; uamo=13841212966; s_ViewType=10; cy=1; cye=shanghai; _lxsdk_s=178e7d96e37-1ec-81f-dfd%7C%7C1096; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1618797904; _lxsdk_s=1778c91ebd3-450-09f-449%7C%7C65'
}
# Raw cookie string used by the recommended-dish / review endpoints; converted
# to a dict via get_cookie_dict() before being passed to requests.
cookie = "fspop=test; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=1778c91ebd2c8-0c6ee03b8ca71-53e3566-144000-1778c91ebd2c8; _lxsdk=1778c91ebd2c8-0c6ee03b8ca71-53e3566-144000-1778c91ebd2c8; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1612971110; _hc.v=94d1c314-7b8e-f0ff-bb94-dcac7fd3135f.1612971110; lgtoken=0adfe6bb7-d617-4d05-bfdd-142042eaa5f9; dplet=e4475c090ff737a9870dbe4e25209eb3; dper=7cc262a5f449278011198325371656ab6b619b6c4f949f44d9e83d557748aef4eb1774a3c2c8f3d99c36f5d2a2825792d57483a3191240907d7de8c5a02fc85960ee3157f1dcd5f2ef9aecdd8f986f0c4a62ae30abb81e2a18eb6648c9d3b648; ll=7fd06e815b796be3df069dec7836c3df; ua=dpuser_97303534367; ctu=cba2688231bdcbd2c63ab7bd78fdb181b3c5f8e3ad5e8b09364aa3669cdf9443; uamo=13841212966; cy=1; cye=shanghai; s_ViewType=10; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1612971228; _lxsdk_s=1778c91ebd3-450-09f-449%7C%7C65"

#
# startPage = 1
# url = f"http://www.dianping.com/anshan/ch10/r12401p{startPage}"


def _replace_font_codes(html_text, marker, font_map):
    """Decode dianping's woff-font obfuscation for one CSS class.

    Every ``&#xCODE;`` entity that directly follows *marker* (e.g.
    ``'"shopNum">'``) is looked up in *font_map* (``'&#xCODE;' -> character``)
    and substituted into the HTML text.

    :param html_text: raw page HTML
    :param marker: literal prefix preceding the obfuscated entity
    :param font_map: mapping built from the downloaded woff file
    :return: the HTML with the entities for this marker decoded
    """
    for code in re.findall(re.escape(marker) + r'&#x(.*?);', html_text):
        html_text = html_text.replace(marker + '&#x' + code + ';',
                                      marker + font_map['&#x' + code + ';'])
    return html_text


# Scrape a first-level shop listing page
def get_contents(url):
    """Fetch one listing page, decode the shopNum/tagName woff fonts, print
    each shop's basic info and fetch its recommended dishes.

    Relies on the module globals ``showNum_font`` and ``tagName_font`` that
    ``__main__`` builds from the downloaded woff files — TODO confirm they are
    set before this is called.
    """
    html = requests.get(url, headers=headers)
    pase_html = html.text

    # decode the "shopNum" font (review count / price digits)
    print("========shouNum解析==========")
    pase_html = _replace_font_codes(pase_html, '"shopNum">', showNum_font)

    # decode the "tagName" font (cuisine / district text)
    print("========tagName解析==========")
    pase_html = _replace_font_codes(pase_html, '"tagName">', tagName_font)

    soup = BeautifulSoup(pase_html, "lxml")

    for s in soup.find('div', attrs={'id': 'shop-all-list'}).find('ul').find_all('li'):
        head = s.find_all('div')[0]        # hoisted: was looked up four times
        shopid = head.a['data-shopid']     # kept for the disabled get_allReview call
        name = head.img['title']           # shop name
        nameurl = head.a['href']           # shop detail URL
        imgurl = head.img['src']           # NOTE(review): scraped but currently unused

        comment_div = s.find('div', attrs={'class': 'comment'})
        # star rating is encoded in the class name, e.g. "star_45" -> 4.5
        start = int(comment_div.div.div.span['class'][1].replace("star_", "")) / 10
        # overall score
        score = comment_div.div.text.replace("\n", "")
        # review count and average price
        anchors = comment_div.find_all('a')
        num = anchors[0].text.replace("\n", "").replace("条点评", "")
        price = anchors[1].text.replace("\n", "").replace("人均", "").replace("-", "").replace("￥", "").strip()

        # cuisine and district
        spans = s.find('div', attrs={'class': 'tag-addr'}).find_all('span')
        caixi = spans[0].text
        area = spans[1].text
        # full address comes decoded from a data attribute
        address = s.find('div', attrs={'class': 'operate J_operate Hide'}).find_all('a')[1]['data-address']

        # recommended dishes joined with '|' (was a quadratic += loop plus a
        # trailing-separator [:-1] hack)
        tjc = "|".join(c.text for c in s.find('div', attrs={'class': 'recommend'}).find_all('a'))

        # taste / ambience / service sub-scores
        subs = s.find('span', attrs={'class': 'comment-list'}).find_all('b')
        kouwei = subs[0].text
        huanjing = subs[1].text
        fuwu = subs[2].text

        print("========================")
        print(name)
        print(f"几星商户:{start}")
        print(f"总分:{score}")
        print(f"{num}条点评")
        print(f"人均:{price}")
        print(f"菜系:{caixi}")
        print(f"区域:{area}")
        print(f"地址:{address}")
        print(f"口味:{kouwei} 环境:{huanjing} 服务:{fuwu}")
        print(f"推荐菜:{tjc}")

        # get_tel(nameurl)        # phone number (disabled)
        get_likeFood(nameurl)     # recommended dishes
        # get_allReview(shopid)   # review stats / tags (disabled)


# Phone number — requires the manually downloaded "num" woff font from the
# second-level (shop detail) page.
def get_tel(url):
    """Fetch a shop detail page, decode the 'num' font and print the phone
    number. Uses the module global ``num1_font`` for the glyph mapping."""
    response = requests.get(url, headers=headers)
    page = response.text
    # decode the "num" font (phone-number digits)
    print("========num解析==========")
    for code in re.findall(r'"num">&#x(.*?);', page):
        decoded = num1_font['&#x' + code + ';']
        page = page.replace('"num">&#x' + code + ";", '"num">' + decoded)
    soup = BeautifulSoup(page, "lxml")
    # print the decoded phone-number paragraph
    print(soup.find('p', attrs={'class': 'expand-info tel'}).text)

# Recommended dishes — requires the manually downloaded dishname woff font.
def get_likeFood(url):
    """Fetch the shopTabs AJAX endpoint and print the recommended dishes plus
    the good/neutral/bad review counts.

    NOTE(review): the *url* parameter is kept for interface compatibility but
    the request always hits the hard-coded shopTabs endpoint with the
    hard-coded ``postData`` below (shopId/_token belong to one specific shop)
    — TODO: derive shopId/_token from *url*.
    """
    cookies = get_cookie_dict(cookie)
    postData = {
        'shopId': '10347049',
        'cityId': '58',
        'shopName': '大韩味馆',
        'power': '5',
        'mainCategoryId': '114',
        'shopType': '10',
        'shopCityId': '58',
        '_token': 'eJxVT8tuwjAQ/Je9YiW24zxv0ApERFoBgYZUHAgPY9EkTpxCSNV/r6PSQ08zOzOrnf2CenqAgGCMGUFwPdYQADGw4QCCRmnH9jyfUMtl2GII9v81F2MEWb1+huCd2JaDLNfZ9spCC7+K57AtelCqKWWoB8imOgLnppGBad5uN+MgdoUUBTf2ZW6qcylNok+6mPm6CuiNPO43LIoRc/p6lmMjgmlvXXpL4+6Bzd8c6Xd0VAleaHYM23ipmKpOi0jFb2QVNS9d1HaxaMN4712H5WUzkVU5Pi4T2WZpXpXrdDBPXqNozQvOR/NsEn5uJqFM+fXDF1lVN/iezBLPPg2SWNjyNKPqSUzT1bhajciSD7tWbCh8/wAu8GZg',
        'uuid': 'caf374f6-970d-7502-926f-3665c9ad3ec5.1588896578',
        'platform': '1',
        'partner': '150',
        'optimusCode': '10',
        'originUrl': 'http://www.dianping.com/shop/10347049'
    }
    api_url = "http://www.dianping.com/ajax/json/shopDynamic/shopTabs"
    # BUG FIX: postData was built but never attached to the request — the
    # endpoint needs these query parameters (_token, shopId, ...).
    html = requests.get(api_url, headers=headers, cookies=cookies, params=postData)
    pase_html = html.text

    # decode the "dishname" font (note the escaped quotes inside the JSON body)
    print("========dishname解析==========")
    tmp_dishname = re.findall(r'"dishname\\">&#x(.*?);', pase_html)
    for t in tmp_dishname:
        pase_dishname = dishname_font['&#x' + t + ';']
        pase_html = pase_html.replace('"dishname\\">&#x' + t + ";", '"dishname\\">' + pase_dishname)

    data_json = json.loads(pase_html)
    for d in data_json['allDishes']:
        # BUG FIX: the decoded name/price were assigned but never used
        print(f"推荐菜:{d['dishTagName']} 价格:{d['shopPrice']}")

    soup = BeautifulSoup(pase_html, "lxml")

    # positive review count
    haoping = soup.find('label', attrs={'class': 'filter-item J-filter-good'})\
        .find('span', attrs={'class': 'count'}).text.replace("(", "").replace(")", "")
    # neutral review count
    zhongping = soup.find('label', attrs={'class': 'filter-item J-filter-common'}) \
        .find('span', attrs={'class': 'count'}).text.replace("(", "").replace(")", "")
    # negative review count
    chaping = soup.find('label', attrs={'class': 'filter-item J-filter-bad'}) \
        .find('span', attrs={'class': 'count'}).text.replace("(", "").replace(")", "")

    print(" 好评:" + haoping + " 中评:" + zhongping + " 差评:" + chaping)


# Review scores, review tags and recommended dishes (no price info)
def get_allReview(shopId):
    """Query the allReview AJAX endpoint for *shopId* and print the aggregated
    review statistics, the summary tags and the recommended-dish names.

    :param shopId: dianping shop id as a string
    NOTE(review): tcv/_token are hard-coded and presumably expire — verify.
    """
    postData = {
        'shopId': shopId,
        'cityId': '58',
        'shopType': '10',
        'tcv': 'pbaa9cgj3g',
        '_token': 'eJxVjklvgzAUhP/LOyOwWQxB6oGKpCLCRApL1FQ5QBagxGymkKbqf6+j0kNPM+97M9J8Qe+dwMYIIR1LMJ57sAHLSCYgwcDFx7CsBcYYqUgVgeN/ZmIkQdYnLthv2NCIpJnk8CBbAX6JRfSDNFtVWFWXHgKZJyJQDENrK8o0TfKpTOu2rHP52DCFF02rmDrGRLV0MQVEg0WiIbSaNZ11+Lup2C6yvMxr4c7rWxRynXeXLeXRDseUj2FVDbzUghBdg0/r5kfJsr/zfHRo98puecqK6555ruHt41WXpS/PrF+1GTPKeJdcGmZ0cU7C1vcz5yNcFnSoFpv7e7BJHOquPcd5gu8f1kthQA==',
        'platform': '1',
        'partner': '150',
        'optimusCode': '10',
        'originUrl': f'http://www.dianping.com/shop/{shopId}'
    }
    url = "http://www.dianping.com/ajax/json/shopDynamic/allReview"
    html = requests.get(url, headers=headers, params=postData)
    data_json = json.loads(html.text)
    dict_data = dict()
    dict_data['reviewCountAll'] = data_json['reviewCountAll']        # total reviews
    dict_data['reviewCountPic'] = data_json['reviewCountPic']        # reviews with photos
    dict_data['reviewCountGood'] = data_json['reviewCountGood']      # positive reviews
    dict_data['reviewCountCommon'] = data_json['reviewCountCommon']  # neutral reviews
    dict_data['reviewCountBad'] = data_json['reviewCountBad']        # negative reviews
    # summary tags (manual counter replaced with enumerate; the original
    # comment on summaryCount was a copy-paste error saying "name")
    for count, d in enumerate(data_json['summarys']):
        dict_data['summaryString_' + str(count)] = d['summaryString']  # tag text
        dict_data['summaryCount_' + str(count)] = d['summaryCount']    # tag frequency
    # recommended dishes (this endpoint carries no prices)
    for count, c in enumerate(data_json['dishTagStrList']):
        dict_data['dishTag_' + str(count)] = c
    print("===============================")
    print(str(dict_data))


# Fetch one page of a shop's full review list (CSS/SVG-obfuscated text)
def get_comments(url, startPage):
    """Scrape one ``review_all`` page: review-count filters, review tags and
    each review's user / dishes / time / likes / content.

    The review text is obfuscated with CSS background-sprite fonts and is
    decoded via the helpers imported from ``DaZhongDianPing.parse_css``
    (get_CSS_URL, get_SVG_Content, get_Comment_Content, ...).

    NOTE(review): ``startPage`` is accepted but never used in the body.
    """
    # NOTE(review): parsed but unused — the active request below does not pass it
    cookies = get_cookie_dict(cookie)
    # local headers shadows the module-level `headers`; carries its own Cookie
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Cookie": "fspop=test; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=1778c91ebd2c8-0c6ee03b8ca71-53e3566-144000-1778c91ebd2c8; _lxsdk=1778c91ebd2c8-0c6ee03b8ca71-53e3566-144000-1778c91ebd2c8; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1612971110; _hc.v=94d1c314-7b8e-f0ff-bb94-dcac7fd3135f.1612971110; dplet=e4475c090ff737a9870dbe4e25209eb3; dper=7cc262a5f449278011198325371656ab6b619b6c4f949f44d9e83d557748aef4eb1774a3c2c8f3d99c36f5d2a2825792d57483a3191240907d7de8c5a02fc85960ee3157f1dcd5f2ef9aecdd8f986f0c4a62ae30abb81e2a18eb6648c9d3b648; ll=7fd06e815b796be3df069dec7836c3df; ua=dpuser_97303534367; ctu=cba2688231bdcbd2c63ab7bd78fdb181b3c5f8e3ad5e8b09364aa3669cdf9443; uamo=13841212966; cy=1; cye=shanghai; s_ViewType=10; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1612972042; _lxsdk_s=1778c91ebd3-450-09f-449%7C%7C377",
        "DNT": "1",
        "Host": "www.dianping.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
    }
    # html = requests.get(url, headers=headers, cookies=cookies)
    html = requests.get(url, headers=headers)
    html_pase = html.text
    # bail out on the "page not accessible" / verification-center interstitials
    a = re.findall('<p class="not-found-words">抱歉！页面无法访问......</p>', html_pase, re.S)
    b = re.findall("<div class='logo' id='logo'>验证中心</div>", html_pase, re.S)
    if a or b:
        return
    soup = BeautifulSoup(html_pase, 'lxml')
    # review tags  (NOTE(review): comments_tag is built but never used afterwards)
    if soup.find(name='div', attrs={'class':'content'}) is not None:
        comments_tag_soup = soup.find(name='div', attrs={'class':'content'}).find_all(name='span')
        comments_tag = ""
        for c in comments_tag_soup:
            comments_tag += c.text.replace("\n", "").replace(" ", "").strip() + "|"
    # filter counts — the page ships one of two class-name layouts
    # (NOTE(review): pic_num/good_num/middle_num/bad_num are never used either)
    if soup.find(name='label', attrs={'class':'filter-item filter-pic'}) is not None:
        pic_num = soup.find(name='label', attrs={'class':'filter-item filter-pic'}).find(name='span').text
        # positive review count
        good_num = soup.find(name='label', attrs={'class':'filter-item filter-good'}).find(name='span')\
            .text.replace("(", "").replace(")", "")
        # neutral review count
        middle_num = soup.find(name='label', attrs={'class':'filter-item filter-middle'}).find(name='span')\
            .text.replace("(", "").replace(")", "")
        # negative review count
        bad_num = soup.find(name='label', attrs={'class':'filter-item filter-bad'}).find(name='span')\
            .text.replace("(", "").replace(")", "")
    else:
        pic_num = soup.find(name='label', attrs={'class':'filter-item J-filter-pic'}).find(name='span').text
        # positive review count
        good_num = soup.find(name='label', attrs={'class':'filter-item J-filter-good'}).find(name='span') \
            .text.replace("(", "").replace(")", "")
        # neutral review count
        middle_num = soup.find(name='label', attrs={'class':'filter-item J-filter-common'}).find(name='span') \
            .text.replace("(", "").replace(")", "")
        # negative review count
        bad_num = soup.find(name='label', attrs={'class':'filter-item J-filter-bad'}).find(name='span') \
            .text.replace("(", "").replace(")", "")
    print("===========下载字体==============")
    comment_div_list = soup.find_all(name='div', attrs={'class':'main-review'})
    # download/cache the obfuscation CSS and its SVG sprite sheets (MongoDB-cached)
    CSSUrl = get_CSS_URL(html_pase)
    CSSContent = get_CSS_Content(CSSUrl, CSS_collection)
    SVG_list_url = get_SVG_URL(CSSContent)
    SVG_dic = get_SVG_Content(SVG_list_url, SVG_collection)
    data_list = list()
    print("===========解析中==============")
    for comment_div in comment_div_list:
        data = dict()
        # score_tag = get_score(comment_div)  # rating extraction (disabled)
        user_name = get_User_Name(comment_div)
        food = get_Food_List(comment_div)
        comment_time = get_Comment_Time(comment_div)
        zan_num, response_num = get_Zan_Reponse_Num(comment_div)
        comment_content = get_Comment_Content(comment_div, CSSContent, SVG_dic)
        food = "|".join(food)
        # score = "|".join(score_tag)  # rating extraction (disabled)
        data = {
            'userID': user_name,  # reviewer name
            'likeFood': food,  # recommended dishes joined with '|' (original comment wrongly said "like count")
            'commentTime': comment_time,  # publish time
            'zanNum': zan_num,  # number of likes
            'responseNum': response_num,  # number of replies
            # 'score': score,  # rating (disabled)
            'content': comment_content  # decoded review text
        }
        data_list.append(data)
        print("=========================")
        print(str(data))
        # insertItem("喜茶_评论", data)


# Convert a raw Cookie header string into a dict
def get_cookie_dict(cookie_str):
    """Parse a raw ``Cookie`` header string into ``{name: value}``.

    Fixes over the original:
    - splits each pair on the FIRST '=' only, so values that themselves
      contain '=' (session tokens, base64 blobs) are no longer truncated;
    - skips empty segments (e.g. a trailing ';') instead of raising IndexError.

    :param cookie_str: e.g. ``"a=1; b=2"`` (the parameter previously shadowed
        the builtin ``str``; all callers pass it positionally)
    :return: dict of cookie name -> value
    """
    cookies = {}
    for item in cookie_str.split(';'):
        if not item.strip():
            continue  # tolerate trailing/duplicate separators
        key, _, value = item.partition('=')
        cookies[key.strip()] = value
    return cookies


# Persist one scraped record into MongoDB.
def insertItem(tableName, data):
    """Insert *data* as a single document into the collection *tableName*.

    Uses the module-level ``db`` handle created in ``__main__``.
    """
    db[tableName].insert_one(data)


if __name__ == "__main__":
    # MongoDB handles: `db` is read by insertItem; the CSS/SVG collections
    # cache the obfuscation assets for get_comments
    conn = MongoClient('127.0.0.1', 27017)
    db = conn["dazhongdianping"]
    CSS_collection = db['CSS']
    SVG_collection = db['SVG']

    # Parse the landing page's font CSS and download the woff font files
    url = "http://www.dianping.com/shanghai/ch10"
    html = requests.get(url, headers=headers)
    woff_url, css_html = parse_font_css.get_ttf_url(html.text)
    # download each obfuscation font
    parse_font_css.get_shopnum_woff(css_html)
    parse_font_css.get_address_woff(css_html)
    parse_font_css.get_tagName_woff(css_html)
    # build the glyph-code -> character maps; these module globals are consumed
    # by get_contents / get_tel / get_likeFood above
    showNum_font, tagName_font, address_font, num1_font, review_font, dishname_font = parse_font_css.convert_Data()
    # # Listing pages (disabled)
    # for p in range(1, 51):
    #     url = f"http://www.dianping.com/shanghai/ch10/p{p}"
    #     get_contents(url)


    # Review scores, tags, recommended dishes without prices (disabled)
    # get_allReview("45507899")


    # # Recommended dishes (disabled)
    # get_likeFood("http://www.dianping.com/shop/79433870")


    # Crawl the review pages of one shop
    for p in range(1, 801):
        print(f"=========第【{p}】页===========")
        # NOTE(review): the first two URL assignments are dead code — each is
        # immediately overwritten; only the last shop is actually crawled
        url = f"http://www.dianping.com/shop/58935240/review_all/p{p}"
        url = f"http://www.dianping.com/shop/867689767/review_all/p{p}"
        url = f"http://www.dianping.com/shop/G47ph7ugkt15xUQN/review_all/p{p}"
        get_comments(url, p)
        time.sleep(5)  # throttle to reduce anti-crawler triggers
