# -*- coding: utf-8 -*-
"""
Created on Mon Jul  9 16:42:52 2018

@author: bin
"""

import requests
from bs4 import BeautifulSoup
import time
import random
import re
import os
import pandas as pd
from fake_useragent import UserAgent

ua = UserAgent()

# Session cookie copied from a logged-in browser session on dianping.com.
# NOTE(review): this expires — refresh it from the browser when requests
# start returning login/verification pages.

cookie = '_lxsdk_s=18d2ff21580-ea-db9-a8b%7C%7C178; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1705906452; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1705906445; WEBDFPID=x0w357398yw1518v12w50y6zz26963v681w7uu78245979588745w5x4-2021266411097-1705906409532CWSYOAE8916c935af78ba3b6588b79f938ed96b1579; ll=7fd06e815b796be3df069dec7836c3df; s_ViewType=10; dper=02022520b585bbc3d50052b0b522755e46470bca50a6fc74fdfbeaaecabb5df082a4f9cf0b17b2339fde2441bb58b31639e61da412b32a6ed52a00000000ab1d0000df132b073482881baaec80a8811e69025c60a9f7365aa4796a3dcf22ee0db56e6efa77d0b438c5c60d9d5b809f806a45; qruuid=bba0d2df-2223-4d73-8888-c4afac2ce87c; _hc.v=d3a5b937-283b-d3c4-2d1f-718b30138999.1705906411; _lxsdk=18d2ff2157fc8-0fe5ef7827e916-49193101-1ea000-18d2ff2157fc8; _lxsdk_cuid=18d2ff2157fc8-0fe5ef7827e916-49193101-1ea000-18d2ff2157fc8'
# Request headers: randomized User-Agent per run, plus the session cookie above.
headers = {
    'User-Agent': ua.random,
    'Cookie': cookie,
    'Connection': 'keep-alive',
    'Host': 'www.dianping.com',
    'Referer': 'http://www.dianping.com/shop/l87AcPXlZlCVgjsL/review_all/p2'
}


# 获取html页面
def getHTMLText(url, code="utf-8"):
    """Fetch *url* and return the decoded page text.

    Sleeps a random 2-10 s before the request to throttle the crawler.

    Args:
        url: Page URL to fetch.
        code: Encoding forced onto the response body (default "utf-8").

    Returns:
        The page HTML on success, or the sentinel string "产生异常" on any
        request failure (callers compare against this string literally).
    """
    try:
        # Throttle: random 2-10 s delay to reduce the chance of being blocked.
        time.sleep(random.random() * 8 + 2)
        r = requests.get(url, timeout=5, headers=headers)
        r.raise_for_status()  # raise HTTPError on 4xx/5xx responses
        r.encoding = code
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; network/HTTP errors return the sentinel.
        print("产生异常")
        return "产生异常"
def remove_emoji(text):
    """Return *text* with every character outside the Basic Multilingual
    Plane (emoji and other astral code points) removed."""
    try:
        # Wide/modern build: astral code points are single characters.
        pattern = re.compile(u'[\U00010000-\U0010ffff]')
    except re.error:
        # Narrow build fallback: match UTF-16 surrogate pairs instead.
        pattern = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
    return pattern.sub(u'', text)

# 从html中提起所需字段信息
def parsePage(html, shpoID):
    """Extract review records from one review-list HTML page.

    Args:
        html: Raw HTML of a ``review_all`` page.
        shpoID: Shop identifier stored in each record under the 'shopID'
            key.  (Parameter name kept as-is for backward compatibility.)

    Returns:
        list[dict]: one dict per review with keys cus_id, comment_time,
        comment_star, cus_comment, kouwei, huanjing, fuwu, shopID.
    """
    infoList = []  # one dict per extracted review
    soup = BeautifulSoup(html, "html.parser")

    for item in soup('div', 'main-review'):
        cus_id = item.find('a', 'name').text.strip()
        comment_time = item.find('span', 'time').text.strip()
        try:
            comment_star = item.find('span', re.compile('sml-rank-stars')).get('class')[1]
        except (AttributeError, IndexError, TypeError):
            # Star-rating element missing or markup changed; record placeholder.
            # (Narrowed from a bare `except:`.)
            comment_star = 'NAN'
        cus_comment = item.find('div', "review-words").text.strip()
        scores = str(item.find('span', 'score'))
        try:
            kouwei = re.findall(r'口味：([\u4e00-\u9fa5]*)', scores)[0]
            huanjing = re.findall(r'环境：([\u4e00-\u9fa5]*)', scores)[0]
            fuwu = re.findall(r'服务：([\u4e00-\u9fa5]*)', scores)[0]
        except IndexError:
            # Sub-scores (taste/ambience/service) absent on this review.
            kouwei = huanjing = fuwu = '无'

        infoList.append({'cus_id': cus_id,
                         'comment_time': comment_time,
                         'comment_star': comment_star,
                         'cus_comment': remove_emoji(cus_comment),
                         'kouwei': kouwei,
                         'huanjing': huanjing,
                         'fuwu': fuwu,
                         'shopID': shpoID})
    return infoList


# 构造每一页的url，并且对爬取的信息进行存储
def getCommentinfo(shop_url, shpoID, page_begin, page_end, excel_file):
    """Crawl review pages [page_begin, page_end) and save them to Excel.

    Each successfully crawled page number is appended to ``xuchuan.txt`` so
    an interrupted run can resume.  When a page yields no reviews (likely
    blocked/throttled), the crawler backs off for 60 seconds.

    Args:
        shop_url: Base review URL, e.g. ".../shop/<id>/review_all/".
        shpoID: Shop identifier passed through to parsePage.
        page_begin: First page number to crawl (inclusive).
        page_end: One past the last page number to crawl.
        excel_file: Output .xlsx path for the collected reviews.
    """
    data = {'cus_id': [], 'comment_time': [], 'comment_star': [], 'cus_comment': [],
            'kouwei': [], 'huanjing': [], 'fuwu': [], 'shopID': []}

    for i in range(page_begin, page_end):
        try:
            url = shop_url + 'p' + str(i)
            html = getHTMLText(url)
            infoList = parsePage(html, shpoID)

            print('成功爬取第{}页数据,有评论{}条'.format(i, len(infoList)))

            # Column-wise append; the keys of `data` mirror the record keys
            # produced by parsePage exactly.
            for info in infoList:
                for key in data:
                    data[key].append(info[key])

            # Resume checkpoint: only record pages that actually produced data.
            if (html != "产生异常") and (len(infoList) != 0):
                with open('xuchuan.txt', 'a') as file:
                    file.write(str(i) + '\n')
            else:
                # Empty page or fetch failure — back off before continuing.
                print('休息60s...')
                time.sleep(60)
        except Exception as e:
            # Narrowed from a bare `except:`; report the error instead of
            # silently swallowing it, then move on to the next page.
            print('跳过本次', e)
            continue

    df = pd.DataFrame(data)
    df.to_excel(excel_file, index=False)

    return


def xuchuan():
    """Return the last crawled page number recorded in ``xuchuan.txt``.

    Returns:
        int: the page number on the last line of the checkpoint file, or 0
        when the file does not exist (fresh start) or is empty.
    """
    if not os.path.exists('xuchuan.txt'):
        return 0
    # Context manager replaces the unclosed open()/close() pair.
    with open('xuchuan.txt', 'r') as file:
        lines = file.readlines()
    # Robustness fix: an empty checkpoint file previously raised IndexError.
    return int(lines[-1]) if lines else 0


# 根据店铺id，店铺页码进行爬取
def craw_comment(shopID='l87AcPXlZlCVgjsL', page=147, excel_file='4.xlsx'):
    """Crawl all reviews of a shop, resuming from the last checkpoint.

    Args:
        shopID: Dianping shop identifier used in the review URL.
        page: One past the last page number to crawl.
        excel_file: Output Excel path.  New optional parameter — defaults to
            the previously hard-coded '4.xlsx', so existing callers are
            unaffected.
    """
    shop_url = "http://www.dianping.com/shop/" + shopID + "/review_all/"

    # Resume point from the checkpoint file (0 when none exists).
    nowpage = xuchuan()

    getCommentinfo(shop_url, shopID, nowpage, page, excel_file)


# Script entry point: crawl the default shop's reviews end-to-end.
if __name__ == "__main__":
    craw_comment()
