# -*- coding: utf-8 -*-
import scrapy
import requests

from mtProject.items import MtprojectItem
import re
import math

# Browser-like request headers sent with every requests.get() call below so
# wh.meituan.com serves the same JSON-bearing markup a real browser would get.
simulateBrowserHeader = {
    'Accept': 'application/json',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    # Intentionally left empty here; a logged-in session cookie may need to be
    # pasted in if the site starts rejecting anonymous requests.
    'Cookie': '',
    'Host': 'wh.meituan.com',
    'Referer': 'https://wh.meituan.com/meishi/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}


class MeituanspiderSpider(scrapy.Spider):
    """Crawl the Wuhan Meituan food listing and, for each restaurant found,
    page through the merchant-comment API collecting comments and tags.

    Yields one ``MtprojectItem`` per restaurant.
    """

    name = 'meituanSpider'
    allowed_domains = ['wh.meituan.com', 'www.meituan.com']
    start_urls = ['https://wh.meituan.com/meishi/']

    # Number of listing pages to crawl (was a hard-coded 15 inside parse()).
    page_count = 15
    # The comment API serves at most 4 pages of 50 comments per restaurant.
    max_comments = 200
    comments_per_page = 50

    @staticmethod
    def _field(pattern, text):
        """Return the first capture group of ``pattern`` in ``text``, or ''.

        Replaces the original fragile fixed-offset slicing of re.findall hits.
        """
        match = re.search(pattern, text)
        return match.group(1) if match else ''

    def parse(self, response):
        # Observed runtime: about 7 min 14 s for a full crawl.
        # NOTE(review): blocking requests.get inside a Scrapy callback stalls
        # the reactor; yielding scrapy.Request would be the idiomatic fix.
        for page in range(1, self.page_count + 1):
            if page == 1:
                url = self.start_urls[0]
            else:
                url = "https://wh.meituan.com/meishi/pn%d/" % page
                print("page: " + url)
            listing = requests.get(url, headers=simulateBrowserHeader)

            # Each restaurant is embedded in the page as a small JSON object.
            for data in re.findall('{"poiId":.*?}', listing.text):
                # Bug fix: allocate a fresh item per restaurant.  The original
                # created one MtprojectItem per page and mutated it in this
                # loop, so every yielded item aliased the same object.
                item = MtprojectItem()

                poi_id = self._field(r'"poiId":([0-9]*)', data)
                comment_num = int(self._field(r'"allCommentNum":([0-9]*)', data) or 0)

                item['poi_id'] = poi_id
                item['restaurant_name'] = self._field(r'"title":"(.*?)"', data)
                item['avg_score'] = self._field(r'"avgScore":(.*?),', data)
                item['avg_price'] = self._field(r'"avgPrice":([0-9]*)', data)
                item['comment_num'] = comment_num
                item['restaurant_addr'] = self._field(r'"address":"(.*?)"', data)
                item['comment_list'] = []
                item['tag_list'] = []

                # Page through the comment API.  This single formula reproduces
                # the original two-branch logic: cap at max_comments, then make
                # ceil(pagesize/50) requests (at least one, even for 0 comments).
                pagesize = min(comment_num, self.max_comments)
                page_num = max(1, math.ceil(pagesize / self.comments_per_page))
                for p in range(page_num):
                    offset = p * self.comments_per_page
                    self.parse_comment(call_interface(poi_id, pagesize, offset), item)

                yield item

    def parse_comment(self, url, item):
        """Fetch one comment-API page at ``url`` and fold results into ``item``.

        Appends "comment->star" strings to item['comment_list'] and fills
        item['tag_list'] from the first response that carries tags.
        """
        print('Current URL: ' + url)
        response = requests.get(url, headers=simulateBrowserHeader)

        if not item['tag_list']:
            item['tag_list'] = re.findall(r'"tag":".*?"', response.text)

        comments = re.findall(r'"comment":".*?"', response.text)
        stars = re.findall(r'"star":[0-9]', response.text)
        # zip() tolerates a comment/star count mismatch; the original indexed
        # stars positionally and could raise IndexError.
        item['comment_list'] += [c + '->' + s[-1] for c, s in zip(comments, stars)]


# call interface to get json data
# call interface to get json data
def call_interface(poiId, pagesize, offset):
    """Build the merchant-comment API URL for one page of comments.

    ``poiId`` is the restaurant id (string), ``pagesize`` the requested page
    size and ``offset`` the zero-based comment offset.  Returns the full URL.
    """
    # Pre-encoded originUrl value; its trailing '%2F&' deliberately supplies
    # the separator before the riskLevel parameter.
    origin_url = 'https%3A%2F%2Fwww.meituan.com%2Fmeishi%2F{}%2F&'.format(poiId)
    query_parts = [
        'uuid=485ecd5d6b5441e9b20d.1569475108.1.0.0',
        'platform=1',
        'partner=126',
        'originUrl=' + origin_url + 'riskLevel=1',
        'optimusCode=10',
        'id={}'.format(poiId),
        'userId=',
        'offset={:d}'.format(offset),
        'pageSize={:d}'.format(pagesize),
        'sortType=1',
    ]
    base = 'https://www.meituan.com/meishi/api/poi/getMerchantComment?'
    return base + '&'.join(query_parts)
