from copy import deepcopy

import scrapy
from lxml import etree
from scrapy import Selector
from spider_qunaer import items
from warehouse import models
from bs4 import BeautifulSoup


# parent_item.instance.children.add(child_item.instance)

class QunaerSpider(scrapy.Spider):
    """Spider that crawls dianping.com shop listings for one city, follows
    each shop's detail page, and collects up to five pages of user reviews
    per shop. Shops and reviews are emitted as items for the pipelines.

    NOTE(review): the spider `name` says "qunaer" but the start URL is
    dianping.com — confirm which site this spider actually targets.
    """

    name = 'qunaer'
    # allowed_domains = ['travel.qunar.com']
    # Entry point: the dianping.com front page; city/category paths are
    # appended when paginating below.
    start_urls = ['https://www.dianping.com']
    city_list = ['chongqing', 'chengdu', 'guiyang', 'yunnan']
    # Current listing page number (mutable pagination state on the class).
    page_num = 1
    # models.Scenery.objects.all().delete()
    # models.Evaluate.objects.all().delete()

    def start_requests(self):
        # BUGFIX: the original executed models.SpiderLog.objects.create() in
        # the class body, i.e. a database write at *import* time that fired
        # even when no crawl ran. Creating the log row here ties it to an
        # actual crawl start instead.
        models.SpiderLog.objects.create()
        yield from super().start_requests()

    def parse(self, response):
        """Parse one listing page: emit a detail-page request per shop,
        then queue the next listing page (up to 50 pages total)."""
        soup = BeautifulSoup(response.text, 'html.parser')
        # All shop entries on this listing page.
        shop_list = soup.find('div', class_="shop-list J_shop-list shop-all-list").find_all('li')

        for tmp in shop_list:
            # Fresh item per shop so fields can never leak across iterations.
            item_shop = items.SpiderShopItem()
            # The shop's detail-page URL doubles as its unique id.
            detail_url = tmp.find('a', attrs={'data-hippo-type': 'shop'}).get('href')
            item_shop['shop_id'] = detail_url
            item_shop['shop_name'] = tmp.find('h4').text
            # splitlines()[2:] drops the leading label lines of the
            # "recommend" box, keeping only the recommended-dish names.
            item_shop['shop_recommend'] = tmp.find('div', class_="recommend").text.splitlines()[2:]
            item_shop['shop_type'] = tmp.find('div', class_="tag-addr").find_all('a')[0].find('span').text
            # Star rating is encoded in a CSS class suffix, e.g. "str_45" -> 4.5.
            item_shop['shop_star'] = float(
                tmp.find('div', class_="star_icon").find_all('span')[0].get('class')[1].split('_')[1]) / 10
            item_shop['shop_local'] = tmp.find('div', class_="tag-addr").find_all('a')[1].find('span').text
            item_shop['shop_review'] = tmp.find('a', attrs={'class': 'review-num'}).find('b').text
            item_shop['shop_avg_price'] = tmp.find('a', attrs={'class': 'mean-price'}).find('b').text

            # Drop any previously stored copy of this shop and its reviews.
            # NOTE(review): filtering `scenery_name` by shop_id (a URL) looks
            # like a field mix-up — confirm the intended lookup column.
            scenery = models.Shop.objects.filter(scenery_name=item_shop["shop_id"]).first()
            if scenery:
                scenery.evaluates.all().delete()
                scenery.delete()
            if not models.Shop.objects.filter(shop_id=item_shop["shop_id"]).first():
                # Request the detail page; the result lands in get_detail().
                yield scrapy.Request(detail_url, callback=self.get_detail, encoding="utf-8", dont_filter=True,
                                     meta={"item_shop": deepcopy(item_shop)})

        # Paginate: crawl at most 50 listing pages, e.g.
        # https://www.dianping.com/chongqing/ch10/p2
        if self.page_num < 50:
            self.page_num += 1
            yield scrapy.Request(url=f"{self.start_urls[0]}/{self.city_list[0]}/ch10/p{self.page_num}",
                                 callback=self.parse)

    def get_detail(self, response):
        """Handle a shop detail page: emit the shop item, then the reviews
        on this page, then requests for review pages 2-5."""
        item_shop = response.meta["item_shop"]
        item_shop["city"] = self.city_list[0]

        # Hand the shop item to the item pipelines.
        yield item_shop

        # BUGFIX: the original called self.get_evalute(response) without
        # consuming it; get_evalute is a generator, so the first page of
        # reviews was silently discarded. `yield from` actually emits them.
        yield from self.get_evalute(response)

        # Follow at most 4 additional review pages (pages 2-5).
        for idx, link in enumerate(response.xpath("//div[@class='b_paging']/a")):
            if idx >= 4:
                break
            evalute_path = link.xpath("./@href").extract_first()
            print("evalute_path:", evalute_path)
            yield scrapy.Request(evalute_path, callback=self.get_evalute, encoding="utf-8", dont_filter=True,
                                 meta={"item_shop": deepcopy(item_shop)})

    def get_evalute(self, response):
        """Extract every review on a review page and emit each one as a
        SpiderCommentItem linked back to its shop."""
        item_shop = response.meta["item_shop"]
        evalute_list = response.xpath("//ul[@id='comment_box']/li")
        if not evalute_list:
            return
        for evalute in evalute_list:
            item_evalute = items.SpiderCommentItem()
            # Review body; strip the "read all" suffix and embedded newlines.
            item_evalute["content"] = \
                evalute.xpath("./div[1]/div[1]/div[@class='e_comment_content']").xpath('string(.)').extract()[
                    0].replace(
                    "阅读全部", "").replace("\n", "").replace("\r", "")
            item_evalute['send_time'] = evalute.xpath("./div[1]/div[1]/div[5]/ul/li[1]/text()").extract_first()
            item_evalute['user_name'] = evalute.xpath("./div[2]/div[2]/a/text()").extract_first()
            # Rating is encoded in a CSS class suffix, e.g. "star_45" -> "45".
            score = evalute.xpath("./div[1]/div[1]/div[2]/span/span/@class").extract_first()
            score = score.split("star_")[-1] if score else None
            # BUGFIX: the original stored the missing-rating default under a
            # different field ('score') than real ratings ('shop_star'),
            # leaving 'shop_star' unset for unrated reviews — unify on one
            # field. TODO confirm 'score' is not consumed downstream.
            item_evalute['shop_star'] = score if score else 0
            # BUGFIX: the original read item_shop['shop_url'], which parse()
            # never sets — a guaranteed KeyError on a scrapy Item. shop_id
            # holds the shop's URL, so use it. TODO confirm field intent.
            item_evalute['shop_url'] = item_shop['shop_id']
            # Hand the review to the item pipelines.
            yield item_evalute
            # NOTE(review): linking via .instance assumes DjangoItem and that
            # both rows exist in the DB at this point — confirm the pipeline
            # actually saved them before this M2M add runs.
            item_shop.instance.evaluates.add(item_evalute.instance)
