import scrapy
import json
import re
from urllib.parse import quote
from scrapy.http import Request
from v_baike_demo.items import VBaikeDemoItem
from bs4 import BeautifulSoup


# Spider for Baidu "v-baike" (video encyclopedia).
# Fetches two pages of the list API, then follows each entry to its detail
# page and fills a VBaikeDemoItem with id, title, photo URL and a summary.
class ScrapyStudyVbaike(scrapy.spiders.Spider):
    # Spider name, used on the command line: `scrapy crawl vbaike`.
    name = 'vbaike'

    def start_requests(self):
        """Seed the crawl: request the first two pages of the list API.

        Called by the engine to obtain the initial requests; each response
        is routed to :meth:`parse`.
        """
        # The two original seed URLs differed only in the page number.
        url_template = 'https://baike.baidu.com/api/vbaike/list?count=8&page={}&keyWord='
        for page in (1, 2):
            yield Request(callback=self.parse, url=url_template.format(page))

    def parse(self, response):
        """Parse the JSON list response and schedule one detail request per entry.

        For every entry a partially-filled VBaikeDemoItem is created and
        passed along via ``meta`` so :meth:`detail_content` can complete it.
        Assumes the API body is a JSON array of objects with 'itemId',
        'title' and 'titlePhoto' keys — TODO confirm against the live API.
        """
        entries = json.loads(response.body)
        for entry in entries:
            item_id = entry['itemId']
            title = entry['title']
            # The detail URL must use the *original* title, percent-encoded.
            url_str = quote(title)
            # Strip characters that are illegal in Windows file names so the
            # title can safely be used as a file name later.
            # NOTE(review): this covers fullwidth '？' but not ASCII '?' or
            # the closing quote '”' — confirm whether those should be added.
            title = re.sub(r'[？\\*|“<>:/]', '', title)
            title_photo_url = entry['titlePhoto']
            self.log(f'{item_id}---{title}-----{title_photo_url}')
            item = VBaikeDemoItem()
            item['title'] = title
            item['item_id'] = item_id
            item['title_photo_url'] = title_photo_url
            sub_url = 'https://baike.baidu.com/vbaike/' + url_str + '/' + str(item_id)
            # Hand the detail page back to the engine; the item travels in meta.
            yield Request(callback=self.detail_content, url=sub_url, meta={'item': item, 'type': 'sub'})

    def detail_content(self, response):
        """Extract heading and first paragraph from a detail page.

        Completes the item created in :meth:`parse` with a
        ``'<h1 text>:<p text>'`` summary, or the fallback string when the
        page lacks the expected structure.
        """
        item = response.meta['item']
        soup = BeautifulSoup(response.body, 'html5lib')
        if response.meta['type'] == 'sub':
            try:
                div_tag = soup.find('div', {'class': 'content'})
                p_content = div_tag.find('p')
                h_content = div_tag.find('h1')
                item['content'] = h_content.string + ':' + p_content.string
            # AttributeError: a tag was not found (find() returned None).
            # TypeError: Tag.string is None when a tag holds nested markup,
            # making the '+' concatenation fail — previously uncaught.
            except (AttributeError, TypeError):
                item['content'] = '无数据'
        return item
