#!/usr/bin/env python  # encoding: utf-8
'''
  @author: HJW
  @license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
  @contact: hejunwang01@126.com
  @file: sc_spider.py
  @time: 2022/1/17 0017 下午 22:39
  @desc:
  '''

import scrapy
from my_scrapy.items import MyScrapyItem

import  logging

logger = logging.getLogger(__name__)

class ScSpider(scrapy.Spider):
    """Spider for book.zongheng.com.

    Crawls the chapter-list page of one book, yields a follow-up request
    per chapter link, and emits one ``MyScrapyItem`` per chapter with its
    name, URL, and body text.
    """

    name = 'myspider'
    allowed_domains = ["book.zongheng.com"]
    start_urls = [
        "http://book.zongheng.com/showchapter/1124510.html",
    ]

    def parse(self, response, **kwargs):
        """Parse the chapter-list page.

        For every ``<li>`` holding a chapter link, build a partially
        filled item and schedule a request for the chapter detail page,
        carrying the item along in ``Request.meta``.
        """
        for chapter in response.xpath("//li"):
            item = MyScrapyItem()
            item['name'] = chapter.xpath('./a/text()').extract_first()
            item['url'] = chapter.xpath('./a/@href').extract_first()

            # <li> elements without an <a> child yield url=None;
            # scrapy.Request(None) raises ValueError, so skip them.
            if not item['url']:
                continue

            # Resolve relative hrefs against the page URL; absolute
            # URLs pass through urljoin unchanged.
            item['url'] = response.urljoin(item['url'])

            # Lazy %s formatting avoids building the message unless
            # this log level is actually enabled.
            logger.warning("scheduling chapter item: %s", item)
            yield scrapy.Request(
                item['url'],
                callback=self.detail_parse,
                meta={'item': item},
            )

    def detail_parse(self, response):
        """Parse a chapter detail page and emit the completed item.

        The partially filled item arrives via ``response.meta``; the
        chapter body is stored as a list of paragraph text strings.
        """
        item = response.meta['item']
        # NOTE(review): this position-based XPath is brittle — confirm it
        # still matches the current reader-page layout.
        item['content'] = response.xpath(
            '//*[@id="readerFt"]/div/div[1]/div[5]/p/text()'
        ).extract()
        yield item
