import scrapy
import re
from doubandetail.items import DoubandetailItem
class DbSpider(scrapy.Spider):
    """Spider that crawls the Douban movie chart and follows each entry
    to its detail page, yielding one DoubandetailItem per movie."""

    name = "db"
    # allowed_domains intentionally disabled: detail links may redirect off
    # movie.douban.com and the offsite filter would drop those requests.
    # allowed_domains = ["movie.douban.com"]
    start_urls = ["https://movie.douban.com/chart"]

    def parse(self, response):
        """Parse the chart page and schedule one request per movie detail page."""
        tr_list = response.xpath('//div[@class="indent"]/div/table/tr[@class="item"]')
        for tr in tr_list:
            # URL of this movie's detail page.
            detail_url = tr.xpath('./td[1]/a/@href').extract_first()
            # extract_first() returns None for a row without a link;
            # scrapy.Request(None) would raise ValueError, so skip those rows.
            if detail_url:
                yield scrapy.Request(detail_url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract movie fields from a detail page into a DoubandetailItem.

        Requests carry the headers configured in settings.py by default.
        """
        item = DoubandetailItem()
        item['name'] = response.xpath('//*[@id="content"]/h1/span[1]/text()').extract_first()  # movie title
        item['director'] = response.xpath('//*[@id="info"]/span[1]/span[2]/a/text()').extract_first()  # director
        item['screenwriter'] = ''.join(response.xpath('//*[@id="info"]/span[2]/span[2]//text()').extract())  # screenwriter(s)
        item['to_star'] = ''.join(response.xpath('//*[@id="info"]/span[3]/span[2]//text()').extract())  # main cast
        item['type'] = '/'.join(response.xpath('//span[@property="v:genre"]//text()').extract())  # genres joined with '/'
        # Use the spider logger instead of print() so output respects LOG_LEVEL.
        self.logger.debug('scraped item: %s', item)
        return item