# -*- coding: utf-8 -*-
import scrapy
import re
import json
import time
from ..items import ScrapyporjectItem
from ..items import DoubanFilmdetailItem


class DoubanSpider(scrapy.Spider):
    """Crawl Douban Movie Top250.

    ``parse`` walks each list page and yields one item per film (name,
    lead actors, rating), plus a Request to the film's detail page;
    ``get_detail`` merges the forwarded item with the film summary.
    """
    name = 'douban'
    # Must be bare domain names, not URLs: the original value
    # 'https//movie.douban.com' (malformed scheme included) could never
    # match, so the offsite middleware would drop follow-up requests.
    allowed_domains = ['movie.douban.com']
    start_urls = ['http://movie.douban.com/top250?start=0&filter=']
    num = 0  # paging offset; advanced by 25 per parsed list page

    def parse(self, response):
        """Parse one Top250 list page.

        Yields:
            - one ScrapyporjectItem per film on the page (exactly once;
              the original code yielded each item twice, duplicating
              every record in the pipeline),
            - a Request per film to its detail page, carrying the item
              in ``meta["info"]`` for ``get_detail`` to extend,
            - a Request for the next list page, until a page with no
              film entries is reached.
        """
        div_list = response.xpath("//div[@class='info']")
        if not div_list:
            # Past the last page (Top250 ends at start=225): stop paginating.
            return
        for node in div_list:
            # Item object rather than a plain dict: field names are validated.
            item = ScrapyporjectItem()
            filmname = node.xpath("./div/a/span[1]/text()").extract_first()
            # extract_first(default="") avoids IndexError on films whose
            # credits line is missing.
            detail = node.xpath("./div[2]/p/text()").extract_first(default="")
            if "主演" in detail:
                actor = re.findall("主演?:? ?(.*)", detail)[0]
            else:
                actor = '无'
            point = node.xpath("./div/div/span/text()").extract_first()
            item["电影名"] = filmname
            item["主演"] = actor
            item["评分"] = point
            yield item
            detail_url = node.xpath("./div/a/@href").extract_first()
            if detail_url:
                # meta travels with the response, linking parse to get_detail.
                yield scrapy.Request(detail_url, callback=self.get_detail,
                                     dont_filter=True, meta={"info": item})

        # Hand the next list page's URL back to the engine.
        self.num += 25
        page_url = 'http://movie.douban.com/top250?start={}&filter='.format(self.num)
        yield scrapy.Request(page_url, callback=self.parse, dont_filter=True)

    def get_detail(self, response):
        """Parse a film detail page and yield the combined record.

        Joins ALL summary text nodes (the original took only the first
        node and then iterated its characters, silently dropping the
        rest of multi-paragraph summaries).
        """
        parts = response.xpath(
            '//div[@id="link-report"]//span[@property="v:summary"]/text()'
        ).extract()
        summary = "".join(part.strip() for part in parts)
        data = DoubanFilmdetailItem()
        # meta followed the response here: recover the list-page item and
        # extend it with the summary.
        data.update(response.meta["info"])
        data['简介'] = summary
        yield data
