# -*- coding: utf-8 -*-
import scrapy
from douban.agent import agent
import random
from douban import settings
import json
from douban.items import DoubanItem






class DbanSpider(scrapy.Spider):
    """Crawl three Douban gallery topics (id 84, 25, 933) through the
    mobile rexxar JSON API and yield one ``DoubanItem`` per entry.

    Flow: ``start_requests`` -> ``parse_urls`` (schedules all paged API
    requests) -> ``parse_detail`` (parses one JSON page into items).
    """
    name = 'dban'
    # allowed_domains = ['douban.com']
    # start_urls = ['http://douban.com/']
    custom_settings = {
        "DEFAULT_REQUEST_HEADERS": {
            # NOTE(review): random.choice runs once, at class-definition
            # time, so a single User-Agent is reused for the whole crawl.
            # Per-request rotation needs a downloader middleware.
            "User-Agent": random.choice(agent),
            # 'Host': 'https://www.douban.com',
        },
    }

    # Paged JSON endpoint; {topic} is the gallery topic id, {page} the
    # start offset (20 entries per page).
    API_URL = ('https://m.douban.com/rexxar/api/v2/gallery/topic/{topic}'
               '/items?sort=hot&start={page}&count=20&status_full_text=1'
               '&ck=DMov')

    # (topic id, highest start offset) pairs, determined empirically.
    TOPICS = ((84, 1020), (25, 1660), (933, 380))

    def start_requests(self):
        """Entry point: fetch the gallery overview page once."""
        base_url = 'https://www.douban.com/gallery/all?column_id=18'
        yield scrapy.Request(base_url, callback=self.parse_urls)

    def parse_urls(self, response):
        """Schedule every paged API request, highest offset first.

        The three former copy-pasted loops are collapsed into one; the
        request order (topic 84, then 25, then 933, offsets descending,
        stopping before 0) is identical to the original.
        """
        for topic, max_start in self.TOPICS:
            for page in range(max_start, 0, -20):
                url = self.API_URL.format(topic=topic, page=page)
                yield scrapy.Request(url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Parse one JSON API page; yield an item per note/status entry.

        Fixes: parameter was misspelled ``reponse``; a missing or null
        ``items`` key no longer raises ``TypeError``.
        """
        payload = json.loads(response.text)
        for entry in payload.get('items') or []:
            target = entry.get('target') or {}
            kind = target.get('type')
            if kind == 'note':          # diary entry
                yield self._note_item(entry, target)
            elif kind == 'status':      # broadcast entry
                yield self._status_item(entry, target)

    def _note_item(self, entry, target):
        """Build a DoubanItem from a diary ('note') entry.

        Intermediate dicts default to ``{}`` so a missing author/topic
        yields ``None`` fields instead of crashing the spider.
        """
        author = target.get('author') or {}
        item = DoubanItem()
        item["abstract"] = entry.get('abstract', '')            # summary text
        item["title"] = target.get('title', '')
        item["update_time"] = target.get('update_time')         # last update
        item["read_count"] = target.get('read_count', 0)        # reads
        item["likers_count"] = target.get('likers_count', 0)    # likes
        item["timeline_share_count"] = target.get('timeline_share_count', 0)
        item["comments_count"] = target.get('comments_count', 0)
        item["db_type"] = target.get('type')                    # entry type
        item["author_uid"] = author.get('id')                   # author info
        item["author_name"] = author.get('name')
        item["author_url"] = author.get('url')
        item['topic_name'] = (entry.get('topic') or {}).get('name')  # topic
        return item

    def _status_item(self, entry, target):
        """Build a DoubanItem from a broadcast ('status') entry."""
        status = target.get('status') or {}
        author = status.get('author') or {}
        item = DoubanItem()
        item["update_time"] = status.get('create_time')         # created at
        item["likers_count"] = status.get('likers_count', 0)    # likes
        item["timeline_share_count"] = status.get('reshares_count', 0)
        item["comments_count"] = status.get('comments_count', 0)
        item["db_type"] = target.get('type')                    # entry type
        item["author_uid"] = author.get('id')                   # author info
        item["author_name"] = author.get('name')
        item["author_url"] = author.get('url')
        item['topic_name'] = (entry.get('topic') or {}).get('name')  # topic
        item["abstract"] = entry.get('abstract', '')            # summary text
        item["title"] = ''
        # Kept as '' (not 0) for downstream compatibility, even though the
        # other count fields are ints — the original stored '' here.
        item["read_count"] = ''
        return item