import scrapy
import time,datetime,json
from ..items import ActivityItem
import copy

# Fetch the list of creation activities
class ActivitySpider(scrapy.Spider):
    """Crawl the smzdm creation-activity list (paginated JSON endpoint) and
    follow each activity's detail page to pick up its community post link.

    Items are handed to ``ActivityListPipeline`` (see ``custom_settings``).
    """

    name = 'activity'
    allowed_domains = ['smzdm.com']
    start_urls = ['https://zhiyou.smzdm.com/chuangzuohuodong/ajax_processing_list?page=1&type=article&last_type=article']
    custom_settings = {
        'ITEM_PIPELINES': {'zdmHotBtwDate.pipelines.ActivityListPipeline': 400}
    }

    second_page = 2  # next list page to enqueue
    max_page = 3     # crawl list pages up to this number (inclusive)
    url = 'https://zhiyou.smzdm.com/chuangzuohuodong/ajax_processing_list?page=%s&type=article&last_type=article'
    # start_date = '2020-06-01'
    # end_date = '2021-06-30' # 2021-03-11
    # NOTE(review): `data` is never used in this spider and is a shared
    # class-level mutable list; kept in case a pipeline reads it — confirm
    # before removing.
    data = []

    def parse(self, response):
        """Parse one JSON page of the activity list.

        Yields one ``scrapy.Request`` per activity (detail page, handled by
        :meth:`parse_detail`) plus follow-up requests for the remaining list
        pages.
        """
        activity_data = json.loads(response.text)
        activity_list = activity_data['data']['list']

        for entry in activity_list:
            # Build a fresh item per entry instead of mutating one shared
            # item and deep-copying it into meta: each pending request now
            # owns its item, so no iteration can clobber another's fields.
            pipe_item = ActivityItem()
            pipe_item['activity_id'] = entry['id']
            pipe_item['activity_name'] = entry['activity_name']
            pipe_item['activity_url'] = entry['activity_url']
            pipe_item['img'] = entry['img']
            pipe_item['start_time'] = entry['start_time']
            pipe_item['end_time'] = entry['end_time']
            pipe_item['status'] = entry['status_text']

            yield scrapy.Request(entry['activity_url'],
                                 callback=self.parse_detail,
                                 meta={'item': pipe_item})

        # Enqueue pages 2..max_page. `second_page` is spider-level state, so
        # even though parse() runs once per page, the follow-up requests are
        # only generated on the first pass (the counter is already past
        # max_page afterwards).
        while self.second_page <= self.max_page:
            yield scrapy.Request(self.url % self.second_page, callback=self.parse)
            self.second_page += 1

    def parse_detail(self, response):
        """Extract the community post link from an activity detail page.

        The link is the href of an anchor whose text looks like ``#…#``
        (optionally followed by trailing whitespace). Missing links become
        an empty string so the item stays pipeline-safe.
        """
        item = response.meta['item']
        # Raw string keeps the `\s` escape literal for the EXSLT regex and
        # avoids Python's invalid-escape-sequence warning.
        post_link = response.xpath(
            r"//a[re:match(text(),'^#(.+)#$|^#(.+)#\s+$')]/@href"
        ).extract_first()
        item['post_link'] = post_link if post_link else ''
        yield item
        