from ..getTime import get_time
from ..createTimeList import create_time_list

import scrapy
from scrapy import Selector, Request
from scrapy.http import HtmlResponse
from ..items import NewsItem
from pyquery import PyQuery as pq



class ChinanewsSpider(scrapy.Spider):
    """Scrape the chinanews.com scroll-news listing for the current date and
    follow every listed article to capture its full HTML.

    Emitted items are `NewsItem` instances with fields:
    title, type, url, datetime, html.
    """

    name = "chinanews"
    allowed_domains = ["chinanews.com"]

    def __init__(self, *args, **kwargs):
        # Call the base initializer so Scrapy's spider wiring (custom
        # spider arguments, logger, crawler attachment) works correctly.
        super().__init__(*args, **kwargs)
        # Shape of the returned list is defined by the project helper
        # get_time(); here we use index 0 and 1 to build the listing URL
        # and 1 and 2 to build the time filter list — TODO confirm exact
        # element meanings against getTime.py.
        self.data_time_list = get_time()
        self.request_url = (
            "https://www.chinanews.com/scroll-news/"
            f"{self.data_time_list[0]}/{self.data_time_list[1]}/news.shtml"
        )
        self.time_list = create_time_list(self.data_time_list[1], self.data_time_list[2])

    def start_requests(self):
        """Start crawling at the date-specific scroll-news listing page."""
        yield Request(url=self.request_url)

    def parse(self, response):
        """Parse the listing page (one <li> per news entry).

        For each entry, extract title, category, link and publish time,
        then follow the link so parse_detail can attach the article HTML.
        """
        sel = Selector(response)
        list_items = sel.xpath('/html/body/div[3]/div[1]/div[2]/ul/li')
        for list_item in list_items:
            news_item = NewsItem()
            news_item['title'] = list_item.xpath('.//div[2]/a/text()').extract_first()
            news_item['type'] = list_item.xpath('.//div[1]/a/text()').extract_first()
            news_item['url'] = list_item.xpath('.//div[2]/a/@href').extract_first()
            news_item['datetime'] = list_item.xpath('.//div[3]/text()').extract_first()
            # Entries without a link cannot be followed; skip them instead
            # of issuing a request for a malformed URL.
            if not news_item['url']:
                continue
            # response.urljoin handles absolute, protocol-relative ("//...")
            # and site-relative hrefs; plain string concatenation with the
            # domain broke on the first two forms and on None.
            yield Request(
                url=response.urljoin(news_item['url']),
                callback=self.parse_detail,
                cb_kwargs={'item': news_item},
            )

    def parse_detail(self, response, **kwargs):
        """Attach the article page's raw HTML to the item and emit it."""
        news_item = kwargs['item']
        news_item['html'] = response.text
        yield news_item

    
