import scrapy
from ..items import CbcItem
from selenium import webdriver
from selenium.webdriver import FirefoxOptions
from threading import Thread, Lock
class ChinaSpider(scrapy.Spider):
    """Crawl CBC search results for 'china' and scrape 2020 news articles.

    Walks the result cards on the search page, follows every 2020 article
    under /news, and yields a ``CbcItem`` with title, brief, link,
    publish date and the joined article body text.
    """

    name = 'china'
    # Bare domain only. The original value was a full URL
    # ('https://www.cbc.ca/'), which never matches a request's host, so
    # Scrapy's OffsiteMiddleware silently dropped every followed request.
    allowed_domains = ['cbc.ca']
    start_urls = ['https://www.cbc.ca/search?q=china&section=news']

    def __init__(self, *args, **kwargs):
        # Forward args/kwargs so Scrapy can inject crawler settings; the
        # class-level `name` already identifies the spider, no need to
        # hardcode it here.
        super().__init__(*args, **kwargs)
        # Headless Firefox, kept for the (currently disabled) "load more"
        # button-clicking flow; released in closed() below.
        options = FirefoxOptions()
        options.headless = True
        self.driver = webdriver.Firefox(options=options)

    def closed(self, reason):
        """Scrapy shutdown hook: quit the WebDriver so no browser leaks."""
        self.driver.quit()

    def parse(self, response):
        """Parse search-result cards and follow 2020 /news article links."""
        for card in response.xpath("//div[@class='contentListCards']/a"):
            href = card.xpath("./@href").extract_first()
            published = card.xpath(
                "./div/div/div[@class='card-content-bottom']/div/div/time/@datetime"
            ).extract_first()
            if not href or not published:
                # Card without a link or timestamp — skip instead of
                # crashing on None[:4] as the original did.
                continue
            # ISO-style datetime string: YYYY-MM-...
            year, month = published[:4], published[5:7]
            # NOTE(review): `int(month) >= 1` is always true for a valid
            # month, so this effectively filters on year == 2020 only —
            # confirm whether a real month cutoff was intended.
            if int(year) == 2020 and int(month) >= 1 and href[1:5] == 'news':
                item = CbcItem()
                item['title'] = card.xpath(
                    "./div/div/div[@class='card-content-top']/h3/text()"
                ).extract_first()
                item['brief'] = card.xpath(
                    "./div/div/div[@class='card-content-top']/div[@id='d-card-']/text()"
                ).extract_first()
                # 'herf' is a typo for 'href', but it is the field name
                # declared on CbcItem — kept for pipeline compatibility.
                item['herf'] = 'https://www.cbc.ca' + href
                item['publish_date'] = published
                yield scrapy.Request(
                    item['herf'],
                    callback=self.parse_detail,
                    meta={'item': item},
                    dont_filter=True,
                )

    def parse_detail(self, response):
        """Extract the article body paragraphs and emit the finished item."""
        item = response.meta['item']
        paragraphs = response.xpath(
            "//div[@class='story']/span/p/text()"
        ).extract()
        item['content'] = ''.join(paragraphs)
        yield item