from datetime import datetime
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapytest.items import ScrapytestItem


class CodingceSpider(CrawlSpider):
    """Crawl news.163.com and extract article pages matched by ``rules``.

    Follows links of the form ``*.163.com/DD/DDDD/DD/*.html`` and hands
    each matched article page to :meth:`parse_item`.
    """

    name = 'codingce'
    allowed_domains = ['163.com']

    start_urls = ['http://news.163.com/']

    # BUG FIX: the rule previously used callback='parse'.  CrawlSpider
    # relies on its own ``parse`` method internally to drive rule
    # processing, so pointing a Rule at ``parse`` silently breaks link
    # following (the Scrapy docs warn against exactly this).  The
    # callback is renamed to the conventional ``parse_item``; the
    # inherited ``parse`` remains available, so callers are unaffected.
    rules = (
        Rule(
            LinkExtractor(allow=r'.*\.163\.com/\d{2}/\d{4}/\d{2}/.*\.html'),
            callback='parse_item',
            follow=True,
        ),
    )

    def parse_item(self, response):
        """Extract one news article from *response* into a ScrapytestItem.

        Returns ``None`` (item dropped) when the page body is shorter
        than 100 characters or the expected publication timestamp is
        missing, instead of raising on malformed pages.
        """
        item = ScrapytestItem()

        content = '<br>'.join(response.css('.post_content p::text').getall())
        if len(content) < 100:
            # Too short to be a real article (nav / ad / stub page).
            return None

        title = response.css('h1::text').get()

        # The last breadcrumb entry is the article category; guard
        # against pages without a breadcrumb instead of raising
        # IndexError on an empty list.
        crumbs = response.css('.post_crumb a::text').getall()
        category = crumbs[-1] if crumbs else None

        # Timestamp like "2021-01-02 03:04:05" embedded in the info
        # bar.  Guard both a missing node (``.get()`` returns None) and
        # a non-matching pattern instead of raising AttributeError.
        time_text = response.css('.post_info::text').get() or ''
        match = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', time_text)
        if match is None:
            return None
        timestamp = datetime.fromisoformat(match.group())

        # Route debug output through Scrapy's logging configuration
        # instead of bare print() calls.
        self.logger.debug(
            'category=%s title=%s timestamp=%s url=%s',
            category, title, timestamp, response.url,
        )

        # NOTE(review): these helpers are not defined in this file —
        # presumably added elsewhere (mixin or monkey-patch); confirm
        # they exist, otherwise this raises AttributeError at runtime.
        self.get_title(response, item)
        self.get_time(response, item)
        self.get_source(response, item)
        self.get_source_url(response, item)
        self.get_text(response, item)
        self.get_url(response, item)
        return item

