

import datetime
import scrapy
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import compTime
from tuiqiuspiderPro.utils.saveImge import SaveImg
from tuiqiuspiderPro.utils.base import delattrs


class TransfermarktSpider(scrapy.Spider):
    """Crawl the transfermarkt.com news archive and yield article items.

    ``parse`` walks an archive listing page and schedules one request per
    article; ``_articel`` turns an article page into a
    ``TuiqiuspiderproItem``, but only when ``compTime`` accepts the
    article's publish time (i.e. the article is recent enough).
    """

    name = 'transfermarkt'
    allowed_domains = ['transfermarkt.com']
    start_urls = ['https://www.transfermarkt.com/aktuell/newsarchiv/aktuell/new/datum/2021-05-09']

    def parse(self, response, **kwargs):
        """Extract article links from the archive page and schedule them.

        Uses ``response.urljoin`` so relative links resolve against the
        actual (https) response URL instead of a hard-coded ``http://``
        host, and absolute links pass through unchanged.
        """
        urls = response.xpath('.//div[@class="weiterlesen"]/a/@href').extract()
        for url in urls:
            yield scrapy.Request(url=response.urljoin(url), callback=self._articel)

    def _articel(self, response):
        """Parse a single article page into a ``TuiqiuspiderproItem``.

        Yields nothing when the publish-time header is missing or when
        ``compTime`` rejects the parsed timestamp.
        """
        item = TuiqiuspiderproItem()
        ts = response.xpath('.//div[@class="news-header"]/span/text()').extract_first()
        if ts is None:
            # Page layout changed or the header is missing; nothing to parse.
            return
        publish_time = self.parser_ts(ts)
        if compTime(str(publish_time)):
            # default='' guards against a missing <h1>, so .strip() cannot
            # raise AttributeError on None.
            item['title'] = response.xpath(
                './/div[@class="news-content"]/h1/text()'
            ).extract_first(default='').strip()
            # NOTE(review): this stores the raw second <span> element
            # (markup included), not its text content — presumably
            # intentional; confirm against downstream consumers.
            spans = response.xpath('.//div[@class="news-header"]/span').extract()
            item['author'] = spans[1] if len(spans) > 1 else ''
            content = response.xpath('.//div[@class="news-content"]').extract_first()
            item['content'] = delattrs(content)
            item['source'] = "transfermarkt"
            item['source_href'] = response.url
            # Reuse the timestamp parsed above instead of parsing it twice.
            item['publish_time'] = publish_time
            item['tag'] = "足球综合"
            yield item

    def saveImg(self, content):
        """Strip HTML comments from *content* and rewrite every ``<img>``
        ``src`` through ``SaveImg``; return the modified soup tree.

        NOTE(review): not referenced anywhere in this file — ``_articel``
        uses ``delattrs`` instead. Confirm whether this is dead code.
        """
        soup = BeautifulSoup(content, 'lxml')
        # Remove HTML comments so they do not leak into stored content.
        # (Plain loop instead of a throwaway list comprehension.)
        for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
            comment.extract()
        for image in soup.find_all('img'):
            image['src'] = SaveImg(image['src'])
        return soup

    def parser_ts(self, ts):
        """Parse a transfermarkt timestamp such as ``'09.05.2021 - 14:30'``
        into a naive ``datetime``.

        Surrounding whitespace is stripped first — scraped text nodes
        frequently carry it, and ``strptime`` would otherwise raise
        ``ValueError``.
        """
        return datetime.datetime.strptime(ts.strip(), '%d.%m.%Y - %H:%M')
