import scrapy
from datetime import datetime
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import compTime
from tuiqiuspiderPro.utils.saveImge import SaveImg


class WhoscoredSpider(scrapy.Spider):
    """Crawl the es.whoscored.com editorial index and yield article items."""

    name = 'whoscored'
    allowed_domains = ['es.whoscored.com']
    start_urls = ['https://es.whoscored.com/Editorial']

    def parse(self, response, **kwargs):
        """Collect article links from the editorial index page and schedule
        one request per article.

        Bug fix: the original constructed scrapy.Request objects without
        yielding them, so no article page was ever fetched.
        """
        articleUrls = response.xpath('.//ul[@class="ws-editorial-list-items"]/a/@href').extract()
        for articleUrl in articleUrls:
            # response.urljoin resolves site-absolute hrefs such as
            # "/Articles/..." against the scheme+host. The original
            # concatenated them onto ".../Editorial", producing broken
            # ".../Editorial/Articles/..." URLs.
            yield scrapy.Request(url=response.urljoin(articleUrl), callback=self.parseArticle)

    def parseArticle(self, response):
        """Parse one article page into a TuiqiuspiderproItem.

        Only articles whose normalized publish time passes compTime()
        are emitted.
        """
        item = TuiqiuspiderproItem()
        ts = response.xpath('.//span[@class="post-date"]/text()').extract_first()
        publish_time = self.parseTime(ts)
        if compTime(publish_time):
            item['title'] = response.xpath('.//div[@class="post-header"]/h1/a/text()').extract_first().strip()
            # Index 1: the second <a> in the post stamp holds the author name
            # (index 0 is presumably another link — verify against the page).
            item['author'] = response.xpath('.//div[@class="post-stamp"]/a/text()').extract()[1].strip()
            # Raw HTML of the article body; saveImage() is available to strip
            # comments and rehost inline images if needed.
            item['content'] = response.xpath('.//span[@class="post-text"]').extract_first()
            item['source_href'] = response.url
            item['source'] = "whoscored"
            item['publish_time'] = publish_time
            item['tag'] = "足球综合"
            yield item

    def parseTime(self, timeData):
        """Convert a "<weekday>, <Month>. <D> <YYYY> <HH:MM>" stamp into
        the canonical "YYYY-MM-DD HH:MM:SS" string.

        NOTE(review): %B matches the full month name and is locale-dependent;
        confirm the site's month names match the process locale.
        """
        ts = timeData.split(',')[1].strip()
        timearry = datetime.strptime(ts, "%B. %d %Y %H:%M")
        return datetime.strftime(timearry, "%Y-%m-%d %H:%M:%S")

    def saveImage(self, content):
        """Strip HTML comments from article markup and rewrite every <img>
        src through SaveImg (presumably rehosting the image — verify).

        Bug fix: the original wrote ``comment.extract`` without calling it,
        inside a discarded list comprehension, so comments were never removed.
        Returns the modified BeautifulSoup document.
        """
        soup = BeautifulSoup(content, 'lxml')
        comments = soup.find_all(text=lambda text: isinstance(text, Comment))
        for comment in comments:
            comment.extract()
        for imageTag in soup.find_all('img'):
            imageTag['src'] = SaveImg(imageTag['src'])
        return soup
