import scrapy
from bs4 import BeautifulSoup
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import compTime
from tuiqiuspiderPro.utils.saveImge import SaveImg


class LeisuSpider(scrapy.Spider):
    """Crawl the leisu.com football news catalog and scrape each linked article.

    ``parse`` walks the catalog page and schedules one request per article;
    ``_article`` extracts the fields and yields the populated item when the
    publish time passes the ``compTime`` recency check.
    """

    name = 'leisu'
    allowed_domains = ['leisu.com']
    start_urls = ['https://www.leisu.com/news/catalog-zuqiu/']

    def parse(self, response, **kwargs):
        """Extract article links from the catalog and request each one.

        Bug fix: the original created a single item before the loop and
        shared it across every request via ``meta`` — concurrent callbacks
        would clobber each other's fields. A fresh item is now created per
        request. Relative hrefs are resolved with ``response.urljoin``.
        """
        for link in response.xpath('.//div[@class="news-list"]/a'):
            url = link.xpath('@href').extract_first()
            if not url:
                # Skip anchors without an href instead of crashing Request().
                continue
            yield scrapy.Request(
                response.urljoin(url),
                callback=self._article,
                meta={'item': TuiqiuspiderproItem()},
            )

    def _article(self, response):
        """Populate the item from an article page and yield it if recent.

        Items whose publish time fails ``compTime`` are silently dropped
        (nothing is yielded for them).
        """
        item = response.meta['item']
        # Publish-time text is whitespace-split; tokens [1] and [2] are taken
        # as date and time, with "/" date separators normalised to "-".
        # (Assumes a "<label> YYYY/MM/DD HH:MM"-shaped string — TODO confirm
        # against the live page.)
        publish_time = str(
            response.xpath('.//div[@class="publish-time"]/text()').extract_first()
        ).split()
        item['publish_time'] = (publish_time[1] + " " + publish_time[2]).replace("/", '-')
        if compTime(item['publish_time']):
            item['tag'] = response.xpath('.//div[@class="crumbs"]/a[3]/text()').extract_first()
            item['title'] = response.xpath('.//h1[@class="title"]/text()').extract_first()
            # Author line is split on the full-width colon; the part after it
            # is kept (presumably "作者：<name>" — verify against the page).
            item['author'] = str(
                response.xpath('.//div[@class="author"]/text()').extract_first()
            ).split("：")[1]
            item['content'] = response.xpath('.//div[@class="article-content"]').extract_first()
            item['source'] = "雷速体育"
            item['source_href'] = response.url
            yield item
