import scrapy
from copy import deepcopy
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import compTime
from tuiqiuspiderPro.utils.saveImge import SaveImg


class HupuSpider(scrapy.Spider):
    """Spider that scrapes recent football news articles from hupu.com.

    Crawls the "china" (Chinese Super League) and "soccer" (general
    football) listing pages, follows each article link, and yields one
    populated ``TuiqiuspiderproItem`` per sufficiently recent article
    (recency decided by ``compTime``).
    """

    name = 'hupu'
    allowed_domains = ['hupu.com']
    start_urls = ['https://voice.hupu.com/china', 'https://voice.hupu.com/soccer']

    def parse(self, response, **kwargs):
        """Parse a listing page: tag the section, then follow each article link."""
        item = TuiqiuspiderproItem()
        # The two sections are mutually exclusive, so use elif.
        if response.url.endswith("china"):
            item['tag'] = "中超"
        elif response.url.endswith("soccer"):
            item['tag'] = "足球综合"
        urls = response.xpath('.//div[@class="list-hd"]/h4/a/@href').extract()
        for url in urls:
            # deepcopy: each concurrent request must carry its own item
            # instance, otherwise callbacks would share (and clobber) state.
            yield scrapy.Request(url=url, callback=self.parse_article, meta={"item": deepcopy(item)})

    def parse_article(self, response):
        """Parse one article page and yield a populated item if it is recent.

        Guards against ``extract_first()`` returning ``None`` (layout change
        or anti-bot page), which previously crashed the callback with
        ``AttributeError``.
        """
        item = response.meta['item']
        pub_time = response.xpath('.//span[@id="pubtime_baidu"]/text()').extract_first()
        if pub_time is None:
            # Selector matched nothing — page layout changed or we were
            # served a non-article page; skip instead of crashing.
            self.logger.warning("missing publish time on %s", response.url)
            return
        pub_time = pub_time.strip()
        if not compTime(pub_time):
            return
        title = response.xpath('.//h1[@class="headline"]/text()').extract_first()
        item['title'] = title.strip() if title else ""
        # Editor line looks like "编辑：姚凡"; partition() yields "" for the
        # author when the separator is absent (split("：")[1] would raise).
        editor_raw = response.xpath('.//span[@id="editor_baidu"]/text()').extract_first() or ""
        item['author'] = editor_raw.partition("：")[2]
        item['content'] = response.xpath('.//div[@class="artical-content"]').extract_first()
        # NOTE(review): image localization (BeautifulSoup + SaveImg rewriting
        # of <img src> inside the content) was disabled in the original code
        # and the dead commented-out implementation has been removed.
        item['publish_time'] = pub_time
        item['source'] = "虎扑"
        item['source_href'] = response.url
        yield item


if __name__ == '__main__':
    # Ad-hoc sanity check of the editor-line parsing used in parse_article:
    # the author is the text after the full-width colon "：".
    author = '编辑：姚凡'
    print(f"name:{author.split('：')[1]}")