from copy import deepcopy
import scrapy
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.base import delattrs
from tuiqiuspiderPro.utils.saveImge import SaveImg


class Zhibo8Spider(scrapy.Spider):
    """Spider for zhibo8.cc.

    Reads the mobile site's 24-hour hot-news JSON feed, keeps only football
    ("zuqiu") entries among the first 100 items, and follows each one to
    scrape the full article page.
    """

    name = 'zhibo8'
    allowed_domains = ['zhibo8.cc']
    start_urls = ['https://m.zhibo8.cc/json/hot/24hours.htm']

    def parse(self, response, **kwargs):
        """Parse the JSON hot-news feed and schedule one article request
        per football entry.

        A fresh item is created for every entry so concurrent callbacks
        never share mutable state (the previous single-item-plus-deepcopy
        pattern is no longer needed).
        """
        news = response.json()['news'][:100]
        for entry in news:
            if entry['type'] != "zuqiu":
                continue
            item = TuiqiuspiderproItem()
            item['publish_time'] = entry['updatetime']
            tags = entry['label'].split(',')
            # Prefer a tag more specific than the generic "足球" (football).
            # Fall back to the first tag when it is the ONLY one — the old
            # unconditional tags[1] raised IndexError for labels like "足球".
            if tags[0] != '足球' or len(tags) < 2:
                item['tag'] = tags[0]
            else:
                item['tag'] = tags[1]
            item['title'] = entry['oTitle']
            item['author'] = entry['from_name']
            if entry['from_url']:
                item['source_href'] = entry['from_url']
            else:
                item['source_href'] = "https://news.zhibo8.cc/zuqiu" + entry['url']
            yield scrapy.Request(
                url="http://news.zhibo8.cc" + entry['url'],
                callback=self._article,
                # item is per-entry now, so it can be passed directly.
                meta={"item": item},
            )

    def _article(self, response):
        """Extract the article body for the item stashed in request meta
        and yield the completed item."""
        item = response.meta['item']
        # NOTE(review): extract_first() is None when the div is missing —
        # presumably delattrs tolerates None; confirm against utils.base.
        content = response.xpath('.//div[@id="signals"]').extract_first()
        item['content'] = delattrs(content)
        item['source'] = "直播8"
        yield item

    def downImage(self, content):
        """Strip HTML comments from *content* and localize every <img> src.

        Each image URL is handed to SaveImg (protocol-relative "//" URLs are
        first prefixed with "http:") and the tag's src is rewritten to the
        value SaveImg returns.  Returns the modified BeautifulSoup tree.
        """
        soup = BeautifulSoup(content, 'lxml')
        # Drop HTML comments so they don't leak into the saved content.
        # (`string=` replaces the deprecated `text=` keyword in bs4.)
        for comment in soup.find_all(string=lambda s: isinstance(s, Comment)):
            comment.extract()
        for img in soup.find_all("img"):
            src = img.get('src')
            if not src:
                # <img> without a src previously raised KeyError — skip it.
                continue
            if src.startswith('//'):
                src = "http:" + src
            img['src'] = SaveImg(src)
        return soup