import logging
import scrapy
from copy import deepcopy
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import compTime
from tuiqiuspiderPro.utils.saveImge import SaveImg


class DongqiudiSpider(scrapy.Spider):
    """Spider that crawls football news from dongqiudi.com.

    Reads league tab listings from the JSON API, filters out stale /
    video / "mini_top" entries, then follows each article's ``share``
    URL and extracts title, author and cleaned HTML content into a
    :class:`TuiqiuspiderproItem`.
    """

    name = 'dongqiudi'
    allowed_domains = ['dongqiudi.com']
    start_urls = [
        'https://api.dongqiudi.com/app/tabs/web/3.json',  # Premier League
        'https://api.dongqiudi.com/app/tabs/web/4.json',  # Serie A
        'https://api.dongqiudi.com/app/tabs/web/5.json',  # La Liga
        'https://api.dongqiudi.com/app/tabs/web/6.json',  # Bundesliga
        'https://api.dongqiudi.com/app/tabs/web/56.json',  # Chinese Super League
    ]

    def parse(self, response, *args):
        """Parse one league-tab JSON listing and schedule article requests.

        Articles are skipped when ``compTime`` rejects their publish time,
        when they belong to the "mini_top" channel, or when they are videos.
        """
        resp = response.json()
        for article in resp['articles']:
            if compTime(article['published_at']) and article['channel'] != "mini_top" and article['is_video'] is False:
                # Build a fresh item per article so concurrent requests
                # never share mutable state.
                item = TuiqiuspiderproItem()
                item['tag'] = resp['label']
                item['publish_time'] = article['published_at']
                try:
                    # BUG FIX: the original deep-copied the item into meta and
                    # then overwrote meta['item'] with the shared `item`
                    # object, so every in-flight request clobbered the same
                    # item. Pass an independent copy and nothing else.
                    yield scrapy.Request(
                        url=article['share'],
                        callback=self._article,
                        meta={'item': deepcopy(item)},
                    )
                except Exception as e:
                    logging.error("[+]:列表获取失败 %s,url:%s, error msg:%s" % (article['share'], resp['url'], e))

    def _article(self, response):
        """Extract title/author/content from an article page and yield the item."""
        item = response.meta['item']
        soup = BeautifulSoup(response.text, 'lxml')
        # Strip HTML comments so they never leak into the stored content.
        # (`find_all(string=...)` replaces the deprecated `findAll(text=...)`.)
        for comment in soup.find_all(string=lambda text: isinstance(text, Comment)):
            comment.extract()
        try:
            article = soup.find("div", {"class": "news-left"})
            tips = article.find('p', {"class": "tips"}).find_all('span')
            # The second <span> holds the author when present; otherwise
            # fall back to the first (empty `tips` raises and is logged below).
            item['author'] = tips[1].get_text() if len(tips) > 1 else tips[0].get_text()
            content = self.__delattrs(article.find('div', {'class': 'con'}))
            item['title'] = response.xpath('.//h1[@class="news-title"]/text()').extract_first()
            # item['content'] = str(self.__saveImg(content)).replace("data-src", "src")
            # Lazy-loaded images keep the real URL in data-src; promote it to src.
            item['content'] = str(content).replace("data-src", "src")
            item['source'] = "懂球帝"
            item['source_href'] = response.url
            yield item
        except Exception as e:
            # BUG FIX: `item['title']` raised KeyError here whenever the
            # failure happened before the title was assigned, crashing the
            # error handler itself — use .get() so logging can never raise.
            logging.error("[+]:%s 资讯获取失败 %s, error msg :%s" % (item.get('title', ''), response.url, e))

    def __delattrs(self, soup):
        """Strip the attributes of every tag except <img> (src must survive)."""
        for descendant in soup.descendants:
            if descendant.name and descendant.attrs and descendant.name != 'img':
                # BUG FIX: assign an empty dict instead of `del`: deleting
                # the attribute leaves the Tag without `.attrs`, which bs4's
                # serializer reads when the tree is rendered via str().
                descendant.attrs = {}
        return soup

    def __saveImg(self, soup):
        """Download each image via SaveImg and rewrite its link in place.

        NOTE(review): currently unused — see the commented-out call in
        _article; kept so local image hosting can be re-enabled.
        """
        for img in soup.find_all('img'):
            # Drop any query string before downloading the original image.
            img_link = SaveImg(str(img.attrs['orig-src']).split('?')[0])
            del img.attrs['orig-src']
            img.attrs['data-src'] = img_link
        return soup
