import re
import scrapy
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import parsePublishTime, compTime
from tuiqiuspiderPro.utils.saveImge import SaveImg
from tuiqiuspiderPro.utils.base import delattrs


class WangyisportSpider(scrapy.Spider):
    """Spider for NetEase sports (sports.163.com).

    Crawls the "china" and "world" football sections, follows each
    category tab, and yields one item per recent article.
    """

    name = 'wangyi'
    allowed_domains = ['sports.163.com']
    start_urls = [
        'https://sports.163.com/china/',
        'https://sports.163.com/world/'
    ]

    # Run of CJK characters; compiled once instead of per article.
    # Used to pull the author name out of the article info line.
    _CJK_RE = re.compile(r'[\u4E00-\u9FA5]+')

    def parse(self, response, **kwargs):
        """Collect the tab-nav category links of a section page and dispatch
        each one to the matching section parser.

        The shared item and the tag (category) name travel to the callback
        through ``Request.meta``.
        """
        item = TuiqiuspiderproItem()
        nav_name = response.xpath('.//a[@ne-role="tab-nav"]/text()').extract()
        tags = [str(name).replace('\n', "").strip() for name in nav_name if str(name).replace('\n', "").strip()]
        urls = response.xpath('.//a[@ne-role="tab-nav"]/@href').extract()
        tag_urls = dict(zip(tags, urls))
        for tag, url in tag_urls.items():
            if not url:
                continue
            if response.url.endswith('world/'):
                yield scrapy.Request(url=url, callback=self.__parse_global_tag,
                                     meta={'item': item, "tag": tag})
            if response.url.endswith('china/'):
                yield scrapy.Request(url=url, callback=self.__parse_china_tag,
                                     meta={'item': item, "tag": tag})

    def __parse_global_tag(self, response):
        """Parse a world-football category page and request every article
        link found on it.

        The two layouts ("news_title" vs "news_item") depend on which
        category the page belongs to.
        """
        item = response.meta['item']
        tag = response.meta['tag']
        urls = []
        if tag in ["英超", '西甲', '意甲']:
            urls = response.xpath('.//div[@class="news_title"]/h3/a/@href').extract()
        if tag in ['国家队', '欧联', '欧冠', '德甲']:
            urls = response.xpath('.//div[@class="news_item"]/h3/a/@href').extract()
        for url in urls:
            yield scrapy.Request(url=url, callback=self.__article,
                                 meta={'item': item, "tag": tag})

    def __parse_china_tag(self, response):
        # TODO: Chinese-football category pages are not parsed yet.
        pass

    def __article(self, response):
        """Extract one article page into the shared item.

        Only articles whose publish time passes ``compTime`` are yielded;
        older articles are silently skipped.
        """
        item = response.meta['item']
        article_info = response.xpath('.//div[@class="post_info"]/text()').extract_first()
        if not article_info:
            # Layout changed or the info bar is missing: nothing to extract.
            self.logger.warning("no post_info found at %s", response.url)
            return
        publish_time = parsePublishTime(article_info)
        self.logger.debug("publish_time: %s", publish_time)
        if compTime(publish_time):
            item['tag'] = response.meta['tag']
            item['title'] = response.xpath('.//h1[@class="post_title"]/text()').extract_first()
            item['publish_time'] = publish_time
            # Assumes the author name is the second run of CJK characters in
            # the info line — TODO confirm against live pages. Guard against
            # pages where fewer runs are present.
            cjk_runs = self._CJK_RE.findall(article_info)
            item['author'] = cjk_runs[1] if len(cjk_runs) > 1 else None
            item['content'] = response.xpath('.//div[@class="post_body"]').extract()
            item['source_href'] = response.url
            item['source'] = "网易新闻"
            yield item

    def save_img(self, content):
        """Localise the images of an article body.

        Strips HTML comments, downloads every ``<img>`` via ``SaveImg`` and
        rewrites its ``src`` to the local copy, then returns the cleaned
        HTML string (anchor attributes neutralised, fullwidth spaces
        removed).
        """
        soup = BeautifulSoup(content, 'lxml')
        # Remove HTML comments before touching the markup.
        for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
            comment.extract()
        for img in soup.find_all("img"):
            # Source pages use protocol-relative URLs; prefix a scheme.
            img['src'] = SaveImg("http:" + img['src'])
        return re.sub('<a[^>]+>', '<a>', str(delattrs(soup)).replace(chr(0x3000), ""))
