import re
import scrapy
from bs4 import BeautifulSoup, Comment
from tuiqiuspiderPro.items import TuiqiuspiderproItem
from tuiqiuspiderPro.utils.parsertime import compTime, parseTime
from tuiqiuspiderPro.utils.saveImge import SaveImg
from tuiqiuspiderPro.utils.base import delattrs


class SinaSpider(scrapy.Spider):
    """Crawl football news articles from sports.sina.com.cn.

    Starts from the domestic ("china") and international ("global")
    section index pages, follows each league tab to its listing page,
    extracts article links per league, and finally parses each article
    into a ``TuiqiuspiderproItem``.
    """

    name = 'sina'
    allowed_domains = ['sports.sina.com.cn']
    start_urls = [
        'http://sports.sina.com.cn/china/',
        'http://sports.sina.com.cn/global/'
    ]

    def parse(self, response, **kwargs):
        """Dispatch the two section index pages to their listing parsers.

        Yields one Selenium-rendered request per league tab; the tab
        label travels in ``meta['tag']`` and a shared item instance in
        ``meta['item']``.
        """
        item = TuiqiuspiderproItem()
        if response.url.endswith('china/'):
            hrefs = response.xpath('.//ul[@class="clearfix"]/li/a/@href').extract()
            labels = response.xpath('.//ul[@class="clearfix"]/li/a/text()').extract()
            # dict() keeps only the last href for a duplicated label,
            # matching the original de-duplication behavior.
            for tag, url in dict(zip(labels, hrefs)).items():
                yield scrapy.Request(
                    url=url,
                    callback=self._china_article_urls,
                    meta={'middleware': 'SeleniumMiddleware', 'item': item, 'tag': tag},
                )
        if response.url.endswith('global/'):
            # Only the first 12 tabs are real league links on this page.
            hrefs = response.xpath('.//ul[@class="udv-clearfix"]/li/a/@href').extract()[:12]
            labels = response.xpath('.//ul[@class="udv-clearfix"]/li/a/text()').extract()[:12]
            for tag, url in dict(zip(labels, hrefs)).items():
                yield scrapy.Request(
                    url=url,
                    callback=self._global_article_urls,
                    meta={'middleware': 'SeleniumMiddleware', 'item': item, 'tag': tag},
                )

    def _china_article_urls(self, response):
        """Extract article URLs from a domestic-league listing page.

        Each league's listing page has a different markup, so the XPath
        is selected by the tab label carried in ``meta['tag']``.
        """
        item = response.meta['item']
        tag = response.meta['tag']
        article_urls = []
        if tag == '中超':
            article_urls = response.xpath('.//a[@class="feed-card-txt-detail"]/@href').extract()
        elif tag == '国足':
            article_urls = response.xpath('.//h2[@class="item-title "]/a/@href').extract()
        elif tag == '女足':
            article_urls = response.xpath('.//div[@class="c_t_impnews"]/ul/li/a/@href').extract()
        elif tag == '亚冠':
            article_urls = response.xpath('.//div[@class="news-item  img-news-item"]/h2/a/@href').extract()
        elif tag == '中甲':
            article_urls = response.xpath('.//dt[@class="news-tit ellipsis"]/a/@href').extract()
        for article_url in article_urls:
            yield scrapy.Request(url=article_url, callback=self._article,
                                 meta={'item': item, 'tag': tag})

    def _global_article_urls(self, response):
        """Extract article URLs from an international-league listing page."""
        item = response.meta['item']
        tag = response.meta['tag']
        article_urls = []
        if tag in ["欧冠", "英超", "西甲", "意甲"]:
            article_urls = response.xpath('.//div[@class="feed-card-item"]/h2/a/@href').extract()
        elif tag == "世界杯":
            # BUGFIX: find_all() returns a ResultSet, which has no
            # ``.attrs`` — the old ``find_all('a').attrs['href']`` raised
            # AttributeError. Collect each anchor's href instead, and
            # tolerate the container being absent.
            soup = BeautifulSoup(response.text, 'lxml')
            wrap = soup.find('div', {'class': 'tianyi__wrap-a'})
            if wrap is not None:
                article_urls = [a['href'] for a in wrap.find_all('a', href=True)]
        elif tag == '德甲':
            article_urls = response.xpath('.//li[@class="news"]/a/@href').extract()
        for article_url in article_urls:
            yield scrapy.Request(url=article_url, callback=self._article,
                                 meta={'item': item, 'tag': tag})

    def _article(self, response):
        """Parse a single article page into an item.

        Only yields the item when ``compTime`` accepts the publish time
        (presumably a recency filter — confirm in utils.parsertime).
        """
        item = response.meta['item']
        title = response.xpath(".//h1/text()").extract_first()
        # NOTE(review): extract_first() may return None on unexpected
        # markup; parseTime is assumed to tolerate that — confirm.
        publish_time = parseTime(response.xpath('.//span[@class="date"]/text()').extract_first())
        author = response.xpath('.//div[@class="date-source"]/a/text()').extract_first()
        if compTime(publish_time):
            item['author'] = author
            item['title'] = title
            item['publish_time'] = publish_time
            item['tag'] = response.meta['tag']
            # item['content'] = self.save_img(response.xpath('.//div[@class="article"]').extract_first())
            item['content'] = response.xpath('.//div[@class="article"]').extract_first()
            item['source_href'] = response.url
            yield item

    def save_img(self, content):
        """Localize images inside an article's HTML fragment.

        Strips HTML comments, downloads every ``<img>`` via ``SaveImg``
        and rewrites its ``src`` to the saved path, then returns the
        cleaned markup with ideographic spaces (U+3000) removed.
        """
        soup = BeautifulSoup(content, 'lxml')
        # Plain loop instead of a side-effect-only list comprehension.
        for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
            comment.extract()
        # src=True skips src-less <img> tags, which previously raised
        # KeyError. Sina serves protocol-relative URLs ("//..."), hence
        # the "http:" prefix — TODO confirm for all image hosts.
        for img in soup.find_all("img", src=True):
            img['src'] = SaveImg("http:" + img['src'])
        return str(delattrs(soup)).replace(chr(0x3000), "")

