import scrapy
import re
from solidot.items import NewsItem


class QuotesSpider(scrapy.Spider):
    """Spider that scrapes news stories from the BBC and Solidot front pages.

    Solidot stories are parsed directly off the front page; BBC headlines are
    followed to their article pages before an item is built.

    NOTE(review): the class name is a leftover from the Scrapy tutorial;
    renaming it could break external references, so it is kept as-is.
    """

    name = 'news'
    start_urls = ['http://www.bbc.com', 'https://www.solidot.org']

    def start_requests(self):
        """Dispatch each start URL to its site-specific parse callback."""
        # Map each known start URL to the parser that understands its markup.
        parsers = {
            'https://www.solidot.org': self.solidot_parse,
            'http://www.bbc.com': self.bbc_parse,
        }
        for url in self.start_urls:
            callback = parsers.get(url)
            if callback is not None:
                yield scrapy.Request(url=url, callback=callback, meta={'url': url})

    def solidot_parse(self, response):
        """Parse the Solidot front page, yielding one NewsItem per story block."""
        for info in response.css('div.block_m'):
            time_texts = info.css('div.talk_time::text').extract()
            # The third text node holds the date; guard against a layout
            # change instead of raising IndexError mid-crawl.
            raw_date = time_texts[2] if len(time_texts) > 2 else ''
            # str.replace suffices here; the original compiled a regex
            # just to delete spaces.
            news_date = raw_date.replace(' ', '')

            item = NewsItem()
            item['news_from'] = response.meta['url']
            # NOTE(review): this records the front-page URL, not the story's
            # own link — presumably intentional, but verify against the
            # pipeline's expectations.
            item['news_url'] = response.meta['url']
            item['news_date'] = news_date
            item['news_title'] = info.css('div.bg_htit h2 a::text').extract_first()
            item['news_info'] = info.css('div.p_mainnew').extract()

            yield item

    def bbc_parse(self, response):
        """Parse the BBC front page and follow every headline link."""
        top_url = response.meta['url']
        content = response.css('div.module__content')
        for href in content.css('a.block-link__overlay-link::attr(href)').extract():
            yield response.follow(url=href, callback=self.news_of_bbc,
                                  meta={'top_url': top_url})

    def news_of_bbc(self, response):
        """Parse a single BBC article page into a NewsItem.

        Followed links that are not article pages (no story headline) are
        skipped. The original code returned the meaningless value 1 here;
        a Scrapy callback should simply return nothing.
        """
        title = response.css('h1.story-body__h1::text').extract_first()
        if not title:
            return

        item = NewsItem()
        # Record the originating front page, consistent with solidot_parse
        # (this was commented out in the original despite top_url being
        # passed in meta for exactly this purpose).
        item['news_from'] = response.meta['top_url']
        item['news_url'] = response.url
        # Store the title as a string via extract_first(), consistent with
        # solidot_parse (the original stored the raw extract() list).
        item['news_title'] = title
        item['news_info'] = response.css('div.story-body__inner').extract()

        yield item