# -*- coding: utf-8 -*-
import scrapy
import os


class ToScrapeCSSSpider(scrapy.Spider):
    """Crawl kekenet.com VOA broadcast listing pages and yield article text.

    Listing pages link to article pages through ``ul#menu-list li h2 a``;
    each linked page is requested and parsed by the same callback. Article
    bodies are read from ``span#article_eng``, falling back to
    ``div#article`` when fewer than two matches are found. Extracted
    paragraphs are concatenated and yielded as ``{"text": ...}`` items.
    """

    name = "BBC"
    start_urls = [
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_1.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_2.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_3.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_4.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_5.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_6.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_7.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_8.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_9.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_11.shtml',
                    'http://www.kekenet.com/broadcast/Normal/Jan19voa/List_10.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_1.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_2.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_3.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_4.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_5.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_6.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_7.shtml',
                    'http://www.kekenet.com/broadcast/Normal/july19voa/List_8.shtml',
                ]

    def parse(self, response):
        """Follow article links on listing pages and extract article text.

        Yields:
            scrapy.Request for every article link found in ``ul#menu-list``,
            and ``{"text": str}`` items for pages with extractable content.
        """
        # Listing pages: queue every article link for crawling.
        for menu in response.css("ul#menu-list"):
            for url in menu.css("li h2 a::attr(href)").getall():
                yield scrapy.Request(response.urljoin(url))

        # Article pages: prefer the English article span; fall back to the
        # generic article div when fewer than two matches are present.
        article = response.css("span#article_eng")
        if len(article) < 2:
            article = response.css("div#article")

        # Keep only substantive paragraphs (drop short stray text nodes).
        paragraphs = [p for p in article.css('p::text').getall() if len(p) >= 10]
        text = "".join(paragraphs)
        # BUG FIX: str.replace returns a new string; the original code
        # discarded the results, so \r, \n and double quotes were never
        # actually removed from the yielded text. Rebind the chained calls.
        text = text.replace("\r", "").replace("\n", "").replace("\"", "")
        if len(text) > 10:
            yield {"text": text}
        
