# -*- coding: utf-8 -*-
import scrapy,os
from sinaNews.items import SinanewsItem

class SinaSpider(scrapy.Spider):
    """Crawl news.sina.com.cn: category guide -> sub-category pages -> articles.

    Crawl flow:
      parse        -- read the category guide page, mirror the category tree
                      under ./Data/, request each sub-category page.
      second_parse -- collect article links (*.shtml) on a sub-category page,
                      request each article.
      detail_parse -- extract headline and body text into the item.
    """

    name = 'sina'
    allowed_domains = ['sina.com.cn']
    start_urls = ['https://news.sina.com.cn/guide/']

    def parse(self, response):
        """Parse the category guide page.

        Collects parent (top-level) category titles/URLs and sub-category
        titles/URLs, creates ./Data/<parent>/<sub> directories, and yields a
        Request for every sub-category whose URL belongs to a parent category.
        """
        items = []
        # Top-level category titles and URLs.
        parent_titles = response.xpath('//h3[@class="tit02"]/a/text()').extract()
        parent_urls = response.xpath('//h3[@class="tit02"]/a/@href').extract()
        # Sub-category titles and URLs.
        sub_urls = response.xpath('//div[@id="tab01"]//li/a/@href').extract()
        sub_titles = response.xpath('//div[@id="tab01"]//li/a/text()').extract()

        for parent_title, parent_url in zip(parent_titles, parent_urls):
            parent_dir = "./Data/" + parent_title
            # exist_ok avoids the race between exists() and makedirs().
            os.makedirs(parent_dir, exist_ok=True)

            # The guide page mixes http/https links, so accept either scheme.
            prefixes = (parent_url, parent_url.replace('http:', 'https:'))

            for sub_url, sub_title in zip(sub_urls, sub_titles):
                # A sub-category belongs to this parent when its URL starts
                # with the parent URL (startswith accepts a prefix tuple).
                if not sub_url.startswith(prefixes):
                    continue
                sub_dir = parent_dir + '/' + sub_title
                os.makedirs(sub_dir, exist_ok=True)
                item = SinanewsItem()
                item['parentTitle'] = parent_title
                item['parentUrls'] = parent_url
                item['subUrls'] = sub_url
                item['subTitle'] = sub_title
                item['subFilename'] = sub_dir
                items.append(item)

        for item in items:
            yield scrapy.Request(url=item['subUrls'], meta={'meta_1': item},
                                 callback=self.second_parse, dont_filter=False)

    def second_parse(self, response):
        """Parse a sub-category page and request every article link on it.

        BUG FIX: the original filter
            endswith('.shtml') and startswith(http) or startswith(https)
        parsed as ``(A and B) or C`` -- any https link matching the parent URL
        was accepted even without the '.shtml' suffix. Parenthesized correctly.
        """
        meta_1 = response.meta['meta_1']
        prefixes = (meta_1['parentUrls'],
                    meta_1['parentUrls'].replace('http:', 'https:'))

        for son_url in response.xpath('//a/@href').extract():
            if not (son_url.endswith('.shtml') and son_url.startswith(prefixes)):
                continue
            item = SinanewsItem()
            item['parentTitle'] = meta_1['parentTitle']
            item['parentUrls'] = meta_1['parentUrls']
            item['subUrls'] = meta_1['subUrls']
            item['subTitle'] = meta_1['subTitle']
            item['subFilename'] = meta_1['subFilename']
            item['sonUrls'] = son_url
            # BUG FIX: the original re-yielded the whole accumulated item list
            # from inside the link loop, requesting earlier articles once per
            # later link; each article is now requested exactly once.
            yield scrapy.Request(url=son_url, meta={'meta_2': item},
                                 callback=self.detail_parse, dont_filter=False)

    def detail_parse(self, response):
        """Parse an article page: fill in headline and body text, yield the item."""
        item = response.meta['meta_2']

        # Sina has used several article templates; try both headline variants.
        head1 = response.xpath('//h1[@class="main-title"]/text()')
        head2 = response.xpath('//h1[@id="artibodyTitle"]/text()')
        # BUG FIX: the original stored raw Selector objects in item['head'];
        # extract the text so the item carries plain strings.
        item['head'] = head1.extract() + head2.extract()

        # Fallback chain over the known body-layout variants: the first
        # selector that matches anything wins (same semantics as the
        # original `or` chain, but each XPath is only evaluated if needed).
        paragraphs = (
            response.xpath('//div[@class="article"]/p/text()').extract()
            or response.xpath('//div[@id="artibody"]/p/text()').extract()
            or response.xpath('//div[@class="article"]//p/text()').extract()
            or response.xpath('//div[@class="article"]//div/text()').extract()
            or response.xpath('//div[@class="article"]/div/text()').extract()
        )
        # str.join replaces the quadratic `content += part` loop.
        item['content'] = ''.join(paragraphs)

        yield item


