import scrapy
import os
from sinaNews.items import SinanewsItem

class SinaSpider(scrapy.Spider):
    """Crawl sina.com.cn's main navigation bar, follow the domestic
    ("国内") news channel, and yield one item per article carrying the
    channel title/URL, the article list, the headline, and the joined
    body text. Also creates one local output directory per channel.
    """
    name = 'sina'
    # Fixed typo: was 'sina.com.con', which would make Scrapy's offsite
    # middleware drop every request to the real domain.
    allowed_domains = ['sina.com.cn']
    start_urls = ['https://sina.com.cn']

    def parse(self, response):
        """Extract channel titles and URLs from the main nav bar.

        Every 4th nav entry (i == 0, 4) carries its title inside a
        nested <b> tag; the others carry bare anchor text. For the
        "国内" channel a follow-up request is scheduled; a directory is
        created for every channel title.
        """
        print("对加载到Lxml中的信息进行查找及过滤")
        items = []

        # Titles wrapped in <b>, bare-text titles, and every nav href,
        # all in document order.
        parTitleb = response.xpath('//div[@class="main-nav"]/div/ul/li/a/b/text()').extract()
        parTitle = response.xpath('//div[@class="main-nav"]/div/ul/li/a/text()').extract()
        parUrl = response.xpath('//div[@class="main-nav"]/div/ul/li/a/@href').extract()

        print(str(parTitle))
        print(str(parUrl))
        print(str(parTitleb))

        j = 0  # running index into the bare-text title list
        print(len(parUrl))
        for i in range(0, 5):
            item = SinanewsItem()
            # The original derived this test by splitting str(i / 4) on
            # the decimal point; plain modular arithmetic is equivalent:
            # i == 0 and i == 4 take their title from the <b> list.
            if i % 4 == 0:
                item['parTitle'] = parTitleb[i // 4]
            else:
                item['parTitle'] = parTitle[j]
                j += 1
            # URL belonging to this nav entry.
            item['parUrl'] = parUrl[i]
            # Follow only the domestic-news channel.
            if "国内" in str(item['parTitle']):
                print("获取国内地址：" + str(item['parUrl']))
                yield scrapy.Request(url=item['parUrl'], meta={'meta_1': item},
                                     callback=self.second_parse, dont_filter=True)
            items.append(item)

        print("获取所有的类型为：" + str(items))
        # Create one output directory per channel title; exist_ok makes
        # the exists-then-create sequence race-free.
        for urlName in items:
            filename = 'D:/Desktop/news/' + urlName['parTitle']
            os.makedirs(filename, exist_ok=True)

    def second_parse(self, response):
        """Parse the domestic-news channel page: collect article titles
        and links, then request the first five article pages.
        """
        print("获取国内新闻类型下的新闻列表")

        subTitle = response.xpath('//div[@class="left-content-1 marBot"]/div/ul/li/a/text()').extract()
        subUrl = response.xpath('//div[@class="left-content-1 marBot"]/div/ul/li/a/@href').extract()

        # Carry the channel item forward, enriched with the list data.
        item = response.meta['meta_1']
        item["subTitle"] = subTitle
        item['subUrl'] = subUrl
        for i in range(0, 5):
            print("获取新闻详细信息" + subUrl[i])
            yield scrapy.Request(url=subUrl[i], meta={'meta_2': item},
                                 callback=self.three_parse, dont_filter=True)

    def three_parse(self, response):
        """Parse an article page: pull the headline and body paragraphs,
        join the paragraphs into a single string, and yield the item.
        """
        item = response.meta['meta_2']

        print("获取国内新闻详细信息")
        title = response.xpath('//h1[@class="main-title"]/text()').extract()
        content_list = response.xpath('//div[@id="article"]/p/text()').extract()
        # str.join replaces the original quadratic += loop.
        content = "".join(content_list)
        # Drop ideographic spaces; turn non-breaking spaces into newlines.
        content = content.replace('\u3000', "").replace("\xa0", "\n")
        print(str(title))
        print(str(content))

        item["nameTitle"] = title
        item["content"] = content

        yield item
