# -*- coding: utf-8 -*-
# 使用Scrapy爬虫框架爬取新浪网的分类导航信息。网址：http://news.sina.com.cn/guide/
# 第九周作业1)
# 班级：Python五期
# 学员：李子坚
import scrapy


class SinanewsSpider(scrapy.Spider):
    """Spider that scrapes the category-navigation page of Sina News.

    Crawls http://news.sina.com.cn/guide/ and prints a numbered outline of
    sections, sub-sections and link items. Output is print-based only; the
    spider yields no items and follows no further links.
    """

    name = 'sinanews'
    # Use the parent domain so the start URL's host (news.sina.com.cn) is
    # covered; the previous value 'www.sina.com.cn' would have caused any
    # followed request to be dropped as off-site.
    allowed_domains = ['sina.com.cn']
    start_urls = ['http://news.sina.com.cn/guide/']  # Sina category-navigation URL

    def parse(self, response):
        """Parse the navigation page and print a hierarchical outline.

        Args:
            response: scrapy Response for the guide page.
        """
        print("正在爬取新浪新闻分类导航信息，请稍候……")
        print("=" * 50)
        # Top-level sections of the navigation page, numbered from 1.
        for i, section in enumerate(response.css("div.section"), start=1):
            sec_head = section.xpath("./h2")[0]
            # Some <h2> headings contain a <code class="s_dot"> separator;
            # in that case re-assemble the text pieces around it via regex.
            if sec_head.xpath("./code"):
                head_list = sec_head.re('<h2 class="tit01".*?>(.*?)<code class="s_dot">(.*?)</code>(.*?)</h2>')
                head_text = "".join(head_list)
            else:
                # default="" avoids printing the literal "None" when the
                # heading has no direct text node.
                head_text = sec_head.xpath("./text()").extract_first(default="")
            print("{:<10}{}".format(str(i), head_text))
            print("=" * 50)

            # Sub-sections within this section.
            for j, subsec in enumerate(section.css("div.clearfix"), start=1):
                # The <h3> title may wrap its text in <a>, <span>, or neither.
                subhead = subsec.xpath("./h3")[0]
                if subhead.xpath("./a"):
                    subhead_text = subhead.xpath("./a/text()").extract_first(default="")
                elif subhead.xpath("./span"):
                    subhead_text = subhead.xpath("./span/text()").extract_first(default="")
                else:
                    subhead_text = subhead.xpath("./text()").extract_first(default="")
                print("{:<10}{}".format("{}.{}".format(i, j), subhead_text))
                print("-" * 50)

                # Leaf link items of this sub-section.
                for k, item in enumerate(subsec.css("ul.list01 li"), start=1):
                    item_text = item.xpath("./a/text()").extract_first(default="")
                    print("{:<10}{}".format("{}.{}.{}".format(i, j, k), item_text))

                print("-" * 50)

            print("=" * 50)

        print("新浪新闻分类导航信息爬取完成！")