# 1. Crawl the category-navigation info of sina.com.cn with the Scrapy framework
# -*- coding: utf-8 -*-
import scrapy
from sina.items import SinaItem
from scrapy import Selector

class SinanavspiderSpider(scrapy.Spider):
    """Spider that crawls the Sina News navigation page and yields one
    ``SinaItem`` per third-level (leaf) category link.

    The page is a three-level hierarchy:
    section (``div.section``) -> subsection (``div.clearfix``) -> ``li`` links.
    """

    name = 'sinaNavSpider'
    allowed_domains = ['news.sina.com.cn']
    start_urls = ['http://news.sina.com.cn/guide/']

    def parse(self, response):
        """Parse the navigation page and yield populated items.

        :param response: the HTTP response for ``start_urls[0]``.
        :yields: ``SinaItem`` with ``firstClass``, ``secondClass``,
            ``className`` and ``url`` fields.

        Bug fix: the original built and printed each item but never
        yielded it, so nothing ever reached the item pipeline.
        """
        # Scrapy normally only calls parse() on 2xx responses, but keep
        # the original guard as an early return.
        if response.status != 200:
            return
        # Loop over first-level categories.
        for section in response.css("div.section"):
            # First-level category name; strip the decorative dot marker
            # that the regex capture may include.
            first_name = "".join(section.re(r'<h2.*?>(.*?)</h2>'))
            first_name = first_name.replace('<code class="s_dot">·</code>', '')
            # Loop over second-level categories inside this section.
            for sub in section.css("div.clearfix"):
                matches = sub.re(r'<h3.*?>(?:<a.*?>)?(.*?)(?:</a>)?</h3>')
                # Bug fix: fall back to "" instead of leaving an empty
                # list when the heading regex finds nothing.
                second_name = matches[0] if matches else ""
                # Loop over third-level categories: one <li> per link.
                for li in sub.css("li"):
                    item = SinaItem()
                    item["firstClass"] = first_name
                    item["secondClass"] = second_name
                    item["className"] = li.css("a::text").extract_first()   # category name
                    item["url"] = li.css("a::attr(href)").extract_first()   # category link
                    yield item
