# -*- coding: utf-8 -*-
import scrapy

class NewsUrlSpider(scrapy.Spider):
    """Crawl cignacmb.com and yield article URLs from paginated listings.

    Flow:
      * ``start_requests`` fetches the HTTPS front page.
      * ``parse`` reads the menu links and enumerates listing pages
        ``<BASE_URL><section>page-<num>.html`` for pages 1..MAX_PAGES.
      * ``parse1`` extracts article links from each listing page and
        yields one dict item per link.

    Each request/item carries a category code ``n`` in ``meta``:
    1 for ``/baoxianzhishi/``, 3 for ``/faq/``, 2 for everything else.
    """

    name = 'news_url_new'
    allowed_domains = ['cignacmb.com']

    # Base used when building absolute listing/article URLs.
    # NOTE(review): listing pages are fetched over plain http while the
    # start page uses https — preserved from the original code.
    BASE_URL = 'http://www.cignacmb.com'
    # Highest listing page number probed per section.
    MAX_PAGES = 50
    # Menu path -> category code; anything not listed falls back to
    # DEFAULT_CODE (this replaces the original triplicated if/elif loops).
    SECTION_CODES = {
        '/baoxianzhishi/': 1,
        '/faq/': 3,
    }
    DEFAULT_CODE = 2

    def start_requests(self):
        """Kick off the crawl from the site's front page."""
        yield scrapy.Request(
            url='https://www.cignacmb.com',
            callback=self.parse,
            dont_filter=True,
        )

    def parse(self, response):
        """Enumerate paginated listing URLs for every menu section.

        For each menu link found on the front page, request listing
        pages 1..MAX_PAGES and tag each request with the section's
        category code in ``meta['n']``.
        """
        sections = response.xpath(
            "//div[@class='cc-seo']//ul[@class='item-menu']/li/a/@href"
        ).extract()
        self.logger.debug('menu sections found: %s', sections)
        for section in sections:
            n = self.SECTION_CODES.get(section, self.DEFAULT_CODE)
            # Blindly probe a fixed page range; dont_filter keeps scrapy
            # from deduplicating the generated URLs.
            for page in range(1, self.MAX_PAGES + 1):
                yield scrapy.Request(
                    url='{}{}page-{}.html'.format(self.BASE_URL, section, page),
                    meta={"n": n},
                    callback=self.parse1,
                    dont_filter=True,
                )

    def parse1(self, response):
        """Yield one item per article link on a listing page.

        Items are plain dicts: ``url`` is the absolute article URL,
        ``n`` is the category code propagated via request meta.
        """
        n = response.meta["n"]
        links = response.xpath(
            "//ul[@class='ul-list05']/li/div[@class='left']/a/@href"
        ).extract()
        for link in links:
            yield dict(
                url=self.BASE_URL + link,
                n=n,
            )
