# -*- coding: utf-8 -*-
import re
import scrapy


class NewsUrlSpider(scrapy.Spider):
    """Crawl the cignacmb.com sitemap index and emit classified article URLs.

    ``parse`` reads the top-level sitemap index and schedules a request for
    each child sitemap it lists; ``parse1`` extracts the article URLs from a
    child sitemap and yields each matching one as a dict item::

        {'url': <article url>, 'n': <category>}

    where ``n`` is 1 for /baoxianzhishi/ (insurance knowledge), 2 for
    /baoxiananli/ (case studies), and 3 for /faq/ articles. Only 2018/2019
    articles are kept; everything else in the sitemap is skipped.
    """

    name = 'news_url'
    allowed_domains = ['cignacmb.com']
    start_urls = ['http://www.cignacmb.com/sitemap.xml']

    # Non-greedy `.*?` so that several <loc>...</loc> entries on one physical
    # line are each matched separately; the original greedy `.*` would swallow
    # everything between the first <loc> and the last </loc> on the line.
    _LOC_RE = re.compile(r"<loc>(.*?)</loc>")

    # (compiled pattern, category n) pairs, checked in order. Compiled once at
    # class level instead of re-building three patterns per URL in the loop.
    # Example URLs each pattern accepts:
    #   http://www.cignacmb.com/baoxianzhishi/yiliao/2019012821842.html -> n=1
    #   http://www.cignacmb.com/baoxiananli/qita/2019012321714.html     -> n=2
    #   http://www.cignacmb.com/faq/2019012421758.html                  -> n=3
    _CATEGORY_PATTERNS = [
        (re.compile(r"http://www\.cignacmb\.com/baoxianzhishi/[a-z]+/(?:2018|2019)\d+\.html"), 1),
        (re.compile(r"http://www\.cignacmb\.com/baoxiananli/[a-z]+/(?:2018|2019)\d+\.html"), 2),
        (re.compile(r"http://www\.cignacmb\.com/faq/(?:2018|2019)\d+\.html"), 3),
    ]

    def parse(self, response):
        """Follow every child sitemap listed in the sitemap index page."""
        locs = self._LOC_RE.findall(response.text)
        self.logger.debug("sitemap index yielded %d child sitemaps", len(locs))
        for loc in locs:
            yield scrapy.Request(url=loc, callback=self.parse1)

    def parse1(self, response):
        """Yield a classified item for each matching article URL in a child sitemap."""
        for loc in self._LOC_RE.findall(response.text):
            for pattern, n in self._CATEGORY_PATTERNS:
                match = pattern.match(loc)
                if match:
                    # group() keeps only the matched prefix, dropping any
                    # trailing query string or fragment after ".html"
                    # (same effect as the original `txtN.group()` calls).
                    yield {'url': match.group(), 'n': n}
                    break