import scrapy
from tutorial.items import CovidItem
import re
# Generated with: scrapy genspider spider mcash.ctsec.com
class SpiderSpider(scrapy.Spider):
    """Scrape notice dates, titles and body texts from mcash.ctsec.com.

    Yields a single ``CovidItem`` per page whose fields are parallel lists:
    ``title_details``, ``text_details`` and ``date_details``.
    """
    name = 'spider'
    # Only links under these domains are followed; everything else is dropped.
    allowed_domains = ['mcash.ctsec.com']
    start_urls = ['https://mcash.ctsec.com/infofront/antIndex/99d79444-5226-11e6-a786-40f2e968ab88']

    def parse(self, response):
        """Default callback: extract all entries from the list page and yield one item.

        :param response: the downloaded page (scrapy Response).
        :return: generator yielding one populated ``CovidItem``.
        """
        # div[1] of each list entry holds the date, div[2] the title + body text.
        date_items = response.xpath('//*[@id="__layout"]/div/div/div/section/ul/li/div[1]')
        text_items = response.xpath('//*[@id="__layout"]/div/div/div/section/ul/li/div[2]')
        print(f"一次爬取到的日期信息条数：{len(date_items)} 文本信息条数:{len(text_items)}")

        titled_details = []
        text_details = []
        for text_sel in text_items:
            title = text_sel.xpath('./div[1]/span/text()').extract_first()
            # extract_first() may return None when the node has no text; guard
            # with '' so re.sub does not raise TypeError.
            text = text_sel.xpath('./div[2]/text()').extract_first() or ''
            # Collapse latin letters, a few symbols and whitespace runs into
            # a single space, leaving the Chinese text intact.
            text = re.sub(r'[a-zA-Z#$&\s\n\r]+', " ", text)
            titled_details.append(title)
            text_details.append(text)

        # BUG FIX: this loop was nested inside the text loop above, so every
        # date was appended once per text entry (duplicated len(text_items)
        # times) and the item was yielded repeatedly while half-built.
        date_details = []
        for date_sel in date_items:
            # A date may be split across several <span> nodes; join them.
            date = ' '.join(date_sel.xpath('./span/text()').getall())
            date_details.append(date)

        items = CovidItem()
        items['title_details'] = titled_details
        items['text_details'] = text_details
        items['date_details'] = date_details
        yield items  # handed off to the configured pipelines