import scrapy
import re
from scrapy import cmdline

# Launch the crawl only when this file is run directly.  The original call
# ran unconditionally at import time (so scrapy's own spider discovery would
# recursively re-launch the crawl), was missing the spider name after
# 'crawl', and passed a single string where cmdline.execute() expects an
# argv-style list of strings.
if __name__ == '__main__':
    cmdline.execute(['scrapy', 'crawl', 'stocks'])
class StocksSpider(scrapy.Spider):
    """Follow every link on the start page and, for each followed page,
    collect all hrefs found there and yield them as one item.

    NOTE(review): despite the name, the current code scrapes generic links
    from baidu.com; the commented-out stock-detail parsing suggests an
    earlier target site — confirm the intended start URL.
    """

    name = 'stocks'
    # allowed_domains = ['baidu.com']
    start_urls = ['https://www.baidu.com/']

    # Browser-like request headers, hoisted out of parse() so they are built
    # once instead of on every request.
    _HEADERS = {
        'Content-Type': 'text/html;charset=utf-8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
    }

    def parse(self, response):
        """Parse the start page: follow every anchor href.

        response.urljoin() resolves relative hrefs against the page URL;
        previously a relative href made scrapy.Request raise ValueError,
        which the bare ``except`` silently swallowed, so those links were
        dropped.  The except clause is now narrowed to ValueError (raised
        for URLs that are still invalid, e.g. ``javascript:`` links).
        """
        for href in response.css('a::attr(href)').extract():
            self.logger.debug('found link: %s', href)
            url = response.urljoin(href)
            try:
                yield scrapy.Request(
                    url,
                    callback=self.parse_stock,
                    headers=self._HEADERS,
                    dont_filter=True,
                )
            except ValueError:
                # Unresolvable / non-HTTP URL -- skip it, keep crawling.
                continue

    def parse_stock(self, response):
        """Collect every href on a followed page and yield one item.

        Yields a dict rather than a bare list: scrapy callbacks must return
        Requests, items/dicts, or None, so the original ``yield infoDict``
        (a list) would make the engine raise an error for every page.
        The page URL is included so downstream consumers know which page
        the links came from.
        """
        hrefs = response.css('a::attr(href)').extract()
        item = {'url': response.url, 'hrefs': hrefs}
        self.logger.debug('scraped %d links from %s', len(hrefs), response.url)
        yield item
