import scrapy
from scrapy import *
from stockstar.items import *
from scrapy.http import *
class StockSpider(scrapy.Spider):
    """Crawl the National Library of China (NLC) search listing.

    Scrapes ``read.nlc.cn/allSearch/searchList`` result pages, yielding one
    ``StockstarItem`` per result row (title text and image URLs), then queues
    the remaining listing pages for the same treatment.
    """

    name = 'stock'  # spider name used by `scrapy crawl stock`
    # Domain only — including the 'http://' scheme here would make the
    # OffsiteMiddleware reject every request, so the spider would crawl nothing.
    allowed_domains = ['read.nlc.cn']
    start_urls = ['http://read.nlc.cn/allSearch/searchList?searchType=10022&showType=1&pageNo=1']
    # Pages 1..MAX_PAGE-1 are queued from parse(); hoisted from the old
    # hard-coded range(1, 400) so the bound is adjustable in one place.
    MAX_PAGE = 400

    def parse(self, response: HtmlResponse):
        """Extract items from one listing page and queue the other pages.

        :param response: a listing page response.
        :yields: ``StockstarItem`` instances, then ``Request`` objects for
            the remaining listing pages.
        """
        # response.css() is sufficient; no need to wrap in Selector(response).
        rows = response.css('body > div.YMH2019_New_GJG_bd1.only > div > ul > li')
        for row in rows:
            item = StockstarItem()
            item['name'] = row.css('span.tt::text').extract_first()
            item['image_src'] = row.css('img::attr(src)').extract()
            yield item

        # Queue every listing page. The original yielded these with
        # dont_filter=True from *every* parse() call, so each of the ~400
        # pages re-queued all ~400 pages again (unbounded duplication);
        # letting Scrapy's built-in dupefilter drop already-seen URLs
        # makes each page get crawled exactly once.
        for page_no in range(1, self.MAX_PAGE):
            url = ('http://read.nlc.cn/allSearch/searchList'
                   '?searchType=10022&showType=1&pageNo=' + str(page_no) + '&searchWord=')
            yield Request(url=url, callback=self.parse)