# -*- coding: utf-8 -*-

import scrapy
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
# from sqlalchemy import create_engine
# import pandas as pd
# import time as threadTime

class QuotesSpider(scrapy.Spider):
    """Crawl the cnblogs "pick" listing and yield post title/content items.

    Run as ``scrapy crawl quotes``; an optional ``-a tag=<tag>`` argument
    narrows the start URL to a tag sub-listing.
    """

    name = "quotes"

    def start_requests(self):
        """Yield the initial request for the listing page.

        Reads the optional ``tag`` spider attribute (set via ``-a tag=...``)
        and, when present, appends ``tag/<tag>`` to the base URL.
        """
        url = 'https://www.cnblogs.com/pick/'
        tag = getattr(self, 'tag', None)
        if tag is not None:
            url = url + 'tag/' + tag
        yield scrapy.Request(url, self.parse)

    def parse(self, response):
        """Extract one item per post entry, then follow post links.

        Yields dicts with ``title`` and ``content`` (first matching text node,
        or ``None`` when the selector finds nothing), plus follow-up Requests
        for each post link found in the listing.
        """
        for quote in response.xpath('//*[@id="topics"]/div'):
            # Fix: the original selectors began with '//', which queries the
            # whole document instead of the current `quote` node, so every
            # item repeated the first post's data. './/' makes them relative.
            # NOTE(review): these ids look like post-detail-page elements —
            # confirm they actually occur inside each listing entry.
            yield {
                'title': quote.xpath('.//*[@id="cb_post_title_url"]//text()').extract_first(),
                'content': quote.xpath('.//*[@id="cnblogs_post_body"]/p//text()').extract_first(),
            }

        # extract() always returns a list (possibly empty), never None, so the
        # old `if next_pages is not None` guard was always true; iterating the
        # (possibly empty) list directly is both correct and simpler.
        next_pages = response.xpath('//*[@id="post_list"]/div[2]/div[2]/h3/a/@href').extract()
        for np_url in next_pages:
            yield scrapy.Request(response.urljoin(np_url))