# -*- coding: utf-8 -*-
import scrapy
import pymysql
from cxianshengSpider.items import CxianshengspiderItem
# MySQL connection helper.
class Mysql(object):
    """Thin wrapper that opens a pymysql connection and cursor.

    NOTE(review): credentials are hard-coded (root / empty password,
    database ``python``) — consider moving them to Scrapy settings.
    """

    def __init__(self):
        # PyMySQL 1.0 removed positional connect() arguments, so pass
        # them by keyword to stay compatible with current releases.
        # utf8mb4 so Chinese titles round-trip correctly.
        self.db = pymysql.connect(
            host='localhost',
            user='root',
            password='',
            database='python',
            charset='utf8mb4',
        )

        # Cursor used for executing statements.
        self.cursor = self.db.cursor()

    def close(self):
        """Release the cursor and the underlying connection."""
        try:
            self.cursor.close()
        finally:
            self.db.close()

# Spider class
class CxianshengSpider(scrapy.Spider):
    """Crawl cxiansheng.cn article listings.

    Yields one :class:`CxianshengspiderItem` per article (title + url)
    and follows the "next page" pagination link.
    """

    name = 'cxiansheng'                      # spider name
    allowed_domains = ['cxiansheng.cn']      # domains allowed to crawl
    start_urls = ['https://cxiansheng.cn/']  # initial url(s)

    def return_default_str(self, value):
        """Return *value* with surrounding whitespace stripped, or ''
        when it is None/empty.

        Parameter renamed from ``str`` so it no longer shadows the
        builtin; positional callers are unaffected.
        """
        return value.strip() if value else ''

    def parse(self, response):
        """Extract (title, url) from each article and paginate.

        Yields:
            CxianshengspiderItem: one per ``<section>/<article>`` entry.
            scrapy.Request: for the next listing page, if present.
        """
        for selector in response.xpath('//section/article'):
            article_title = self.return_default_str(
                selector.xpath('./header/h1/a/text()').get())
            article_url = self.return_default_str(
                selector.xpath('./div/p[@class="more"]/a/@href').get())

            # Persisting to MySQL is currently disabled:
            # self.insert_mysql(article_title, article_url)

            yield CxianshengspiderItem(title=article_title, url=article_url)

        next_url = response.xpath(
            '//nav[@class="pagination"]/a[@class="extend next"]/@href').get()

        if next_url:
            # urljoin handles relative pagination hrefs; absolute URLs
            # pass through unchanged.
            yield scrapy.Request(response.urljoin(next_url),
                                 callback=self.parse)

    # Persist one row to the database.
    def insert_mysql(self, title, url):
        """Insert (title, url) into the ``cxiansheng`` table.

        Uses a parameterized query — the previous %-formatted SQL broke
        (and was injectable) whenever the scraped text contained quotes.
        Rolls back on database errors and always closes the connection,
        which the old version leaked on every call.
        """
        mysql = Mysql()

        sql = "INSERT INTO cxiansheng(title,url) VALUES (%s, %s)"

        try:
            # Let the driver escape the values (DB-API paramstyle).
            mysql.cursor.execute(sql, (title, url))
            mysql.db.commit()
        except pymysql.Error:
            # Narrowed from a bare except; keep best-effort semantics.
            mysql.db.rollback()
        finally:
            mysql.cursor.close()
            mysql.db.close()


