import sqlite3

import scrapy
from lxml import etree


class LinDeXiSpider(scrapy.Spider):
    """Crawl blog.walterlv.com listing pages and store each post's
    title and URL into a local SQLite database (SirTwoDB.db)."""

    name = 'lvyi'
    allowed_domains = ['blog.walterlv.com']

    def start_requests(self):
        """Yield requests for the blog home page plus listing pages 1-99."""
        urls = ['https://blog.walterlv.com/']
        urls.extend(
            f'https://blog.walterlv.com/blog/page{count}/'
            for count in range(1, 100)
        )
        # enumerate replaces the original urls.index(url), which was an
        # O(n) scan on every iteration (O(n^2) overall); output is identical
        # because every URL in the list is unique.
        for index, url in enumerate(urls):
            print(f'正在请求第:{index}个链接\nurl：{url}')
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract post title/link pairs from one listing page and persist them."""
        selector = etree.HTML(response.body)
        result_list = selector.xpath("//div[@class='post-preview']/h1/a")

        for anchor in result_list:
            title_desc = anchor.text
            url = "https://blog.walterlv.com" + anchor.attrib.get('href')
            result = self.write_sir_two_info(title_desc, url)
            print(f'正在入库数据 >> {title_desc} >> result = {result}')

    def write_sir_two_info(self, title, url):
        """Insert one blog record into the BlogWarehouse table.

        Args:
            title: post title scraped from the page (untrusted text).
            url: absolute URL of the post.

        Returns:
            0 on success, 1 when the row already exists (UNIQUE
            constraint violation), 2 on any other database error.
        """
        # Parameterized placeholders prevent SQL injection: the original
        # interpolated the scraped title/url directly into the statement.
        insert_sql = """
            insert into BlogWarehouse
            (title, url, author)
            values (?, ?, '吕毅')
        """
        sir_two_database = sqlite3.connect("SirTwoDB.db")
        try:
            cu = sir_two_database.cursor()
            cu.execute(insert_sql, (title, url))
            # Commit only on the success path; the original committed in
            # `finally`, i.e. even immediately after a rollback.
            sir_two_database.commit()
        except sqlite3.IntegrityError as e:
            sir_two_database.rollback()
            # Duplicate rows are expected across repeated crawls.
            if 'UNIQUE constraint failed' in str(e.args[0]):
                return 1
            return 2
        except sqlite3.Error:
            # Narrowed from the original `except BaseException`.
            sir_two_database.rollback()
            return 2
        finally:
            # The original leaked the connection on every call.
            sir_two_database.close()
        return 0

