import sqlite3

import scrapy
from lxml import etree


class Poetry(scrapy.Spider):
    """Spider that scrapes famous-quote listing pages from so.gushiwen.cn
    and stores each quote (text, detail-page URL, source title) in a local
    SQLite database ``SirTwoDB.db`` (table ``poetry``).
    """

    name = 'poetry'
    allowed_domains = ['so.gushiwen.cn']

    def start_requests(self):
        """Yield requests for the first five listing pages of the quote index."""
        for page in range(1, 6):
            # noinspection SpellCheckingInspection
            url = f"https://so.gushiwen.cn/mingjus/default.aspx?page={page}&tstr=&astr=&cstr=&xstr="
            # Use the 1-based page counter directly instead of the original
            # urls.index(url), which was O(n) per iteration and 0-based
            # (printed "第0个链接" for the first page).
            print(f'正在请求第:{page}个链接\nurl：{url}')
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract each quote row from a listing page and persist it.

        Each row is expected to contain two <a> children: the quote text
        (with a link to its detail page) and the source title.
        """
        selector = etree.HTML(response.body)
        rows = selector.xpath('//div[@class="left"]/div[@class="sons"]/div')
        for row in rows:
            links = row.findall('a')
            if not links:
                # Original code left `content`/`content_source` unbound when
                # the row had no anchors, crashing with NameError at the
                # write_info() call below. Skip such rows instead.
                continue
            content = links[0].text
            content_source = f"https://so.gushiwen.cn{links[0].attrib.get('href')}"
            try:
                source = links[1].text.strip()
            except (IndexError, AttributeError):
                # Missing second anchor or anchor without text: no source.
                source = ''
            result = self.write_info(content, content_source, source)
            print(f'正在入库数据 >> {content} >> result = {result}')

    def write_info(self, content, content_source, source):
        """Insert one quote row into the ``poetry`` table.

        Returns:
            0 on success, 1 if the row violates a UNIQUE constraint
            (duplicate), 2 on any other database error.
        """
        connection = sqlite3.connect("SirTwoDB.db")
        try:
            # Parameterized query: the original interpolated scraped text
            # into the SQL string, which breaks on quotes in the content
            # and is an SQL-injection vector.
            connection.execute(
                "insert into poetry (content, content_source, source)"
                " values (?, ?, ?)",
                (content, content_source, source),
            )
            connection.commit()
        except sqlite3.Error as e:
            connection.rollback()
            if 'UNIQUE constraint failed' in str(e.args[0]):
                return 1
            return 2
        finally:
            # The original never closed the connection (leaked one handle
            # per insert) and committed in `finally` even after rollback.
            connection.close()
        return 0

