import scrapy
from scrapy.exceptions import CloseSpider

from myspider.items import MyspiderItem
from datetime import datetime
import pymysql
import re


# scrapy crawl zsshtest2 --nolog
class Zsshtest2Spider(scrapy.Spider):
    """Crawl the Shanghai Composite Index (zssh000001) post list on
    guba.eastmoney.com and yield one ``MyspiderItem`` per forum post row.

    Run with: ``scrapy crawl zsshtest2 --nolog``.
    Pages ``page`` .. ``end_page`` of the list are fetched; each list page is
    parsed by :meth:`parse`.
    """

    name = "zsshtest2"
    allowed_domains = ["guba.eastmoney.com"]
    start_urls = []
    count = 1              # next database row number
    now = datetime.now()   # snapshot of "now" taken at class-definition time
    year = now.year
    month = now.month
    page = 1               # first list page to crawl (inclusive)
    end_page = 500         # last list page to crawl (inclusive)
    stockcode = '000001'
    stockname = '上证指数'
    last_update = None
    last_title = None
    isNull = False
    isnotExist = True
    retrycount = 0

    def __init__(self, *args, **kwargs):
        """Open the MySQL connection used to look up the current row count."""
        super(Zsshtest2Spider, self).__init__(*args, **kwargs)
        self.logger.info("开始初始化")
        self.connection = pymysql.connect(
            host="localhost",
            user="root",
            password="123456",
            database="test03",
            port=3306,
            charset="utf8",
            cursorclass=pymysql.cursors.DictCursor
        )
        # Optionally seed self.count from the database:
        # self.get_urls_from_db()

    def closed(self, reason):
        """Scrapy shutdown hook: release the MySQL connection.

        FIX: previously the connection was either closed prematurely inside
        get_urls_from_db() or never closed at all (resource leak).
        """
        try:
            self.connection.close()
        except pymysql.err.Error:
            pass  # already closed — nothing to release

    def start_requests(self):
        """Generate one request per list page in [page, end_page]."""
        for page_no in range(self.page, self.end_page + 1):
            url = f'https://guba.eastmoney.com/list,zssh000001_{page_no}.html'
            yield scrapy.Request(url, callback=self.parse)

    def get_urls_from_db(self):
        """Set ``self.count`` to (row count of table ``zssh``) + 1.

        Currently unused — the call in __init__ is commented out.
        FIX: the old ``finally`` block closed the connection even on success,
        leaving it unusable for the rest of the run; the connection is now
        closed once, in :meth:`closed`.
        """
        try:
            with self.connection.cursor() as cursor:
                count_sql = "SELECT COUNT(*) as count FROM zssh"  # 改pipeline
                cursor.execute(count_sql)
                count_result = cursor.fetchone()
                self.count = count_result['count'] + 1 if count_result else 1
        except Exception as e:
            self.logger.error("Error getting URLs from database: %s", e)

    def parse(self, response):
        """Parse one list page and yield a ``MyspiderItem`` per post row.

        Post timestamps are not in the HTML table cells; they live in an
        embedded JSON blob, so they are pulled out with a regex and paired
        with the table rows by position.
        """
        self.logger.debug("parse2开始处理数据前 %s %s", response.url, datetime.now())
        pattern = r'"post_last_time":"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"'
        datetime_matches = re.findall(pattern, response.text)
        node_list = response.xpath('//tr[@class="listitem"]')
        # NOTE(review): zip() truncates to the shorter sequence; rows and
        # timestamps are assumed to appear in the same order — confirm
        # against the live page markup.
        for node, post_date in zip(node_list, datetime_matches):
            item = MyspiderItem()
            item['postdate'] = post_date
            item['code'] = self.stockcode
            item['name'] = self.stockname
            # FIX: .get() returns None when a cell is missing instead of
            # raising IndexError ([0].extract() aborted the whole page on
            # one malformed row).
            item['reads'] = node.xpath('./td[1]/div/text()').get()
            item['review'] = node.xpath('./td[2]/div/text()').get()
            item['title'] = node.xpath('./td[3]/div//text()').get()
            href = node.xpath('./td[3]/div/a/@href').get()
            item['content'] = response.urljoin(href) if href else None
            item['author'] = node.xpath('./td[4]/div/a/text()').get()
            item['reply'] = -1  # placeholder; reply count filled in elsewhere — TODO confirm
            yield item
            self.count += 1
        self.logger.debug("处理数据后 %s", datetime.now())
