import scrapy
from myspider.items import MyspiderItem
from datetime import datetime
import pymysql
import re

# scrapy crawl zsshtest --nolog
class ZsshtestSpider(scrapy.Spider):
    """Incremental crawler for the Shanghai Composite Index board on guba.eastmoney.com.

    On startup it inspects the MySQL table ``zssh``:

    * table non-empty -> resume from a hard-coded checkpoint (listing page
      27252 / known newest stored title) and use :meth:`parse` to find the
      synchronisation point before switching to :meth:`parse2`;
    * table empty -> start a fresh crawl from page 1, handled entirely by
      :meth:`parse2`.
    """

    name = "zsshtest"
    allowed_domains = ["guba.eastmoney.com"]
    start_urls = []  # shadowed by an instance-level list in __init__
    count = 1        # next database row id (1-based)
    now = datetime.now()
    year = now.year
    month = now.month
    page = 1         # current listing page number
    stockcode = '000001'
    stockname = '上证指数'
    last_update = None  # newest post time already stored in the DB
    last_title = None   # title of the newest stored post (sync marker)
    isNull = False      # True when the DB table is empty (fresh crawl)
    isnotExist = True   # True until last_title has been re-seen on a page
    retrycount = 0      # consecutive empty-page retries in parse2

    def __init__(self, *args, **kwargs):
        """Open the MySQL connection and compute the starting URL(s)."""
        super(ZsshtestSpider, self).__init__(*args, **kwargs)
        print("开始初始化")
        # NOTE(review): credentials are hard-coded; they belong in Scrapy
        # settings or environment variables, not in source control.
        self.connection = pymysql.connect(
            host="localhost",
            user="root",
            password="123456",
            database="test03",
            port=3306,
            charset="utf8",
            cursorclass=pymysql.cursors.DictCursor
        )
        # Use an instance-level list so the mutable class attribute is not
        # shared (and appended to) across spider instances.
        self.start_urls = []
        self.get_urls_from_db()

    def get_urls_from_db(self):
        """Choose the crawl starting point from the row count of table ``zssh``.

        Non-empty table: resume from a fixed checkpoint (page / last stored
        title / its timestamp). Empty table: start from page 1 and set
        ``isNull``. The connection is closed afterwards — it is only needed
        here.
        """
        try:
            with self.connection.cursor() as cursor:
                count_sql = "SELECT COUNT(*) as count FROM zssh"
                cursor.execute(count_sql)
                count_result = cursor.fetchone()
                self.count = count_result['count'] + 1 if count_result else 1
                if self.count > 1:
                    # Hard-coded checkpoint of the last stored post.
                    self.page = 27252
                    lastdate = '2024-01-31 20:42:33'
                    self.last_title = '融资融券明细以及限售股出借明细第二天必须公开，才能做到公平公正。'
                    date_obj = datetime.strptime(lastdate, "%Y-%m-%d %H:%M:%S")
                    self.year = date_obj.year
                    self.month = date_obj.month
                    self.start_urls.append(f'https://guba.eastmoney.com/list,zssh000001_{self.page}.html')
                    print("最老日期", lastdate)
                else:
                    self.last_update = '9999-99-99'
                    self.start_urls.append('https://guba.eastmoney.com/list,zssh000001.html')
                    self.isNull = True
        except Exception as e:
            print(f"Error getting URLs from database: {e}")
        finally:
            self.connection.close()

    def parse(self, response):
        """First-stage callback: locate the synchronisation point.

        Fresh crawl (empty DB): hand page 1 straight to :meth:`parse2`.
        Resume: advance page by page until the page containing
        ``last_title`` appears, emit only the rows after that marker, then
        switch to :meth:`parse2` for all subsequent pages.
        """
        print("parse", response.url)
        if self.isNull:
            print("进入空集合", response.url)
            # BUGFIX: do not advance self.page here. parse2 increments the
            # page itself after processing; incrementing in both places made
            # the crawl jump from page 1 straight to page 3, skipping page 2.
            yield scrapy.Request(
                url="https://guba.eastmoney.com/list,zssh000001.html",
                callback=self.parse2,
                dont_filter=True
            )
            return
        if self.last_title not in response.text:
            print("未找到同步信息，下一页")
            self.page += 1
            yield scrapy.Request(
                url=f"https://guba.eastmoney.com/list,zssh000001_{self.page}.html",
                callback=self.parse,
                dont_filter=True
            )
            return

        print("文本内容", response.url)
        # Post timestamps live only in the page's embedded JSON, so they are
        # extracted by regex and zipped with the table rows positionally.
        pattern = r'"post_last_time":"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"'
        datetime_matches = re.findall(pattern, response.text)
        node_list = response.xpath('//tr[@class="listitem"]')
        print("nodelist")
        if len(node_list) == 0:
            return
        print("node")
        for node, post_date in zip(node_list, datetime_matches):
            temp = MyspiderItem()
            temp['title'] = node.xpath('./td[3]/div//text()')[0].extract()
            # Skip every row up to and including the already-stored post;
            # only rows after the marker are new.
            if self.isnotExist:
                if temp['title'] == self.last_title:
                    self.isnotExist = False
                continue
            temp['postdate'] = post_date
            temp['id'] = self.count
            temp['code'] = self.stockcode
            temp['name'] = self.stockname
            temp['reads'] = node.xpath('./td[1]/div/text()')[0].extract()
            temp['review'] = node.xpath('./td[2]/div/text()')[0].extract()
            temp['content'] = response.urljoin(node.xpath('./td[3]/div/a/@href')[0].extract())
            temp['author'] = node.xpath('./td[4]/div/a/text()')[0].extract()
            temp['reply'] = -1
            yield temp
            self.count += 1
        # Next page: from here on parse2 handles everything.
        print("已补齐，开始下一阶段")
        self.page += 1
        yield scrapy.Request(
            url=f"https://guba.eastmoney.com/list,zssh000001_{self.page}.html",
            callback=self.parse2,
        )

    def parse2(self, response):
        """Steady-state callback: scrape every post row, then follow the next page.

        Empty pages are retried up to 5 times; on the 3rd consecutive retry
        the page number is nudged forward once to get past a genuine hole.
        """
        print("parse2开始处理数据前", response.url, datetime.now())
        pattern = r'"post_last_time":"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"'
        datetime_matches = re.findall(pattern, response.text)
        node_list = response.xpath('//tr[@class="listitem"]')
        if len(node_list) == 0:
            print("长度为0")
            if self.retrycount < 5:
                self.retrycount += 1
                if self.retrycount == 3:
                    self.page += 1
                # dont_filter is required: without it Scrapy's dupefilter
                # drops the re-request of the same URL and the retry never
                # actually runs.
                yield scrapy.Request(
                    url=f"https://guba.eastmoney.com/list,zssh000001_{self.page}.html",
                    callback=self.parse2,
                    dont_filter=True,
                )
            else:
                print("重试次数耗尽")
            # BUGFIX: always stop here on an empty page. The retry branch
            # previously fell through to the bottom of the method, which also
            # requested the *next* page — double-advancing the crawl.
            return
        # A non-empty page restores the full retry budget for later pages.
        self.retrycount = 0
        for node, post_date in zip(node_list, datetime_matches):
            temp = MyspiderItem()
            temp['postdate'] = post_date
            temp['id'] = self.count
            temp['code'] = self.stockcode
            temp['name'] = self.stockname
            temp['reads'] = node.xpath('./td[1]/div/text()')[0].extract()
            temp['review'] = node.xpath('./td[2]/div/text()')[0].extract()
            temp['title'] = node.xpath('./td[3]/div//text()')[0].extract()
            temp['content'] = response.urljoin(node.xpath('./td[3]/div/a/@href')[0].extract())
            temp['author'] = node.xpath('./td[4]/div/a/text()')[0].extract()
            temp['reply'] = -1
            yield temp
            self.count += 1
        print("处理数据后", datetime.now())
        print("下一页")
        self.page += 1
        yield scrapy.Request(
            url=f"https://guba.eastmoney.com/list,zssh000001_{self.page}.html",
            callback=self.parse2,
        )
