import scrapy
from scrapy.exceptions import CloseSpider
from myspider.items import MyspiderItem
from datetime import datetime
import pymysql
import re


# scrapy crawl stockstest --nolog
class StockstestSpider(scrapy.Spider):
    """Crawl eastmoney guba post listings for stocks tracked in the
    ps_info_page MySQL table.

    For each stock row still in progress it requests the remaining listing
    pages, yields one MyspiderItem per post, and periodically writes crawl
    progress (the last finished page) back to ps_info_page.
    """

    name = "stockstest"
    allowed_domains = ["guba.eastmoney.com"]
    start_urls = []
    # Pages parsed since the last progress flush; reset whenever update_page runs.
    count = 0
    # Long-lived PyMySQL connection: opened in get_urls_from_db, reused by
    # update_page, released in closed().
    connection = None

    def __init__(self, *args, **kwargs):
        super(StockstestSpider, self).__init__(*args, **kwargs)
        print("开始初始化")
        self.get_urls_from_db()

    def get_urls_from_db(self):
        """Connect to MySQL and load the stocks/pages still left to crawl.

        Populates self.results with dict rows from ps_info_page (DictCursor).
        On any failure self.results is set to [] so start_requests exits
        cleanly. The connection is intentionally kept open afterwards because
        update_page reuses it; it is closed in closed().
        """
        try:
            self.connection = pymysql.connect(
                host="localhost",
                user="root",
                password="123456",
                database="mytest",
                port=3306,
                charset="utf8",
                cursorclass=pymysql.cursors.DictCursor
            )
            print("开始sql时间", datetime.now())
            with self.connection.cursor() as cursor:
                # Only rows whose crawl has not yet reached the last page.
                sql = """
                        SELECT p.* 
                        FROM ps_info_page p
                        WHERE p.pagesize IS NOT NULL 
                          AND p.pagesize != '97046'
                          AND p.pagesize >p.page
                        """
                cursor.execute(sql)
                self.results = cursor.fetchall()
                print("执行完sql时间", datetime.now())

        except Exception as e:
            print(f"Error getting URLs from database: {e}")
            self.results = []

    def start_requests(self):
        """Yield one Request per remaining listing page of each tracked stock."""
        if not hasattr(self, 'results') or not self.results:
            self.logger.error("没有可处理的URL，爬虫将退出")
            return
        for row in self.results[:20]:  # cap the number of stocks handled per run
            stockcode = row.get('code')
            stockname = row.get('name')
            pagesize = row.get('pagesize')
            page = row.get('page')

            if not (stockcode and stockname):
                continue
            # The SQL above compares pagesize against a quoted literal, so the
            # columns may well come back as strings. Coerce to int so range()
            # works and so the pagesize == page completion check in parse()
            # (int vs int) can actually fire; skip malformed rows instead of
            # crashing the whole spider.
            try:
                page = int(page)
                pagesize = int(pagesize)
            except (TypeError, ValueError):
                self.logger.warning("跳过无效行: %r", row)
                continue

            for i in range(page + 1, pagesize + 1):
                url = f'https://guba.eastmoney.com/list,{stockcode}_{i}.html'
                yield scrapy.Request(
                    url=url,
                    meta={'stockcode': stockcode, 'stockname': stockname,
                          'pagesize': pagesize, 'page': i},
                    callback=self.parse
                )

    def parse(self, response):
        """Extract every post row from one listing page and yield items.

        Also flushes crawl progress to the DB when this is the stock's last
        page or every 10 parsed pages (self.count is a spider-wide counter,
        so the flush granularity is approximate across interleaved stocks).
        """
        self.count += 1
        if response.meta['pagesize'] == response.meta['page'] or self.count > 10:
            self.count = 0
            self.update_page(response)
        print("爬取页面：", response.url)
        # Post timestamps live in an embedded JSON blob, not in the table
        # markup, so pull them out with a regex and pair them with the rows.
        pattern = r'"post_last_time":"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"'
        datetime_matches = re.findall(pattern, response.text)
        node_list = response.xpath('//tr[@class="listitem"]')
        stockcode = response.meta['stockcode']
        stockname = response.meta['stockname']
        # zip() silently truncates to the shorter sequence if the regex and
        # the xpath disagree on row count.
        for node, post_date in zip(node_list, datetime_matches):
            temp = MyspiderItem()
            temp['postdate'] = post_date
            temp['code'] = stockcode
            temp['name'] = stockname
            temp['reads'] = node.xpath('./td[1]/div/text()')[0].extract()
            temp['review'] = node.xpath('./td[2]/div/text()')[0].extract()
            temp['title'] = node.xpath('./td[3]/div//text()')[0].extract()
            # Absolute URL of the post detail page.
            temp['content'] = response.urljoin(node.xpath('./td[3]/div/a/@href')[0].extract())
            temp['author'] = node.xpath('./td[4]/div/a/text()')[0].extract()
            # Reply details are not scraped at listing level; -1 marks "unknown".
            temp['reply'] = -1

            yield temp

        print("处理数据后", datetime.now())

    def closed(self, reason):
        """Shutdown hook Scrapy invokes automatically when the spider ends.

        The pre-existing spider_closed method was never called because it was
        not registered with the spider_closed signal; delegating from this
        hook guarantees the DB connection is actually released.
        """
        self.spider_closed(self)

    def spider_closed(self, spider):
        # Close the database connection (kept under its original name for
        # backward compatibility; closed() above is the entry point).
        if self.connection:
            self.connection.close()
        self.logger.info('数据库连接已关闭')

    def update_page(self, response):
        """Persist crawl progress: record that this stock is done up to `page`.

        Uses a parameterized UPDATE; the `page < %s` guard keeps out-of-order
        responses from moving the bookmark backwards.
        """
        try:
            with self.connection.cursor() as cursor:
                sql = "UPDATE ps_info_page SET page = %s WHERE code = %s AND page < %s"
                params = (response.meta["page"], response.meta["stockcode"], response.meta["page"])
                cursor.execute(sql, params)

                # rowcount tells us whether any row actually advanced.
                rows_affected = cursor.rowcount
                if rows_affected > 0:
                    print(f"{response.meta['stockcode']}:第{response.meta['page']}页 更新成功")
                else:
                    print(f"更新失败：未找到code为{response.meta['stockcode']}的记录")

                self.connection.commit()  # 提交事务
        except Exception as e:
            print(f"更新数据库时发生错误: {e}")
            self.connection.rollback()  # 回滚事务
