import scrapy
from myspider.items import MyspiderItem
import pymysql
import re
from datetime import datetime

# NOTE: the year-rollover logic breaks in January; the `now`-based year tracking needs fixing
# scrapy crawl gponlytitle --nolog
class GponlytitleSpider(scrapy.Spider):
    """Crawl post titles from Eastmoney guba (stock forum) listing pages.

    Stock codes/names are loaded from the local MySQL table ``ps_info``;
    item ids continue from the current row count of ``stockreview2``.
    Stocks are crawled one at a time: when a stock's listing runs out of
    rows, the spider advances to the next stock's first page, and stops
    cleanly once the work list is exhausted.

    Run with: scrapy crawl gponlytitle --nolog
    """
    name = "gponlytitle"
    allowed_domains = ["guba.eastmoney.com"]
    # start_urls = ["https://guba.eastmoney.com/list,000001,f_3211.html"]
    start_urls = []            # seeded with the first stock's URL in __init__
    all_start_urls = []        # first-page listing URL for every stock in ps_info
    count = 1                  # running id for scraped rows (continues DB numbering)
    code_name_dict = {}        # stock code -> stock name
    now = datetime.now()       # reference date used to reconstruct post years
    year = now.year
    month = now.month
    index = 0                  # position of the current stock within all_start_urls
    page = 1                   # current listing page number of the current stock
    pattern = r'list,(\d+),f'  # extracts the stock code from a listing URL
    stockcode = None           # code of the stock currently being crawled
    stockname = None           # name of the stock currently being crawled
    last_update_list = []      # per-stock latest stored post date, parallel to all_start_urls
    last_update = None         # latest stored date for the current stock

    def __init__(self, *args, **kwargs):
        """Open the DB connection, load the stock work list, seed the first stock."""
        super(GponlytitleSpider, self).__init__(*args, **kwargs)
        # Connect to the local database.
        print("开始初始化")
        self.connection = pymysql.connect(
            host="localhost",
            user="root",
            password="123456",
            database="test03",
            port=3306,
            charset="utf8",
            cursorclass=pymysql.cursors.DictCursor
        )
        # Populate all_start_urls / code_name_dict / last_update_list from the DB.
        self.get_urls_from_db()
        # Seed the crawl with the first stock on the work list.
        url = self.all_start_urls[self.index]
        self.start_urls.append(url)
        self.index += 1
        # URLs are built by get_urls_from_db, so the pattern always matches.
        match = re.search(self.pattern, url)
        self.stockcode = match.group(1)
        self.stockname = self.code_name_dict.get(self.stockcode)

    def get_urls_from_db(self):
        """Load stock codes/names from ps_info and build the crawl work list.

        Also reads COUNT(*) of stockreview2 so item numbering continues after
        the rows already stored, and each stock's MAX(发布时间) for incremental
        updates.  Closes the DB connection when done (it is not used again).
        """
        try:
            with self.connection.cursor() as cursor:
                # Continue numbering after the rows already in the table.
                count_sql = "SELECT COUNT(*) as count FROM stockreview2"  # keep table name in sync with the pipeline
                cursor.execute(count_sql)
                count_result = cursor.fetchone()
                self.count = count_result['count'] + 1 if count_result else 1
                # ps_info holds one row per stock with `code` and `name` columns.
                sql = "SELECT * FROM ps_info"
                cursor.execute(sql)
                results = cursor.fetchall()
                # NOTE(review): [:2] limits the crawl to the first two stocks,
                # presumably a debugging leftover -- confirm before removing.
                for row in results[:2]:
                    stockcode = row.get('code')
                    stockname = row.get('name')

                    if stockcode and stockname:
                        self.code_name_dict[stockcode] = stockname
                        self.all_start_urls.append(f"https://guba.eastmoney.com/list,{stockcode},f_1.html")
                        # Parameterized query: latest post date already stored for this stock.
                        sql2 = "SELECT MAX(`发布时间`) as lastdate FROM stockreview2 WHERE `代码` = %s"
                        cursor.execute(sql2, (stockcode,))
                        result2 = cursor.fetchone()
                        # '0' sorts before any real date string, i.e. "never updated".
                        lastdate = result2['lastdate'] if result2 and result2['lastdate'] else '0'
                        self.last_update_list.append(lastdate)

                print("self.all_start_urls:", self.all_start_urls,"date:",self.last_update_list)

        except Exception as e:
            print(f"Error getting URLs from database: {e}")
        finally:
            self.connection.close()

    def nextstock(self, url):
        """Switch the crawl state over to the stock addressed by `url`.

        Resets the page counter and the year/month tracking used to
        reconstruct post dates, and records this stock's last stored date.
        """
        self.last_update = self.last_update_list[self.index]
        self.index += 1
        self.page = 1
        self.now = datetime.now()
        self.year = self.now.year
        self.month = self.now.month

        match = re.search(self.pattern, url)
        self.stockcode = match.group(1)
        self.stockname = self.code_name_dict.get(self.stockcode)

    def parse(self, response):
        """Parse one listing page: yield an item per post row, then follow the
        next page; on an empty page, advance to the next stock (or finish)."""
        print("文本内容",response.url)#,response.text)
        node_list = response.xpath('//tr[@class="listitem"]')
        print("nodelist")
        if len(node_list) == 0:
            # Current stock exhausted -- advance to the next one.
            # Fix: previously this indexed past the end of all_start_urls and
            # raised IndexError once the final stock finished.
            if self.index >= len(self.all_start_urls):
                return  # all stocks done; let the spider close cleanly
            url = self.all_start_urls[self.index]
            print("即将抓取下一页:", url)
            self.nextstock(url)
            yield scrapy.Request(url=url, callback=self.parse)
            return None
        print("node")
        for node in node_list:
            temp = MyspiderItem()
            temp['id'] = self.count
            temp['code'] = self.stockcode
            temp['name'] = self.stockname
            temp['reads'] = node.xpath('./td[1]/div/text()')[0].extract()
            temp['review'] = node.xpath('./td[2]/div/text()')[0].extract()
            temp['title'] = node.xpath('./td[3]/div//text()')[0].extract()
            temp['content'] = response.urljoin(node.xpath('./td[3]/div/a/@href')[0].extract())
            temp['author'] = node.xpath('./td[4]/div/a/text()')[0].extract()
            # The listing shows only "MM-DD HH:MM"; reconstruct the year.
            postdate = node.xpath('./td[5]/div/text()')[0].extract()
            mon = int(postdate[:2])
            # Posts are listed newest-first: a jump from January back to
            # December means we crossed into the previous year.
            # (Simplified: `mon > self.month` was implied by the other two
            # clauses.)  NOTE(review): years with no December post are missed
            # -- see the note at the top of the file.
            if self.month == 1 and mon == 12:
                self.year -= 1
            self.month = mon
            temp['postdate'] = f"{self.year}-{postdate}"
            temp['reply'] = -1  # reply count is not scraped by this spider

            yield temp
            self.count += 1
        # Follow the next listing page of the same stock.
        print("下一页")
        self.page+=1
        yield scrapy.Request(
            url=f"https://guba.eastmoney.com/list,{self.stockcode},f_{self.page}.html",
            callback=self.parse,
        )

