import scrapy
from myspider.items import MyspiderItem
import pymysql


# Run with: scrapy crawl onlytitle --nolog
class OnlytitleSpider(scrapy.Spider):
    """Scrape post listings (reads, replies, title, author, date) from
    guba.eastmoney.com stock forum list pages.

    Stock codes are loaded from the MySQL table ``ps_info`` at startup and
    each code is turned into an additional list-page start URL.
    """

    name = "onlytitle"
    allowed_domains = ["guba.eastmoney.com"]
    # Pattern: https://guba.eastmoney.com/list,<stock_code>,f.html
    start_urls = ["https://guba.eastmoney.com/list,fshferbm,f.html"]
    # Sequential item id assigned across all scraped rows.
    count = 1
    # List pages only show "MM-DD hh:mm" (no year). Posts are newest-first,
    # so while walking backwards in time a month number that *increases*
    # means we crossed into the previous year; `year`/`month` track that.
    year = 2025
    month = 4

    def __init__(self, *args, **kwargs):
        super(OnlytitleSpider, self).__init__(*args, **kwargs)
        # Connect to MySQL; used only once, to load the stock codes.
        self.connection = pymysql.connect(
            host="localhost",
            user="root",
            password="123456",
            database="test06",
            port=3306,
            charset="utf8",
        )
        # Extend start_urls with one list URL per stock code in the DB.
        self.get_urls_from_db()

    def get_urls_from_db(self):
        """Append one guba list URL per stock code found in ``ps_info``.

        The DB connection is closed afterwards in all cases; it is not
        needed again by the spider.
        """
        try:
            with self.connection.cursor() as cursor:
                # Select only the column we use. The default PyMySQL cursor
                # returns plain tuples, so the code is row[0] — the previous
                # row['code'] lookup raised TypeError (no DictCursor was
                # configured) and silently dropped every URL.
                cursor.execute("SELECT code FROM ps_info")
                for row in cursor.fetchall():
                    self.start_urls.append(
                        f"https://guba.eastmoney.com/list,{row[0]},f.html"
                    )
        except Exception as e:
            # Use scrapy's per-spider logger instead of print.
            self.logger.error("Error getting URLs from database: %s", e)
        finally:
            self.connection.close()

    def parse(self, response):
        """Yield one MyspiderItem per post row, then follow pagination.

        The trailing space in the class name "listitem " matches the
        site's markup and is intentional.
        """
        for node in response.xpath('//tr[@class="listitem "]'):
            temp = MyspiderItem()
            temp['id'] = self.count
            temp['code'] = None
            temp['name'] = None
            # .get() yields None for a missing cell instead of raising
            # IndexError like the old [0].extract() did.
            temp['reads'] = node.xpath('./td[1]/div/text()').get()
            temp['review'] = node.xpath('./td[2]/div/text()').get()
            temp['title'] = node.xpath('./td[3]/div/a/text()').get()
            # Absolute URL of the post detail page.
            temp['content'] = response.urljoin(node.xpath('./td[3]/div/a/@href').get())
            temp['author'] = node.xpath('./td[4]/div/a/text()').get()
            # postdate looks like "MM-DD hh:mm"; first two chars are the month.
            postdate = node.xpath('./td[5]/div/text()').get()
            mon = int(postdate[:2])
            # Month jumped upward while going back in time -> previous year.
            if mon > self.month:
                self.year -= 1
            self.month = mon
            temp['postdate'] = f"{self.year}-{postdate}"
            temp['reply'] = -1

            yield temp
            self.count += 1

        # Follow the "next page" link(s); urljoin guards against relative
        # hrefs (the post links above already went through urljoin).
        for link in response.xpath('//a[@class="nextp"]'):
            yield scrapy.Request(
                url=response.urljoin(link.xpath('./@href').get()),
                callback=self.parse,
            )
