# stock_scraper/spiders/stock_spider.py

import time

from scrapy.selector import Selector
from scrapy.spiders import Spider
from scrapy_selenium import SeleniumRequest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

class StockSpider(Spider):
    """Crawl the Eastmoney A-share grid list and yield one dict per stock row.

    The listing at ``start_urls`` is a JavaScript single-page table: the
    "next" button re-renders the table in place and the URL (a ``#`` hash
    route) never changes.  Pagination therefore has to be driven through
    the shared Selenium driver itself — re-issuing a SeleniumRequest for
    ``driver.current_url`` would just reload page 1 and, combined with
    ``dont_filter=True``, crawl it forever.
    """

    name = 'stock_spider'
    start_urls = ['https://quote.eastmoney.com/center/gridlist.html#hs_a_board']

    # Safety cap on pagination so a broken "next"-button selector can
    # never keep the spider clicking indefinitely.
    max_pages = 200

    def start_requests(self):
        """Issue one Selenium-backed request per start URL."""
        for url in self.start_urls:
            yield SeleniumRequest(url=url, callback=self.parse, wait_time=10)

    def clean_decimal(self, value):
        """Normalize a display cell to a float, or None when unparsable.

        Strips thousands separators, converts a trailing ``%`` to a
        fraction, and expands the Chinese magnitude suffixes ``亿`` (1e8)
        and ``万`` (1e4).  Empty cells and placeholders such as ``-``
        (shown for suspended stocks) come back as None.
        """
        if not value:
            return None
        value = value.strip().replace(',', '')
        try:
            # Each cell carries at most one marker, so the branches are
            # mutually exclusive in practice; one shared except replaces
            # the four duplicated try/except blocks of the old version.
            if '%' in value:
                return float(value.replace('%', '')) / 100
            if '亿' in value:
                return float(value.replace('亿', '')) * 1e8
            if '万' in value:
                return float(value.replace('万', '')) * 1e4
            return float(value)
        except ValueError:
            return None

    def _extract_items(self, selector):
        """Yield an item dict for every *data* row of the quote table."""
        for row in selector.xpath('//table//tr'):
            stock_no = row.xpath('./td[2]/a/text()').get()
            if not stock_no:
                # Header/decoration rows have no stock-number anchor;
                # emitting them would produce all-None items.
                continue
            yield {
                'bStockNo': stock_no,
                'bStockName': row.xpath('./td[@class="mywidth"]/a/text()').get(),
                'bLatestPrice': self.clean_decimal(row.xpath('./td[@class="mywidth2"][1]/span/text()').get()),
                'bChangePercent': self.clean_decimal(row.xpath('./td[@class="mywidth2"][2]/span/text()').get()),
                'bChangeAmount': self.clean_decimal(row.xpath('./td[7]/span/text()').get()),
                'bVolume': self.clean_decimal(row.xpath('./td[8]/text()').get()),
                'bAmplitude': self.clean_decimal(row.xpath('./td[10]/text()').get()),
                'bHigh': self.clean_decimal(row.xpath('./td[11]/span/text()').get()),
                'bLow': self.clean_decimal(row.xpath('./td[12]/span/text()').get()),
                'bOpen': self.clean_decimal(row.xpath('./td[13]/span/text()').get()),
                'bPreviousClose': self.clean_decimal(row.xpath('./td[14]/text()').get()),
            }

    def _advance_page(self, driver):
        """Click the "next" button and wait for the table to re-render.

        Returns True when a new page has loaded, False when pagination is
        exhausted or the click failed (best-effort: items already yielded
        are kept either way).
        """
        try:
            next_button = WebDriverWait(driver, 10).until(
                EC.element_to_be_clickable(
                    (By.XPATH, '//a[contains(@class, "next paginate_button")]')
                )
            )
            if not (next_button.is_displayed() and next_button.is_enabled()):
                self.logger.info("No more pages to crawl or next button disabled.")
                return False
            # Keep a handle on a current row so we can wait for the DOM to
            # actually swap.  Waiting for mere *presence* of '//table//tr'
            # (as before) is satisfied immediately by the OLD rows and
            # races the re-render, scraping the same page twice.
            stale_probe = driver.find_element(By.XPATH, '//table//tr')
            self.logger.info("Clicking 'Next' button for pagination.")
            next_button.click()
            WebDriverWait(driver, 10).until(EC.staleness_of(stale_probe))
            return True
        except Exception as e:
            # Boundary handler: stop paginating but do not kill the crawl.
            self.logger.error(f"Error during pagination: {e}")
            return False

    def parse(self, response):
        """Walk every page of the grid via the live Selenium driver.

        Fixes the previous pagination: clicking "next" and then yielding a
        new SeleniumRequest for ``driver.current_url`` reloaded page 1
        (the hash-route URL never changes) and looped forever.  Instead we
        scrape ``driver.page_source`` in a loop and advance by clicks only.
        """
        driver = response.request.meta['driver']

        for _ in range(self.max_pages):
            WebDriverWait(driver, 15).until(
                EC.presence_of_element_located((By.XPATH, '//table//tr'))
            )
            # After the first click the original `response` body is stale;
            # always parse the driver's current DOM.
            yield from self._extract_items(Selector(text=driver.page_source))
            if not self._advance_page(driver):
                break