import json
import os
import re
import time

import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException, TimeoutException
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def final_parser(text):
    """Parse the raw text of a product-detail dialog into a flat dict.

    The dialog text is a run of known Chinese field labels followed by their
    values. Each label is located in the text and its value is the slice up
    to the next label. The indoor-unit table (the last label) is parsed
    row-by-row and stored as a JSON string under '室内机信息'.

    Args:
        text: Raw dialog text (typically Selenium's ``element.text``, which
            separates lines with real newline characters). Non-string input
            yields an empty dict.

    Returns:
        dict mapping field labels to cleaned string values.
    """
    if not isinstance(text, str):
        return {}
    keys_in_order = [
        "产品类型", "产品型号", "生产者", "备案号", "能效等级", "公告时间", "依据国家标准",
        "备案信息", "制冷量(CC) (W)", "制热量(HC) (W)", "全年耗电量(APC)(kW•h)",
        "制冷消耗功率(W)", "制热消耗功率(W)", "全年性能系数(APF)", "最小制冷能效比 (EERmin)",
        "序号 室内机规格型号 制冷量(W) 匹配数量（台）"
    ]
    # BUG FIX: the original replaced the two-character literal '\\n', which
    # never occurs in Selenium's .text output; real newlines were left intact
    # and the table-row split on '|' below silently found nothing. Normalize
    # real newlines first (keep the literal form too, for robustness against
    # pre-escaped input).
    text = text.replace('\n', ' | ').replace('\\n', ' | ')
    text = text.replace('详细', '').replace('确认', '').strip()
    # Locate each label exactly once; keep only labels actually present.
    key_positions = {}
    for key in keys_in_order:
        pos = text.find(key)
        if pos != -1:
            key_positions[key] = pos
    sorted_keys = sorted(key_positions, key=key_positions.get)
    table_key = "序号 室内机规格型号 制冷量(W) 匹配数量（台）"
    # Row shape: index, model (may contain spaces/dashes), cooling capacity,
    # matched-unit count. Compiled once, matched per row.
    row_pattern = re.compile(r'(\d+)\s+([\w\s/-]+?)\s+([\d.]+)\s+(\d+)')
    data = {}
    for i, key in enumerate(sorted_keys):
        start_pos = key_positions[key] + len(key)
        end_pos = key_positions[sorted_keys[i + 1]] if i + 1 < len(sorted_keys) else len(text)
        value = text[start_pos:end_pos].replace(':', '').strip()
        if key == table_key:
            table_data = []
            for row in value.split('|'):
                row = row.strip()
                if not row:
                    continue
                match = row_pattern.match(row)
                if match:
                    table_data.append({
                        "序号": match.group(1),
                        "室内机规格型号": match.group(2).strip(),
                        "制冷量(W)": match.group(3),
                        "匹配数量（台）": match.group(4),
                    })
            data['室内机信息'] = json.dumps(table_data, ensure_ascii=False)
        else:
            data[key] = value.replace('|', '').strip()
    return data

def scrape_and_process():
    """Scrape multi-split air-conditioner filing records from energylabel.com.cn.

    Workflow:
      1. Ask the user how many result pages to scrape.
      2. Drive Chrome through the category cascader, run the search, and
         verify the first result row shows the expected product type.
      3. For each page: read the summary columns of every row, open the
         per-row detail dialog, parse it with final_parser(), close it.
      4. On exit (success or failure) save everything collected so far to
         'energylabel/energylabel_data_processed.csv'.

    Requires assets/chromedriver.exe relative to the working directory.
    Results are accumulated in memory and written once at the end.
    """

    class _TextNotPresentInElement:
        """Wait condition: true once `text` is no longer inside the element.

        Hoisted to function scope — the original re-declared this class on
        every iteration of the per-row loop.
        """

        def __init__(self, locator, text):
            self.locator = locator
            self.text = text

        def __call__(self, driver):
            try:
                return self.text not in driver.find_element(*self.locator).text
            except StaleElementReferenceException:
                # Element re-rendered mid-check; report "not ready yet".
                return False

    # --- User input for number of pages ---
    try:
        pages_to_scrape = int(input("请输入您希望抓取的页数 (例如: 10): "))
        if pages_to_scrape <= 0:
            raise ValueError()
    except ValueError:
        print("无效的输入。将只抓取第1页。")
        pages_to_scrape = 1
    print(f"好的，将尝试抓取 {pages_to_scrape} 页数据。")

    # --- Browser setup ---
    chrome_driver_path = "assets/chromedriver.exe"
    service = ChromeService(executable_path=chrome_driver_path)
    driver = webdriver.Chrome(service=service)
    wait = WebDriverWait(driver, 20)
    driver.get("https://www.energylabel.com.cn/productFiling")
    all_scraped_data = []
    try:
        # Open the category cascader, pick first menu item then the 5th node
        # of the second panel (the target product category), then search.
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'input[placeholder="请选择"]'))).click()
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.el-cascader-panel ul.el-cascader-menu__list > li:nth-child(1)"))).click()
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.el-cascader-panel > div:nth-child(2) .el-cascader-node:nth-child(5)"))).click()
        time.sleep(1)  # give the cascader time to close before clicking search
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button.sousuo"))).click()

        # Wait for the search results to load correctly and verify the first
        # row's product-type cell — the robust way to ensure the right data
        # is shown before proceeding.
        first_row_product_type_selector = "div.el-table__body-wrapper table > tbody > tr:nth-child(1) > td:nth-child(2)"
        try:
            wait.until(EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, first_row_product_type_selector), "多联式空调（热泵）机组 2021版"
            ))
            print("Search results loaded and verified.")
        except TimeoutException:
            print("Fatal Error: Search results did not load correctly. The first item is not the expected product type. Aborting.")
            driver.quit()
            return

        # Cap the requested page count at what the site actually has
        # (10 items per page, hence ceil(total / 10) pages).
        try:
            total_pages_element = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".el-pagination__total")))
            total_items = int(re.search(r'\d+', total_pages_element.text).group())
            available_pages = (total_items + 9) // 10
            print(f"网站上共有 {available_pages} 页数据。")
            final_pages_to_scrape = min(pages_to_scrape, available_pages)
            if final_pages_to_scrape < pages_to_scrape:
                print(f"将实际抓取网站上可用的 {final_pages_to_scrape} 页。")
        except (TimeoutException, AttributeError):
            print("无法确定总页数。将只抓取第1页。")
            final_pages_to_scrape = 1

        table_rows_selector = "div.el-table__body-wrapper table > tbody > tr"
        for current_page in range(1, final_pages_to_scrape + 1):
            print(f"--- Scraping Page {current_page} ---")
            if current_page > 1:
                try:
                    # Jump directly via the pagination input, then confirm
                    # both the active page number and the new page's content.
                    page_input = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".el-pagination__jump .el-input__inner")))
                    driver.execute_script(f"arguments[0].value = '{current_page}';", page_input)
                    page_input.send_keys(Keys.ENTER)

                    wait.until(EC.text_to_be_present_in_element(
                        (By.CSS_SELECTOR, ".el-pager li.number.active"), str(current_page)
                    ))
                    print(f"Successfully navigated to page {current_page}.")

                    # Crucially, wait for the *content* of the new page too.
                    wait.until(EC.text_to_be_present_in_element(
                        (By.CSS_SELECTOR, first_row_product_type_selector), "多联式空调（热泵）机组 2021版"
                    ))
                    print("Page content verified after navigation.")
                except TimeoutException:
                    print(f"Failed to navigate to or verify page {current_page}. Ending scrape.")
                    break

            num_rows = len(driver.find_elements(By.CSS_SELECTOR, table_rows_selector))
            print(f"Found {num_rows} rows on page {current_page}.")
            for i in range(num_rows):
                row_data = {}
                # Re-query the rows every iteration: opening/closing the
                # detail dialog can re-render the table and stale-ify
                # previously held row elements.
                current_rows = driver.find_elements(By.CSS_SELECTOR, table_rows_selector)
                if i >= len(current_rows):
                    break
                cells = current_rows[i].find_elements(By.TAG_NAME, 'td')
                if len(cells) > 5:
                    row_data['产品类型'] = cells[1].text
                    row_data['产品型号'] = cells[2].text
                    row_data['备案号'] = cells[3].text
                    row_data['生产者'] = cells[4].text
                else:
                    print(f"Skipping row {i+1} as it does not have enough data cells.")
                    continue
                try:
                    # The "详细" button lives in the fixed-right column overlay.
                    detail_button = driver.find_element(By.CSS_SELECTOR, f".el-table__fixed-right .el-table__row:nth-of-type({i+1}) .el-button")
                    driver.execute_script("arguments[0].click();", detail_button)
                    dialog_selector = "div.el-dialog[aria-label='详细']"
                    dialog = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, dialog_selector)))
                    # Wait until the "暂无" placeholder disappears, i.e. the
                    # dialog's async content has actually loaded.
                    content_locator = (By.CSS_SELECTOR, f"{dialog_selector} .el-form-item__content")
                    wait.until(_TextNotPresentInElement(content_locator, "暂无"))
                    raw_details = dialog.text
                    row_data.update(final_parser(raw_details))
                    dialog.find_element(By.CSS_SELECTOR, "button[aria-label='Close']").click()
                    wait.until(EC.invisibility_of_element_located((By.CSS_SELECTOR, dialog_selector)))
                    all_scraped_data.append(row_data)
                    print(f"Successfully scraped item {i+1} on page {current_page}")
                except Exception as e:
                    print(f"Could not process detail button for item {i+1} on page {current_page}. Error: {e}")
                    continue
    finally:
        print("Scraping finished. Closing the browser.")
        driver.quit()
        if all_scraped_data:
            df = pd.DataFrame(all_scraped_data)
            output_path = 'energylabel/energylabel_data_processed.csv'
            # BUG FIX: to_csv raises OSError if the target directory is
            # missing; create it first so a long scrape is never lost at
            # the final write.
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            df.to_csv(output_path, index=False, encoding='utf-8-sig')
            print(f"Data from all pages successfully saved to {output_path}. Total items: {len(df)}")
        else:
            print("No data was scraped.")

# Entry point: run the interactive scraper only when executed as a script,
# not when this module is imported (e.g. to reuse final_parser).
if __name__ == '__main__':
    scrape_and_process() 