import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
from selenium.webdriver.common.keys import Keys
import time
import re
import json
from datetime import datetime
import os

# --- Hierarchical parser for the detail-popup text ---

# 1. Top-level field labels expected in the popup. "备案信息" (filing info)
#    and the indoor-unit table header are handled specially by final_parser;
#    every other label is parsed as a plain key/value field.
TOP_LEVEL_KEYS = [
    "产品类型", "产品型号", "生产者", "备案号", "能效等级", "公告时间", "依据国家标准",
    "备案信息",
    "序号 室内机规格型号 制冷量(W) 匹配数量（台）"
]

def final_parser(text, key_alias_map):
    """
    Parse the raw detail-popup text into a flat dict.

    The popup text is flattened to one line (every newline becomes " | "),
    then cut into segments at each occurrence of a TOP_LEVEL_KEYS label.
    Three segment kinds are handled:
      * "备案信息": alternating key/value cells, keys normalized via
        key_alias_map;
      * the indoor-unit table header: rows parsed into a JSON-encoded list
        stored under "室内机信息";
      * everything else: a plain string value.

    Args:
        text (str): The raw text from the popup; non-str input yields {}.
        key_alias_map (dict): A dictionary mapping alias keys to canonical keys.

    Returns:
        dict: A dictionary of the parsed data.
    """
    if not isinstance(text, str):
        return {}

    # Start from an empty dict and add every field that is found.
    data = {}

    # Normalize every newline flavor (\r, \n, \r\n) to a " | " separator,
    # then drop the popup's button captions ("详细"/"确认").
    text = re.sub(r'[\r\n]+', ' | ', text)
    text = text.replace('详细', '').replace('确认', '').strip()

    # Locate each top-level label once (bugfix: the original called
    # text.find() twice per key) and process them in document order.
    key_positions = {}
    for key in TOP_LEVEL_KEYS:
        pos = text.find(key)
        if pos != -1:
            key_positions[key] = pos
    sorted_keys = sorted(key_positions, key=key_positions.get)

    for i, key in enumerate(sorted_keys):
        start_pos = key_positions[key] + len(key)
        end_pos = key_positions[sorted_keys[i + 1]] if i + 1 < len(sorted_keys) else len(text)
        # Bugfix: strip only the leading label/value separator colons (ASCII
        # and full-width). The original deleted every ':' inside the value,
        # corrupting timestamps and any model number containing a colon.
        value_block = text[start_pos:end_pos].lstrip(':：').strip()

        if key == "备案信息":
            # --- Dynamic "备案信息" (filing info) parsing ---
            # The block is assumed to contain alternating key/value cells.
            parts = [p.strip() for p in value_block.split('|') if p.strip()]
            for j in range(0, len(parts) - 1, 2):
                sub_key_raw = parts[j]
                sub_value = parts[j + 1]

                # Normalize the key through the alias map.
                normalized_key = key_alias_map.get(sub_key_raw, sub_key_raw)

                # Sanity check: skip pairs whose "value" is itself a known
                # top-level label (indicates a mis-aligned split).
                if sub_value not in TOP_LEVEL_KEYS:
                    data[normalized_key] = sub_value

        elif key == "序号 室内机规格型号 制冷量(W) 匹配数量（台）":
            # --- Indoor-unit sub-table parsing ---
            # Each non-empty cell is one "index model cooling-capacity qty" row.
            table_data = []
            for row in value_block.split('|'):
                row = row.strip()
                if not row:
                    continue
                match = re.match(r'(\d+)\s+([\w\s/-]+?)\s+([\d.]+)\s+(\d+)', row)
                if match:
                    table_data.append({
                        "序号": match.group(1), "室内机规格型号": match.group(2).strip(),
                        "制冷量(W)": match.group(3), "匹配数量（台）": match.group(4)
                    })
            if table_data:
                data['室内机信息'] = json.dumps(table_data, ensure_ascii=False)
        else:
            # --- Plain top-level field ---
            # Collapse the cell separators into single spaces (bugfix: the
            # original kept the cells' surrounding whitespace, producing runs
            # of spaces inside multi-cell values).
            normalized_key = key_alias_map.get(key, key)
            data[normalized_key] = ' '.join(
                p.strip() for p in value_block.split('|') if p.strip()
            )

    return data

def scrape_by_date(product_category_name, category_config, stop_date_str, output_dir="outputdata", status_callback=print, progress_callback=None):
    """
    Scrape filing records for one product category from energylabel.com.cn
    and write them to a CSV file.

    Pages are processed in order; scraping stops as soon as a row's
    announcement date is earlier than the cut-off date, or when the last
    results page is reached.

    Args:
        product_category_name (str): The name of the product category to scrape.
        category_config (dict): Configuration for the selected category.
            Keys read here: 'column_coalesce_map' (canonical key -> list of
            aliases), 'selector_index' (position in the cascader menu,
            default 5) and 'validation_text' (text expected in the category
            column; defaults to product_category_name).
        stop_date_str (str): The cut-off date in "YYYY-MM-DD" format.
        output_dir (str): The directory to save the output file.
        status_callback (function): Callback function for status updates.
        progress_callback (function): Callback for progress bar updates.
            NOTE(review): accepted but never invoked in this implementation.

    Returns:
        str | None: Path of the saved CSV file, or None when no data was
        scraped or the date string was invalid.
    """
    # --- Date Validation ---
    try:
        stop_date = datetime.strptime(stop_date_str, "%Y-%m-%d")
    except ValueError:
        status_callback("错误：日期格式不正确，应为 YYYY-MM-DD。")
        return
    status_callback(f"开始抓取任务：将抓取公告日期在 {stop_date_str} 及之后的数据。")

    # --- Alias Map Setup ---
    # Invert the canonical->aliases map from the config into the flat
    # alias->canonical lookup that final_parser expects.
    coalesce_map = category_config.get('column_coalesce_map', {})
    key_alias_map = {alias: canonical for canonical, aliases in coalesce_map.items() for alias in aliases}
    status_callback("已根据配置生成别名映射。")

    # --- Directory Setup ---
    os.makedirs(output_dir, exist_ok=True)
    status_callback(f"输出目录: {output_dir}")

    # --- Browser Setup ---
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")  # Re-enable headless mode for packaging
    options.add_argument("--disable-gpu") # Recommended for headless mode
    options.add_argument("--window-size=1920,1080") # Specify window size

    # NOTE(review): Windows-specific relative path; assumes the working
    # directory contains assets/chromedriver.exe — confirm for other setups.
    chrome_driver_path = "assets/chromedriver.exe"
    service = ChromeService(executable_path=chrome_driver_path)
    driver = webdriver.Chrome(service=service, options=options)
    wait = WebDriverWait(driver, 20)
    driver.get("https://www.energylabel.com.cn/productFiling")
    all_scraped_data = []
    scraping_counter = 1 # Initialize a counter for ordering

    # Pull the dynamic values from the category configuration.
    selector_index = category_config.get("selector_index", 5)
    validation_text = category_config.get("validation_text", product_category_name)

    try:
        # --- Category selection via the cascading dropdown ---
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'input[placeholder="请选择"]'))).click()

        # Click the first-level menu entry (usually "home appliances").
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.el-cascader-panel ul.el-cascader-menu__list > li:nth-child(1)"))).click()

        # Click the main product category in the second-level menu.
        category_selector = f"div.el-cascader-panel > div:nth-child(2) .el-cascader-node:nth-child({selector_index})"
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, category_selector))).click()

        # More robust sequencing:
        # 1. Explicitly wait until the whole cascader panel is gone from the
        #    DOM, so it can no longer intercept the next click.
        wait.until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "div.el-cascader-panel")))

        # 2. Now the "search" button can be clicked safely — nothing overlaps it.
        wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button.sousuo"))).click()

        # Smart wait for the initial search results to load correctly. THIS is the crucial wait.
        try:
            # Validate the first result row using the configured text.
            wait.until(EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, "div.el-table__body-wrapper table > tbody > tr:nth-child(1) > td:nth-child(2)"), 
                validation_text
            ))
            status_callback("初始搜索结果已加载并验证。")
        except TimeoutException:
            status_callback("错误：初始搜索结果未能正确加载。程序中止。")
            return

        page_number = 1
        while True: # Loop for pagination
            status_callback(f"--- 正在扫描第 {page_number} 页 ---")

            # --- Final Corrected Logic with Filtering ---
            # 1. Wait for the page to be stable and get all rows and buttons once.
            try:
                wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div.el-table__body-wrapper table > tbody > tr")))
                all_rows = driver.find_elements(By.CSS_SELECTOR, "div.el-table__body-wrapper table > tbody > tr")
                all_buttons = driver.find_elements(By.CSS_SELECTOR, "div.el-table__fixed-right .el-button")
            except TimeoutException:
                status_callback("警告: 在本页未找到任何数据行。")
                break # Break to pagination logic which will likely terminate

            # 2. Filter for valid data rows that we intend to process.
            valid_data_rows = []
            for row in all_rows:
                try:
                    cells = row.find_elements(By.TAG_NAME, 'td')
                    # Keep only rows whose category cell contains the expected text.
                    if len(cells) >= 8 and validation_text in cells[1].text:
                        valid_data_rows.append(row)
                except StaleElementReferenceException:
                    continue # Ignore rows that become stale during inspection

            # 3. Ensure the number of valid rows and buttons match.
            if len(valid_data_rows) != len(all_buttons):
                status_callback(f"错误: 有效数据行 ({len(valid_data_rows)}) 和按钮 ({len(all_buttons)}) 的数量不匹配。")
                break

            for i in range(len(valid_data_rows)):
                try:
                    # Use the pre-filtered valid rows and corresponding buttons.
                    cells = valid_data_rows[i].find_elements(By.TAG_NAME, 'td')

                    # Column 8 holds the announcement date; reaching one older
                    # than the cut-off ends the whole scrape.
                    announcement_date_str = cells[7].text.strip()
                    try:
                        announcement_date = datetime.strptime(announcement_date_str, "%Y-%m-%d")
                        if announcement_date < stop_date:
                            status_callback(f"公告日期 {announcement_date_str} 早于截止日期 {stop_date_str}。抓取结束。")
                            return # User's original stop logic
                    except (ValueError, IndexError):
                        continue # Skip if date is malformed

                    row_data = {
                        '抓取序号': scraping_counter,
                        '产品类型': cells[1].text.strip(),
                        '产品型号': cells[2].text,
                        '备案号': cells[3].text,
                        '生产者': cells[4].text,
                    }

                    # Use JavaScript click, which is often more reliable.
                    driver.execute_script("arguments[0].click();", all_buttons[i])

                    dialog_selector = "div.el-dialog[aria-label='详细']"
                    dialog = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, dialog_selector)))
                    time.sleep(1)  # brief pause so the dialog content finishes rendering

                    # Parse the popup text and merge the fields into the row.
                    raw_details = dialog.text
                    parsed_details = final_parser(raw_details, key_alias_map)
                    row_data.update(parsed_details)

                    dialog.find_element(By.CSS_SELECTOR, "button[aria-label='Close']").click()
                    wait.until(EC.invisibility_of_element_located((By.CSS_SELECTOR, dialog_selector)))

                    all_scraped_data.append(row_data)
                    status_callback(f"成功抓取项目 #{scraping_counter} (位于第 {page_number} 页, 第 {i+1} 行)")
                    scraping_counter += 1

                except Exception as e:
                    status_callback(f"处理第 {page_number} 页，第 {i+1} 行时发生意外错误: {e}")
                    continue

            # --- Pagination Logic ---
            try:
                next_button_disabled = driver.find_element(By.CSS_SELECTOR, "button.btn-next").get_attribute("disabled")
                if next_button_disabled:
                    status_callback("已到达最后一页，抓取结束。")
                    return # Normal termination

                # Jump to the next page by typing the page number into the
                # pagination input and pressing ENTER.
                page_input = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".el-pagination__jump .el-input__inner")))
                page_number += 1
                driver.execute_script(f"arguments[0].value = '{page_number}';", page_input)
                page_input.send_keys(Keys.ENTER)

                # Reverting to the user-preferred wait logic
                wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, ".el-pager li.number.active"), str(page_number)))
                time.sleep(2)

                status_callback(f"成功导航到第 {page_number} 页。")

            except Exception as e:
                status_callback(f"无法导航到下一页，抓取结束。错误: {e}")
                return # Terminate on any pagination error

    finally:
        # NOTE(review): this `finally` block ends in `return`, which also
        # swallows any exception raised in the `try` body — confirm that is
        # intended before relying on errors propagating to the caller.
        status_callback("抓取流程结束，正在关闭浏览器。")
        driver.quit()
        if all_scraped_data:
            df = pd.DataFrame(all_scraped_data)

            # Replace all NaN values with 'N/A' before saving so the CSV stays clean.
            df.fillna('N/A', inplace=True)

            # --- Dynamic Filename ---
            safe_category_name = product_category_name.replace("/", "-").replace(" ", "")
            filename = f"{safe_category_name}_{stop_date_str}.csv"
            output_path = os.path.join(output_dir, filename)

            # utf-8-sig keeps the Chinese headers readable when opened in Excel.
            df.to_csv(output_path, index=False, encoding='utf-8-sig')
            status_callback(f"数据已成功保存至 {output_path}。总计: {len(df)} 条。")
            return output_path # Return the path on success
        else:
            status_callback("没有抓取到任何数据。")
            return None # Return None if no file was created

if __name__ == '__main__':
    # Direct execution is for smoke-testing only: scrape_by_date needs a real
    # category_config dict, so run the scraper through the main application
    # (run_app.py) instead.
    # Example: scrape_by_date("多联式空调（热泵）机组 2021版", "2024-05-01")
    print("This script is intended to be run from the main application.") 