import customtkinter as ctk
import tkinter as tk
from tkinter import filedialog
import pandas as pd
from threading import Thread
from datetime import datetime
import os
import sys
from tkcalendar import DateEntry
import re

from date_scraper import scrape_by_date

# --- Global configuration ---
# Defines the configuration for every product category; acts as the program's
# "single source of truth", which makes adding a new product category easy.
#
# Per-category keys:
#   selector_index               - index of the category in the site's dropdown
#   validation_text              - expected dropdown text (sanity check)
#   column_coalesce_map          - target column -> ordered list of alias
#                                  source columns (highest priority first)
#   final_columns                - column selection/order of the normalized CSV
#   classify_col / classify_bins / classify_labels
#                                - binning rules used by the "数据分级" step
#   filter_complete_params_config- optional rules for the "完备参数" subset file
CONFIG = {
    "多联式空调（热泵）机组 2021版": {
        "selector_index": 5,
        "validation_text": "多联式空调（热泵）机组 2021版",
        "column_coalesce_map": {
            '能效等级': ['能效等级'],
            '型号': ['产品型号', '型号'],
            '公告时间': ['公告时间'],
            '生产者厂家': ['生产者厂家', '生产者'],
            '国家标准': ['国家标准', '依据国家标准'],
            '制冷量(CC) (W)': ['制冷量(CC) (W)', '额定制冷量(W)', '制冷量(W)'],
            '制热量(HC) (W)': ['制热量(HC) (W)', '额定制热量(W)', '制热量(W)'],
            '全年耗电量(APC)(kW•h)': ['全年耗电量(APC)(kW•h)', '全年耗电量(kW·h)'],
            '全年性能系数(APF) [ (W·h)/(W·h)]': ['全年性能系数(APF) [ (W·h)/(W·h)]', '全年性能系数(APF)', '全年性能系数(APF)[(W·h/W·h)]', '全年性能系数（APF）[(W·h)/(W·h)]'],
            '制冷消耗功率(W)': ['制冷消耗功率(W)'],
            '制热消耗功率(W)': ['制热消耗功率(W)'],
            '最小制冷能效比(EERmin)': ['最小制冷能效比(EERmin)']
        },
        "final_columns": [
            '能效等级', '型号', '公告时间', '生产者厂家', '国家标准',
            '制冷量(CC) (W)', '制热量(HC) (W)', '全年耗电量(APC)(kW•h)',
            '全年性能系数(APF) [ (W·h)/(W·h)]', '制冷消耗功率(W)', '制热消耗功率(W)'
        ],
        "classify_col": '制冷量(CC) (W)',
        "classify_bins": [0, 8000, 10000, 12000, 14000, 16000, 18000, 20000, 22400, 25200, 28000,
                          31000, 33500, 40000, 45000, 50400, 56000, 61500, 68000, 73500, 78500,
                          85000, 90000, 101000, 106000, 112000, 117000, 123500, 130000, 136000, float('inf')],
        "classify_labels": ["80以下外机", "80外机", "100外机", "120外机", "140外机", "160外机", "180外机",
                            "200外机", "224外机", "252外机", "280外机", "310外机", "335外机", "400外机",
                            "450外机", "504外机", "560外机", "615外机", "680外机", "735外机", "785外机",
                            "850外机", "900外机", "1010外机", "1060外机", "1120外机", "1170外机",
                            "1235外机", "1300外机", "1360外机"],
        "filter_complete_params_config": {
            "filter_column": '全年性能系数(APF) [ (W·h)/(W·h)]',
            "columns_to_drop": [
                '最小制冷能效比(EERmin)'
            ]
        }
    },
    "房间空气调节器 2019版": {
        "selector_index": 2,
        "validation_text": "房间空气调节器 2019版",
        "column_coalesce_map": {
            '能效等级': ['能效等级'],
            '型号': ['型号', '产品型号'],
            '公告时间': ['公告时间'],
            '生产者厂家': ['生产者厂家', '生产者', '生产者名称'],
            '国家标准': ['国家标准', '依据国家标准'],
            '额定制冷量(W)': ['额定制冷量(W)', '制冷量(W)', '制冷量(CC) (W)'],
            '制冷季节耗电量(kW·h)': ['制冷季节耗电量(kW·h)', '制冷季节耗电量(CSTE)(kW·h)'],
            '额定制热量(W)': ['额定制热量(W)', '名义制热量(W)', '制热量(W)', '制热量(HC) (W)'],
            '制热季节耗电量(kW·h)': ['制热季节耗电量(kW·h)', '制热季节耗电量(HSTE)(kW·h)'],
            '全年能源消耗效率[(W·h)/(W·h)]': ['全年能源消耗效率[(W·h)/(W·h)]', '全年性能系数(APF)', '全年性能系数(APF)[(W·h/W·h)]', '全年性能系数（APF）[(W·h)/(W·h)]'],
            '制热季节能源消耗效率[(W·h)/(W·h)]': ['制热季节能源消耗效率[(W·h)/(W·h)]'],
            '制冷季节能源消耗效率[(W·h)/(W·h)]': ['制冷季节能源消耗效率[(W·h)/(W·h)]', '制冷季节能效比(SEER)[(W·h)/(W·h)]'],
        },
        "final_columns": [
            '能效等级', '型号', '公告时间', '生产者厂家', '国家标准', '额定制冷量(W)',
            '制冷季节耗电量(kW·h)', '额定制热量(W)', '制热季节耗电量(kW·h)', '全年能源消耗效率[(W·h)/(W·h)]',
            '制热季节能源消耗效率[(W·h)/(W·h)]', '制冷季节能源消耗效率[(W·h)/(W·h)]'
        ],
        "classify_col": '额定制冷量(W)',
        # NOTE: the original rules overlapped, so they were merged here. For
        # example 2600-3200 is labelled "26"; the 5000-7200 range is labelled
        # "50GW或50LW" because the two cannot be told apart from capacity alone.
        "classify_bins": [0, 2600, 3200, 3500, 5000, 7200, 7600, 12000, 16000, 25000, float('inf')],
        "classify_labels": [
            "2600W以下", "26", "32或35", "3500-5000W", "50GW或50LW", "72GW或72LW",
            "76LW", "120LW", "160LW", "10P柜机或更高"
        ],
        "filter_complete_params_config": {
            "filter_column": '全年能源消耗效率[(W·h)/(W·h)]',
            "columns_to_drop": [
                '制热季节能源消耗效率[(W·h)/(W·h)]',
                '制冷季节能源消耗效率[(W·h)/(W·h)]'
            ]
        }
    },
    "单元式空气调节机 2019版": {
        "selector_index": 3,
        "validation_text": "单元式空气调节机 2019版",
        "column_coalesce_map": {
            '能效等级': ['能效等级'],
            '型号': ['型号', '产品型号'],
            '公告时间': ['公告时间'],
            '生产者厂家': ['生产者厂家', '生产者'],
            '国家标准': ['国家标准', '依据国家标准'],
            '额定制冷量(W)': ['额定制冷量(W)', '制冷量(W)', '制冷量(CC) (W)'],
            '额定制热量(W)': ['额定制热量(W)', '制热量(W)', '制热量(HC) (W)'],
            '制冷季节耗电量(kW·h)': ['制冷季节耗电量(CSTE)(kW·h)', '制冷季节耗电量(kW·h)', '制冷季节耗电量(CSTE)(kW•h)'],
            '制热季节耗电量(kW·h)': ['制热季节耗电量(HSTE)(kW·h)', '制热季节耗电量(kW·h)', '制热季节耗电量(HSTE)(kW•h)'],
            '制冷消耗功率(W)': ['制冷消耗功率(W)'],
            # The three efficiency metrics are deliberately kept as separate columns.
            '全年性能系数(APF)[(W·h)/(W·h)]': ['全年性能系数(APF)[(W·h)/(W·h)]', '全年性能系数(APF)[(W•h)/(W•h)]', '全年性能系数(APF)'],
            '全年能效比(AEER)(W/W)': ['全年能效比(AEER)(W/W)'],
            '制冷综合部分负荷性能系数(IPLV(C))(W/W)': ['制冷综合部分负荷性能系数(IPLV(C))(W/W)']
        },
        "final_columns": [
            '能效等级', '型号', '公告时间', '生产者厂家', '国家标准', '额定制冷量(W)',
            '额定制热量(W)', '制冷季节耗电量(kW·h)', '制热季节耗电量(kW·h)', '制冷消耗功率(W)',
            '全年性能系数(APF)[(W·h)/(W·h)]', '全年能效比(AEER)(W/W)', '制冷综合部分负荷性能系数(IPLV(C))(W/W)'
        ],
        "classify_col": '额定制冷量(W)',
        "classify_bins": [0, 5000, 7200, 12000, 14000, float('inf')],
        "classify_labels": ["5000W以下", "50QW", "72QW", "120QW", "140QW"],
        "filter_complete_params_config": {
            "filter_column": '全年性能系数(APF)[(W·h)/(W·h)]',
            "columns_to_drop": [
                '全年能效比(AEER)(W/W)',
                '制冷综合部分负荷性能系数(IPLV(C))(W/W)'
            ]
        }
    },
    "风管送风式空调机组 2019版": {
        "selector_index": 11,
        "validation_text": "风管送风式空调机组 2019版",
        "column_coalesce_map": {
            # target column: [source column 1 (highest priority), source column 2 (lower priority), ...]
            '能效等级': ['能效等级'],
            '型号': ['型号', '产品型号'],
            '公告时间': ['公告时间'],
            '生产者厂家': ['生产者厂家', '生产者', '生产者名称'],
            '国家标准': ['国家标准', '依据国家标准'],
            '制冷量(W)': ['制冷量(W)', '额定制冷量(W)'],
            '制热量(W)': ['制热量(W)', '额定制热量(W)'],
            '制冷季节耗电量(CSTE)(kW·h)': ['制冷季节耗电量(CSTE)(kW·h)', '制冷季节耗电量(CSTE)(kW•h)'],
            '制热季节耗电量(HSTE)(kW·h)': ['制热季节耗电量(HSTE)(kW·h)', '制热季节耗电量(HSTE)(kW•h)'],
            '制冷消耗功率(W)': ['制冷消耗功率(W)'],
            '全年性能系数(APF)[(W·h)/(W·h)]': ['全年性能系数(APF)[(W·h)/(W·h)]', '全年性能系数(APF)[(W•h)/(W•h)]', '全年性能系数（APF）[(W•h)/(W•h)]', '全年性能系数(APF)[(W·h/W·h)]', '全年性能系数(APF)'],
            '制冷季节能效比(SEER)[(W·h)/(W·h)]': ['制冷季节能效比(SEER)[(W·h)/(W·h)]', '制冷季节能效比(SEER)[(W•h)/(W•h)]', '制冷季节能效比(SEER) [(W•h)/ (W•h)]'],
            '能效比(W/W)': ['能效比(W/W)']
        },
        "final_columns": [
            '能效等级', '型号', '公告时间', '生产者厂家', '国家标准', '制冷量(W)', '制热量(W)',
            '制冷季节耗电量(CSTE)(kW·h)', '制热季节耗电量(HSTE)(kW·h)', '制冷消耗功率(W)',
            '全年性能系数(APF)[(W·h)/(W·h)]', '制冷季节能效比(SEER)[(W·h)/(W·h)]', '能效比(W/W)'
        ],
        "classify_col": '制冷量(W)',
        "classify_bins": [0, 2600, 3500, 5000, 6500, 7500, 10000, 12000, 14000, 25000, 30000, float('inf')],
        "classify_labels": ["2600W以下", "26风管机", "35风管机", "50风管机", "65风管机", "75风管机",
                            "100风管机", "120风管机", "140风管机", "250风管机", "30KW以上风管机"],
        "filter_complete_params_config": {
            "filter_column": '全年性能系数(APF)[(W·h)/(W·h)]',
            # Whitelist mode: when this key is present it takes precedence and
            # is used to select and order the final columns exactly.
            "final_columns_whitelist": [
                '能效等级', '型号', '公告时间', '生产者厂家', '国家标准', '制冷量(W)', 
                '制冷季节耗电量(CSTE)(kW·h)', '制热量(W)', '制热季节耗电量(HSTE)(kW·h)', '全年性能系数(APF)[(W·h)/(W·h)]'
            ]
        }
    }
}


class App(ctk.CTk):
    """Main window of the China Energy Label scrape/process tool.

    The pipeline has three stages, each enabling the next stage's button:

    1. scrape    - fetch raw records for the selected category up to a cutoff
                   date (runs in a worker thread; see ``run_scrape``).
    2. normalize - coalesce alias columns into canonical ones and, when
                   configured, emit a "完备参数" (complete-parameters) subset.
    3. classify  - bin rows by cooling capacity and write one CSV per bin.

    All per-category behaviour is driven by the module-level ``CONFIG`` dict.
    """

    def __init__(self):
        super().__init__()

        # ---- App Setup ----
        if getattr(sys, 'frozen', False):
            # Running as a frozen bundle (e.g. PyInstaller): the base path is
            # the executable's directory.
            base_path = os.path.dirname(sys.executable)
        else:
            # Running as a plain script: the base path is the script's directory.
            base_path = os.path.dirname(os.path.abspath(__file__))

        self.output_dir = os.path.join(base_path, "outputdata")
        os.makedirs(self.output_dir, exist_ok=True)

        # Paths produced by the pipeline stages; None until the stage has run.
        self.last_scraped_file = None          # raw CSV written by the scraper
        self.last_normalized_file = None       # "*_normalized" CSV
        self.last_complete_params_file = None  # "完备参数" subset CSV

        # ---- Window Setup ----
        self.title("中国能效标识备案信息抓取与处理工具")
        self.geometry("800x600")
        # BUGFIX: use the public module-level API. The previous call to the
        # internal per-widget callback self._set_appearance_mode("system")
        # does not change the global appearance mode.
        ctk.set_appearance_mode("system")

        # ---- Main Layout ----
        self.grid_columnconfigure(0, weight=1)
        self.grid_rowconfigure(1, weight=1)

        # ---- Top Frame for Controls ----
        self.controls_frame = ctk.CTkFrame(self)
        self.controls_frame.grid(row=0, column=0, padx=10, pady=10, sticky="nsew")
        self.controls_frame.grid_columnconfigure(0, weight=1)

        # -- Row 1: category + cutoff-date selections --
        self.selection_frame = ctk.CTkFrame(self.controls_frame)
        self.selection_frame.grid(row=0, column=0, padx=5, pady=(5, 2), sticky="ew")

        self.category_label = ctk.CTkLabel(self.selection_frame, text="产品类别:", font=ctk.CTkFont(size=14))
        self.category_label.pack(side="left", padx=(10, 5), pady=5)

        self.category_combobox = ctk.CTkComboBox(self.selection_frame, values=list(CONFIG.keys()), width=250)
        self.category_combobox.pack(side="left", padx=5, pady=5)
        self.category_combobox.set(list(CONFIG.keys())[0])  # default to the first category

        self.label = ctk.CTkLabel(self.selection_frame, text="截止公告日期:", font=ctk.CTkFont(size=14))
        self.label.pack(side="left", padx=(10, 5), pady=5)

        self.date_entry = DateEntry(
            self.selection_frame,
            date_pattern='y-mm-dd',
            width=12,
            background='darkblue',
            foreground='white',
            borderwidth=2
        )
        self.date_entry.pack(side="left", padx=5, pady=5)

        # -- Row 2: action buttons --
        self.button_frame = ctk.CTkFrame(self.controls_frame)
        self.button_frame.grid(row=1, column=0, padx=5, pady=(2, 5), sticky="ew")

        # To center the buttons, let the three columns in button_frame expand.
        self.button_frame.grid_columnconfigure(0, weight=1)
        self.button_frame.grid_columnconfigure(1, weight=1)
        self.button_frame.grid_columnconfigure(2, weight=1)

        self.scrape_button = ctk.CTkButton(self.button_frame, text="开始抓取", command=self.start_scraping_thread)
        self.scrape_button.grid(row=0, column=0, padx=5, pady=10)

        self.normalize_button = ctk.CTkButton(self.button_frame, text="数据规范化", command=self.run_normalization)
        self.normalize_button.grid(row=0, column=1, padx=5, pady=10)
        self.normalize_button.configure(state="disabled")  # enabled after a successful scrape

        self.classify_button = ctk.CTkButton(self.button_frame, text="数据分级", command=self.run_classification)
        self.classify_button.grid(row=0, column=2, padx=5, pady=10)
        self.classify_button.configure(state="disabled")  # enabled after normalization

        # ---- Bottom Frame for Log/Status ----
        self.log_textbox = ctk.CTkTextbox(self, state="disabled", wrap="word")
        self.log_textbox.grid(row=1, column=0, padx=10, pady=(0, 10), sticky="nsew")

    def log(self, message):
        """Append ``message`` to the log textbox and auto-scroll.

        Must run on the Tk main thread; worker threads use ``log_safe``.
        """
        self.log_textbox.configure(state="normal")
        # BUGFIX: was f"{message}\\n" (a literal backslash + 'n'), so log
        # entries never started on a new line. Use a real newline.
        self.log_textbox.insert("end", f"{message}\n")
        self.log_textbox.configure(state="disabled")
        self.log_textbox.see("end")  # auto-scroll to the latest entry

    def start_scraping_thread(self):
        """Validate UI inputs and launch the scrape in a worker thread so the
        GUI does not freeze."""
        stop_date_str = self.date_entry.get()
        if not stop_date_str:
            self.log("错误: 无效的日期。")
            return

        selected_category_name = self.category_combobox.get()
        if not selected_category_name or selected_category_name not in CONFIG:
            self.log("错误: 无效的产品类别。")
            return

        category_config = CONFIG[selected_category_name]

        self.scrape_button.configure(state="disabled", text="正在抓取...")
        self.normalize_button.configure(state="disabled")  # disable while scraping
        self.classify_button.configure(state="disabled")
        # Reset pipeline state so stale files cannot be processed after a new run.
        self.last_scraped_file = None
        self.last_normalized_file = None
        self.last_complete_params_file = None

        # daemon=True so a hung scrape cannot keep the process alive after
        # the window is closed.
        thread = Thread(
            target=self.run_scrape,
            args=(selected_category_name, category_config, stop_date_str),
            daemon=True,
        )
        thread.start()

    def run_scrape(self, category_name, category_config, stop_date_str):
        """Worker-thread body: run the scraper and record the output file path.

        Always re-enables the UI via ``scrape_finished`` on the main thread.
        """
        try:
            # The scraper returns the path of the CSV it created.
            file_path = scrape_by_date(
                product_category_name=category_name,
                category_config=category_config,
                stop_date_str=stop_date_str,
                output_dir=self.output_dir,
                status_callback=self.log_safe
            )
            self.last_scraped_file = file_path  # hand off to the normalizer
        except Exception as e:
            self.log_safe(f"发生未预料的错误: {e}")
            self.last_scraped_file = None
        finally:
            # Marshal the UI update back onto the main thread.
            self.after(0, self.scrape_finished)

    def scrape_finished(self):
        """Main-thread callback run after scraping completes (success or not)."""
        self.scrape_button.configure(state="normal", text="开始抓取")
        if self.last_scraped_file:
            self.log(f"抓取完成。文件 '{os.path.basename(self.last_scraped_file)}' 已准备好被规范化。")
            self.normalize_button.configure(state="normal")  # next stage is now possible
        else:
            self.log("抓取线程已结束，但未生成任何文件。")

    def run_normalization(self):
        """Normalize the last scraped CSV according to the category config.

        Steps: treat placeholder strings as missing, coalesce alias columns
        into canonical target columns, drop redundant source columns, reorder
        to ``final_columns`` and save a ``*_normalized`` file. If the category
        defines ``filter_complete_params_config``, additionally save a
        "完备参数" subset containing only rows where the configured filter
        column has a value.
        """
        if not self.last_scraped_file:
            self.log("错误: 没有可供规范化的文件。请先成功抓取一次数据。")
            return

        selected_category_name = self.category_combobox.get()
        if not selected_category_name or selected_category_name not in CONFIG:
            self.log("错误: 无效的产品类别，无法进行规范化。")
            return
        category_config = CONFIG[selected_category_name]

        self.log(f"自动规范化文件: {os.path.basename(self.last_scraped_file)}")
        self.normalize_button.configure(state="disabled")  # disable during processing

        try:
            input_path = self.last_scraped_file
            df = pd.read_csv(input_path)
            self.log("文件读取成功，开始规范化...")

            # Convert placeholder strings ("--", "N/A") to real missing values
            # so the coalescing below can fill them from lower-priority columns.
            df = df.replace(r'^\s*--\s*$', pd.NA, regex=True).replace(r'^\s*N/A\s*$', pd.NA, regex=True)

            column_coalesce_map = category_config.get("column_coalesce_map", {})

            # --- Step 1: coalesce alias columns ---
            # combine_first preserves row alignment, avoiding the duplicated or
            # shuffled rows a merge-based approach can produce.
            self.log("开始按配置合并别名列...")
            for target_col, source_aliases in column_coalesce_map.items():
                # Only aliases actually present in this DataFrame matter.
                existing_aliases = [col for col in source_aliases if col in df.columns]

                if not existing_aliases:
                    # No alias exists at all: create an empty target column.
                    df[target_col] = pd.NA
                    continue

                # Start from the highest-priority alias (first in the list);
                # .copy() so we operate on a copy, not a view of the original.
                combined_col = df[existing_aliases[0]].copy()

                # Fill remaining gaps from each lower-priority alias in turn.
                for alias in existing_aliases[1:]:
                    combined_col = combined_col.combine_first(df[alias])

                df[target_col] = combined_col
            self.log("别名列合并完成。")

            # --- Step 2: drop source alias columns that are not themselves targets ---
            all_source_aliases = set()
            for alias_list in column_coalesce_map.values():
                all_source_aliases.update(alias_list)

            target_cols = set(column_coalesce_map.keys())

            # Drop columns that exist only as sources and are present in df.
            aliases_to_drop = [
                col for col in all_source_aliases
                if col not in target_cols and col in df.columns
            ]

            if aliases_to_drop:
                df.drop(columns=aliases_to_drop, inplace=True)
                self.log(f"已清理不再需要的原始别名列: {', '.join(aliases_to_drop)}")
            else:
                self.log("没有需要清理的原始别名列。")

            # --- Step 3: guarantee every configured target column exists ---
            for target_col in target_cols:
                if target_col not in df.columns:
                    df[target_col] = pd.NA

            # Select and order the output columns as configured.
            final_columns = category_config.get("final_columns", [])
            if final_columns:
                for col in final_columns:
                    if col not in df.columns:
                        df[col] = pd.NA
                df = df[final_columns]

            base, ext = os.path.splitext(os.path.basename(input_path))
            new_filename = f"{base}_normalized{ext}"
            output_path = os.path.join(self.output_dir, new_filename)

            df.to_csv(output_path, index=False, encoding='utf-8-sig')
            self.log(f"数据规范化成功！文件已保存至: {output_path}")
            self.last_normalized_file = output_path  # input for classification

            # --- Optional per-category "complete parameters" filtering ---
            filter_config = category_config.get("filter_complete_params_config")
            if filter_config:
                self.log("检测到'完备参数'筛选配置，开始执行...")
                try:
                    filter_column = filter_config.get("filter_column")
                    columns_to_drop = filter_config.get("columns_to_drop", [])
                    final_columns_whitelist = filter_config.get("final_columns_whitelist")

                    if not filter_column:
                        self.log("错误：筛选配置中缺少 'filter_column'。")
                        self.classify_button.configure(state="normal")  # restore before returning
                        return

                    if filter_column in df.columns:
                        # BUGFIX: 'N/A' strings were already converted to pd.NA
                        # above, so the old comparison `!= 'N/A'` kept every row
                        # (NaN != 'N/A' is True) and the filter was a no-op.
                        # Keep only rows where the filter column has a value.
                        complete_df = df[df[filter_column].notna()].copy()

                        if not complete_df.empty:
                            # Whitelist mode takes precedence over the blacklist.
                            if final_columns_whitelist:
                                # Whitelist: select and order columns exactly;
                                # create any missing column as empty (pd.NA,
                                # consistent with the rest of the pipeline).
                                for col in final_columns_whitelist:
                                    if col not in complete_df.columns:
                                        complete_df[col] = pd.NA
                                complete_df = complete_df[final_columns_whitelist]
                                self.log(f"已按白名单规则筛选'完备参数'文件的列。")
                            elif columns_to_drop:
                                # Blacklist (legacy): remove the listed columns.
                                existing_cols_to_drop = [col for col in columns_to_drop if col in complete_df.columns]
                                if existing_cols_to_drop:
                                    complete_df.drop(columns=existing_cols_to_drop, inplace=True)
                                    self.log(f"已从'完备参数'文件中移除列: {', '.join(existing_cols_to_drop)}")

                            # Build and save the subset file.
                            complete_filename = f"{base}_normalized_完备参数{ext}"
                            complete_output_path = os.path.join(self.output_dir, complete_filename)
                            complete_df.to_csv(complete_output_path, index=False, encoding='utf-8-sig')
                            self.log(f"成功筛选并保存'完备参数'文件至: {complete_output_path} (共 {len(complete_df)} 条数据)")
                            self.last_complete_params_file = complete_output_path
                        else:
                            self.log(f"未找到'{filter_column}'非空的数据，跳过生成'完备参数'文件。")
                    else:
                        self.log(f"警告: 规范化结果中不包含列 '{filter_column}'，无法进行'完备参数'筛选。")
                except Exception as filter_e:
                    self.log(f"筛选'完备参数'数据时发生错误: {filter_e}")
            else:
                self.log("当前类别未配置'完备参数'筛选，跳过此步骤。")

            self.classify_button.configure(state="normal")  # enable classification

        except Exception as e:
            self.log(f"处理文件时发生错误: {e}")
            self.last_normalized_file = None
            self.last_complete_params_file = None  # reset on failure
            self.classify_button.configure(state="disabled")
        finally:
            self.normalize_button.configure(state="normal")

    def run_classification(self):
        """Split the data into one CSV per capacity bin.

        Prefers the "完备参数" (complete-parameters) file if one was produced;
        otherwise falls back to the normalized file.
        """
        input_path = None
        base_filename_for_date = None

        # --- Pick the input file: prefer the complete-parameters subset ---
        if self.last_complete_params_file and os.path.exists(self.last_complete_params_file):
            input_path = self.last_complete_params_file
            base_filename_for_date = os.path.basename(self.last_complete_params_file)
            self.log(f"检测到'完备参数'文件，将对其进行分级: {base_filename_for_date}")
        # Otherwise fall back to the normalized file.
        elif self.last_normalized_file and os.path.exists(self.last_normalized_file):
            input_path = self.last_normalized_file
            base_filename_for_date = os.path.basename(self.last_normalized_file)
            self.log(f"未找到'完备参数'文件，将对规范化文件进行分级: {base_filename_for_date}")
        else:
            self.log("错误: 没有可供分级的文件。请先成功规范化一次数据。")
            return

        selected_category_name = self.category_combobox.get()
        if not selected_category_name or selected_category_name not in CONFIG:
            self.log("错误: 无效的产品类别，无法进行分级。")
            return
        category_config = CONFIG[selected_category_name]

        self.log(f"启动数据分级流程: {os.path.basename(input_path)}")
        self.classify_button.configure(state="disabled")

        try:
            df = pd.read_csv(input_path)

            # ---- Dynamic binning rules from CONFIG ----
            classify_col = category_config.get("classify_col")
            bins = category_config.get("classify_bins")
            labels = category_config.get("classify_labels")

            if not all([classify_col, bins, labels]):
                self.log("错误: 当前产品类别的分级规则不完整。")
                self.classify_button.configure(state="normal")
                return

            if classify_col not in df.columns:
                self.log(f"错误: 分级所需的列 '{classify_col}' 不在规范化文件中。")
                self.classify_button.configure(state="normal")
                return

            # The capacity column must be numeric for pd.cut; drop rows that
            # fail the conversion.
            df[classify_col] = pd.to_numeric(df[classify_col], errors='coerce')
            df.dropna(subset=[classify_col], inplace=True)

            # right=False: bins are left-closed / right-open, [lo, hi).
            df['分级'] = pd.cut(df[classify_col], bins=bins, labels=labels, right=False)

            # Drop rows that fall outside every bin.
            df.dropna(subset=['分级'], inplace=True)

            # --- Output file name: <category>_<level>_<date>.csv ---
            # 1. Extract the date portion from the input file name, if any.
            date_part = ""
            match = re.search(r'(\d{4}-\d{2}-\d{2})', base_filename_for_date)
            if match:
                date_part = match.group(1)

            # 2. Build a filesystem-safe prefix from the category name.
            category_prefix = selected_category_name.replace(" ", "_").replace("（", "").replace("）", "").replace("/", "-")

            # Only levels actually present in the data produce a file.
            present_categories = df['分级'].unique()

            for level_name in present_categories:
                if pd.isna(level_name):
                    continue

                group = df[df['分级'] == level_name]
                if group.empty:
                    continue

                # Filesystem-safe level name.
                safe_level_name = str(level_name).replace(" ", "_").replace("/", "-")

                # 3. Compose the final, prefixed file name.
                output_filename = f"{category_prefix}_{safe_level_name}_{date_part}.csv" if date_part else f"{category_prefix}_{safe_level_name}.csv"
                output_path = os.path.join(self.output_dir, output_filename)

                # Drop the temporary '分级' helper column before saving.
                group_to_save = group.drop(columns=['分级'])

                group_to_save.to_csv(output_path, index=False, encoding='utf-8-sig')
                self.log(f"成功保存分级文件: {output_path} (包含 {len(group_to_save)} 条数据)")

            self.log("数据分级全部完成。")

        except Exception as e:
            self.log(f"数据分级过程中发生错误: {e}")
        finally:
            # Re-enable the button once done.
            self.classify_button.configure(state="normal")

    def log_safe(self, message):
        """Thread-safe logging: schedule ``log`` on the Tk main thread."""
        self.after(0, self.log, message)


if __name__ == "__main__":
    # Launch the GUI event loop when executed as a script.
    App().mainloop()