# 校验长度一致性
from modules import train_modules_V2 as train_V2  
import sys
import os
from config import config
import glob
from utility import filter as filter
from dataset import dataset_from_csv as csv_dataset
import time  # ✅ 添加导入
from datetime import timedelta
from datetime import datetime

# Column names for model inputs and targets, taken from the project config.
feature_names_list = config.feature_names
label_names_list = config.label_names

# Directory that holds the training CSV files.
trainset_dir = os.path.join(config.project_dir, config.train_dir_name)

# Discover every CSV in the training directory; abort early if none exist.
csv_files = glob.glob(os.path.join(trainset_dir, "*.csv"))
if not csv_files:
    print(f"警告：在目录 '{trainset_dir}' 中未找到任何 CSV 文件！")
    sys.exit(1)  # non-zero exit code signals abnormal termination
file_path = csv_files[0]  # train on the first CSV found

# Output locations for the trained model, training metadata, and scalers.
model_path = os.path.join(config.project_dir, config.model_dir_name)
info_path = os.path.join(config.project_dir, config.train_info_dir_name)
scaler_path = os.path.join(config.project_dir, config.scaler_dir_name)

# Load the dataset exactly once; it is shared by the whole training run.
start_time = time.time()  # start wall-clock timer for the full run
csvdataset = csv_dataset.LSTMDatasetfromCSV(
    file_path=file_path, ncodecsv=True
).get_data_from_csv()

# Build the trainer over all features/labels and train a single model.
# NOTE(review): previously an unused `model_file` path was computed here for
# a skip-if-exists check that was commented out; both removed as dead code.
trainer = train_V2.LSTMTrainer_V2(
    dataset_original=csvdataset,
    feature_names=feature_names_list,
    label_names=label_names_list,
    model_path=model_path,
    info_path=info_path,
    scaler_path=scaler_path,
    train_size=0.8,  # 80/20 train/validation split
    timestep=50,     # LSTM input sequence length
)

trainer.train_V2(continue_epochs=50)

end_time = time.time()  # stop timer
elapsed = end_time - start_time
elapsed_td = timedelta(seconds=elapsed)  # convert to human-readable H:M:S
print(f"✅ 模型训练完成，用时 {str(elapsed_td)}（即 {elapsed:.2f} 秒）")