# -*- coding: utf-8 -*-
"""
build_dataset_multi_v2.py

阶段一：多设备批量构建训练集（未用气=1）
------------------------------------------------
改进点：
1) 仅使用前向填充 ffill，避免时序泄漏（去掉 bfill）。
2) “最小时长”用累计相邻时间差判定，更稳健（抗不均匀采样）。
3) 可选重采样（例如 1 分钟），让滚动窗口更有真实“时间”意义。
4) 导出列字典 dataset_columns_<device>.csv，便于下游追踪与解释。
5) 补充时间正余弦编码（hour_sin/hour_cos, dow_sin/dow_cos），可选入模。

依赖：
    pip install pandas pyodbc python-dateutil numpy
"""

import os, re, argparse
from typing import Tuple, Optional, List
import numpy as np
import pandas as pd
import pyodbc
from dateutil import parser as dtparser

# ---------------------------
# SQL
# ---------------------------
def get_sql_connection(driver, server, database, uid, pwd):
    """Open a SQL Server connection through pyodbc.

    The connection string is assembled key-by-key; the resulting string is
    identical to the classic "DRIVER={...};SERVER=...;..." form.
    """
    fields = [
        f"DRIVER={{{driver}}}",
        f"SERVER={server}",
        f"DATABASE={database}",
        f"UID={uid}",
        f"PWD={pwd}",
    ]
    return pyodbc.connect(";".join(fields) + ";")

def query_data(device_no: str, start_time: str, end_time: str, conn) -> pd.DataFrame:
    """Fetch one device's rows from sm_deviceHistoryData within a time window.

    Both bounds are inclusive; rows come back ordered by collectTime.
    start_time / end_time may be any string dateutil can parse.
    """
    sql = """
    SELECT collectTime, pressure, temperature, temp2, temp3, temp4
    FROM sm_deviceHistoryData
    WHERE deviceNo = ?
      AND collectTime >= ?
      AND collectTime <= ?
    ORDER BY collectTime
    """
    window = [dtparser.parse(start_time), dtparser.parse(end_time)]
    return pd.read_sql(sql, conn, params=[device_no] + window)

# ---------------------------
# 解析与清洗
# ---------------------------
# Matches the first signed decimal number (optional exponent) inside a string,
# e.g. "12.5MPa" -> "12.5", "-0.07" -> "-0.07"; used by _to_float below.
_num_re = re.compile(r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?")

def _to_float(x) -> Optional[float]:
    """Best-effort float conversion.

    None and strings without any numeric token map to NaN; numeric types pass
    through; other values are stringified and the first number is extracted.
    """
    if x is None:
        return np.nan
    if isinstance(x, (int, float, np.floating)):
        return float(x)
    match = _num_re.search(str(x))
    if match is None:
        return np.nan
    return float(match.group())

def _split_pair(s: str):
    """Split an 'a|b' payload into (float(a), float(b)).

    None / NaN inputs yield (NaN, NaN); a value without '|' yields
    (float(value), NaN). Only the first two '|'-separated pieces are used.
    """
    if s is None or (isinstance(s, float) and np.isnan(s)):
        return (np.nan, np.nan)
    pieces = str(s).split("|")
    left = _to_float(pieces[0])
    right = _to_float(pieces[1]) if len(pieces) > 1 else np.nan
    return (left, right)

def parse_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Parse raw DB fields into standardized numeric columns.

    pressure/temperature hold scalar strings; temp2..temp4 hold 'p|t' pairs
    (pressure|temperature). Output is sorted by collect_time with a fresh
    index. A source column that is absent now yields an all-NaN output
    column instead of crashing.
    """
    out = df.copy()
    out.rename(columns={"collectTime": "collect_time"}, inplace=True)
    out["collect_time"] = pd.to_datetime(out["collect_time"])

    def _src(name: str) -> pd.Series:
        # BUGFIX: DataFrame.get(name, np.nan) returns a bare float when the
        # column is missing, and float has no .map — use an explicit
        # presence check with an all-NaN Series fallback instead.
        if name in out.columns:
            return out[name]
        return pd.Series(np.nan, index=out.index)

    out["pre_pressure_max"] = _src("pressure").map(_to_float)
    out["pre_temp_max"]     = _src("temperature").map(_to_float)

    # Each tempN column packs (pressure, temperature) as "p|t".
    pair_targets = {
        "temp2": ("pre_pressure_min", "pre_temp_min"),
        "temp3": ("post_pressure_max", "post_temp_max"),
        "temp4": ("post_pressure_min", "post_temp_min"),
    }
    for src_name, (p_col, t_col) in pair_targets.items():
        pairs = [_split_pair(v) for v in _src(src_name)]
        out[p_col] = [p for p, _ in pairs]
        out[t_col] = [t for _, t in pairs]

    cols = [
        "collect_time",
        "pre_pressure_max", "pre_pressure_min",
        "post_pressure_max", "post_pressure_min",
        "pre_temp_max", "pre_temp_min",
        "post_temp_max", "post_temp_min",
    ]
    return out[cols].sort_values("collect_time").reset_index(drop=True)

def resample_uniform(df: pd.DataFrame, rule: str = "1T") -> pd.DataFrame:
    """Resample onto a fixed time grid (e.g. '1T' = 1 minute).

    Numeric columns are averaged per bin, then gaps are filled by
    time-weighted interpolation so rolling windows downstream correspond
    to a fixed wall-clock span.
    """
    return (
        df.set_index("collect_time")
        .sort_index()
        .resample(rule)
        .mean()
        .interpolate(method="time")
        .reset_index()
    )

# ---------------------------
# 特征工程（仅 ffill）
# ---------------------------
def build_features(df: pd.DataFrame, roll_windows: List[int]):
    """Build derived, differenced, rolling and time features.

    Only forward-fill is used for gaps (no bfill) so no future information
    leaks into earlier rows; leading NaNs that ffill cannot reach become 0.0
    (acceptable for tree models).

    Parameters
    ----------
    df : frame containing collect_time plus the eight parsed pressure/temp columns.
    roll_windows : rolling window lengths, in number of points
                   (≈ minutes when resampled to '1T').

    Returns
    -------
    (frame with feature columns appended, ordered list of feature column names)
    """
    d = df.copy().sort_values("collect_time").reset_index(drop=True)

    # Basic spreads / pressure drop / temperature swings
    d["pre_diff"] = d["pre_pressure_max"] - d["pre_pressure_min"]
    d["post_diff"] = d["post_pressure_max"] - d["post_pressure_min"]
    d["pressure_drop"] = d["pre_pressure_max"] - d["post_pressure_max"]
    d["temp_change_pre"]  = d["pre_temp_max"]  - d["pre_temp_min"]
    d["temp_change_post"] = d["post_temp_max"] - d["post_temp_min"]

    # First differences (current - previous sample); names are reused below
    # so the feature list cannot drift out of sync with the columns built.
    diff_cols = ["pre_pressure_max", "post_pressure_max", "pre_diff", "post_diff", "pressure_drop"]
    for col in diff_cols:
        d[f"delta_{col}"] = d[col].diff()

    # Rolling statistics (mean/std/range inside the window; min_periods=1)
    roll_cols = ["pre_diff", "post_diff", "pressure_drop"]
    roll_names: List[str] = []
    for W in roll_windows:
        for col in roll_cols:
            roll = d[col].rolling(W, min_periods=1)
            names = [f"roll_mean_{col}_w{W}", f"roll_std_{col}_w{W}", f"roll_rng_{col}_w{W}"]
            d[names[0]] = roll.mean()
            d[names[1]] = roll.std()
            d[names[2]] = roll.max() - roll.min()
            roll_names += names

    # Time features + cyclic (sin/cos) encodings
    d["hour"] = d["collect_time"].dt.hour
    d["minute"] = d["collect_time"].dt.minute
    d["dayofweek"] = d["collect_time"].dt.weekday
    d["hour_sin"] = np.sin(2 * np.pi * d["hour"] / 24)
    d["hour_cos"] = np.cos(2 * np.pi * d["hour"] / 24)
    d["dow_sin"]  = np.sin(2 * np.pi * d["dayofweek"] / 7)
    d["dow_cos"]  = np.cos(2 * np.pi * d["dayofweek"] / 7)

    # Forward-fill only, then zero-fill the remaining leading NaNs.
    # (fillna(method="ffill") is deprecated since pandas 2.1 — use .ffill().)
    d = d.ffill().fillna(0.0)

    feature_cols = [
        "pre_pressure_max", "pre_pressure_min",
        "post_pressure_max", "post_pressure_min",
        "pre_temp_max", "pre_temp_min",
        "post_temp_max", "post_temp_min",
        "pre_diff", "post_diff", "pressure_drop",
        "temp_change_pre", "temp_change_post",
    ]
    feature_cols += [f"delta_{c}" for c in diff_cols]
    feature_cols += roll_names
    feature_cols += ["hour", "minute", "dayofweek", "hour_sin", "hour_cos", "dow_sin", "dow_cos"]

    return d, feature_cols

# ---------------------------
# 标签规则
# ---------------------------
def label_using_rule(df: pd.DataFrame, threshold: float = 0.05, window: int = 3) -> pd.Series:
    """Rule-based usage flag (True = gas is being used).

    A row counts as "using" when either the pre- or post-meter pressure
    spread reaches *threshold*; the raw flag is then smoothed with a rolling
    max over *window* rows, so short dips inside a usage burst stay True.
    """
    pre_spread = df["pre_pressure_max"] - df["pre_pressure_min"]
    post_spread = df["post_pressure_max"] - df["post_pressure_min"]
    raw = (pre_spread >= threshold) | (post_spread >= threshold)
    return raw.rolling(window=window, min_periods=1).max().astype(bool)

def invert_to_unused(y_using_bool: pd.Series) -> pd.Series:
    """Map the usage flag to the training label: using -> 0, idle -> 1."""
    return 1 - y_using_bool.astype(int)

def enforce_min_unused_duration(collect_time: pd.Series, y_unused: pd.Series, min_minutes: int = 30) -> pd.Series:
    """Relabel "unused" (=1) runs shorter than *min_minutes* back to 0.

    A run's duration is the span between its first and last timestamp —
    exactly the telescoped sum of adjacent time deltas, so the rule stays
    robust under uneven sampling. A single isolated "unused" point has
    duration zero and is always dropped.
    """
    labels = y_unused.astype(int).copy().reset_index(drop=True)
    stamps = pd.to_datetime(collect_time).reset_index(drop=True)
    if labels.empty:
        return labels

    required = pd.Timedelta(minutes=min_minutes)
    # Run ids increase by one at every label change -> contiguous segments.
    run_ids = (labels != labels.shift(1)).cumsum()
    out = labels.copy()

    for _, run in labels.groupby(run_ids):
        first, last = run.index[0], run.index[-1]
        if labels.iloc[first] != 1:
            continue
        duration = stamps.iloc[last] - stamps.iloc[first]
        if duration < required:
            out.iloc[first:last + 1] = 0
    return out

# ---------------------------
# 列字典导出
# ---------------------------
def export_column_dict(feature_cols: List[str], device_id: str, path_csv: str):
    """
    Write the column dictionary CSV (dataset_columns_<device>.csv).

    One row per exported dataset column, fields: 列名 / 中文含义 / 单位 / 类型 / 备注.
    *device_id* is unused here but kept for signature compatibility; the
    caller already embeds the device into *path_csv*.
    """
    base_meta = {
        "collect_time": ("采集时间", "-", "datetime64", "时间戳（本地时区/数据库时区一致）"),
        "device_id": ("设备号", "-", "str", "输出时追加"),
        "y_is_unused_gas": ("是否未用气(1=未用气)", "-", "int", "规则判定 + 最小时长约束"),
        "hour": ("小时(0-23)", "-", "int", "时间特征"),
        "minute": ("分钟(0-59)", "-", "int", "时间特征"),
        "dayofweek": ("星期(周一=0)", "-", "int", "时间特征"),
        "hour_sin": ("小时正弦", "-", "float", "时间特征"),
        "hour_cos": ("小时余弦", "-", "float", "时间特征"),
        "dow_sin": ("星期正弦", "-", "float", "时间特征"),
        "dow_cos": ("星期余弦", "-", "float", "时间特征"),
    }

    int_names = {"hour", "minute", "dayofweek"}

    def _unit_of(name: str) -> str:
        # Pressure-derived columns are MPa, temperature-derived are °C.
        if "pressure" in name:
            return "MPa"
        return "°C" if "temp" in name else "-"

    def _type_of(name: str) -> str:
        if name == "collect_time":
            return "datetime64"
        if name == "device_id":
            return "str"
        if name == "y_is_unused_gas" or name in int_names:
            return "int"
        return "float"

    records = []
    for name in ["collect_time"] + feature_cols + ["device_id", "y_is_unused_gas"]:
        if name in base_meta:
            zh, unit, typ, note = base_meta[name]
        else:
            zh, unit, typ, note = name, _unit_of(name), _type_of(name), "-"
        records.append({"列名": name, "中文含义": zh, "单位": unit, "类型": typ, "备注": note})

    pd.DataFrame(records).to_csv(path_csv, index=False, encoding="utf-8-sig")

# ---------------------------
# 主流程
# ---------------------------
def main(args_list=None):
    """CLI entry point: build per-device training datasets (label 1 = gas unused).

    Pipeline per device: query -> parse -> (optional resample) -> features ->
    rule labeling -> min-duration constraint -> CSV export + column dictionary.

    Parameters
    ----------
    args_list : optional list of CLI tokens for programmatic invocation;
                None means read sys.argv (argparse default).
    """
    parser = argparse.ArgumentParser(description="阶段一：多设备批量构建训练集（1=未用气）")
    parser.add_argument("--devices", nargs="+", required=True, help="设备号列表")
    parser.add_argument("--start", type=str, required=True)
    parser.add_argument("--end",   type=str, required=True)
    parser.add_argument("--threshold", type=float, default=0.05, help="用气判定阈值（MPa）")
    parser.add_argument("--label-window", type=int, default=3, help="用气判定平滑窗口长度")
    parser.add_argument("--min-unused-minutes", type=int, default=30, help="未用气最小时长（分钟）")
    parser.add_argument("--roll-windows", type=int, nargs="+", default=[3,5,7], help="滚动窗口长度（单位：点数；如重采样为1T则近似分钟）")
    parser.add_argument("--resample", type=str, default="", help="例如 '1T' 表示1分钟重采样；为空则不启用")
    parser.add_argument("--outdir", type=str, default="./output")

    # DB parameters. SECURITY: the hard-coded fallback credentials below
    # should be removed in production — rely on the environment variables.
    parser.add_argument("--driver",   type=str, default=os.getenv("MSSQL_DRIVER", "ODBC Driver 17 for SQL Server"))
    parser.add_argument("--server",   type=str, default=os.getenv("MSSQL_SERVER", "8.140.243.205,1433"))
    parser.add_argument("--database", type=str, default=os.getenv("MSSQL_DB", "AQFSG_AllMeterData"))
    parser.add_argument("--uid",      type=str, default=os.getenv("MSSQL_UID", "prmt"))
    parser.add_argument("--pwd",      type=str, default=os.getenv("MSSQL_PWD", "prmtsoftpasswd"))

    # BUGFIX: the old `if args_list else parser.parse_args()` silently fell
    # back to sys.argv when args_list was an empty list (falsy). argparse
    # already treats None as "use sys.argv", so pass it straight through.
    args = parser.parse_args(args_list)
    os.makedirs(args.outdir, exist_ok=True)

    # NOTE(review): pyodbc's connection context manager commits on exit but
    # may not close the connection — confirm against the pyodbc version used.
    with get_sql_connection(args.driver, args.server, args.database, args.uid, args.pwd) as conn:
        for dev in args.devices:
            print(f"\n=== Device {dev} ===")
            raw = query_data(dev, args.start, args.end, conn)
            if raw.empty:
                print("No data"); continue

            parsed = parse_columns(raw)
            if args.resample:
                parsed = resample_uniform(parsed, rule=args.resample)

            # Build features (forward-fill only, no leakage)
            feat_df, feature_cols = build_features(parsed, roll_windows=args.roll_windows)

            # Rule labeling -> unused=1 -> minimum-duration constraint
            y_using  = label_using_rule(parsed, threshold=args.threshold, window=args.label_window)
            y_unused = invert_to_unused(y_using)
            y_final  = enforce_min_unused_duration(parsed["collect_time"], y_unused, args.min_unused_minutes)

            # Assemble the export frame
            export_df = feat_df[["collect_time"] + feature_cols].copy()
            export_df["device_id"] = dev
            export_df["y_is_unused_gas"] = y_final.values

            # Round numeric columns to three decimals
            num_cols = export_df.select_dtypes(include=[np.number]).columns
            export_df[num_cols] = export_df[num_cols].round(3)

            dataset_path = os.path.join(args.outdir, f"dataset_{dev}.csv")
            export_df.to_csv(dataset_path, index=False, encoding="utf-8-sig")
            dict_path = os.path.join(args.outdir, f"dataset_columns_{dev}.csv")
            export_column_dict(feature_cols, dev, dict_path)

            print(f"Saved dataset:  {dataset_path}, rows={len(export_df)}")
            print(f"Saved columns:  {dict_path}")

if __name__ == "__main__":
    # Usage example (adjust as needed):
    # python build_dataset_multi_v2.py --devices 0276563025050213 --start "2025-08-11 00:00:00" --end "2025-08-11 23:59:59" --resample "1T" --outdir ./output
    main()
