# -*- coding: UTF-8 -*-
"""
@Date    ：2025/9/15 11:15
@Author  ：Liu Yuezhao
@Project ：bert
@File    ：dataset.py
@IDE     ：PyCharm
"""
import torch
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
from typing import List, Dict, Any
import ast

class MBDTrxDataset(Dataset):
    """Event-sequence dataset with time features for BERT-style MLM pretraining.

    Each row of ``trx_target_data`` holds one user/sequence: a list of event
    tokens per column in ``cols_used`` plus a parallel list of timestamps in
    ``time_col``.  ``__getitem__`` returns, per event column, masked input ids,
    MLM labels and an attention mask, plus three time features (``age``,
    ``interval``, ``same_event_interval``), each padded/truncated to
    ``max_seq_len``.
    """

    # numpy datetime-unit codes for the supported ``time_unit`` values
    _UNIT_CODES = {'day': 'D', 'hour': 'h', 'minute': 'm', 'second': 's'}

    def __init__(self,
                 trx_target_data: pd.DataFrame,
                 trx_dict: dict,
                 max_seq_len: int,
                 device,
                 pad_token_id=0,
                 cols_used=None,
                 time_col='event_time',
                 mask_prob=0.15,
                 replace_prob=0.8,
                 random_replace_prob=0.5,
                 time_unit='hour'
                 ):
        """
        Dataset of timestamped event sequences for MLM pretraining.

        Args:
            trx_target_data: DataFrame, one user/sequence per row.
            trx_dict: vocabulary dict; per event column it must provide
                ``vocab_size``, ``cls_token_id``, ``sep_token_id`` and
                ``mask_token_id``.
            max_seq_len: maximum sequence length (including [CLS]/[SEP]).
            device: device tensors are created on (e.g. 'cuda' or 'cpu').
            pad_token_id: padding token id.
            cols_used: event column names used for MLM; defaults to
                ``['event_subtype']``.
            time_col: name of the timestamp column.
            mask_prob: probability that a position is selected for MLM.
            replace_prob: among selected positions, probability of [MASK].
            random_replace_prob: among selected non-[MASK] positions,
                probability of a random token (otherwise the token is kept).
            time_unit: 'day', 'hour', 'minute' or 'second'.

        Raises:
            ValueError: if ``cols_used`` is empty or ``time_unit`` is invalid.
        """
        super().__init__()
        # None sentinel avoids the mutable-default-argument pitfall.
        if cols_used is None:
            cols_used = ['event_subtype']
        if len(cols_used) == 0:
            raise ValueError("cols_used must contain at least one column (e.g., 'event_subtype')")
        # Fail fast at construction instead of at the first __getitem__.
        if time_unit not in self._UNIT_CODES:
            raise ValueError("time_unit must be 'day', 'hour', 'minute', or 'second'")

        self.data = trx_target_data.reset_index(drop=True)
        self.trx_dict = trx_dict
        self.max_seq_len = max_seq_len
        self.cols_used = cols_used
        self.time_col = time_col
        self.pad_token_id = pad_token_id
        self.device = device
        self.mask_prob = mask_prob
        self.replace_prob = replace_prob
        self.random_replace_prob = random_replace_prob
        self.time_unit = time_unit

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self._process_seq(self.data.iloc[index].to_dict())

    def _as_time_unit(self, deltas) -> np.ndarray:
        """Convert timedelta64 value(s) to float counts of ``self.time_unit``.

        Conversion truncates toward zero (e.g. 36 hours -> 1.0 day).
        Accepts an array or a scalar ``np.timedelta64``.
        """
        code = self._UNIT_CODES[self.time_unit]
        return np.asarray(deltas).astype(f'timedelta64[{code}]').astype(float)

    def _parse_timestamps(self, timestamps: List) -> np.ndarray:
        """Parse raw timestamps into a ``datetime64[ns]`` array.

        Supports:
        - list of int/float (Unix timestamps; seconds/ms/us auto-detected
          from the average magnitude)
        - list of str (ISO format) or datetime objects
        - a string repr of such a list (parsed via ``ast.literal_eval``)
        """
        if isinstance(timestamps, str):
            try:
                timestamps = ast.literal_eval(timestamps)
            except Exception:
                raise ValueError("Cannot parse string as list of timestamps")

        ts_array = np.array(timestamps)

        if np.issubdtype(ts_array.dtype, np.number):
            # Guess the timestamp unit (seconds / milliseconds / microseconds)
            # from the average magnitude of the positive entries.
            non_zero = ts_array[ts_array > 0]
            if len(non_zero) == 0:
                raise ValueError("All timestamps are zero or negative.")
            avg_val = float(np.mean(non_zero))

            if avg_val < 1e10:      # seconds (e.g., 1.6e9)
                scale = 10 ** 9
            elif avg_val < 1e13:    # milliseconds (e.g., 1.6e12)
                scale = 10 ** 6
            elif avg_val < 1e16:    # microseconds (e.g., 1.6e15)
                scale = 10 ** 3
            else:
                raise ValueError(f"Unrealistic timestamp magnitude: {avg_val}")

            # Scale integer inputs in int64: float64 cannot represent
            # nanosecond epochs (~1.6e18) exactly, so a float multiply
            # would silently lose sub-second precision.
            if np.issubdtype(ts_array.dtype, np.integer):
                ns = ts_array.astype(np.int64) * scale
            else:
                ns = (ts_array * scale).astype(np.int64)
            return ns.astype('datetime64[ns]')
        else:
            return pd.to_datetime(ts_array).values

    def _compute_age_in_days(self, timestamps: np.ndarray) -> np.ndarray:
        """Time elapsed since the sequence's first event, in ``time_unit``."""
        if len(timestamps) == 0:
            return np.array([])
        return self._as_time_unit(timestamps - timestamps[0])

    def _compute_interval_in_days(self, timestamps: np.ndarray) -> np.ndarray:
        """Time gap to the previous event (0.0 for the first event)."""
        if len(timestamps) == 0:
            # Keep feature length == sequence length (an empty sequence
            # previously yielded a spurious single 0.0 entry).
            return np.array([])
        if len(timestamps) == 1:
            return np.array([0.0])
        interval = self._as_time_unit(np.diff(timestamps))
        return np.concatenate([[0.0], interval])

    def _compute_same_event_interval_in_days(self, timestamps: np.ndarray, event_types: List) -> np.ndarray:
        """Time gap to the previous occurrence of the same event type.

        Positions whose event type has not occurred before stay 0.0.
        """
        if len(timestamps) == 0:
            return np.array([])

        interval = np.zeros(len(timestamps))
        last_occurrence = {}

        for i, (event_type, current_time) in enumerate(zip(event_types, timestamps)):
            if event_type in last_occurrence:
                interval[i] = self._as_time_unit(current_time - last_occurrence[event_type])
            last_occurrence[event_type] = current_time

        return interval

    def _pad_time_sequence(self, seq: list) -> list:
        """Pad/truncate a time-feature list to ``max_seq_len``.

        Keeps the most recent ``max_seq_len - 2`` values, inserts 0.0 at
        the [CLS] and [SEP] positions, and left-pads with 0.0 — mirroring
        the layout produced by ``_add_special_token_and_mask``.
        """
        max_len_wo_special = self.max_seq_len - 2
        truncated = seq[-max_len_wo_special:] if len(seq) > max_len_wo_special else seq

        left_pad = [0.0] * (self.max_seq_len - len(truncated) - 2)
        # layout: [left padding][CLS slot][values][SEP slot]
        return left_pad + [0.0] + truncated + [0.0]

    def _add_special_token_and_mask(self, seq: list, col: str):
        """Apply BERT-style MLM masking to one event sequence.

        Returns:
            (input_ids, labels): both of length ``max_seq_len``; labels are
            -100 (ignored by the loss) except at masked positions, where they
            hold the original token id.
        """
        if isinstance(seq, np.ndarray):
            seq = seq.tolist()

        max_len_wo_special = self.max_seq_len - 2
        truncated = seq[-max_len_wo_special:] if len(seq) > max_len_wo_special else seq

        cls_id = self.trx_dict[col]["cls_token_id"]
        sep_id = self.trx_dict[col]["sep_token_id"]
        mask_id = self.trx_dict[col]["mask_token_id"]

        seq_with_special = [cls_id] + truncated + [sep_id]
        padding_length = self.max_seq_len - len(seq_with_special)
        input_ids = [self.pad_token_id] * padding_length + seq_with_special
        labels = [-100] * len(input_ids)

        # Mask only real events — skip [PAD], [CLS] and [SEP] positions.
        for i in range(padding_length + 1, len(input_ids) - 1):
            if np.random.rand() < self.mask_prob:
                labels[i] = input_ids[i]
                rand = np.random.rand()
                if rand < self.replace_prob:
                    input_ids[i] = mask_id
                elif rand < self.replace_prob + (1 - self.replace_prob) * self.random_replace_prob:
                    vocab_size = self.trx_dict[col]['vocab_size']
                    # NOTE(review): assumes ids [1, vocab_size-3) are ordinary
                    # tokens (i.e. the last 3 vocab slots are special) —
                    # confirm against the actual vocabulary layout.
                    input_ids[i] = np.random.randint(1, vocab_size - 3)

        return input_ids, labels

    def _process_seq(self, seq: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """Build all model inputs (MLM ids/labels/mask + time features) for one row."""
        result = {}

        # Parse the timestamp list into datetime64[ns].
        raw_timestamps = seq[self.time_col]
        timestamps = self._parse_timestamps(raw_timestamps)

        # The first column in cols_used drives the same-event-interval feature.
        event_type_col = self.cols_used[0]
        raw_event_types = seq[event_type_col]
        if isinstance(raw_event_types, str):
            try:
                event_types = ast.literal_eval(raw_event_types)
            except Exception as e:
                raise ValueError(f"Cannot parse event_types: {raw_event_types}") from e
        else:
            event_types = raw_event_types

        # Align lengths in case the event and timestamp lists disagree.
        seq_len = min(len(event_types), len(timestamps))
        event_types = event_types[:seq_len]
        timestamps = timestamps[:seq_len]

        # Truncate: keep the most recent max_seq_len - 2 events.
        max_len_wo_special = self.max_seq_len - 2
        if len(timestamps) > max_len_wo_special:
            truncated_event_types = event_types[-max_len_wo_special:]
            truncated_timestamps = timestamps[-max_len_wo_special:]
        else:
            truncated_event_types = event_types
            truncated_timestamps = timestamps

        # Time features are computed on the truncated sequence so their
        # positions line up with the truncated token sequence.
        age = self._compute_age_in_days(truncated_timestamps)
        interval = self._compute_interval_in_days(truncated_timestamps)
        same_event_interval = self._compute_same_event_interval_in_days(
            truncated_timestamps, truncated_event_types
        )

        # Pad time features; [CLS] and [SEP] positions get 0.0.
        result['age'] = torch.tensor(
            self._pad_time_sequence(age.tolist()),
            dtype=torch.float, device=self.device
        )
        result['interval'] = torch.tensor(
            self._pad_time_sequence(interval.tolist()),
            dtype=torch.float, device=self.device
        )
        result['same_event_interval'] = torch.tensor(
            self._pad_time_sequence(same_event_interval.tolist()),
            dtype=torch.float, device=self.device
        )

        # Build MLM inputs per event column (using the truncated events).
        for k in self.cols_used:
            input_ids, labels = self._add_special_token_and_mask(truncated_event_types, k)
            result[k] = torch.tensor(input_ids, dtype=torch.long, device=self.device)
            result[f"{k}_labels"] = torch.tensor(labels, dtype=torch.long, device=self.device)
            mask = [1 if token != self.pad_token_id else 0 for token in input_ids]
            result[f"{k}_mask"] = torch.tensor(mask, dtype=torch.long, device=self.device)

        return result


if __name__ == '__main__':
    # Smoke test: build a tiny dataset and print one processed sample.
    # (Note: torch is already imported at module level — no local re-import.)

    # 1. Test data (Unix timestamps, in seconds)
    trx_target_data = pd.DataFrame({
        "event_subtype": [
            [101, 205, 101, 302, 205],
            [401, 205, 401],
            [101, 101]
        ],
        "event_time": [
            [1634136341, 1638718103, 1663377587, 1667825293],
            [1669488361, 1671103076, 1671461313],
            [1609660447, 1610090831]
        ]
    })

    # 2. Vocabulary
    trx_dict = {
        "event_subtype": {
            "vocab_size": 500,
            "cls_token_id": 1,
            "sep_token_id": 2,
            "mask_token_id": 3
        }
    }

    # 3. Create the dataset
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dataset = MBDTrxDataset(
        trx_target_data=trx_target_data,
        trx_dict=trx_dict,
        max_seq_len=12,
        device=device,
        cols_used=['event_subtype'],
        time_col='event_time',
        time_unit='day'
    )

    # 4. Fetch and inspect the first sample
    sample = dataset[0]

    print("Sample keys:", list(sample.keys()))
    print("Input IDs      :", sample['event_subtype'].cpu().numpy())
    print("Labels (MLM)   :", sample['event_subtype_labels'].cpu().numpy())
    print("Attention Mask :", sample['event_subtype_mask'].cpu().numpy())
    print("Age (days)     :", sample['age'].cpu().numpy().round(2))
    print("Interval (days):", sample['interval'].cpu().numpy().round(2))
    print("Same-event Intv:", sample['same_event_interval'].cpu().numpy().round(2))