from tqdm import tqdm
import numpy as np
from sklearn.model_selection import train_test_split


# Split a parsed session into consecutive, non-overlapping windows of log keys.
def generate_pairs(line, window_size):
    """Chop a parsed session into consecutive, non-overlapping windows.

    Args:
        line: sequence of rows whose first element is a log key, e.g. the
            result of splitting "key,time" tokens (only column 0 is used).
        window_size: maximum number of log keys per window.

    Returns:
        list of 1-D numpy arrays, each holding up to ``window_size`` log
        keys; the trailing window may be shorter. Empty input yields [].
    """
    line = np.array(line)
    if line.size == 0:
        # 2-D indexing below would raise on an empty array.
        return []
    line = line[:, 0]  # keep only the log-key column

    # Consecutive windows of at most window_size keys each.
    seqs = [line[i:i + window_size] for i in range(0, len(line), window_size)]

    # NOTE(review): the original also built [seqs[i-1], seqs[i]] pairs in a
    # local list that was never returned (dead code); it has been removed.
    return seqs


# Window one raw session line with a fixed (or adaptive, whole-session) window.
def fixed_window(line, window_size, adaptive_window, seq_len=None, min_len=0):
    """Parse one raw session line and split it into fixed-size windows.

    Args:
        line: whitespace-separated tokens, each "key" or "key,time".
        window_size: number of events per window (ignored when adaptive).
        adaptive_window: if truthy, use the whole session as one window.
        seq_len: optional cap on the session length (truncates the tail).
        min_len: sessions shorter than this are dropped entirely.

    Returns:
        (logkey_seqs, time_seq): two parallel lists of 1-D numpy arrays;
        ([], []) when the session is filtered out or empty.
    """
    line = [ln.split(",") for ln in line.split()]

    # Drop sessions shorter than the minimum length.
    if len(line) < min_len:
        return [], []

    # Truncate to the maximum sequence length, if one was given.
    if seq_len is not None:
        line = line[:seq_len]

    # Empty session: the 2-D numpy indexing below would raise IndexError,
    # and an adaptive window of 0 would make range() step by zero.
    if not line:
        return [], []

    if adaptive_window:
        # Adaptive mode: one window spanning the whole session.
        window_size = len(line)

    line = np.array(line)

    if line.shape[1] == 2:
        # "key,time" format: split off the timestamp column.
        tim = line[:, 1].astype(float)
        line = line[:, 0]
        # The first duration of a session is defined as 0,
        # so the max duration is window_size (mins) * 60.
        tim[0] = 0
    else:
        # "key"-only format: take column 0 (squeeze() would collapse a
        # single-row session to a 0-d array and break len() below) and
        # substitute an all-zero time array.
        line = line[:, 0]
        tim = np.zeros(line.shape)

    logkey_seqs = []
    time_seq = []
    # Emit parallel key/time slices, one pair per window.
    for i in range(0, len(line), window_size):
        logkey_seqs.append(line[i:i + window_size])
        time_seq.append(tim[i:i + window_size])

    return logkey_seqs, time_seq


# Build length-sorted train/valid splits of windowed log-key/time sequences.
def generate_train_valid(data_path, window_size=20, adaptive_window=True,
                         sample_ratio=1, valid_size=0.1, output_path=None,
                         scale=None, scale_path=None, seq_len=None, min_len=0):
    """Read sessions from *data_path*, window them, and split train/valid.

    Args:
        data_path: text file with one whitespace-separated session per line.
        window_size: window length passed to ``fixed_window``.
        adaptive_window: if truthy, each session becomes a single window.
        sample_ratio: fraction of sessions to keep (taken from the top).
        valid_size: fraction of the sampled sessions used for validation.
        output_path, scale, scale_path: accepted for interface
            compatibility; not used here.
        seq_len: optional per-session length cap.
        min_len: sessions shorter than this are filtered out.

    Returns:
        (logkey_trainset, logkey_validset, time_trainset, time_validset),
        each an object-dtype numpy array of ragged windows, sorted by
        descending window length within its split.
    """
    with open(data_path, 'r') as f:
        raw_sessions = f.readlines()

    # Sessions kept after down-sampling.
    num_session = int(len(raw_sessions) * sample_ratio)
    # NOTE: an even sample count (num_session += num_session % 2) used to be
    # forced here to avoid a CUDA merge issue with odd batch remainders;
    # drop_last=True in the DataLoader covers that instead.
    test_size = int(min(num_session, len(raw_sessions)) * valid_size)

    print("before filtering short session")
    print("train size ", int(num_session - test_size))
    print("valid size ", int(test_size))
    print("=" * 40)

    logkey_seq_pairs = []
    time_seq_pairs = []
    for idx, raw in enumerate(tqdm(raw_sessions)):
        if idx >= num_session:  # stop once the sampled budget is reached
            break
        keys, times = fixed_window(raw, window_size, adaptive_window,
                                   seq_len, min_len)
        logkey_seq_pairs.extend(keys)
        time_seq_pairs.extend(times)

    # object dtype: windows are ragged (variable length).
    logkey_seq_pairs = np.array(logkey_seq_pairs, dtype=object)
    time_seq_pairs = np.array(time_seq_pairs, dtype=object)

    split = train_test_split(logkey_seq_pairs, time_seq_pairs,
                             test_size=test_size, random_state=1234)
    logkey_trainset, logkey_validset, time_trainset, time_validset = split

    # Reorder each split by descending sequence length.
    order_train = np.argsort([-len(s) for s in logkey_trainset])
    order_valid = np.argsort([-len(s) for s in logkey_validset])

    logkey_trainset = logkey_trainset[order_train]
    time_trainset = time_trainset[order_train]
    logkey_validset = logkey_validset[order_valid]
    time_validset = time_validset[order_valid]

    print("=" * 40)
    print("Num of train seqs", len(logkey_trainset))
    print("Num of valid seqs", len(logkey_validset))
    print("=" * 40)

    return logkey_trainset, logkey_validset, time_trainset, time_validset
