from torch.utils.data import Dataset, DataLoader
import random

class e4srecDataset(Dataset):
    """Sequential-recommendation training dataset in the E4SRec style.

    Reads ``<path>_processed.txt`` where each line is
    ``user item1 item2 ...`` (user ids are 1-based in the file and shifted
    to 0-based here).  For users with at least 3 items, the last two items
    are held out for validation and test; every remaining prefix becomes a
    (sequence, next-item) training pair.
    """

    def __init__(self, path, maxlen=50):
        super().__init__()

        self.maxlen = maxlen

        # trainData: list of [prefix_sequence, next_item] pairs.
        # valData/testData: per-user [history, held-out item] (empty list
        # when the user has fewer than 3 interactions).
        self.trainData, self.valData, self.testData = [], {}, {}
        self.n_user, self.m_item = 0, 0

        with open(path + '_processed.txt', 'r') as f:
            for line in f:
                line = line.strip().split(' ')
                user, items = int(line[0]) - 1, [int(item) for item in line[1:]]
                self.n_user = max(self.n_user, user)
                self.m_item = max(self.m_item, max(items))
                if len(items) >= 3:
                    train_items = items[:-2]
                    # Only the last `maxlen` positions produce targets, but
                    # each pair keeps the full earlier history as context.
                    length = min(len(train_items), self.maxlen)
                    for t in range(length):
                        self.trainData.append([train_items[:-length + t], train_items[-length + t]])
                    self.valData[user] = [items[:-2], items[-2]]
                    self.testData[user] = [items[:-1], items[-1]]
                else:
                    # Too few interactions for held-out splits: every prefix
                    # (including the empty one) is used for training only.
                    for t in range(len(items)):
                        self.trainData.append([items[:-len(items) + t], items[-len(items) + t]])
                    self.valData[user] = []
                    self.testData[user] = []

        # Convert max observed ids into counts (ids are 0-based here).
        self.n_user, self.m_item = self.n_user + 1, self.m_item + 1

    def __getitem__(self, idx):
        """Return the (sequence, label) training pair at ``idx``."""
        # BUG FIX: the original read ``self.train_data``, an attribute that
        # is never defined on this class (it is named ``trainData``), so
        # every access raised AttributeError.
        seq, label = self.trainData[idx]
        return seq, label

    def __len__(self):
        # BUG FIX: same ``train_data`` -> ``trainData`` attribute fix.
        return len(self.trainData)

class SeqDataset(Dataset):
    """Next-item prediction dataset built from ``<path>_processed.txt``.

    Each line holds ``user item1 item2 ...``.  Users with at least three
    items contribute their last item to the test split and the
    second-to-last to the validation split; every proper prefix of the
    remaining items becomes a (sequence, next-item) training pair.
    Shorter histories go to training only.
    """

    def __init__(self, path, maxlen=50):
        super(SeqDataset, self).__init__()

        self.train_data = []
        self.test_data = []
        self.val_data = []
        self.item_max = 0
        with open(path + "_processed.txt", 'r') as f:
            for raw in f:
                fields = raw.strip().split(' ')
                # Items are numbered from 1; the +1 on item_max below keeps
                # the id range compatible with 0-based CrossEntropyLoss labels.
                user = int(fields[0])
                items = [int(tok) for tok in fields[1:]]
                self.item_max = max(self.item_max, max(items))

                if len(items) >= 3:
                    # Enough history for held-out validation/test targets.
                    self.val_data.append([items[:-2], items[-2]])
                    self.test_data.append([items[:-1], items[-1]])
                    pool = items[:-2]
                else:
                    # One- or two-item users: train only, no held-out splits
                    # (a single item yields no pair and is effectively dropped).
                    pool = items

                # Every proper prefix predicts its successor.  Unlike the
                # original E4SRec (which, like SASRec, keeps only the last
                # `maxlen` targets), all prefixes are kept here.
                size = len(pool)
                for cut in range(1, size):
                    self.train_data.append([pool[:cut - size], pool[cut - size]])
        self.item_max += 1
        self.candidate_item = []

    def __getitem__(self, idx):
        sequence, target = self.train_data[idx]
        return sequence, target

    def __len__(self):
        return len(self.train_data)
    
class SASDataset(Dataset):
    """SASRec-style training dataset with per-step negative sampling.

    Reads ``<path>_processed.txt`` (``user item1 item2 ...`` per line),
    drops the last two items of users with >= 3 interactions (held out
    for validation/test elsewhere), and produces one left-padded training
    row per prefix: ``[row_id, user, seq, pos, neg]``.

    Positions before the final prediction step carry *negated* item ids in
    ``pos`` (a marker consumed downstream); the final step keeps the true
    positives.  Negatives are drawn uniformly from ``[1, item_max - 1]``,
    rejecting items the user already interacted with.
    """

    def __init__(self, path, item_max, maxlen=50):
        super().__init__()

        self.train_data = []
        row_id = 0  # renamed from ``id`` (shadowed the builtin)
        with open(path + "_processed.txt", 'r') as f:
            for line in f:
                fields = line.strip().split(' ')
                user, items = int(fields[0]), [int(t) for t in fields[1:]]
                length = len(items)
                # Users with >= 3 items lose their last two (val/test items).
                train_items = items[:-2] if length >= 3 else items
                train_len = len(train_items)
                # Hoisted: the original rebuilt set(train_items) on every
                # rejection-sampling retry inside the loops below.
                seen = set(train_items)

                if train_len <= maxlen:
                    if train_len == 1:
                        continue  # a single item cannot form a (seq, target) pair
                    pre_neg = []
                    for j in range(1, train_len):
                        seq = [0] * (maxlen - j) + train_items[:j]
                        if j < train_len - 1:
                            # Intermediate steps: negated positives as markers.
                            pos = [0] * (maxlen - j) + [-num for num in train_items[1:j + 1]]
                        else:
                            pos = [0] * (maxlen - j) + train_items[1:j + 1]
                        neg_item = random.randint(1, item_max - 1)
                        while neg_item in seen:
                            neg_item = random.randint(1, item_max - 1)
                        neg = [0] * (maxlen - j) + pre_neg + [neg_item]
                        pre_neg.append(neg_item)
                        self.train_data.append([row_id, user, seq, pos, neg])
                        row_id += 1
                else:
                    # Long histories: keep only the last maxlen+1 items
                    # (validation/test items already removed above).
                    s = train_len - maxlen - 1
                    pre_neg = []
                    for j in range(1, maxlen + 1):
                        seq = [0] * (maxlen - j) + train_items[s:j + s]
                        if j < maxlen:
                            pos = [0] * (maxlen - j) + [-num for num in train_items[s + 1:j + s + 1]]
                        else:
                            pos = [0] * (maxlen - j) + train_items[s + 1:j + s + 1]
                        neg_item = random.randint(1, item_max - 1)
                        while neg_item in seen:
                            neg_item = random.randint(1, item_max - 1)
                        neg = [0] * (maxlen - j) + pre_neg + [neg_item]
                        pre_neg.append(neg_item)
                        self.train_data.append([row_id, user, seq, pos, neg])
                        row_id += 1

    def __getitem__(self, idx):
        """Return the full ``[row_id, user, seq, pos, neg]`` record.

        BUG FIX: the original unpacked the 5-element record into two
        variables, raising ValueError on every access.
        """
        return self.train_data[idx]

    def __len__(self):
        return len(self.train_data)



class ClassDataset(Dataset):
    """Classification dataset with a seeded 80/20 train/test split.

    Each line of ``path`` is ``user item1 ... itemN label``; one sample is
    ``[items, [label]]`` (``data2``/``test_data2`` additionally keep the
    user id for evaluation).
    """

    def __init__(self, path, maxlen=200, rand_seed=1):
        super(ClassDataset, self).__init__()

        self.train_data = []
        self.test_data = []
        self.data = []
        self.data2 = []
        with open(path, 'r') as f:
            for line in f:
                line = line.strip().split(' ')
                user, items, label = int(line[0]), [int(t) for t in line[1:-1]], int(line[-1])
                self.data.append([items, [label]])
                self.data2.append([[user], items, [label]])
        # Fixed seed so the split is reproducible across runs.
        random.seed(rand_seed)
        # Training set is 80% of the samples (the original comment said 20%).
        train_size = int(len(self.data) * 0.8)
        train_indices = random.sample(range(len(self.data)), train_size)
        train_index_set = set(train_indices)  # O(1) membership for the filters below
        self.train_data = [self.data[i] for i in train_indices]
        self.test_data = [self.data[i] for i in range(len(self.data)) if i not in train_index_set]
        # Same test split, with the user id attached.
        self.test_data2 = [self.data2[i] for i in range(len(self.data)) if i not in train_index_set]

    def __getitem__(self, idx):
        seq, label = self.train_data[idx]
        return seq, label

    def __len__(self):
        # BUG FIX: the original returned len(self.data) while __getitem__
        # indexes train_data (80% of the samples), so a DataLoader would
        # raise IndexError on the last 20% of indices.
        return len(self.train_data)
    
class ClassTestDataset(Dataset):
    """Evaluation-only classification dataset (no train/test split).

    Each line of ``path`` is ``user item1 ... itemN label``; one sample is
    ``(items, [label])``.  Used as a held-out test set.
    """

    def __init__(self, path, maxlen=200, rand_seed=1):
        super(ClassTestDataset, self).__init__()

        # Kept (always empty) for interface parity with the sibling classes.
        self.train_data = []
        self.test_data = []
        self.data = []
        with open(path, 'r') as f:
            for line in f:
                line = line.strip().split(' ')
                user, items, label = int(line[0]), [int(t) for t in line[1:-1]], int(line[-1])
                self.data.append([items, [label]])
        # Seeded for reproducibility, matching the other dataset classes.
        random.seed(rand_seed)

    def __getitem__(self, idx):
        # BUG FIX: the original indexed ``self.train_data``, which is always
        # empty in this class, so every access raised IndexError.  All
        # samples live in ``self.data``.
        seq, label = self.data[idx]
        return seq, label

    def __len__(self):
        return len(self.data)
  
class ClassDataset2(Dataset):
    """Classification dataset variant with a seeded 80/20 train/test split.

    Per line, the last four tokens are trailing features: the label is the
    third-from-last token and everything before the last four tokens forms
    the input sequence.
    # NOTE(review): the first token (a user id in the sibling classes) is
    # kept inside the sequence here — confirm this matches the file format.
    """

    def __init__(self, path, maxlen=200, rand_seed=1):
        super(ClassDataset2, self).__init__()

        self.train_data = []
        self.test_data = []
        self.data = []
        with open(path, 'r') as f:
            for line in f:
                line = line.strip().split(' ')
                items, label = [int(t) for t in line[:-4]], int(line[-3])
                self.data.append([items, [label]])
        # Fixed seed so the split is reproducible across runs.
        random.seed(rand_seed)

        # Training set is 80% of the samples.
        train_size = int(len(self.data) * 0.8)
        train_indices = random.sample(range(len(self.data)), train_size)
        train_index_set = set(train_indices)  # O(1) membership below
        self.train_data = [self.data[i] for i in train_indices]
        self.test_data = [self.data[i] for i in range(len(self.data)) if i not in train_index_set]

    def __getitem__(self, idx):
        seq, label = self.train_data[idx]
        return seq, label

    def __len__(self):
        # BUG FIX: the original returned len(self.data) while __getitem__
        # indexes train_data (80% of the samples) — IndexError at the tail.
        return len(self.train_data)
    
class ClassEnsDataset(Dataset):
    """Classification dataset carrying entropy and length side features.

    Each line of ``path`` is
    ``user item1 ... itemN een sen length label``; one sample is
    ``[items, [een], [sen], [length], [label]]`` where ``een``/``sen`` are
    float features and ``length`` is an integer sequence length.  A seeded
    80/20 train/test split is built on construction.
    """

    def __init__(self, path, maxlen=200, rand_seed=1):
        super(ClassEnsDataset, self).__init__()

        self.train_data = []
        self.test_data = []
        self.data = []
        with open(path, 'r') as f:
            for line in f:
                line = line.strip().split(' ')
                user = int(line[0])
                items = [int(t) for t in line[1:-4]]
                een, sen = float(line[-4]), float(line[-3])
                length, label = int(line[-2]), int(line[-1])
                self.data.append([items, [een], [sen], [length], [label]])
        # Fixed seed so the split is reproducible across runs.
        random.seed(rand_seed)
        # Training set is 80% of the samples.
        train_size = int(len(self.data) * 0.8)
        train_indices = random.sample(range(len(self.data)), train_size)
        train_index_set = set(train_indices)  # O(1) membership below
        self.train_data = [self.data[i] for i in train_indices]
        self.test_data = [self.data[i] for i in range(len(self.data)) if i not in train_index_set]

    def __getitem__(self, idx):
        # BUG FIX: the original unpacked the 5-field sample into two
        # variables, raising ValueError on every access.
        items, een, sen, length, label = self.train_data[idx]
        return items, een, sen, length, label

    def __len__(self):
        # BUG FIX: the original returned len(self.data) while __getitem__
        # indexes train_data (80% of the samples).
        return len(self.train_data)