import ast
import json
import os.path
from random import randint
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader


class TaoBaoDataset(Dataset):
    """Taobao user-behavior dataset.

    Each sample pairs a target item (item id, category, hourly time bucket)
    with the user's padded/truncated behavior sequence, a set of global query
    tokens, and a binary click target.

    Args:
        train_target_item_file: CSV of (user_id, item_id, item_category,
            target, timestamp) rows; the first CSV column is a written-out
            index and is dropped.
        user_behavior_file: CSV with one row per user whose ``item_id``,
            ``item_category`` and ``timestamp`` columns hold stringified
            Python lists.
        mode: 'train' or 'test'. In 'train' mode the behaviors up to and
            including the target interaction are removed from the history.
        behaviors_num: fixed length the behavior sequence is padded or
            truncated to.
        n_users: optional row limit for both CSVs (handy for smoke tests).
        q_num: number of global query tokens emitted per sample.
        short_q_num, early_q_num: stored for external consumers; not used
            inside this class.
        meta_dir: directory holding the id <-> index JSON mappings
            (defaults to the original hard-coded location).
    """

    def __init__(self,
                 train_target_item_file,
                 user_behavior_file, mode,
                 behaviors_num=200,
                 n_users=None,
                 q_num=4,
                 short_q_num=4,
                 early_q_num=4,
                 meta_dir='../data/taobao'):
        print(f"taobao {mode} dataset 准备")
        self.behaviors_num = behaviors_num
        use_columns = ['user_id', 'item_id', "item_category", "timestamp"]
        self.q_num = q_num
        self.short_q_num = short_q_num
        self.early_q_num = early_q_num
        # First CSV column is a written-out index; drop it.
        self.train_target_item = pd.read_csv(train_target_item_file, nrows=n_users).iloc[:, 1:]
        self.user_behavior = pd.read_csv(user_behavior_file, usecols=use_columns, nrows=n_users)
        # List-valued columns are stored as stringified Python literals.
        for col in ('item_id', 'item_category', 'timestamp'):
            self.user_behavior[col] = self.user_behavior[col].apply(ast.literal_eval)

        # Global time span across all behavior timestamps.
        # BUGFIX: min_time used to start at 0, so with (positive) epoch
        # timestamps the true minimum was never found and the bucket range
        # was wildly inflated; default=0 now only applies to an empty frame.
        timestamp_lists = self.user_behavior['timestamp']
        max_time = max((max(ts) for ts in timestamp_lists), default=0)
        min_time = min((min(ts) for ts in timestamp_lists), default=0)

        # One bucket per hour over the observed span (bucket 0 is padding).
        time_bucket_num = (max_time - min_time) // 3600 + 1
        self.time_bucket_num = time_bucket_num
        self.min_time = min_time
        self.mode = mode

        def _load_json(name):
            # id <-> contiguous-index mappings produced by preprocessing.
            with open(os.path.join(meta_dir, name), 'r', encoding='utf-8') as f:
                return json.load(f)

        self.category2idx = _load_json('category2idx.json')
        self.idx2category = _load_json('idx2category.json')
        self.itemid2idx = _load_json('itemid2idx.json')
        self.idx2itemid = _load_json('idx2itemid.json')

        # +1 leaves room for index 0, which is used as the padding id.
        self.vocab_size_dic = {
            "inherent_feature": {
                "item_id": len(self.itemid2idx) + 1,
                "item_category": len(self.category2idx) + 1
            },
            "cross_feature": {
                "time_stamp": time_bucket_num + 1
            }
        }
        print("准备完成")

    def get_time_bucket(self, time_stamp):
        """Map a raw timestamp onto its 1-based hourly bucket (0 = padding)."""
        return (time_stamp - self.min_time) // 3600 + 1

    def __getitem__(self, index):
        """Return (target_item, behaviors, global_token, target) tensors."""
        user_id, target_itemid, target_item_category, target, target_timestamp = \
            self.train_target_item.iloc[index]
        if target == 0:
            # NOTE(review): negative samples appear to carry a timestamp
            # shifted by +10 during preprocessing; this undoes the shift so
            # the target can be located in the history — confirm against the
            # data-generation script.
            target_timestamp -= 10
        # Row layout after user_id: [item_id list, item_category list, timestamp list].
        behavior_items = list(
            self.user_behavior[self.user_behavior['user_id'] == user_id].iloc[0, 1:])

        if self.mode == 'train':
            # Hide the answer: drop everything up to and including the target
            # interaction (located by its timestamp) from the history.
            try:
                target_item_loc = list(behavior_items[-1]).index(target_timestamp)
            except ValueError:
                target_item_loc = -1
        else:
            # 'test' (and any other mode) keeps the full history.
            target_item_loc = -1

        behaviors = []
        for item_id, category, ts in zip(behavior_items[0][target_item_loc + 1:],
                                         behavior_items[1][target_item_loc + 1:],
                                         behavior_items[2][target_item_loc + 1:]):
            behaviors.append([self.itemid2idx[str(item_id)],
                              self.category2idx[str(category)],
                              self.get_time_bucket(ts)])

        if len(behaviors) >= self.behaviors_num:
            behaviors = behaviors[:self.behaviors_num]
        elif self.behaviors_num > 1000:
            # Very long fixed lengths are padded with random ids instead of
            # the zero padding token (original behavior, kept as-is).
            for _ in range(self.behaviors_num - len(behaviors)):
                behaviors.append([randint(0, self.vocab_size_dic['inherent_feature']['item_id'] - 1),
                                  randint(0, self.vocab_size_dic['inherent_feature']['item_category'] - 1),
                                  randint(0, self.vocab_size_dic['cross_feature']['time_stamp'] - 1)])
        else:
            behaviors.extend([0, 0, 0] for _ in range(self.behaviors_num - len(behaviors)))

        target_item = torch.tensor([self.itemid2idx[str(target_itemid)],
                                    self.category2idx[str(target_item_category)],
                                    self.get_time_bucket(target_timestamp)]).long()
        return (target_item,
                torch.tensor(behaviors).long(),
                torch.arange(self.q_num).long(),
                torch.tensor(target).long())

    def __len__(self):
        return len(self.train_target_item)


class XLongDataset(Dataset):
    """XLong dataset: fixed-length item-id behavior sequences.

    Reads ``final_{mode}_data.csv`` (tab separated) whose ``item_seq`` column
    holds stringified Python lists of item ids.

    Args:
        data_path: directory containing the CSV files.
        behavior_num: fixed sequence length (shorter sequences are padded,
            longer ones truncated).
        mode: 'train' or 'test'.
        nrows: optional row limit for quick experiments.
        q_num: number of global query tokens emitted per sample.
    """

    def __init__(self, data_path, behavior_num=1000, mode='train', nrows=None, q_num=4):
        assert mode == 'train' or mode == 'test'
        print(f"XLong {mode}数据集读取中")
        self.data = pd.read_csv(os.path.join(data_path, f"final_{mode}_data.csv"), sep='\t', nrows=nrows)
        self.data['item_seq'] = self.data['item_seq'].apply(ast.literal_eval)
        self.mode = mode
        self.behavior_num = behavior_num
        self.q_num = q_num
        # Largest item id seen anywhere in the sequences.
        self.embedding_num = 0
        for seq in self.data.loc[:, 'item_seq']:
            seq_max = np.max(seq)
            if seq_max > self.embedding_num:
                self.embedding_num = seq_max

        # BUGFIX: __getitem__ pads with id embedding_num + 1, so the
        # vocabulary needs embedding_num + 2 slots (ids 0..embedding_num plus
        # the padding id).  The previous value of embedding_num + 1 put the
        # padding id one past the embedding table.
        self.vocab_size_dic = {
            "inherent_feature": {
                "item_id": self.embedding_num + 2,
            },
            "cross_feature": {
            }
        }
        print("读取完毕")

    def __getitem__(self, item):
        """Return (target_item, behaviors, global_token, target) tensors."""
        uid, behaviors, target_item, target = self.data.iloc[item]
        behaviors = list(behaviors)  # copy so padding never mutates the frame
        if len(behaviors) > self.behavior_num:
            behaviors = behaviors[:self.behavior_num]
        else:
            # Pad with the dedicated padding id (one past the largest item id).
            behaviors += [self.embedding_num + 1] * (self.behavior_num - len(behaviors))
        target_item = torch.tensor(target_item).long().unsqueeze(-1)
        behaviors = torch.tensor(behaviors).long().unsqueeze(-1)
        target = torch.tensor(target)
        global_token = torch.arange(self.q_num).long()
        return target_item, behaviors, global_token, target

    def __len__(self):
        return len(self.data)

def test_taobao():
    """Smoke test: build a tiny TaoBaoDataset and print one batch's shapes."""
    dataset = TaoBaoDataset(train_target_item_file='../data/taobao/final_train_data.csv',
                            user_behavior_file='../data/taobao/active_user_actions.csv',
                            mode='train', n_users=50)
    dataloader = DataLoader(dataset, batch_size=3, shuffle=True)
    # BUGFIX: the dataset yields 4-tuples (target_item, behaviors,
    # global_token, target); unpacking only 3 values raised
    # "too many values to unpack" on the first batch.
    for target_item, behaviors, global_token, target in dataloader:
        print(target_item.shape)
        print(behaviors.shape)
        print(global_token.shape)
        print(target.shape)
        break

def test_xlong():
    """Smoke test: build a tiny XLongDataset and print one batch's shapes."""
    data_path = "../data/xlong"
    dataset = XLongDataset(data_path, nrows=20)
    dataloader = DataLoader(dataset, batch_size=3, shuffle=True)
    # BUGFIX: XLongDataset.__getitem__ returns 4-tuples (target_item,
    # behaviors, global_token, target); the old 3-value unpacking raised
    # "too many values to unpack".
    for target_items, behaviors, global_tokens, targets in dataloader:
        print(target_items.shape)
        print(behaviors.shape)
        print(global_tokens.shape)
        print(targets.shape)
        break


if __name__ == '__main__':
    # Intentionally a no-op; call test_taobao() or test_xlong() by hand for a
    # quick smoke test of the dataset classes.
    pass