import torch.utils.data as Data
import torch
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from param import args

def get_dataloader():
    """Build train/validation DataLoaders for the Amazon click dataset.

    Reads ``./data/amazon/data.csv``, integer-encodes every categorical
    feature column, and performs a stratified 70/30 train/validation split.

    Returns:
        tuple: ``(train_loader, val_loader, test_loader, fields)`` where
        ``test_loader`` is currently an alias of ``val_loader`` (the separate
        held-out test split is disabled) and ``fields`` is a numpy array of
        per-column vocabulary sizes (max encoded id + 1), used to size
        embedding tables downstream.
    """
    data = pd.read_csv('./data/amazon/data.csv')
    # First two columns are skipped; the rest are categorical feature columns.
    features = data.iloc[:, 2:]
    labels = data.click.values
    # Integer-encode each feature column independently.
    # NOTE(review): encoding is fit on the full dataset before splitting,
    # so the validation rows influence the label mapping — confirm this is
    # intended (it is common for building a fixed field vocabulary).
    features = features.apply(LabelEncoder().fit_transform)
    # Vocabulary size per field (encoded ids are 0-based, hence the +1).
    fields = features.max().values + 1
    # Stratified 70/30 split with a fixed seed for reproducibility.
    x_train, x_val, y_train, y_val = train_test_split(
        features, labels, test_size=0.3, random_state=2024, stratify=labels)

    x_train = torch.from_numpy(x_train.values).long()
    x_val = torch.from_numpy(x_val.values).long()
    y_train = torch.from_numpy(y_train).float()
    y_val = torch.from_numpy(y_val).float()

    train_set = Data.TensorDataset(x_train, y_train)
    val_set = Data.TensorDataset(x_val, y_val)

    train_loader = Data.DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True)
    val_loader = Data.DataLoader(dataset=val_set, batch_size=args.batch_size, shuffle=False)

    # val_loader is returned twice on purpose: it stands in for the test
    # loader so callers expecting a 4-tuple keep working.
    return train_loader, val_loader, val_loader, fields

def get_one_hot_data():
    """Load the Amazon click data with one-hot encoded features.

    Reads ``./data/amazon/data.csv``, one-hot encodes every feature column
    (columns 2 onward) via ``pd.get_dummies``, and returns a stratified
    70/30 split as ``(train_data, val_data, train_lable, val_lable)``.
    """
    frame = pd.read_csv('./data/amazon/data.csv')
    encoded = pd.get_dummies(frame.iloc[:, 2:])
    targets = frame.click.values
    # Stratified split with the same fixed seed used elsewhere in this file.
    tr_x, va_x, tr_y, va_y = train_test_split(
        encoded, targets, test_size=0.3, random_state=2024, stratify=targets)
    return tr_x, va_x, tr_y, va_y

# NOTE(review): this runs at import time — importing this module reads the
# CSV and builds the dataloaders as a side effect, and the results are
# discarded. Looks like a smoke test; consider an
# `if __name__ == "__main__":` guard if import-time work is unwanted.
_,_,_,_ = get_dataloader()