import torch
import torch.utils.data as torchdata
import os
import numpy as np
from sklearn.model_selection import train_test_split


def accuracy(output, target, topk=(1, 5)):
    """Compute top-k correct-prediction counts.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) tensor of true class indices.
        topk: tuple of k values; each k must be <= num_classes
            (the default of 5 assumes at least 5 classes).

    Returns:
        List of 1-element float tensors, one per k, each holding the number
        of samples whose true class is among the top-k predictions.
    """
    maxk = max(topk)
    # Top-maxk class indices per sample: (batch, maxk), then transpose to
    # (maxk, batch) so row r holds the rank-r prediction for every sample.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1), not view(-1): `eq` on the transposed `pred` can
        # produce a non-contiguous-layout tensor, and .view() raises on
        # non-contiguous tensors in recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k)
    return res


def all_files(dir):
    """Recursively collect the full paths of every file under *dir*."""
    collected = []
    for root, _dirs, filenames in os.walk(dir):
        for name in filenames:
            collected.append(os.path.join(root, name))
    return collected

# def file_to_vector(filepath,max_value=255,max_length=64):
#     result=[]
#     with open(filepath,'rb') as f:
#         for line in f:

#             result.append(min(len(line),max_value))

#     result=np.array(result,dtype=np.float32)



#     if len(result)>max_length:
#         result=[result[i*max_length:min((i+1)*max_length,len(result))] for i in range(len(result)//max_length+1)]
#         if len(result[-1])==0:
#             result=result[:-1]
#         elif len(result[-1])<max_length:
#             result[-1]=np.pad(result[-1],[0,max_length-len(result[-1])])

#     elif len(result)<max_length:
#         result=[np.pad(result,[0,max_length-len(result)])]

#     else:
#         result=[result]


#     return result

def file_to_vector(filepath, max_value=255, max_length=256):
    """Encode a file as one fixed-length float32 vector of per-line lengths.

    Each element is the byte length of one line (newline included, since the
    file is read in binary mode), clamped to *max_value*. The vector is
    truncated or zero-padded to exactly *max_length* entries.

    Returns a one-element list containing the vector, matching the shape the
    dataset-building code expects.
    """
    with open(filepath, 'rb') as f:
        lengths = [min(len(raw_line), max_value) for raw_line in f]

    vec = np.array(lengths, dtype=np.float32)
    n = len(vec)
    if n > max_length:
        vec = vec[:max_length]
    elif n < max_length:
        vec = np.pad(vec, [0, max_length - n])

    return [vec]

def make_dataset(dataset_dir, max_value, max_length):
    """Build shuffled (train, test) splits of per-file vectors labelled by author.

    Each immediate subdirectory of *dataset_dir* is one author (class); every
    file under it becomes one example via file_to_vector. Labels are one-hot
    numpy vectors over the authors.

    Args:
        dataset_dir: directory whose subdirectories are per-author file trees.
        max_value: per-line length clamp passed to file_to_vector.
        max_length: vector length passed to file_to_vector.

    Returns:
        ((x_train, y_train), (x_test, y_test)) with an 80/20 split.
    """
    examples = []
    labels = []
    # Only directories are authors — a stray file in dataset_dir would
    # otherwise become a bogus class. Sorting makes the class index of each
    # author deterministic across runs/filesystems.
    author_dirs = [
        os.path.join(dataset_dir, entry)
        for entry in sorted(os.listdir(dataset_dir))
        if os.path.isdir(os.path.join(dataset_dir, entry))
    ]

    for i, author_dir in enumerate(author_dirs):
        for file in all_files(author_dir):
            vectors = file_to_vector(file, max_value, max_length)
            # One label per vector produced from this file.
            for _ in range(len(vectors)):
                one_hot = np.zeros(shape=len(author_dirs))
                one_hot[i] = 1
                labels.append(one_hot)
            examples += vectors

    x_train, x_test, y_train, y_test = train_test_split(
        examples, labels, test_size=0.2, shuffle=True
    )

    return (x_train, y_train), (x_test, y_test)

class TestModel(torch.nn.Module):
    """Classifier over fixed-length scalar sequences.

    The input (batch, input_size) is treated as a length-*input_size*
    sequence of scalars, run through an LSTM with hidden size 1, projected
    to *embeding_size*, and finally mapped to *num_class* logits.
    """

    def __init__(self, input_size, embeding_size, num_layer, num_class) -> None:
        super().__init__()
        self.embeding_size = embeding_size
        self.num_layer = num_layer
        self.input_size = input_size

        # Hidden size 1 keeps the LSTM output flattenable back to
        # (batch, input_size) for the linear layers below.
        self.lstm = torch.nn.LSTM(1, 1, num_layer, batch_first=True)
        self.embeding_layer = torch.nn.Linear(input_size, embeding_size)
        self.linear = torch.nn.Linear(embeding_size, num_class)

    def forward(self, x):
        # (batch, input_size) -> (batch, seq_len=input_size, features=1)
        x = x.reshape(-1, self.input_size, 1)

        # Build the initial states on the same device/dtype as the input;
        # plain torch.zeros(...) would sit on the CPU with the default dtype
        # and break as soon as the model runs on GPU or in another precision.
        h0 = x.new_zeros(self.num_layer, x.size(0), 1)
        c0 = x.new_zeros(self.num_layer, x.size(0), 1)

        x, (hn, cn) = self.lstm(x, (h0, c0))

        x = x.reshape(-1, self.input_size)
        x = self.embeding_layer(x)

        # Dropout is active only while self.training is True.
        x = torch.dropout(x, 0.2, self.training)

        return self.linear(x)

def build_model(input_size, hidden_size, author_number):
    """Construct the default classifier: a 2-layer-LSTM TestModel."""
    num_lstm_layers = 2
    return TestModel(input_size, hidden_size, num_lstm_layers, author_number)

class SourceDataset(torchdata.Dataset):
    """Minimal Dataset over parallel example/label sequences.

    example_length and seq_length are stored for a grouped/windowed access
    scheme that is currently disabled; indexing returns whole examples.
    """

    def __init__(self, x, y, example_length, seq_length) -> None:
        self.x = x
        self.y = y
        self.seq_length = seq_length
        self.example_length = example_length

    def __getitem__(self, index):
        example, label = self.x[index], self.y[index]
        return example, label

    @property
    def group(self):
        # Fixed group size kept around for the disabled grouped-evaluation path.
        return 4

    def __len__(self):
        return len(self.x)

def train(model:TestModel,train_data,test_data):
    """Train *model* for 100 epochs, printing test loss and top-1/top-5 accuracy.

    Args:
        model: the classifier; model.input_size is reused as both the
            example length and sequence length of the datasets.
        train_data: (examples, one-hot labels) pair for training.
        test_data: (examples, one-hot labels) pair for evaluation.
    """
    train_dataset=SourceDataset(*train_data,model.input_size,model.input_size)
    test_dataset=SourceDataset(*test_data,model.input_size,model.input_size)
    dataloader_train=torchdata.DataLoader(train_dataset,batch_size=256,shuffle=True)
    dataloder_test=torchdata.DataLoader(test_dataset,batch_size=256,shuffle=False)

    # NOTE(review): labels are one-hot vectors (see make_dataset). Passing
    # them to CrossEntropyLoss as class probabilities requires torch>=1.10
    # and a floating-point target dtype — confirm against the installed
    # torch version and the dtype the DataLoader collates to.
    loss_func=torch.nn.CrossEntropyLoss()
    optimizer=torch.optim.Adam(model.parameters(),lr=0.01)

    # Fixed epoch count; no early stopping or checkpointing.
    for i in range(100):
        print(f"epoch : {i}:")
        model.train()
        for x,y in dataloader_train:
            y_hat=model(x)
            loss= loss_func(y_hat,y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        model.eval()
        total_test_loss=0
        total_accuracy=0
        total_acc5=0
        total_group_acc=0  # only used by the disabled grouped-eval code below
        with torch.no_grad():
            for x,y in dataloder_test:
                y_hat=model(x)
                loss=loss_func(y_hat,y)
                total_test_loss = total_test_loss + loss.item()

                # Top-5 predicted class indices per sample: (batch, 5).
                _,maxk=torch.topk(y_hat,5,dim=-1)

                # y is one-hot, so argmax(1) recovers the true class index;
                # comparing against column 0 of maxk gives top-1 hits, and
                # against all 5 columns gives top-5 hits.
                total_accuracy+=(y.argmax(1).view(-1,1) == maxk[:,0:1]).sum().item()
                total_acc5+=(y.argmax(1).view(-1,1)== maxk).sum().item()
                # y_hat_group=torch.stack([y_hat[i*test_dataset.group:(i+1)*test_dataset.group,:] for i in range(1)])
                # y_hat_group_eval=(torch.max(y_hat_group,2,keepdim=True)[0]==y_hat_group)

                # y_hat_group_eval=torch.sum(y_hat_group_eval,1)
                # y_group_eval=torch.stack([y[i*test_dataset.group] for i in range(1)])

                # group_acc=(y_hat_group_eval.argmax(1) == y_group_eval.argmax(1)).sum()
                # total_group_acc+=group_acc

            # total_test_loss is summed over batches (not averaged);
            # accuracies are normalized by the dataset size.
            print(f'loss :{total_test_loss} top1:{total_accuracy/len(test_dataset)} top5: {total_acc5/len(test_dataset)}')

# ---- script entry: build the dataset and model, then run training ----
input_size=256
embeding_size=256

# NOTE(review): make_dataset's signature is (dataset_dir, max_value,
# max_length), so embeding_size is being passed as max_length here. This
# only lines up with the model because input_size == embeding_size == 256 —
# confirm the intended arguments before changing either constant.
train_data,test_data=make_dataset('data/java/raw/java40',input_size,embeding_size)

# 40 output classes — presumably one per author directory in java40; verify
# the directory count matches.
model=build_model(input_size,embeding_size,40)

train(model,train_data,test_data)

# for e in x:
#     print(e)
#     break
#print(all_files('data/java/raw/java40/quyi'))