from utils import seq2seq,Encoder,Decoder
from datasource import myDataSet
import torch
from torch import optim,nn
from torchtext.vocab import vocab
from torchtext.data.utils import get_tokenizer
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_packed_sequence,pack_padded_sequence


from collections import Counter,OrderedDict


## ready work
# Spacy-backed word tokenizers for German (source) and English (target).
# Requires the spacy pipelines 'de_core_news_sm' and 'en_core_web_sm' to
# be installed beforehand (python -m spacy download ...).
de_tokenizer=get_tokenizer('spacy',language='de_core_news_sm')
en_tokenizer=get_tokenizer('spacy',language='en_core_web_sm')

def build_vocab(path,tokenizer):
    """Build a torchtext vocabulary from a plain-text corpus.

    Tokens are ranked by descending corpus frequency, then the special
    tokens are inserted at the front in order: '<pad>'=0, '<bos>'=1,
    '<eos>'=2, '<unk>'=3.

    Args:
        path: path to a UTF-8 text file, one sentence per line.
        tokenizer: callable mapping a line of text to a list of tokens.

    Returns:
        A torchtext Vocab with the four specials at indices 0-3 and
        '<unk>' used as the default index for out-of-vocabulary lookups.
    """
    count=Counter()

    with open(path,'r',encoding='utf-8') as f:
        for line in f:
            count.update(tokenizer(line))
    sorted_vo=sorted(count.items(),key=lambda x:x[1],reverse=True)
    specials=['<pad>','<bos>','<eos>','<unk>']

    v=vocab(OrderedDict(sorted_vo))

    # BUG FIX: the original reset idx to 0 INSIDE the loop, so every
    # special was inserted at position 0 and the specials ended up in
    # reverse order ('<unk>'=0 ... '<pad>'=3). enumerate keeps the
    # intended order '<pad>'=0, '<bos>'=1, '<eos>'=2, '<unk>'=3.
    for idx,special in enumerate(specials):
        v.insert_token(special,idx)

    # Without a default index, looking up an unseen token raises a
    # RuntimeError; map unknown tokens to '<unk>' instead.
    v.set_default_index(v['<unk>'])

    return v

# Root directory holding the parallel-corpus training files.
baseUrl="./datas/"
# Per-language vocabularies built from the raw training text.
en=build_vocab(baseUrl+'train.en',en_tokenizer)
de=build_vocab(baseUrl+'train.de',de_tokenizer)


## dataloader

# Project-local dataset pairing each corpus with its vocabulary and
# tokenizer (see datasource.myDataSet); order is [English, German] —
# presumably English is the source side, verify against myDataSet.
myDatasets=myDataSet([en,de],['train.en','train.de'],[en_tokenizer,de_tokenizer])

MydataLoader=DataLoader(myDatasets,batch_size=128,shuffle=True)




def batch_process(tensor,lens):
    """Sort a batch-first padded batch by descending length and pack it.

    Args:
        tensor: padded batch of shape (batch, seq, ...) — it is indexed
            on dim 0 by batch below, and the commented call sites pass
            DataLoader batches, which are batch-first.
        lens: 1-D tensor of true sequence lengths, one per batch row.

    Returns:
        A torch.nn.utils.rnn.PackedSequence over the reordered batch.
    """
    v,idx=torch.sort(lens,descending=True)
    tensor=tensor[idx]
    # BUG FIX: the original omitted batch_first=True, so dim 0 was
    # interpreted as time and the lengths no longer matched the batch.
    # Lengths must also live on the CPU for packing.
    padding=pack_padded_sequence(tensor,v.cpu(),batch_first=True)
    return padding


##  hyper-parameters and model construction

### 
enc_in_dim=len(en)   # encoder input size = source vocabulary size (presumably English — confirm dataset order)
dec_in_dim=len(de)   # decoder input size = target vocabulary size

enc_hid_dim=64       # encoder hidden size
dec_hid_dim=128      # decoder hidden size

enc_emb_dim=32       # encoder embedding size
dec_emd_dim=32       # NOTE(review): defined but never used below — likely meant for Decoder

# Attention dimension: longest sentence plus 2, presumably to account
# for the <bos>/<eos> markers — TODO confirm against myDataSet.
att_dim=myDatasets.max_lengths[0]+2

# print(att_dim)
# print(myDatasets.max_lengths)




epoch=1
encoder=Encoder(enc_in_dim,enc_emb_dim,enc_hid_dim,dec_hid_dim)
decoder=Decoder(dec_in_dim,dec_hid_dim,att_dim,dec_in_dim)

net=seq2seq(encoder,decoder)

# CrossEntropyLoss expects logits shaped (N, C, ...) against integer
# class targets shaped (N, ...).
Loss=nn.CrossEntropyLoss()
optimizer=optim.SGD(params=net.parameters(),lr=1e-3)

# Training loop: teacher-forced seq2seq training with plain SGD.
# NOTE(review): net.train() is never called — harmless if the model has
# no dropout/batch-norm layers, otherwise confirm.
for i in range(epoch):
    print("第{}个epoch:\n".format(i+1))
    loss_value=0.0
    count=0
    for ed,dd,_,_ in MydataLoader:
        # input_pad=batch_process(ed,es)
        # output_pad=batch_process(dd,ds)
        optimizer.zero_grad()
        # DataLoader batches are batch-first; .T makes them seq-first
        # for the recurrent encoder/decoder.
        outputs=net(ed.T,dd.T)
        # permute moves the vocabulary dimension to dim 1 so that
        # CrossEntropyLoss sees (N, C, ...) logits against the seq-first
        # target indices — assumes outputs are (seq, batch, vocab),
        # TODO confirm against seq2seq.forward.
        loss=Loss(outputs.permute(0,2,1),dd.T)

        loss.backward()
        optimizer.step()

        loss_value+=loss.item()

        # Report the running mean of the last 10 batches.
        if count%10==9:
            print("loss value is {:.6f}".format(loss_value/10))
            loss_value=0.0

        count+=1

        




