import torch
import torch.nn as nn
import os
import torch.nn.functional as F

# class EncodeDev(nn.Module):
#     def __init__(self):
#         super(EncodeDev,self).__init__()
#         self.gru = nn.GRU(input_size=10, hidden_size=25,num_layers=9)
#         self.embedding = nn.Embedding(num_embeddings=1000, embedding_dim=10)
        
#     def forward(self, input,hidden):
#         # input  [1]
#         # hidden [1,1,25]
#         # x = self.embedding(input)
#         # x = x.unsqueeze(0) # (1,1,10)
#         res,output_hidden = self.gru(input,hidden)
#         return res,output_hidden




# class Encode(nn.Module):
#     def __init__(self):
#         super(Encode,self).__init__()
#         self.gru = nn.GRU(input_size=10, hidden_size=25,num_layers=1)
#         self.embedding = nn.Embedding(num_embeddings=1000, embedding_dim=10)
        
#     def forward(self, input,hidden):
#         # input  [1]
#         # hidden [1,1,25]
#         x = self.embedding(input)
#         x = x.unsqueeze(0) # (1,1,10)
#         res,output_hidden = self.gru(x,hidden)
#         return res,output_hidden



class EncodeIntroduction(nn.Module):
    """Single-step GRU encoder: embeds one token id and advances the hidden state.

    Args:
        input_size: vocabulary size (number of distinct token ids).
        hidden_size: embedding dimension and GRU hidden size.
    """
    def __init__(self, input_size=10, hidden_size=25):
        super(EncodeIntroduction, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size

        # The embedding output feeds the GRU, so the GRU's input size is
        # hidden_size (the embedding dim).
        # BUG FIX: the GRU hidden size was hard-coded to 25, which broke any
        # hidden_size != 25 (initHidden() returns a (1, 1, hidden_size) state,
        # and the GRU would reject it).
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, num_layers=1)
        self.embedding = nn.Embedding(num_embeddings=input_size, embedding_dim=hidden_size)

    def forward(self, input, hidden):
        # input:  LongTensor of shape (1,)  -- a single token id
        # hidden: (1, 1, hidden_size)       -- previous GRU state
        x = self.embedding(input)   # (1, hidden_size)
        x = x.unsqueeze(0)          # (1, 1, hidden_size): (seq_len, batch, feat)
        res, output_hidden = self.gru(x, hidden)
        return res, output_hidden

    def initHidden(self):
        # Fresh all-zero state, shape (num_layers=1, batch=1, hidden_size).
        return torch.zeros(1, 1, self.hidden_size)
    

class Decode(nn.Module):
    """GRU decoder without attention.

    Embeds a single token id, runs one GRU step, and projects the new hidden
    state to per-token log-probabilities over the vocabulary.
    """

    def __init__(self, input_size=10, hidden_size=25):
        super(Decode, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size

        # Submodules created in this order on purpose: it fixes the parameter
        # initialization sequence under a manual seed.
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, num_layers=1)
        self.embedding = nn.Embedding(num_embeddings=input_size, embedding_dim=hidden_size)
        self.linea = nn.Linear(hidden_size, input_size)

    def forward(self, input, hidden):
        """One decoding step.

        input:  LongTensor of shape (1,) -- a single token id.
        hidden: (1, 1, hidden_size)      -- previous GRU state.
        Returns (log_probs of shape (1, input_size), new hidden state).
        """
        embedded = self.embedding(input).unsqueeze(0)  # (1, 1, hidden_size)
        _, new_hidden = self.gru(embedded, hidden)
        logits = self.linea(new_hidden.squeeze(0))     # (1, input_size)
        return F.log_softmax(logits, dim=1), new_hidden

    def initHidden(self):
        """All-zero initial state of shape (1, 1, hidden_size)."""
        return torch.zeros(1, 1, self.hidden_size)


def one_train():
    """Toy per-token training loop for the encoder/decoder pair.

    Trains EncodeIntroduction + Decode on the mapping i -> 9 - i one
    (input, target) pair at a time, then runs a single sanity prediction.
    """
    criterion = nn.NLLLoss()

    # Training data: reverse the sequence 0..9.
    in_datas = [0,1,2,3,4,5,6,7,8,9]
    out_datas = [9,8,7,6,5,4,3,2,1,0]

    in_data = torch.Tensor(in_datas).long().view(-1,1)
    out_data = torch.Tensor(out_datas).long().view(-1,1)

    input_size = in_data.size(0)  # vocabulary size
    hidden_size = 25

    model_encode = EncodeIntroduction(input_size, hidden_size)
    model_decode = Decode(input_size, hidden_size)
    opt1 = torch.optim.Adam(model_encode.parameters(), lr=0.01)
    opt2 = torch.optim.Adam(model_decode.parameters(), lr=0.01)

    hidden_encode = model_encode.initHidden()

    for ep in range(10):
        loss = 0.0
        for _x in range(10):
            input = in_data[_x]
            output = out_data[_x]

            # BUG FIX: zero the gradients for every step. Previously they were
            # zeroed once per epoch while opt.step() ran per step, so each step
            # applied the sum of all previous steps' gradients.
            opt1.zero_grad()
            opt2.zero_grad()

            _, h1 = model_encode(input, hidden_encode)
            # BUG FIX: Decode.forward returns (log_probs, hidden); the original
            # passed the whole tuple into NLLLoss, which raises a TypeError.
            res, _ = model_decode(output, h1)
            los = criterion(res, output)
            # Accumulate a plain float for reporting; keeping tensors here
            # would retain every step's autograd graph.
            loss += los.item()

            los.backward()
            opt1.step()
            opt2.step()
        print('loss', loss / 10)

    # Sanity check: encode token 0 and score the decoder against target 0.
    hidden_encode = model_encode.initHidden()
    _, h1 = model_encode(torch.tensor([0]), hidden_encode)
    res, _ = model_decode(torch.tensor([0]), h1)
    los = criterion(res, torch.tensor([0]))
    # BUG FIX: report the freshly computed prediction loss, not the stale
    # training-loop accumulator.
    print('预测的结果', los.item())
    
def multi_train(predict=False, epochs=1000):
    """Train (or run inference with) the seq2seq reversal toy model.

    Args:
        predict: if True, load the saved checkpoints, greedily decode the
            sequence 0..9 while printing each prediction, and return without
            training.
        epochs: number of training epochs (default preserves the original
            behavior).
    """
    criterion = nn.NLLLoss()

    # Task: reverse the sequence 0..9.
    in_datas = [0,1,2,3,4,5,6,7,8,9]
    out_datas = [9,8,7,6,5,4,3,2,1,0]

    in_data = torch.Tensor(in_datas).long().view(-1,1)
    out_data = torch.Tensor(out_datas).long().view(-1,1)

    input = in_data
    output = out_data

    input_size = in_data.size(0)  # vocabulary size
    hidden_size = 25

    model_encode = EncodeIntroduction(input_size, hidden_size)
    model_decode = Decode(input_size, hidden_size)

    if os.path.exists('model/model_encode.pth'):
        model_encode.load_state_dict(torch.load('model/model_encode.pth', weights_only=True))
        model_encode.eval()
        print('加载模型 model_encode 成功')
    if os.path.exists('model/model_decode.pth'):
        model_decode.load_state_dict(torch.load('model/model_decode.pth', weights_only=True))
        model_decode.eval()
        print('加载模型 model_decode 成功')

    if predict:
        with torch.no_grad():
            # Greedy decoding: run the whole input through the encoder, then
            # repeatedly feed the previous prediction back into the decoder.
            test_in_datas = [0,1,2,3,4,5,6,7,8,9]
            test_in_data = torch.Tensor(test_in_datas).long().view(-1,1)

            # BUG FIX: thread the hidden state through the sequence. It was
            # reset to the initial state before every token, so the encoder
            # only ever saw the last token.
            h1 = model_encode.initHidden()
            for i in test_in_data:
                _, h1 = model_encode(i, h1)

            first = torch.Tensor([0]).long()
            for _ in range(10):
                res, h1 = model_decode(first, h1)
                topv, topi = res.topk(1)
                # topi is the argmax token id; test_in_datas[id] == id here.
                value = test_in_datas[topi.item()]
                print('预测的值是', value)
                first = torch.Tensor([value]).long()
        print('预测完成')
        # BUG FIX: return instead of quit() so importing callers survive.
        return

    opt1 = torch.optim.Adam(model_encode.parameters(), lr=0.01)
    opt2 = torch.optim.Adam(model_decode.parameters(), lr=0.01)

    # Checkpoint loading above switched the models to eval(); make sure we
    # are back in training mode before optimizing.
    model_encode.train()
    model_decode.train()

    for ep in range(epochs):
        loss = 0
        opt1.zero_grad()
        opt2.zero_grad()

        # BUG FIX: same hidden-state threading fix as in the predict branch.
        h1 = model_encode.initHidden()
        for i in input:
            _, h1 = model_encode(i, h1)

        # Teacher forcing: condition each decoder step on the ground truth.
        first = torch.Tensor([0]).long()
        length = len(output)
        for j in range(0, length):
            res, h1 = model_decode(first, h1)
            first = output[j]
            los = criterion(res, output[j])
            loss += los

        loss.backward()
        opt1.step()
        opt2.step()
        if ep % 100 == 0:
            print('epoch', ep, 'loss', loss.item() / length)

    # BUG FIX: create the checkpoint directory so torch.save cannot fail with
    # FileNotFoundError on a fresh checkout.
    os.makedirs('model', exist_ok=True)
    torch.save(model_encode.state_dict(), 'model/model_encode.pth')
    torch.save(model_decode.state_dict(), 'model/model_decode.pth')

    print('保存模型成功')




if __name__ == '__main__':
    
    
    # one_train()
    multi_train()
    quit()
    # NOTE(review): everything below this quit() is unreachable scratch code
    # kept for manual experiments; it never runs.
    # data: reverse the sequence 0..9
    in_datas = [0,1,2,3,4,5,6,7,8,9]
    out_datas = [9,8,7,6,5,4,3,2,1,0]
    
    in_data = torch.Tensor(in_datas).long().view(-1,1)
    out_data = torch.Tensor(out_datas).long().view(-1,1)
    
    input_size=in_data.size(0)
    hidden_size=25
    input = out_data[0]
    model = Decode(input_size,hidden_size)
    hidden = model.initHidden()
    # NOTE(review): Decode.forward returns a (log_probs, hidden) tuple, so `h`
    # is a tuple here and the NLLLoss call below would raise a TypeError if
    # this ever ran.
    h= model(input,hidden)
    # print('h',h.shape)
    # print(h)
    # print(h.sum(1))
    loss = nn.NLLLoss()
    loss_calc = loss(h,input)
    print('loss_calc',loss_calc)
    
    
    
    
    
    
    
    
    
    quit()
    
    '''
    EncodeIntroduction 测试
    '''
    
    # input = in_data[0]
    # input_size=10
    # hidden_size=25
    # model = EncodeIntroduction(input_size,hidden_size)
    # hidden = model.initHidden()
    # h1,h0 = model(input,hidden)
    # print('h1',h1.shape)
    # print('h0',h0.shape)
    # print(h0)
    # print(h1)
    
    
    
    
    
    
    
    '''
    1、EncodeDev 测试
    自定义seq_len 和 num_layer
    '''
    # model = EncodeDev()
    # input = torch.randn(3,4,10) # (seq_len,batch_size,embedding_dim)
    # hidden = torch.zeros(9,4,25) # (num_layer,batch_size,hidden_size)
    # y1,y2 = model(input,hidden)
    # print('y1',y1.size()) # (3,4,25)  seq_len,batch_size,hidden_size
    # print('y2',y2.size())  # (9,4,25) num_layer,batch_size,hidden_size
    
    
    '''
    2、Encode 测试
    
    '''
    # datas = [1,2,3,4,5,6,7]
    
    # data = torch.Tensor(datas).long().view(-1,1)
    # print(data.dtype)
    # print(data[0])
    # model = Encode()
    # input = torch.Tensor([1]).long()  # (batch_size,seq_len)
    # input = data[0]
    # hidden = torch.zeros(1,1,25) # (num_layer,batch_size,hidden_size)
    # y1,y2 = model(input,hidden)
    # print('y1',y1.size()) # (1,1,25)  num_layer,batch_size,hidden_size
    # print('y2',y2.size())  # (1,1,25) num_layer,batch_size,hidden_size
    

    

    
