from data_process import *
from models import *
import matplotlib.pyplot as plt
import time
from config import *
import requests



"""----------------训练过程是否改变参数 ,如果不改变，把这三行注释掉，并把下面的variables['lr']改成变量lr, variables['epochs']改成变量epochs 等--------------"""
# Background watcher that live-updates training parameters (lr/epochs/...) while
# the loop below runs. Marked daemon so it never blocks interpreter shutdown.
# NOTE(review): relies on `threading` / `change_param` coming in via a wildcard
# import above — confirm they are exported by config/data_process.
param_watcher = threading.Thread(target=change_param, daemon=True)
param_watcher.start()



# Pick the compute device once and build both halves of the captioning model on it.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(f"使用{device}进行训练")

# CNN image encoder -> embedding; RNN decoder maps embeddings to vocabulary logits.
encoder = EncoderCNN(embedding_dim).to(device)
decoder = DecoderRNN(embedding_dim, hidden_state_size, dict_number, num_layers).to(device)

# Resume training from an existing checkpoint pair if both files are present.
# map_location=device lets a checkpoint saved on GPU load on a CPU-only machine
# (and vice versa); without it torch.load raises on a machine without CUDA.
if os.path.exists("./models/encoder22.pth") and os.path.exists("./models/decoder22.pth"):
    encoder.load_state_dict(torch.load("./models/encoder22.pth", map_location=device))
    decoder.load_state_dict(torch.load("./models/decoder22.pth", map_location=device))
    print("加载已存在的模型,继续训练")

# Only the decoder plus the encoder's projection head (linear + batch-norm) are
# optimized; the pretrained CNN backbone is deliberately excluded.
trainable = (
    list(decoder.parameters())
    + list(encoder.linear.parameters())
    + list(encoder.bn.parameters())
)
optimizer = torch.optim.Adam(trainable, lr=lr)

train_loss = []  # summed loss per epoch, collected for the plot after training
encoder.train()
decoder.train()

# Main training loop. `variables` is a shared dict that the background config
# thread may mutate at runtime (lr / epochs / save flag), so it is re-read on
# every epoch instead of being captured once.
epoch=1
while epoch <= variables['epochs']:
    start_time = time.time()
    epoch_loss=0
    for param_group in optimizer.param_groups:  # push the (possibly live-updated) learning rate into the optimizer
        param_group['lr'] = variables['lr']
    # print("Epoch:{}  lr:{}".format(epoch, optimizer.state_dict()['param_groups'][0]['lr']))

    for images,captions,length in trainloader:
        images = images.to(device)
        captions = captions.to(device)

        # Flatten the padded captions into the packed layout the decoder outputs use.
        # NOTE(review): assumes `length` carries a singleton dim that squeeze() drops —
        # confirm against the DataLoader's collate function.
        targets = pack_padded_sequence(captions, length.squeeze(),batch_first=True, enforce_sorted=False)[0] #

        features = encoder(images)
        outputs = decoder(features, captions, length.squeeze())
        loss = F.cross_entropy(outputs, targets)
        epoch_loss+=loss.item()

        # Zero grads on both modules rather than optimizer.zero_grad(): the CNN
        # backbone is excluded from the optimizer but still accumulates gradients
        # from backward(), and these calls clear those too.
        decoder.zero_grad()
        encoder.zero_grad()
        loss.backward()
        optimizer.step()


    train_loss.append(epoch_loss)
    print(f"({epoch}/{variables['epochs']}),训练损失值：{epoch_loss:.2f}" ,end=' ,')
    end_time=time.time()
    print(f"本轮训练耗时：{int(end_time-start_time)}秒 ,时间：{time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))}")



    """-------------------保存模型----------------------------"""
    # On-demand snapshot: the config thread sets save_current_model=1 to request
    # an immediate save; the flag is reset so it fires only once per request.
    if int(variables['save_current_model'])==1:
        torch.save(encoder.state_dict(), f"./models/current_encoder{epoch}.pth")
        torch.save(decoder.state_dict(), f"./models/current_decoder{epoch}.pth")
        variables['save_current_model']=0

    if  epoch % save_step == 0:
        # Periodic checkpoint: watch the loss during training and stop early if it
        # rises (likely a bug) or has converged; saving every `save_step` epochs
        # protects against an interrupted run losing the model entirely.
        torch.save(encoder.state_dict(),f"./models/encoder{epoch}.pth")
        torch.save(decoder.state_dict(),f"./models/decoder{epoch}.pth")

    epoch += 1

    """可添加功能： 1.当训练过程出现损失值收敛、损失值上升，当前轮损失值减去上一轮损失值小于2，退出当前轮训练
    """


"""由于训练的时间要7分钟一轮，不能一直看着，所以训练完后发送消息到微信，提醒自己"""
# WeChat push-notification usage (fill in appToken/uid before enabling):
# content = "模型已经训练完毕，GPU要冒烟了！！！"        # message sent to the WeChat service account
# url1 = (f'https://wxpusher.zjiecode.com/api/send/message/?appToken= &content={content}&uid= &url=http%3a%2f%2fwxpusher.zjiecode.com')
# response = requests.get(url1).json()
# if response['msg'] == '处理成功':
#     print('微信消息推送成功，请查收！')
# else:
#     print('微信消息推送失败！请查看原因')



# Plot the per-epoch training loss. Use len(train_loss), not
# variables['epochs']: the epoch count can be changed at runtime by the config
# thread, so the two may disagree and a fixed range would make plt.plot raise
# a shape-mismatch error.
plt.plot(range(1, len(train_loss) + 1), train_loss)
plt.show()





"""     --------------------  模型验证   ----------------------  """
# BLEU measures similarity between target and predicted text; if the score
# exceeds 0.45 the prediction is counted as correct, enabling an accuracy stat.
# from nltk.translate.bleu_score import sentence_bleu
# from nltk.translate.bleu_score import SmoothingFunction
# encoder.eval()
# decoder.eval()
# for images, captions, length in valloader:
#     images = images.to(device)
#     captions = captions.to(device)
#     with torch.no_grad():
#
#         features = encoder(images)
#         sampled_ids = decoder.sample(features, max_len)
#         sampled_ids = sampled_ids[0].cpu().numpy()
#
#         ids = [x for x in sampled_ids if x != 0]
#         sentence_list = [vocab[i] for i in ids]
#         idlist=captions.squeeze().tolist()
#         reference = [idlist]
#         candidate = ids
#         smooth = SmoothingFunction().method1  # smoothing-function object; N-gram weight set to unigrams below
#         score = sentence_bleu(reference, candidate, weights=(1, 0, 0, 0), smoothing_function=smooth)
#         print(score)


