import os
import time

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from nets.siamese import Siamese
from utils.dataloader import SiameseDataset, dataset_collate
from utils.utils_fit import fit_one_epoch
from utils.loss import LossHistoryN
from nets.ContrastiveLoss import ContrastiveLoss

#----------------------------------------------------#
#   Count the total number of images in the dataset.
#----------------------------------------------------#
def get_image_num(path, train_own_data):
    """Count the training images under ``path``.

    Args:
        path: Dataset root directory.
        train_own_data: When True, count every file under
            ``path/images_background/<character>/`` (one sub-directory
            per character/class). When False nothing is counted and the
            function returns 0 (no other layout is implemented here).

    Returns:
        Total number of image files found, or 0 when
        ``train_own_data`` is False.
    """
    if not train_own_data:
        return 0
    train_path = os.path.join(path, 'images_background')
    # Sum the file count of each character directory.
    return sum(
        len(os.listdir(os.path.join(train_path, character)))
        for character in os.listdir(train_path)
    )

#-------mail
def mail(msg):
    """Send *msg* as a plain-text notification e-mail via smtp.163.com.

    Prints 'success' on delivery or 'error' plus the exception on SMTP
    failure; never raises for SMTP errors.

    Args:
        msg: Body text of the e-mail (str).
    """
    import smtplib
    from email.mime.text import MIMEText
    # 163 mail SMTP server address.
    mail_host = 'smtp.163.com'
    # 163 account user name.
    mail_user = 'ray7jq@163.com'
    # SECURITY: plaintext credential committed to source — move this to
    # an environment variable or a secrets store.
    mail_pass = 'YJBUFJIZOFPJECTE'
    # Sender address.
    sender = 'ray7jq@163.com'
    # Recipient addresses; the list may hold several for group sending.
    receivers = ['1271783179@qq.com']
    # Build the message (body, subject, from, to).
    message = MIMEText(msg, 'plain', 'utf-8')
    message['Subject'] = '模型训练完了'
    message['From'] = sender
    message['To'] = receivers[0]
    smtpObj = None
    try:
        smtpObj = smtplib.SMTP()
        # Connect and authenticate, then send.
        smtpObj.connect(mail_host, 25)
        smtpObj.login(mail_user, mail_pass)
        smtpObj.sendmail(sender, receivers, message.as_string())
        print('success')
    except smtplib.SMTPException as e:
        print('error', e)  # report, do not propagate
    finally:
        # Fix: the original only called quit() on the success path, so a
        # failing login/sendmail leaked the connection. Always close it.
        if smtpObj is not None:
            try:
                smtpObj.quit()
            except Exception:
                pass
#-----

if __name__ == "__main__":
    #-------------------------------#
    #   Whether to use CUDA.
    #   Set to False when no GPU is available.
    #-------------------------------#
    Cuda            = False
    #----------------------------------------------------#
    #   Root directory of the dataset.
    #----------------------------------------------------#
    dataset_path    = "./datasets"
    #----------------------------------------------------#
    #   Input image size as [h, w, channels]; the original default
    #   was 105,105,3.
    #----------------------------------------------------#
    input_shape     = [448,448,3]
    #----------------------------------------------------#
    #   Set to True when training on your own dataset
    #   (expects the images_background/<class>/ directory layout).
    #----------------------------------------------------#
    train_own_data  = True
    #-------------------------------#
    #   Whether to load VGG pretrained weights into the backbone.
    #-------------------------------#
    pretrained      = True
    #----------------------------------------------------------------------------------------------------------------------------#
    #   Path to a checkpoint to resume from. NOTE(review): the loading
    #   code below is commented out, so this value is currently unused.
    #----------------------------------------------------------------------------------------------------------------------------#
    model_path      = ""

    # ----------------------------------------
    # Logging setup.
    import logging
    def Logger(logger_name):
        """Create an INFO-level logger that writes raw messages both to
        the file at *logger_name* and to the console.

        Fixes two defects in the original: it fetched the module logger
        (``__name__``) regardless of *logger_name*, and it appended new
        handlers on every call, so calling it twice duplicated every log
        line. The logger is now keyed by *logger_name* and handlers are
        attached only once.

        Args:
            logger_name: Path of the log file; also used as the logger's
                registry name.

        Returns:
            The configured ``logging.Logger`` instance.
        """
        logger = logging.getLogger(logger_name)
        logger.setLevel(level=logging.INFO)
        if not logger.handlers:  # attach handlers only on first call
            handler = logging.FileHandler(logger_name)
            handler.setLevel(logging.INFO)
            # Bare '%(message)s' format, matching the original output.
            handler.setFormatter(logging.Formatter('%(message)s'))
            logger.addHandler(handler)
            # Console handler deliberately has no formatter (as before).
            console = logging.StreamHandler()
            console.setLevel(logging.INFO)
            logger.addHandler(console)
        return logger

    # Log file path. NOTE(review): the date is hard-coded — presumably it
    # should be derived from the current date; confirm.
    log = "trainLogs/" + "2022-3-21"
    logname = log + ".txt"
    logger = Logger(logname)
    #----------------------------------

    #---------- Loss/accuracy history recorder (writes under ./loss).
    loss_history = LossHistoryN('./loss', loss_name=('train', 'val','train_acc','val_acc'))
    #---------------

    model = Siamese(input_shape, pretrained)
    # if model_path != '':
    #     print('Loading weights into state dict...')
    #     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    #
    #     pretrained_dict = torch.load(model_path, map_location=device)
    #     model_dict = model.state_dict()
    #
    #     pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
    #     model_dict.update(pretrained_dict)
    #     model.load_state_dict(model_dict)

    model_train = model.train()
    if Cuda:
        model_train = torch.nn.DataParallel(model)  # multi-GPU training
        cudnn.benchmark = True
        model_train = model_train.cuda()

    loss = nn.BCELoss()  # binary cross-entropy between target and prediction
    # loss = ContrastiveLoss()  # contrastive loss (alternative)

    #----------------------------------------------------#
    #   Train/validation split ratio.
    #----------------------------------------------------#
    train_ratio         = 0.9
    images_num          = get_image_num(dataset_path, train_own_data)  # total image count
    num_train           = int(images_num * train_ratio)  # number of training images
    num_val             = images_num - num_train  # number of validation images
    # num_species_train   = num_train / 2
    # num_spacies_val     = num_val / 2

    #-------------------------------------------------------------#
    #   Training runs in two phases with different, hand-tuned
    #   initial learning rates.
    #-------------------------------------------------------------#
    if True:
        # Phase 1 ("freeze") hyper-parameters.
        Batch_size      = 1
        Lr              = 1e-4
        Init_epoch      = 0
        Freeze_epoch    = 50

        epoch_step          = num_train // Batch_size
        # NOTE(review): steps per epoch are halved here and below —
        # presumably a deliberate shortcut to speed up epochs; confirm.
        epoch_step = int(epoch_step / 2)
        epoch_step_val      = num_val // Batch_size
        epoch_step_val = int(epoch_step_val / 2)


        # Refuse to train when either split yields zero steps.
        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('数据集过小，无法进行训练，请扩充数据集。')

        # Log the phase-1 hyper-parameters with a timestamp.
        logtime = time.strftime('%F %T', time.localtime(time.time()))
        logger.info("-----freeze_epoch-----{}----- ：Batch_size = {}    Lr = {}   Freeze_epoch = {}  ".format(logtime, Batch_size, Lr,Freeze_epoch))
        # -----------------

        optimizer       = optim.Adam(model_train.parameters(), Lr)  # Adam (Adaptive Moment Estimation) optimizer
        lr_scheduler    = optim.lr_scheduler.StepLR(optimizer, step_size = 1, gamma = 0.96)

        train_dataset   = SiameseDataset(input_shape, dataset_path, num_train, num_val, train=True, train_own_data=train_own_data)
        val_dataset     = SiameseDataset(input_shape, dataset_path, num_train, num_val, train=False, train_own_data=train_own_data)
        # NOTE(review): num_workers differs between the train (1) and val
        # (4) loaders — possibly unintentional; confirm.
        gen             = DataLoader(train_dataset, batch_size=Batch_size, num_workers=1, pin_memory=True,
                                drop_last=False, collate_fn=dataset_collate)  # yields Batch_size samples per step via dataset_collate
        gen_val         = DataLoader(val_dataset, batch_size=Batch_size, num_workers=4, pin_memory=True, 
                                drop_last=False, collate_fn=dataset_collate)

        for epoch in range(Init_epoch, Freeze_epoch):
            # Run one train+validation epoch, then decay the learning rate.
            fit_one_epoch(model_train, model, loss, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Freeze_epoch, Cuda,logger,loss_history)
            lr_scheduler.step()

    if True:
        # Phase 2 ("unfreeze") hyper-parameters: larger batch, lower LR.
        Batch_size      = 4
        Lr              = 1e-5
        Freeze_epoch    = 50
        Unfreeze_epoch  = 100

        epoch_step          = num_train // Batch_size
        epoch_step_val      = num_val // Batch_size

        # Refuse to train when either split yields zero steps.
        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('数据集过小，无法进行训练，请扩充数据集。')

        # Log the phase-2 hyper-parameters with a timestamp.
        # NOTE(review): the label says "Freeze_epoch" but the value logged
        # is the number of unfreeze epochs — likely a copy-paste slip.
        logtime = time.strftime('%F %T', time.localtime(time.time()))
        logger.info("-----Unfreeze_epoch-----{}----- ：Batch_size = {}    Lr = {}   Freeze_epoch = {}  ".format(logtime, Batch_size, Lr,
                                                                                        Unfreeze_epoch - Freeze_epoch))
        # -----------------

        optimizer       = optim.Adam(model_train.parameters(), Lr)  # fresh optimizer for phase 2
        lr_scheduler    = optim.lr_scheduler.StepLR(optimizer, step_size = 1, gamma = 0.96)

        # SiameseDataset subclasses torch.utils.data.Dataset.
        train_dataset   = SiameseDataset(input_shape, dataset_path, num_train, num_val, train=True, train_own_data=train_own_data)
        val_dataset     = SiameseDataset(input_shape, dataset_path, num_train, num_val, train=False, train_own_data=train_own_data)
        gen             = DataLoader(train_dataset, batch_size=Batch_size, num_workers=4, pin_memory=True,
                                drop_last=True, collate_fn=dataset_collate)  # yields Batch_size samples per step
        gen_val         = DataLoader(val_dataset, batch_size=Batch_size, num_workers=4, pin_memory=True, 
                                drop_last=True, collate_fn=dataset_collate)

        for epoch in range(Freeze_epoch, Unfreeze_epoch):
            fit_one_epoch(model_train, model, loss, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Unfreeze_epoch, Cuda,logger,loss_history)
            lr_scheduler.step()

    # Plot the recorded loss/accuracy curves.
    loss_history.plt_loss()

    # E-mail a completion notification.
    msg = "模型训练完了"
    mail(msg)
