import os

import numpy as np
import torch
from torch.nn import functional as F


def set_seed_everywhere(seed, cuda):
    """Seed every random-number generator used in training.

    Args:
        seed (int): value applied to the NumPy and PyTorch RNGs.
        cuda (bool): when True, also seed all visible CUDA devices.
    """
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

def handle_dirs(dirpath):
    """Create *dirpath* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an explicit existence check, which
    avoids the check-then-create race when another process creates the
    directory between ``os.path.exists`` and ``os.makedirs``.
    """
    os.makedirs(dirpath, exist_ok=True)

def make_train_state(args):
    """Build the initial bookkeeping dict used throughout training.

    Args:
        args: namespace providing ``learning_rate`` and ``model_state_file``.

    Returns:
        dict: fresh training state (per-epoch loss/accuracy histories,
        early-stopping counters, and the checkpoint filename).
    """
    return dict(
        stop_early=False,
        early_stopping_step=0,
        early_stopping_best_val=1e8,
        learning_rate=args.learning_rate,
        epoch_index=0,
        train_loss=[],
        train_acc=[],
        val_loss=[],
        val_acc=[],
        test_loss=-1,
        test_acc=-1,
        model_filename=args.model_state_file,
    )

def update_train_state(args, model, train_state):
    """Apply early-stopping bookkeeping after one epoch of validation.

    On the first epoch the model is always checkpointed. On later epochs the
    last two entries of ``train_state['val_loss']`` are compared: a
    non-improving loss burns one unit of patience, an improving loss resets
    the patience counter and checkpoints the model whenever it beats the best
    validation loss seen so far.

    Args:
        args: namespace providing ``early_stopping_criteria`` (patience, in epochs).
        model: torch module whose ``state_dict()`` is saved to
            ``train_state['model_filename']``.
        train_state (dict): mutated in place; see ``make_train_state``.

    Returns:
        dict: the (mutated) ``train_state``.
    """
    if train_state['epoch_index'] == 0:
        # First epoch: always checkpoint; there is nothing to compare yet.
        torch.save(model.state_dict(), train_state['model_filename'])
        train_state['stop_early'] = False
    elif train_state['epoch_index'] >= 1:  # was `>= -1`: a typo — epoch 0 is handled above
        # Requires at least two recorded validation losses.
        loss_tm1, loss_t = train_state['val_loss'][-2:]

        if loss_t >= loss_tm1:
            # Loss did not improve: use up one unit of patience.
            train_state['early_stopping_step'] += 1
        else:
            if loss_t < train_state['early_stopping_best_val']:
                # New best validation loss: checkpoint the model.
                torch.save(model.state_dict(), train_state['model_filename'])
                train_state['early_stopping_best_val'] = loss_t

            # Any improvement over the previous epoch resets patience.
            train_state['early_stopping_step'] = 0

        train_state['stop_early'] = \
            train_state['early_stopping_step'] >= args.early_stopping_criteria

    return train_state


def normalize_sizes(y_pred, y_true):
    """Flatten sequence-shaped predictions and targets for loss/accuracy.

    A 3-d prediction tensor is collapsed to 2-d, keeping its last axis
    (the per-class scores); a 2-d target tensor is flattened to 1-d.
    Inputs that are already flat pass through unchanged.

    Returns:
        tuple: ``(y_pred, y_true)`` with at most 2 and 1 dimensions.
    """
    if y_true.dim() == 2:
        y_true = y_true.contiguous().view(-1)
    if y_pred.dim() == 3:
        num_classes = y_pred.size(2)
        y_pred = y_pred.contiguous().view(-1, num_classes)
    return y_pred, y_true

def compute_accuracy(y_pred, y_true, mask_index):
    """Percentage of correctly predicted, non-masked positions.

    Args:
        y_pred: class scores, 2-d or 3-d (flattened via ``normalize_sizes``).
        y_true: integer targets, 1-d or 2-d.
        mask_index: target value excluded from the accuracy (e.g. padding).

    Returns:
        float: accuracy in [0, 100]; 0.0 when every position is masked
        (previously this divided by zero).
    """
    y_pred, y_true = normalize_sizes(y_pred, y_true)

    _, y_pred_indices = y_pred.max(dim=1)

    correct_indices = torch.eq(y_pred_indices, y_true).float()
    valid_indices = torch.ne(y_true, mask_index).float()

    n_correct = (correct_indices * valid_indices).sum().item()
    n_valid = valid_indices.sum().item()

    # Guard against a fully-masked batch, which would divide by zero.
    if n_valid == 0:
        return 0.0
    return n_correct / n_valid * 100

def sequence_loss(y_pred, y_true, mask_index):
    """Cross-entropy loss over a (possibly sequence-shaped) batch.

    Predictions and targets are flattened by ``normalize_sizes``; positions
    whose target equals ``mask_index`` are ignored by the loss.
    """
    flat_pred, flat_true = normalize_sizes(y_pred, y_true)
    return F.cross_entropy(flat_pred, flat_true, ignore_index=mask_index)

