import os
import torch
import torch.nn as nn

def gpu_setup(use_gpu, gpu_id):
    """Configure CUDA visibility and return the compute device.

    Sets ``CUDA_DEVICE_ORDER``/``CUDA_VISIBLE_DEVICES`` so only the requested
    GPU is exposed to this process, then returns ``torch.device("cuda")``
    when a GPU is both requested and available, otherwise the CPU device.

    Args:
        use_gpu: whether the caller wants to run on a GPU.
        gpu_id: id of the GPU to expose (converted to str for the env var).

    Returns:
        torch.device: the selected device ("cuda" or "cpu").
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    # Both the user's request and actual availability must hold.
    gpu_ready = use_gpu and torch.cuda.is_available()
    if gpu_ready:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
    else:
        print('cuda not available')
    return torch.device("cuda" if gpu_ready else "cpu")


def grad_clipping(model, max_norm):
    """Clip the global L2 norm of the model's gradients to ``max_norm``.

    Works both for ``nn.Module`` models (uses trainable ``parameters()``)
    and for from-scratch models that expose a ``params`` attribute.

    Args:
        model: an ``nn.Module`` or an object with a ``params`` list.
        max_norm: maximum allowed total gradient norm.
    """
    if not isinstance(model, nn.Module):
        # Hand-rolled model: gradients live on model.params.
        trainable = model.params
    else:
        trainable = [p for p in model.parameters() if p.requires_grad]
    nn.utils.clip_grad_norm_(trainable, max_norm=max_norm, norm_type=2)


def init_state(model, state, batch_size, device, use_random_iter):
    """Return a fresh or detached hidden state for the next minibatch.

    On the first iteration (``state is None``) or when minibatches are
    sampled randomly, a brand-new state is created. Otherwise the existing
    state is detached in place so gradients do not flow across sequence
    boundaries (adjacent minibatches stay numerically connected but
    autograd-independent).

    Args:
        model: the recurrent model; must provide ``init_state``.
        state: previous hidden state, or None on the first call.
        batch_size: batch size for a newly created state.
        device: device on which a new state is allocated.
        use_random_iter: True when minibatches are randomly sampled.

    Returns:
        The (new or detached) hidden state.
    """
    if use_random_iter or state is None:
        return model.init_state(batch_size=batch_size, device=device)

    # For nn.GRU the state is a single tensor; for nn.LSTM (and for our
    # from-scratch models) it is a tuple of tensors.
    if isinstance(model, nn.Module) and not isinstance(state, tuple):
        state.detach_()
    else:
        for piece in state:
            piece.detach_()
    return state