import torch
import argparse  #命令行参数解析包
from model.Bert_Spc import BERT_SPC
from model.ian import IAN
from model.lstm import LSTM  #引入定义的网络对象
from model.aen import AEN_BERT
from model.Bert_lstm import BERT_LSTM
from model.aen_conv_bert import AEN_CONV_BERT


from utils.GPU import getGPU
import pandas as pd

# Segmented train/test file paths for each supported dataset, keyed by the
# dataset name passed on the command line.
dataset_files = {
    # The 'twitter' (ACL-14 short data) dataset is currently disabled:
    #   train: './datasets/acl-14-short-data/train.raw'
    #   test:  './datasets/acl-14-short-data/test.raw'
    'restaurant': {
        'train': './datasets/semeval14/Restaurants_Train.xml.seg',
        'test': './datasets/semeval14/Restaurants_Test_Gold.xml.seg',
    },
    'laptop': {
        'train': './datasets/semeval14/Laptops_Train.xml.seg',
        'test': './datasets/semeval14/Laptops_Test_Gold.xml.seg',
    },
}
# Input tensor names each model expects, in feed order. The training loop
# uses this to pick the right columns out of a batch for a given model.
input_colses = {
    # plain-embedding models
    'lstm': ['text_indices'],
    'td_lstm': ['left_with_aspect_indices', 'right_with_aspect_indices'],
    'tc_lstm': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],
    'atae_lstm': ['text_indices', 'aspect_indices'],
    'ian': ['text_indices', 'aspect_indices'],
    'memnet': ['context_indices', 'aspect_indices'],
    'ram': ['text_indices', 'aspect_indices', 'left_indices'],
    'cabasc': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],
    'tnet_lf': ['text_indices', 'aspect_indices', 'aspect_boundary'],
    'aoa': ['text_indices', 'aspect_indices'],
    'mgan': ['text_indices', 'aspect_indices', 'left_indices'],
    'asgcn': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
    # BERT-based models
    'bert_spc': ['concat_bert_indices', 'concat_segments_indices'],
    'aen_bert': ['text_bert_indices', 'aspect_bert_indices'],
    'lcf_bert': ['concat_bert_indices', 'concat_segments_indices', 'text_bert_indices', 'aspect_bert_indices'],
    'bert_lstm': ['text_bert_indices', 'aspect_bert_indices'],
    'aen_conv_bert': ['text_bert_indices', 'aspect_bert_indices'],
}
# Weight-initialization schemes, selectable by their torch.nn.init name.
initializers = {
    name: getattr(torch.nn.init, name)
    for name in ('xavier_uniform_', 'xavier_normal_', 'orthogonal_')
}
# Optimizer classes selectable by CLI name (default learning rates noted).
optimizers = {
    key: getattr(torch.optim, cls_name)
    for key, cls_name in (
        ('adadelta', 'Adadelta'),  # default lr=1.0
        ('adagrad', 'Adagrad'),    # default lr=0.01
        ('adam', 'Adam'),          # default lr=0.001
        ('adamax', 'Adamax'),      # default lr=0.002
        ('asgd', 'ASGD'),          # default lr=0.01
        ('rmsprop', 'RMSprop'),    # default lr=0.01
        ('sgd', 'SGD'),
    )
}

# Maps each CLI model-name string to the class that implements it.
model_classes = {
    'aen_conv_bert': AEN_CONV_BERT,
    'bert_spc': BERT_SPC,
    'lstm': LSTM,
    'ian': IAN,
    'aen_bert': AEN_BERT,
    'bert_lstm': BERT_LSTM,
    # Reference hyper-parameters for the LCF-BERT model:
    #   lr 2e-5, l2 1e-5, batch size 16, 5 epochs
}

# Names of the models this configuration currently covers.
model_name = ['bert_spc']
def Construct_config(args=None):
    """Parse hyper-parameters from the command line and resolve them.

    Args:
        args: optional list of argument strings passed straight to
            ``argparse``; ``None`` (the default) reads ``sys.argv[1:]``,
            preserving the original CLI behavior while allowing the
            function to be driven programmatically in tests.

    Returns:
        argparse.Namespace: the raw CLI values, with several string options
        replaced by the objects they name: ``model_class`` (model class),
        ``dataset_file`` (train/test path dict), ``inputs_cols`` (batch
        column names), ``initializer`` (torch init function), ``optimizer``
        (torch optimizer class), and ``device`` (a ``torch.device``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='aen_bert', type=str)  # which model to train
    parser.add_argument('--dataset', default='laptop', type=str, help='twitter, restaurant, laptop')
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str)  # weight-init scheme
    parser.add_argument('--lr', default=5e-5, type=float, help='try 5e-5, 2e-5 for BERT, 1e-3 for others')
    parser.add_argument('--dropout', default=0.1, type=float)
    parser.add_argument('--l2reg', default=0.01, type=float)  # L2 weight-decay coefficient
    # BUGFIX: the default was 0, which trains for zero epochs; the original
    # inline comment said the intended default was 20.
    parser.add_argument('--num_epoch', default=20, type=int, help='try larger number for non-BERT models')
    parser.add_argument('--batch_size', default=16, type=int, help='try 16, 32, 64 for BERT models')
    parser.add_argument('--log_step', default=10, type=int)  # log every N training steps
    parser.add_argument('--embed_dim', default=300, type=int)  # word-embedding dimension
    parser.add_argument('--hidden_dim', default=300, type=int)  # hidden-layer dimension
    parser.add_argument('--bert_dim', default=768, type=int)  # BERT hidden size
    parser.add_argument('--pretrained_bert_name', default='bert-base-uncased', type=str)
    parser.add_argument('--max_seq_len', default=85, type=int)  # max token-sequence length
    parser.add_argument('--polarities_dim', default=3, type=int)  # number of sentiment classes
    parser.add_argument('--hops', default=3, type=int)
    parser.add_argument('--patience', default=5, type=int)  # early-stopping patience
    parser.add_argument('--device', default=None, type=str, help='e.g. cuda:0')
    parser.add_argument('--seed', default=1234, type=int, help='set seed for reproducibility')
    parser.add_argument('--valset_ratio', default=0.2, type=float,
                        help='set ratio between 0 and 1 for validation support')
    # The following parameters are only used by the LCF-BERT model.
    parser.add_argument('--local_context_focus', default='cdm', type=str, help='local context focus mode, cdw or cdm')
    parser.add_argument('--SRD', default=3, type=int,
                        help='semantic-relative-distance, see the paper of LCF-BERT model')
    opt = parser.parse_args(args)

    # Resolve CLI name strings into the actual objects they refer to.
    opt.model_class = model_classes[opt.model_name]
    opt.dataset_file = dataset_files[opt.dataset]
    opt.inputs_cols = input_colses[opt.model_name]  # inputs differ per model
    opt.initializer = initializers[opt.initializer]
    opt.optimizer = optimizers[opt.optimizer]
    # Use the explicitly requested device if given; otherwise pick a free GPU
    # (via getGPU()) when CUDA is available, falling back to CPU.
    if opt.device is None:
        opt.device = torch.device('cuda:' + str(getGPU()) if torch.cuda.is_available() else 'cpu')
    else:
        opt.device = torch.device(opt.device)

    return opt