{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/preprocess.py","language":"python","identifier":"check_existing_pt_files","parameters":"(opt)","argument_list":"","return_statement":"","docstring":"Checking if there are existing .pt files to avoid tampering","docstring_summary":"Checking if there are existing .pt files to avoid tampering","docstring_tokens":["Checking","if","there","are","existing",".","pt","files","to","avoid","tampering"],"function":"def check_existing_pt_files(opt):\n \"\"\" Checking if there are existing .pt files to avoid tampering \"\"\"\n # We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt\n # when training, so check to avoid tampering with existing pt files\n # or mixing them up.\n for t in ['train', 'valid', 'vocab']:\n pattern = opt.save_data + '.' + t + '*.pt'\n if glob.glob(pattern):\n sys.stderr.write(\"Please backup existing pt file: %s, \"\n \"to avoid tampering!\\n\" % pattern)\n sys.exit(1)","function_tokens":["def","check_existing_pt_files","(","opt",")",":","# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt","# when training, so check to avoid tampering with existing pt files","# or mixing them up.","for","t","in","[","'train'",",","'valid'",",","'vocab'","]",":","pattern","=","opt",".","save_data","+","'.'","+","t","+","'*.pt'","if","glob",".","glob","(","pattern",")",":","sys",".","stderr",".","write","(","\"Please backup existing pt file: %s, \"","\"to avoid tampering!\\n\"","%","pattern",")","sys",".","exit","(","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/preprocess.py#L20-L30"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/preprocess.py","language":"python","identifier":"parse_args","parameters":"()","argument_list":"","return_statement":"return opt","docstring":"Parsing arguments","docstring_summary":"Parsing arguments","docstring_tokens":["Parsing","arguments"],"function":"def parse_args():\n \"\"\" Parsing arguments \"\"\"\n parser = argparse.ArgumentParser(\n description='preprocess.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.preprocess_opts(parser)\n\n opt = parser.parse_args()\n torch.manual_seed(opt.seed)\n\n check_existing_pt_files(opt)\n\n return opt","function_tokens":["def","parse_args","(",")",":","parser","=","argparse",".","ArgumentParser","(","description","=","'preprocess.py'",",","formatter_class","=","argparse",".","ArgumentDefaultsHelpFormatter",")","opts",".","add_md_help_argument","(","parser",")","opts",".","preprocess_opts","(","parser",")","opt","=","parser",".","parse_args","(",")","torch",".","manual_seed","(","opt",".","seed",")","check_existing_pt_files","(","opt",")","return","opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/preprocess.py#L33-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/preprocess.py","language":"python","identifier":"build_save_in_shards","parameters":"(src_corpus, tgt_corpus, fields,\n corpus_type, opt)","argument_list":"","return_statement":"return ret_list","docstring":"Divide the big corpus into shards, and build dataset separately.\n This is currently only for data_type=='text'.\n\n The reason we do this is to 
avoid taking up too much memory due\n    to reading in a huge corpus file at once.\n\n    To tackle this, we only read in part of the corpus file of size\n    `max_shard_size` (in practice a multiple of 64 bytes that equals\n    or slightly exceeds this size), process it into a dataset,\n    then write it to disk along the way. By doing this, we only focus on\n    part of the corpus at any moment, thus effectively reducing memory use.\n    In our tests, this method reduces the memory footprint by ~50%.\n\n    Note! As we process successive shards, previous shards may still\n    sit in memory, but since we are done with them and hold no more\n    references to them, the OS can easily reclaim that memory if it\n    runs tight.\n\n    If `max_shard_size` is 0 or is larger than the corpus size, the corpus\n    is effectively preprocessed into one dataset, i.e. no sharding.\n\n    NOTE! `max_shard_size` measures the input corpus size, not the\n    output pt file size. So a shard pt file consists of examples of size\n    2 * `max_shard_size` (source + target).","docstring_summary":"Divide the big corpus into shards, and build dataset separately.\n    This is currently only for data_type=='text'.","docstring_tokens":["Divide","the","big","corpus","into","shards","and","build","dataset","separately",".","This","is","currently","only","for","data_type","==","text","."],"function":"def build_save_in_shards(src_corpus, tgt_corpus, fields,\n                         corpus_type, opt):\n    \"\"\"\n    Divide the big corpus into shards, and build dataset separately.\n    This is currently only for data_type=='text'.\n\n    The reason we do this is to avoid taking up too much memory due\n    to reading in a huge corpus file at once.\n\n    To tackle this, we only read in part of the corpus file of size\n    `max_shard_size` (in practice a multiple of 64 bytes that equals\n    or slightly exceeds this size), process it into a dataset,\n    then write it to disk along the way. By doing this, we only focus on\n    part of the corpus at any moment, thus effectively reducing memory use.\n    In our tests, this method reduces the memory footprint by ~50%.\n\n    Note! As we process successive shards, previous shards may still\n    sit in memory, but since we are done with them and hold no more\n    references to them, the OS can easily reclaim that memory if it\n    runs tight.\n\n    If `max_shard_size` is 0 or is larger than the corpus size, the corpus\n    is effectively preprocessed into one dataset, i.e. no sharding.\n\n    NOTE! `max_shard_size` measures the input corpus size, not the\n    output pt file size. So a shard pt file consists of examples of size\n    2 * `max_shard_size` (source + target).\n    \"\"\"\n\n    corpus_size = os.path.getsize(src_corpus)\n    if corpus_size > 10 * (1024 ** 2) and opt.max_shard_size == 0:\n        logger.info(\"Warning. 
The corpus %s is larger than 10M bytes, \"\n \"you can set '-max_shard_size' to process it by \"\n \"small shards to use less memory.\" % src_corpus)\n\n if opt.max_shard_size != 0:\n logger.info(' * divide corpus into shards and build dataset '\n 'separately (shard_size = %d bytes).'\n % opt.max_shard_size)\n\n ret_list = []\n src_iter = inputters.ShardedTextCorpusIterator(\n src_corpus, opt.src_seq_length_trunc,\n \"src\", opt.max_shard_size)\n tgt_iter = inputters.ShardedTextCorpusIterator(\n tgt_corpus, opt.tgt_seq_length_trunc,\n \"tgt\", opt.max_shard_size,\n assoc_iter=src_iter)\n\n index = 0\n while not src_iter.hit_end():\n index += 1\n dataset = inputters.TextDataset(\n fields, src_iter, tgt_iter,\n src_iter.num_feats, tgt_iter.num_feats,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n dynamic_dict=opt.dynamic_dict)\n\n # We save fields in vocab.pt separately, so make it empty.\n dataset.fields = []\n\n pt_file = \"{:s}.{:s}.{:d}.pt\".format(\n opt.save_data, corpus_type, index)\n logger.info(\" * saving %s data shard to %s.\"\n % (corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n ret_list.append(pt_file)\n\n return ret_list","function_tokens":["def","build_save_in_shards","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")",":","corpus_size","=","os",".","path",".","getsize","(","src_corpus",")","if","corpus_size",">","10","*","(","1024","**","2",")","and","opt",".","max_shard_size","==","0",":","logger",".","info","(","\"Warning. The corpus %s is larger than 10M bytes, \"","\"you can set '-max_shard_size' to process it by \"","\"small shards to use less memory.\"","%","src_corpus",")","if","opt",".","max_shard_size","!=","0",":","logger",".","info","(","' * divide corpus into shards and build dataset '","'separately (shard_size = %d bytes).'","%","opt",".","max_shard_size",")","ret_list","=","[","]","src_iter","=","inputters",".","ShardedTextCorpusIterator","(","src_corpus",",","opt",".","src_seq_length_trunc",",","\"src\"",",","opt",".","max_shard_size",")","tgt_iter","=","inputters",".","ShardedTextCorpusIterator","(","tgt_corpus",",","opt",".","tgt_seq_length_trunc",",","\"tgt\"",",","opt",".","max_shard_size",",","assoc_iter","=","src_iter",")","index","=","0","while","not","src_iter",".","hit_end","(",")",":","index","+=","1","dataset","=","inputters",".","TextDataset","(","fields",",","src_iter",",","tgt_iter",",","src_iter",".","num_feats",",","tgt_iter",".","num_feats",",","src_seq_length","=","opt",".","src_seq_length",",","tgt_seq_length","=","opt",".","tgt_seq_length",",","dynamic_dict","=","opt",".","dynamic_dict",")","# We save fields in vocab.pt separately, so make it empty.","dataset",".","fields","=","[","]","pt_file","=","\"{:s}.{:s}.{:d}.pt\"",".","format","(","opt",".","save_data",",","corpus_type",",","index",")","logger",".","info","(","\" * saving %s data shard to %s.\"","%","(","corpus_type",",","pt_file",")",")","torch",".","save","(","dataset",",","pt_file",")","ret_list",".","append","(","pt_file",")","return","ret_list"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/preprocess.py#L50-L120"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/preprocess.py","language":"python","identifier":"build_save_in_shards_using_shards_size","parameters":"(src_corpus, tgt_corpus, fields,\n corpus_type, opt)","argument_list":"","return_statement":"return 
ret_list","docstring":"Divide src_corpus and tgt_corpus into smaller multiples\n src_copus and tgt corpus files, then build shards, each\n shard will have opt.shard_size samples except last shard.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.","docstring_summary":"Divide src_corpus and tgt_corpus into smaller multiples\n src_copus and tgt corpus files, then build shards, each\n shard will have opt.shard_size samples except last shard.","docstring_tokens":["Divide","src_corpus","and","tgt_corpus","into","smaller","multiples","src_copus","and","tgt","corpus","files","then","build","shards","each","shard","will","have","opt",".","shard_size","samples","except","last","shard","."],"function":"def build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,\n corpus_type, opt):\n \"\"\"\n Divide src_corpus and tgt_corpus into smaller multiples\n src_copus and tgt corpus files, then build shards, each\n shard will have opt.shard_size samples except last shard.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.\n \"\"\"\n\n src_data = open(src_corpus, \"r\", encoding=\"utf-8\").readlines()\n tgt_data = open(tgt_corpus, \"r\", encoding=\"utf-8\").readlines()\n\n src_corpus = \"\".join(src_corpus.split(\".\")[:-1])\n tgt_corpus = \"\".join(tgt_corpus.split(\".\")[:-1])\n\n for x in range(int(len(src_data) \/ opt.shard_size)):\n open(src_corpus + \".{0}.txt\".format(x), \"w\",\n encoding=\"utf-8\").writelines(\n src_data[x * opt.shard_size: (x + 1) * opt.shard_size])\n open(tgt_corpus + \".{0}.txt\".format(x), \"w\",\n encoding=\"utf-8\").writelines(\n tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])\n\n src_list = sorted(glob.glob(src_corpus + '.*.txt'))\n tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))\n\n ret_list = []\n\n for index, src in enumerate(src_list):\n dataset = inputters.build_dataset(\n fields, opt.data_type,\n src_path=src,\n tgt_path=tgt_list[index],\n src_dir=opt.src_dir,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n src_seq_length_trunc=opt.src_seq_length_trunc,\n tgt_seq_length_trunc=opt.tgt_seq_length_trunc,\n dynamic_dict=opt.dynamic_dict,\n sample_rate=opt.sample_rate,\n window_size=opt.window_size,\n window_stride=opt.window_stride,\n window=opt.window,\n image_channel_size=opt.image_channel_size\n )\n\n pt_file = \"{:s}.{:s}.{:d}.pt\".format(\n opt.save_data, corpus_type, index)\n\n # We save fields in vocab.pt seperately, so make it empty.\n dataset.fields = []\n\n logger.info(\" * saving %sth %s data image shard to %s.\"\n % (index, corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n ret_list.append(pt_file)\n\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n return 
ret_list","function_tokens":["def","build_save_in_shards_using_shards_size","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")",":","src_data","=","open","(","src_corpus",",","\"r\"",",","encoding","=","\"utf-8\"",")",".","readlines","(",")","tgt_data","=","open","(","tgt_corpus",",","\"r\"",",","encoding","=","\"utf-8\"",")",".","readlines","(",")","src_corpus","=","\"\"",".","join","(","src_corpus",".","split","(","\".\"",")","[",":","-","1","]",")","tgt_corpus","=","\"\"",".","join","(","tgt_corpus",".","split","(","\".\"",")","[",":","-","1","]",")","for","x","in","range","(","int","(","len","(","src_data",")","\/","opt",".","shard_size",")",")",":","open","(","src_corpus","+","\".{0}.txt\"",".","format","(","x",")",",","\"w\"",",","encoding","=","\"utf-8\"",")",".","writelines","(","src_data","[","x","*","opt",".","shard_size",":","(","x","+","1",")","*","opt",".","shard_size","]",")","open","(","tgt_corpus","+","\".{0}.txt\"",".","format","(","x",")",",","\"w\"",",","encoding","=","\"utf-8\"",")",".","writelines","(","tgt_data","[","x","*","opt",".","shard_size",":","(","x","+","1",")","*","opt",".","shard_size","]",")","src_list","=","sorted","(","glob",".","glob","(","src_corpus","+","'.*.txt'",")",")","tgt_list","=","sorted","(","glob",".","glob","(","tgt_corpus","+","'.*.txt'",")",")","ret_list","=","[","]","for","index",",","src","in","enumerate","(","src_list",")",":","dataset","=","inputters",".","build_dataset","(","fields",",","opt",".","data_type",",","src_path","=","src",",","tgt_path","=","tgt_list","[","index","]",",","src_dir","=","opt",".","src_dir",",","src_seq_length","=","opt",".","src_seq_length",",","tgt_seq_length","=","opt",".","tgt_seq_length",",","src_seq_length_trunc","=","opt",".","src_seq_length_trunc",",","tgt_seq_length_trunc","=","opt",".","tgt_seq_length_trunc",",","dynamic_dict","=","opt",".","dynamic_dict",",","sample_rate","=","opt",".","sample_rate",",","window_size","=","opt",".","window_size",",","window_stride","=","opt",".","window_stride",",","window","=","opt",".","window",",","image_channel_size","=","opt",".","image_channel_size",")","pt_file","=","\"{:s}.{:s}.{:d}.pt\"",".","format","(","opt",".","save_data",",","corpus_type",",","index",")","# We save fields in vocab.pt seperately, so make it empty.","dataset",".","fields","=","[","]","logger",".","info","(","\" * saving %sth %s data image shard to %s.\"","%","(","index",",","corpus_type",",","pt_file",")",")","torch",".","save","(","dataset",",","pt_file",")","ret_list",".","append","(","pt_file",")","del","dataset",".","examples","gc",".","collect","(",")","del","dataset","gc",".","collect","(",")","return","ret_list"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/preprocess.py#L123-L188"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/preprocess.py","language":"python","identifier":"build_save_dataset","parameters":"(corpus_type, fields, opt)","argument_list":"","return_statement":"return [pt_file]","docstring":"Building and saving the dataset","docstring_summary":"Building and saving the dataset","docstring_tokens":["Building","and","saving","the","dataset"],"function":"def build_save_dataset(corpus_type, fields, opt):\n \"\"\" Building and saving the dataset \"\"\"\n assert corpus_type in ['train', 'valid']\n\n if corpus_type == 'train':\n src_corpus = opt.train_src\n tgt_corpus = opt.train_tgt\n else:\n 
src_corpus = opt.valid_src\n        tgt_corpus = opt.valid_tgt\n\n    # Currently we only do preprocess sharding for corpus: data_type=='text'.\n    if opt.data_type == 'text':\n        return build_save_in_shards(\n            src_corpus, tgt_corpus, fields,\n            corpus_type, opt)\n\n    if (opt.shard_size > 0):\n        return build_save_in_shards_using_shards_size(src_corpus,\n                                                      tgt_corpus,\n                                                      fields,\n                                                      corpus_type,\n                                                      opt)\n\n    # For data_type == 'img' or 'audio', currently we don't do\n    # preprocess sharding. We only build a monolithic dataset.\n    # But since the interfaces are uniform, it would not be hard\n    # to do this should users need this feature.\n    dataset = inputters.build_dataset(\n        fields, opt.data_type,\n        src_path=src_corpus,\n        tgt_path=tgt_corpus,\n        src_dir=opt.src_dir,\n        src_seq_length=opt.src_seq_length,\n        tgt_seq_length=opt.tgt_seq_length,\n        src_seq_length_trunc=opt.src_seq_length_trunc,\n        tgt_seq_length_trunc=opt.tgt_seq_length_trunc,\n        dynamic_dict=opt.dynamic_dict,\n        sample_rate=opt.sample_rate,\n        window_size=opt.window_size,\n        window_stride=opt.window_stride,\n        window=opt.window,\n        image_channel_size=opt.image_channel_size)\n\n    # We save fields in vocab.pt separately, so make it empty.\n    dataset.fields = []\n\n    pt_file = \"{:s}.{:s}.pt\".format(opt.save_data, corpus_type)\n    logger.info(\" * saving %s dataset to %s.\" % (corpus_type, pt_file))\n    torch.save(dataset, pt_file)\n\n    return [pt_file]","function_tokens":["def","build_save_dataset","(","corpus_type",",","fields",",","opt",")",":","assert","corpus_type","in","[","'train'",",","'valid'","]","if","corpus_type","==","'train'",":","src_corpus","=","opt",".","train_src","tgt_corpus","=","opt",".","train_tgt","else",":","src_corpus","=","opt",".","valid_src","tgt_corpus","=","opt",".","valid_tgt","# Currently we only do preprocess sharding for corpus: data_type=='text'.","if","opt",".","data_type","==","'text'",":","return","build_save_in_shards","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")","if","(","opt",".","shard_size",">","0",")",":","return","build_save_in_shards_using_shards_size","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")","# For data_type == 'img' or 'audio', currently we don't do","# preprocess sharding. 
We only build a monolithic dataset.","# But since the interfaces are uniform, it would not be hard","# to do this should users need this feature.","dataset","=","inputters",".","build_dataset","(","fields",",","opt",".","data_type",",","src_path","=","src_corpus",",","tgt_path","=","tgt_corpus",",","src_dir","=","opt",".","src_dir",",","src_seq_length","=","opt",".","src_seq_length",",","tgt_seq_length","=","opt",".","tgt_seq_length",",","src_seq_length_trunc","=","opt",".","src_seq_length_trunc",",","tgt_seq_length_trunc","=","opt",".","tgt_seq_length_trunc",",","dynamic_dict","=","opt",".","dynamic_dict",",","sample_rate","=","opt",".","sample_rate",",","window_size","=","opt",".","window_size",",","window_stride","=","opt",".","window_stride",",","window","=","opt",".","window",",","image_channel_size","=","opt",".","image_channel_size",")","# We save fields in vocab.pt separately, so make it empty.","dataset",".","fields","=","[","]","pt_file","=","\"{:s}.{:s}.pt\"",".","format","(","opt",".","save_data",",","corpus_type",")","logger",".","info","(","\" * saving %s dataset to %s.\"","%","(","corpus_type",",","pt_file",")",")","torch",".","save","(","dataset",",","pt_file",")","return","[","pt_file","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/preprocess.py#L191-L242"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/preprocess.py","language":"python","identifier":"build_save_vocab","parameters":"(train_dataset, fields, opt)","argument_list":"","return_statement":"","docstring":"Building and saving the vocab","docstring_summary":"Building and saving the vocab","docstring_tokens":["Building","and","saving","the","vocab"],"function":"def build_save_vocab(train_dataset, fields, opt):\n    \"\"\" Building and saving the vocab \"\"\"\n    fields = inputters.build_vocab(train_dataset, fields, opt.data_type,\n                                   opt.share_vocab,\n                                   opt.src_vocab,\n                                   opt.src_vocab_size,\n                                   opt.src_words_min_frequency,\n                                   opt.tgt_vocab,\n                                   opt.tgt_vocab_size,\n                                   opt.tgt_words_min_frequency)\n\n    # Can't save fields, so remove\/reconstruct at training time.\n    vocab_file = opt.save_data + '.vocab.pt'\n    torch.save(inputters.save_fields_to_vocab(fields), vocab_file)","function_tokens":["def","build_save_vocab","(","train_dataset",",","fields",",","opt",")",":","fields","=","inputters",".","build_vocab","(","train_dataset",",","fields",",","opt",".","data_type",",","opt",".","share_vocab",",","opt",".","src_vocab",",","opt",".","src_vocab_size",",","opt",".","src_words_min_frequency",",","opt",".","tgt_vocab",",","opt",".","tgt_vocab_size",",","opt",".","tgt_words_min_frequency",")","# Can't save fields, so remove\/reconstruct at training time.","vocab_file","=","opt",".","save_data","+","'.vocab.pt'","torch",".","save","(","inputters",".","save_fields_to_vocab","(","fields",")",",","vocab_file",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/preprocess.py#L245-L258"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/train.py","language":"python","identifier":"run","parameters":"(opt, device_id, error_queue)","argument_list":"","return_statement":"","docstring":"run process","docstring_summary":"run process","docstring_tokens":["run","process"],"function":"def run(opt, device_id, error_queue):\n    \"\"\" run process \"\"\"\n    
try:\n gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)\n if gpu_rank != opt.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n single_main(opt, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))","function_tokens":["def","run","(","opt",",","device_id",",","error_queue",")",":","try",":","gpu_rank","=","onmt",".","utils",".","distributed",".","multi_init","(","opt",",","device_id",")","if","gpu_rank","!=","opt",".","gpu_ranks","[","device_id","]",":","raise","AssertionError","(","\"An error occurred in \\\n Distributed initialization\"",")","single_main","(","opt",",","device_id",")","except","KeyboardInterrupt",":","pass","# killed by parent, do nothing","except","Exception",":","# propagate exception to parent process, keeping original traceback","import","traceback","error_queue",".","put","(","(","opt",".","gpu_ranks","[","device_id","]",",","traceback",".","format_exc","(",")",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/train.py#L56-L69"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/train.py","language":"python","identifier":"ErrorHandler.__init__","parameters":"(self, error_queue)","argument_list":"","return_statement":"","docstring":"init error handler","docstring_summary":"init error handler","docstring_tokens":["init","error","handler"],"function":"def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)","function_tokens":["def","__init__","(","self",",","error_queue",")",":","import","signal","import","threading","self",".","error_queue","=","error_queue","self",".","children_pids","=","[","]","self",".","error_thread","=","threading",".","Thread","(","target","=","self",".","error_listener",",","daemon","=","True",")","self",".","error_thread",".","start","(",")","signal",".","signal","(","signal",".","SIGUSR1",",","self",".","signal_handler",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/train.py#L76-L85"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/train.py","language":"python","identifier":"ErrorHandler.add_child","parameters":"(self, pid)","argument_list":"","return_statement":"","docstring":"error handler","docstring_summary":"error handler","docstring_tokens":["error","handler"],"function":"def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)","function_tokens":["def","add_child","(","self",",","pid",")",":","self",".","children_pids",".","append","(","pid",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/train.py#L87-L89"} 
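The build_save_in_shards_using_shards_size record above splits the corpus with range(int(len(src_data) / opt.shard_size)), which floor-divides and therefore silently drops any trailing partial shard, despite the docstring's promise that the last shard holds the remainder. A minimal sketch of the intended splitting in plain Python (shard_split is a hypothetical helper for illustration, not part of OpenNMT-py):

import math

def shard_split(lines, shard_size):
    """Split a list of lines into shards of shard_size, keeping the tail."""
    # ceil instead of floor so the final partial shard is not dropped
    n_shards = math.ceil(len(lines) / shard_size)
    return [lines[i * shard_size:(i + 1) * shard_size]
            for i in range(n_shards)]

# e.g. shard_split(list(range(10)), 4) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]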
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/train.py","language":"python","identifier":"ErrorHandler.error_listener","parameters":"(self)","argument_list":"","return_statement":"","docstring":"error listener","docstring_summary":"error listener","docstring_tokens":["error","listener"],"function":"def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)","function_tokens":["def","error_listener","(","self",")",":","(","rank",",","original_trace",")","=","self",".","error_queue",".","get","(",")","self",".","error_queue",".","put","(","(","rank",",","original_trace",")",")","os",".","kill","(","os",".","getpid","(",")",",","signal",".","SIGUSR1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/train.py#L91-L95"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/train.py","language":"python","identifier":"ErrorHandler.signal_handler","parameters":"(self, signalnum, stackframe)","argument_list":"","return_statement":"","docstring":"signal handler","docstring_summary":"signal handler","docstring_tokens":["signal","handler"],"function":"def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)","function_tokens":["def","signal_handler","(","self",",","signalnum",",","stackframe",")",":","for","pid","in","self",".","children_pids",":","os",".","kill","(","pid",",","signal",".","SIGINT",")","# kill children processes","(","rank",",","original_trace",")","=","self",".","error_queue",".","get","(",")","msg","=","\"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"","msg","+=","original_trace","raise","Exception","(","msg",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/train.py#L97-L105"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/model_builder.py","language":"python","identifier":"build_embeddings","parameters":"(opt, word_dict, feature_dicts, for_encoder=True)","argument_list":"","return_statement":"return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n feat_merge=opt.feat_merge,\n feat_vec_exponent=opt.feat_vec_exponent,\n feat_vec_size=opt.feat_vec_size,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n feat_padding_idx=feats_padding_idx,\n word_vocab_size=num_word_embeddings,\n feat_vocab_sizes=num_feat_embeddings,\n sparse=opt.optim == \"sparseadam\")","docstring":"Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?","docstring_summary":"Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature 
dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?","docstring_tokens":["Build","an","Embeddings","instance",".","Args",":","opt",":","the","option","in","current","environment",".","word_dict","(","Vocab",")",":","words","dictionary",".","feature_dicts","(","[","Vocab","]","optional",")",":","a","list","of","feature","dictionary",".","for_encoder","(","bool",")",":","build","Embeddings","for","encoder","or","decoder?"],"function":"def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):\n \"\"\"\n Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?\n \"\"\"\n if for_encoder:\n embedding_dim = opt.src_word_vec_size\n else:\n embedding_dim = opt.tgt_word_vec_size\n\n word_padding_idx = word_dict.stoi[inputters.PAD_WORD]\n num_word_embeddings = len(word_dict)\n\n feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]\n for feat_dict in feature_dicts]\n num_feat_embeddings = [len(feat_dict) for feat_dict in\n feature_dicts]\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n feat_merge=opt.feat_merge,\n feat_vec_exponent=opt.feat_vec_exponent,\n feat_vec_size=opt.feat_vec_size,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n feat_padding_idx=feats_padding_idx,\n word_vocab_size=num_word_embeddings,\n feat_vocab_sizes=num_feat_embeddings,\n sparse=opt.optim == \"sparseadam\")","function_tokens":["def","build_embeddings","(","opt",",","word_dict",",","feature_dicts",",","for_encoder","=","True",")",":","if","for_encoder",":","embedding_dim","=","opt",".","src_word_vec_size","else",":","embedding_dim","=","opt",".","tgt_word_vec_size","word_padding_idx","=","word_dict",".","stoi","[","inputters",".","PAD_WORD","]","num_word_embeddings","=","len","(","word_dict",")","feats_padding_idx","=","[","feat_dict",".","stoi","[","inputters",".","PAD_WORD","]","for","feat_dict","in","feature_dicts","]","num_feat_embeddings","=","[","len","(","feat_dict",")","for","feat_dict","in","feature_dicts","]","return","Embeddings","(","word_vec_size","=","embedding_dim",",","position_encoding","=","opt",".","position_encoding",",","feat_merge","=","opt",".","feat_merge",",","feat_vec_exponent","=","opt",".","feat_vec_exponent",",","feat_vec_size","=","opt",".","feat_vec_size",",","dropout","=","opt",".","dropout",",","word_padding_idx","=","word_padding_idx",",","feat_padding_idx","=","feats_padding_idx",",","word_vocab_size","=","num_word_embeddings",",","feat_vocab_sizes","=","num_feat_embeddings",",","sparse","=","opt",".","optim","==","\"sparseadam\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/model_builder.py#L29-L61"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/model_builder.py","language":"python","identifier":"build_encoder","parameters":"(opt, embeddings)","argument_list":"","return_statement":"","docstring":"Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.","docstring_summary":"Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this 
encoder.","docstring_tokens":["Various","encoder","dispatcher","function",".","Args",":","opt",":","the","option","in","current","environment",".","embeddings","(","Embeddings",")",":","vocab","embeddings","for","this","encoder","."],"function":"def build_encoder(opt, embeddings):\n \"\"\"\n Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.\n \"\"\"\n if opt.encoder_type == \"transformer\":\n return TransformerEncoder(opt.enc_layers, opt.rnn_size,\n opt.heads, opt.transformer_ff,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"cnn\":\n return CNNEncoder(opt.enc_layers, opt.rnn_size,\n opt.cnn_kernel_width,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"mean\":\n return MeanEncoder(opt.enc_layers, embeddings)\n else:\n # \"rnn\" or \"brnn\"\n return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,\n opt.rnn_size, opt.dropout, embeddings,\n opt.bridge)","function_tokens":["def","build_encoder","(","opt",",","embeddings",")",":","if","opt",".","encoder_type","==","\"transformer\"",":","return","TransformerEncoder","(","opt",".","enc_layers",",","opt",".","rnn_size",",","opt",".","heads",",","opt",".","transformer_ff",",","opt",".","dropout",",","embeddings",")","elif","opt",".","encoder_type","==","\"cnn\"",":","return","CNNEncoder","(","opt",".","enc_layers",",","opt",".","rnn_size",",","opt",".","cnn_kernel_width",",","opt",".","dropout",",","embeddings",")","elif","opt",".","encoder_type","==","\"mean\"",":","return","MeanEncoder","(","opt",".","enc_layers",",","embeddings",")","else",":","# \"rnn\" or \"brnn\"","return","RNNEncoder","(","opt",".","rnn_type",",","opt",".","brnn",",","opt",".","enc_layers",",","opt",".","rnn_size",",","opt",".","dropout",",","embeddings",",","opt",".","bridge",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/model_builder.py#L64-L85"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/model_builder.py","language":"python","identifier":"build_decoder","parameters":"(opt, embeddings)","argument_list":"","return_statement":"","docstring":"Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.","docstring_summary":"Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.","docstring_tokens":["Various","decoder","dispatcher","function",".","Args",":","opt",":","the","option","in","current","environment",".","embeddings","(","Embeddings",")",":","vocab","embeddings","for","this","decoder","."],"function":"def build_decoder(opt, embeddings):\n \"\"\"\n Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.\n \"\"\"\n if opt.decoder_type == \"transformer\":\n return TransformerDecoder(opt.dec_layers, opt.rnn_size,\n opt.heads, opt.transformer_ff,\n opt.global_attention, opt.copy_attn,\n opt.self_attn_type,\n opt.dropout, embeddings)\n elif opt.decoder_type == \"cnn\":\n return CNNDecoder(opt.dec_layers, opt.rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.cnn_kernel_width, opt.dropout,\n embeddings)\n elif opt.input_feed:\n return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.rnn_size,\n 
opt.global_attention,\n                                   opt.global_attention_function,\n                                   opt.coverage_attn,\n                                   opt.context_gate,\n                                   opt.copy_attn,\n                                   opt.dropout,\n                                   embeddings,\n                                   opt.reuse_copy_attn)\n    else:\n        return StdRNNDecoder(opt.rnn_type, opt.brnn,\n                             opt.dec_layers, opt.rnn_size,\n                             opt.global_attention,\n                             opt.global_attention_function,\n                             opt.coverage_attn,\n                             opt.context_gate,\n                             opt.copy_attn,\n                             opt.dropout,\n                             embeddings,\n                             opt.reuse_copy_attn)","function_tokens":["def","build_decoder","(","opt",",","embeddings",")",":","if","opt",".","decoder_type","==","\"transformer\"",":","return","TransformerDecoder","(","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","heads",",","opt",".","transformer_ff",",","opt",".","global_attention",",","opt",".","copy_attn",",","opt",".","self_attn_type",",","opt",".","dropout",",","embeddings",")","elif","opt",".","decoder_type","==","\"cnn\"",":","return","CNNDecoder","(","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","global_attention",",","opt",".","copy_attn",",","opt",".","cnn_kernel_width",",","opt",".","dropout",",","embeddings",")","elif","opt",".","input_feed",":","return","InputFeedRNNDecoder","(","opt",".","rnn_type",",","opt",".","brnn",",","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","global_attention",",","opt",".","global_attention_function",",","opt",".","coverage_attn",",","opt",".","context_gate",",","opt",".","copy_attn",",","opt",".","dropout",",","embeddings",",","opt",".","reuse_copy_attn",")","else",":","return","StdRNNDecoder","(","opt",".","rnn_type",",","opt",".","brnn",",","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","global_attention",",","opt",".","global_attention_function",",","opt",".","coverage_attn",",","opt",".","context_gate",",","opt",".","copy_attn",",","opt",".","dropout",",","embeddings",",","opt",".","reuse_copy_attn",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/model_builder.py#L88-L127"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/model_builder.py","language":"python","identifier":"build_base_model","parameters":"(model_opt, fields, gpu, checkpoint=None)","argument_list":"","return_statement":"return model","docstring":"Args:\n        model_opt: the option loaded from checkpoint.\n        fields: `Field` objects for the model.\n        gpu(bool): whether to use gpu.\n        checkpoint: the model generated by train phase, or a resumed snapshot\n                    model from a stopped training.\n    Returns:\n        the NMTModel.","docstring_summary":"Args:\n        model_opt: the option loaded from checkpoint.\n        fields: `Field` objects for the model.\n        gpu(bool): whether to use gpu.\n        checkpoint: the model generated by train phase, or a resumed snapshot\n                    model from a stopped training.\n    Returns:\n        the NMTModel.","docstring_tokens":["Args",":","model_opt",":","the","option","loaded","from","checkpoint",".","fields",":","Field","objects","for","the","model",".","gpu","(","bool",")",":","whether","to","use","gpu",".","checkpoint",":","the","model","generated","by","train","phase","or","a","resumed","snapshot","model","from","a","stopped","training",".","Returns",":","the","NMTModel","."],"function":"def build_base_model(model_opt, fields, gpu, checkpoint=None):\n    \"\"\"\n    Args:\n        model_opt: the option loaded from checkpoint.\n        fields: `Field` objects for the model.\n        gpu(bool): whether to use gpu.\n        checkpoint: the model generated by train phase, or a resumed snapshot\n                    model 
from a stopped training.\n Returns:\n the NMTModel.\n \"\"\"\n assert model_opt.model_type in [\"text\", \"img\", \"audio\"], \\\n (\"Unsupported model type %s\" % (model_opt.model_type))\n\n # Build encoder.\n if model_opt.model_type == \"text\":\n src_dict = fields[\"src\"].vocab\n feature_dicts = inputters.collect_feature_vocabs(fields, 'src')\n src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)\n encoder = build_encoder(model_opt, src_embeddings)\n elif model_opt.model_type == \"img\":\n if (\"image_channel_size\" not in model_opt.__dict__):\n image_channel_size = 3\n else:\n image_channel_size = model_opt.image_channel_size\n\n encoder = ImageEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.rnn_size,\n model_opt.dropout,\n image_channel_size)\n elif model_opt.model_type == \"audio\":\n encoder = AudioEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.rnn_size,\n model_opt.dropout,\n model_opt.sample_rate,\n model_opt.window_size)\n\n model_parameters = filter(lambda p: p.requires_grad, encoder.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n # Build decoder.\n tgt_dict = fields[\"tgt\"].vocab\n feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')\n tgt_embeddings = build_embeddings(model_opt, tgt_dict,\n feature_dicts, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src\/tgt vocab should be the same if `-share_vocab` is specified.\n if src_dict != tgt_dict:\n raise AssertionError('The `-share_vocab` should be set during '\n 'preprocess if you use share_embeddings!')\n\n tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight\n\n decoder = build_decoder(model_opt, tgt_embeddings)\n\n # Build NMTModel(= encoder + decoder).\n device = torch.device(\"cuda\" if gpu else \"cpu\")\n model = onmt.models.NMTModel(encoder, decoder)\n model.model_type = model_opt.model_type\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n generator = nn.Sequential(\n nn.Linear(model_opt.rnn_size, len(fields[\"tgt\"].vocab)), gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n else:\n generator = CopyGenerator(model_opt.rnn_size,\n fields[\"tgt\"].vocab)\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n model.load_state_dict(checkpoint['model'])\n generator.load_state_dict(checkpoint['generator'])\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if hasattr(model.encoder, 'embeddings'):\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)\n\n # Add generator to model (this registers it as parameter of model).\n model.generator = generator\n model.to(device)\n\n return 
model","function_tokens":["def","build_base_model","(","model_opt",",","fields",",","gpu",",","checkpoint","=","None",")",":","assert","model_opt",".","model_type","in","[","\"text\"",",","\"img\"",",","\"audio\"","]",",","(","\"Unsupported model type %s\"","%","(","model_opt",".","model_type",")",")","# Build encoder.","if","model_opt",".","model_type","==","\"text\"",":","src_dict","=","fields","[","\"src\"","]",".","vocab","feature_dicts","=","inputters",".","collect_feature_vocabs","(","fields",",","'src'",")","src_embeddings","=","build_embeddings","(","model_opt",",","src_dict",",","feature_dicts",")","encoder","=","build_encoder","(","model_opt",",","src_embeddings",")","elif","model_opt",".","model_type","==","\"img\"",":","if","(","\"image_channel_size\"","not","in","model_opt",".","__dict__",")",":","image_channel_size","=","3","else",":","image_channel_size","=","model_opt",".","image_channel_size","encoder","=","ImageEncoder","(","model_opt",".","enc_layers",",","model_opt",".","brnn",",","model_opt",".","rnn_size",",","model_opt",".","dropout",",","image_channel_size",")","elif","model_opt",".","model_type","==","\"audio\"",":","encoder","=","AudioEncoder","(","model_opt",".","enc_layers",",","model_opt",".","brnn",",","model_opt",".","rnn_size",",","model_opt",".","dropout",",","model_opt",".","sample_rate",",","model_opt",".","window_size",")","model_parameters","=","filter","(","lambda","p",":","p",".","requires_grad",",","encoder",".","parameters","(",")",")","params","=","sum","(","[","np",".","prod","(","p",".","size","(",")",")","for","p","in","model_parameters","]",")","# Build decoder.","tgt_dict","=","fields","[","\"tgt\"","]",".","vocab","feature_dicts","=","inputters",".","collect_feature_vocabs","(","fields",",","'tgt'",")","tgt_embeddings","=","build_embeddings","(","model_opt",",","tgt_dict",",","feature_dicts",",","for_encoder","=","False",")","# Share the embedding matrix - preprocess with share_vocab required.","if","model_opt",".","share_embeddings",":","# src\/tgt vocab should be the same if `-share_vocab` is specified.","if","src_dict","!=","tgt_dict",":","raise","AssertionError","(","'The `-share_vocab` should be set during '","'preprocess if you use share_embeddings!'",")","tgt_embeddings",".","word_lut",".","weight","=","src_embeddings",".","word_lut",".","weight","decoder","=","build_decoder","(","model_opt",",","tgt_embeddings",")","# Build NMTModel(= encoder + decoder).","device","=","torch",".","device","(","\"cuda\"","if","gpu","else","\"cpu\"",")","model","=","onmt",".","models",".","NMTModel","(","encoder",",","decoder",")","model",".","model_type","=","model_opt",".","model_type","# Build Generator.","if","not","model_opt",".","copy_attn",":","if","model_opt",".","generator_function","==","\"sparsemax\"",":","gen_func","=","onmt",".","modules",".","sparse_activations",".","LogSparsemax","(","dim","=","-","1",")","else",":","gen_func","=","nn",".","LogSoftmax","(","dim","=","-","1",")","generator","=","nn",".","Sequential","(","nn",".","Linear","(","model_opt",".","rnn_size",",","len","(","fields","[","\"tgt\"","]",".","vocab",")",")",",","gen_func",")","if","model_opt",".","share_decoder_embeddings",":","generator","[","0","]",".","weight","=","decoder",".","embeddings",".","word_lut",".","weight","else",":","generator","=","CopyGenerator","(","model_opt",".","rnn_size",",","fields","[","\"tgt\"","]",".","vocab",")","# Load the model states from checkpoint or initialize 
them.","if","checkpoint","is","not","None",":","model",".","load_state_dict","(","checkpoint","[","'model'","]",")","generator",".","load_state_dict","(","checkpoint","[","'generator'","]",")","else",":","if","model_opt",".","param_init","!=","0.0",":","for","p","in","model",".","parameters","(",")",":","p",".","data",".","uniform_","(","-","model_opt",".","param_init",",","model_opt",".","param_init",")","for","p","in","generator",".","parameters","(",")",":","p",".","data",".","uniform_","(","-","model_opt",".","param_init",",","model_opt",".","param_init",")","if","model_opt",".","param_init_glorot",":","for","p","in","model",".","parameters","(",")",":","if","p",".","dim","(",")",">","1",":","xavier_uniform_","(","p",")","for","p","in","generator",".","parameters","(",")",":","if","p",".","dim","(",")",">","1",":","xavier_uniform_","(","p",")","if","hasattr","(","model",".","encoder",",","'embeddings'",")",":","model",".","encoder",".","embeddings",".","load_pretrained_vectors","(","model_opt",".","pre_word_vecs_enc",",","model_opt",".","fix_word_vecs_enc",")","if","hasattr","(","model",".","decoder",",","'embeddings'",")",":","model",".","decoder",".","embeddings",".","load_pretrained_vectors","(","model_opt",".","pre_word_vecs_dec",",","model_opt",".","fix_word_vecs_dec",")","# Add generator to model (this registers it as parameter of model).","model",".","generator","=","generator","model",".","to","(","device",")","return","model"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/model_builder.py#L148-L255"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/model_builder.py","language":"python","identifier":"build_model","parameters":"(model_opt, opt, fields, checkpoint)","argument_list":"","return_statement":"return model","docstring":"Build the Model","docstring_summary":"Build the Model","docstring_tokens":["Build","the","Model"],"function":"def build_model(model_opt, opt, fields, checkpoint):\n \"\"\" Build the Model \"\"\"\n logger.info('Building model...')\n model = build_base_model(model_opt, fields,\n use_gpu(opt), checkpoint)\n logger.info(model)\n return model","function_tokens":["def","build_model","(","model_opt",",","opt",",","fields",",","checkpoint",")",":","logger",".","info","(","'Building model...'",")","model","=","build_base_model","(","model_opt",",","fields",",","use_gpu","(","opt",")",",","checkpoint",")","logger",".","info","(","model",")","return","model"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/model_builder.py#L258-L264"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"build_trainer","parameters":"(opt, device_id, model, fields,\n optim, data_type, model_saver=None)","argument_list":"","return_statement":"return trainer","docstring":"Simplify `Trainer` creation based on user `opt`s*\n\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. 
\"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model","docstring_summary":"Simplify `Trainer` creation based on user `opt`s*","docstring_tokens":["Simplify","Trainer","creation","based","on","user","opt","s","*"],"function":"def build_trainer(opt, device_id, model, fields,\n optim, data_type, model_saver=None):\n \"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. \"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n train_loss = onmt.utils.loss.build_loss_compute(\n model, fields[\"tgt\"].vocab, opt)\n valid_loss = onmt.utils.loss.build_loss_compute(\n model, fields[\"tgt\"].vocab, opt, train=False)\n\n trunc_size = opt.truncated_decoder # Badly named...\n shard_size = opt.max_generator_batches\n norm_method = opt.normalization\n grad_accum_count = opt.accum_count\n n_gpu = opt.world_size\n if device_id >= 0:\n gpu_rank = opt.gpu_ranks[device_id]\n else:\n gpu_rank = 0\n n_gpu = 0\n gpu_verbose_level = opt.gpu_verbose_level\n\n report_manager = onmt.utils.build_report_manager(opt)\n trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,\n shard_size, data_type, norm_method,\n grad_accum_count, n_gpu, gpu_rank,\n gpu_verbose_level, report_manager,\n model_saver=model_saver)\n return trainer","function_tokens":["def","build_trainer","(","opt",",","device_id",",","model",",","fields",",","optim",",","data_type",",","model_saver","=","None",")",":","train_loss","=","onmt",".","utils",".","loss",".","build_loss_compute","(","model",",","fields","[","\"tgt\"","]",".","vocab",",","opt",")","valid_loss","=","onmt",".","utils",".","loss",".","build_loss_compute","(","model",",","fields","[","\"tgt\"","]",".","vocab",",","opt",",","train","=","False",")","trunc_size","=","opt",".","truncated_decoder","# Badly named...","shard_size","=","opt",".","max_generator_batches","norm_method","=","opt",".","normalization","grad_accum_count","=","opt",".","accum_count","n_gpu","=","opt",".","world_size","if","device_id",">=","0",":","gpu_rank","=","opt",".","gpu_ranks","[","device_id","]","else",":","gpu_rank","=","0","n_gpu","=","0","gpu_verbose_level","=","opt",".","gpu_verbose_level","report_manager","=","onmt",".","utils",".","build_report_manager","(","opt",")","trainer","=","onmt",".","Trainer","(","model",",","train_loss",",","valid_loss",",","optim",",","trunc_size",",","shard_size",",","data_type",",","norm_method",",","grad_accum_count",",","n_gpu",",","gpu_rank",",","gpu_verbose_level",",","report_manager",",","model_saver","=","model_saver",")","return","trainer"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L23-L61"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer.train","parameters":"(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps)","argument_list":"","return_statement":"return total_stats","docstring":"The main training loops.\n by iterating over training data (i.e. 
`train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n\n Return:\n None","docstring_summary":"The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`","docstring_tokens":["The","main","training","loops",".","by","iterating","over","training","data","(","i",".","e",".","train_iter_fct",")","and","running","validation","(","i",".","e",".","iterating","over","valid_iter_fct"],"function":"def train(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps):\n \"\"\"\n The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n\n Return:\n None\n \"\"\"\n logger.info('Start training...')\n\n step = self.optim._step + 1\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = onmt.utils.Statistics()\n report_stats = onmt.utils.Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n #pdb.set_trace()\n while step <= train_steps:\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n if self.gpu_verbose_level > 1:\n logger.info(\"GpuRank %d: index: %d accum: %d\"\n % (self.gpu_rank, i, accum))\n\n true_batchs.append(batch)\n\n if self.norm_method == \"tokens\":\n num_tokens = batch.tgt[1:].ne(\n self.train_loss.padding_idx).sum()\n normalization += num_tokens.item()\n else:\n normalization += batch.batch_size\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.gpu_verbose_level > 0:\n logger.info(\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"\n % (self.gpu_rank, reduce_counter,\n len(true_batchs)))\n if self.n_gpu > 1:\n normalization = sum(onmt.utils.distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if (step % valid_steps == 0):\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: validate step %d'\n % (self.gpu_rank, step))\n valid_iter = valid_iter_fct()\n with torch.no_grad():\n valid_stats = self.validate(valid_iter)\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: gather valid stat \\\n step %d' % (self.gpu_rank, step))\n valid_stats = self._maybe_gather_stats(valid_stats)\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: report stat step %d'\n % (self.gpu_rank, step))\n self._report_step(self.optim.learning_rate,\n step, valid_stats=valid_stats)\n\n if self.gpu_rank == 0:\n self._maybe_save(step)\n step += 1\n if step > train_steps:\n break\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: we completed an epoch \\\n at step %d' % 
(self.gpu_rank, step))\n train_iter = train_iter_fct()\n\n return total_stats","function_tokens":["def","train","(","self",",","train_iter_fct",",","valid_iter_fct",",","train_steps",",","valid_steps",")",":","logger",".","info","(","'Start training...'",")","step","=","self",".","optim",".","_step","+","1","true_batchs","=","[","]","accum","=","0","normalization","=","0","train_iter","=","train_iter_fct","(",")","total_stats","=","onmt",".","utils",".","Statistics","(",")","report_stats","=","onmt",".","utils",".","Statistics","(",")","self",".","_start_report_manager","(","start_time","=","total_stats",".","start_time",")","#pdb.set_trace()","while","step","<=","train_steps",":","reduce_counter","=","0","for","i",",","batch","in","enumerate","(","train_iter",")",":","if","self",".","n_gpu","==","0","or","(","i","%","self",".","n_gpu","==","self",".","gpu_rank",")",":","if","self",".","gpu_verbose_level",">","1",":","logger",".","info","(","\"GpuRank %d: index: %d accum: %d\"","%","(","self",".","gpu_rank",",","i",",","accum",")",")","true_batchs",".","append","(","batch",")","if","self",".","norm_method","==","\"tokens\"",":","num_tokens","=","batch",".","tgt","[","1",":","]",".","ne","(","self",".","train_loss",".","padding_idx",")",".","sum","(",")","normalization","+=","num_tokens",".","item","(",")","else",":","normalization","+=","batch",".","batch_size","accum","+=","1","if","accum","==","self",".","grad_accum_count",":","reduce_counter","+=","1","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"","%","(","self",".","gpu_rank",",","reduce_counter",",","len","(","true_batchs",")",")",")","if","self",".","n_gpu",">","1",":","normalization","=","sum","(","onmt",".","utils",".","distributed",".","all_gather_list","(","normalization",")",")","self",".","_gradient_accumulation","(","true_batchs",",","normalization",",","total_stats",",","report_stats",")","report_stats","=","self",".","_maybe_report_training","(","step",",","train_steps",",","self",".","optim",".","learning_rate",",","report_stats",")","true_batchs","=","[","]","accum","=","0","normalization","=","0","if","(","step","%","valid_steps","==","0",")",":","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: validate step %d'","%","(","self",".","gpu_rank",",","step",")",")","valid_iter","=","valid_iter_fct","(",")","with","torch",".","no_grad","(",")",":","valid_stats","=","self",".","validate","(","valid_iter",")","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: gather valid stat \\\n step %d'","%","(","self",".","gpu_rank",",","step",")",")","valid_stats","=","self",".","_maybe_gather_stats","(","valid_stats",")","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: report stat step %d'","%","(","self",".","gpu_rank",",","step",")",")","self",".","_report_step","(","self",".","optim",".","learning_rate",",","step",",","valid_stats","=","valid_stats",")","if","self",".","gpu_rank","==","0",":","self",".","_maybe_save","(","step",")","step","+=","1","if","step",">","train_steps",":","break","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: we completed an epoch \\\n at step 
%d'","%","(","self",".","gpu_rank",",","step",")",")","train_iter","=","train_iter_fct","(",")","return","total_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L118-L217"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer.validate","parameters":"(self, valid_iter)","argument_list":"","return_statement":"return stats","docstring":"Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics","docstring_summary":"Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics","docstring_tokens":["Validate","model",".","valid_iter",":","validate","data","iterator","Returns",":",":","obj",":","nmt",".","Statistics",":","validation","loss","statistics"],"function":"def validate(self, valid_iter):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n # Set model in validating mode.\n self.model.eval()\n\n stats = onmt.utils.Statistics()\n\n for batch in valid_iter:\n src = inputters.make_features(batch, 'src', self.data_type)\n if self.data_type == 'text':\n _, src_lengths = batch.src\n else:\n src_lengths = None\n\n tgt = inputters.make_features(batch, 'tgt')\n\n # F-prop through the model.\n outputs, attns, _ = self.model(src, tgt, src_lengths)\n\n # Compute loss.\n batch_stats = self.valid_loss.monolithic_compute_loss(\n batch, outputs, attns)\n\n # Update statistics.\n stats.update(batch_stats)\n\n # Set model back to training mode.\n self.model.train()\n\n return stats","function_tokens":["def","validate","(","self",",","valid_iter",")",":","# Set model in validating mode.","self",".","model",".","eval","(",")","stats","=","onmt",".","utils",".","Statistics","(",")","for","batch","in","valid_iter",":","src","=","inputters",".","make_features","(","batch",",","'src'",",","self",".","data_type",")","if","self",".","data_type","==","'text'",":","_",",","src_lengths","=","batch",".","src","else",":","src_lengths","=","None","tgt","=","inputters",".","make_features","(","batch",",","'tgt'",")","# F-prop through the model.","outputs",",","attns",",","_","=","self",".","model","(","src",",","tgt",",","src_lengths",")","# Compute loss.","batch_stats","=","self",".","valid_loss",".","monolithic_compute_loss","(","batch",",","outputs",",","attns",")","# Update statistics.","stats",".","update","(","batch_stats",")","# Set model back to training mode.","self",".","model",".","train","(",")","return","stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L219-L252"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer._start_report_manager","parameters":"(self, start_time=None)","argument_list":"","return_statement":"","docstring":"Simple function to start report manager (if any)","docstring_summary":"Simple function to start report manager (if any)","docstring_tokens":["Simple","function","to","start","report","manager","(","if","any",")"],"function":"def _start_report_manager(self, start_time=None):\n \"\"\"\n Simple function to start report manager (if any)\n 
\"\"\"\n if self.report_manager is not None:\n if start_time is None:\n self.report_manager.start()\n else:\n self.report_manager.start_time = start_time","function_tokens":["def","_start_report_manager","(","self",",","start_time","=","None",")",":","if","self",".","report_manager","is","not","None",":","if","start_time","is","None",":","self",".","report_manager",".","start","(",")","else",":","self",".","report_manager",".","start_time","=","start_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L320-L328"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer._maybe_gather_stats","parameters":"(self, stat)","argument_list":"","return_statement":"return stat","docstring":"Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object","docstring_summary":"Gather statistics in multi-processes cases","docstring_tokens":["Gather","statistics","in","multi","-","processes","cases"],"function":"def _maybe_gather_stats(self, stat):\n \"\"\"\n Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n if stat is not None and self.n_gpu > 1:\n return onmt.utils.Statistics.all_gather_stats(stat)\n return stat","function_tokens":["def","_maybe_gather_stats","(","self",",","stat",")",":","if","stat","is","not","None","and","self",".","n_gpu",">","1",":","return","onmt",".","utils",".","Statistics",".","all_gather_stats","(","stat",")","return","stat"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L330-L343"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer._maybe_report_training","parameters":"(self, step, num_steps, learning_rate,\n report_stats)","argument_list":"","return_statement":"","docstring":"Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc","docstring_summary":"Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc","docstring_tokens":["Simple","function","to","report","training","stats","(","if","report_manager","is","set",")","see","onmt",".","utils",".","ReportManagerBase",".","report_training","for","doc"],"function":"def _maybe_report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, learning_rate, report_stats,\n multigpu=self.n_gpu > 
1)","function_tokens":["def","_maybe_report_training","(","self",",","step",",","num_steps",",","learning_rate",",","report_stats",")",":","if","self",".","report_manager","is","not","None",":","return","self",".","report_manager",".","report_training","(","step",",","num_steps",",","learning_rate",",","report_stats",",","multigpu","=","self",".","n_gpu",">","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L345-L354"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer._report_step","parameters":"(self, learning_rate, step, train_stats=None,\n valid_stats=None)","argument_list":"","return_statement":"","docstring":"Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc","docstring_summary":"Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc","docstring_tokens":["Simple","function","to","report","stats","(","if","report_manager","is","set",")","see","onmt",".","utils",".","ReportManagerBase",".","report_step","for","doc"],"function":"def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n \"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)","function_tokens":["def","_report_step","(","self",",","learning_rate",",","step",",","train_stats","=","None",",","valid_stats","=","None",")",":","if","self",".","report_manager","is","not","None",":","return","self",".","report_manager",".","report_step","(","learning_rate",",","step",",","train_stats","=","train_stats",",","valid_stats","=","valid_stats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L356-L365"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/trainer.py","language":"python","identifier":"Trainer._maybe_save","parameters":"(self, step)","argument_list":"","return_statement":"","docstring":"Save the model if a model saver is set","docstring_summary":"Save the model if a model saver is set","docstring_tokens":["Save","the","model","if","a","model","saver","is","set"],"function":"def _maybe_save(self, step):\n \"\"\"\n Save the model if a model saver is set\n \"\"\"\n if self.model_saver is not None:\n self.model_saver.maybe_save(step)","function_tokens":["def","_maybe_save","(","self",",","step",")",":","if","self",".","model_saver","is","not","None",":","self",".","model_saver",".","maybe_save","(","step",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/trainer.py#L367-L372"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/opts.py","language":"python","identifier":"model_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"These options are passed to the construction of the model.\n Be careful with these as they will be used during 
translation.","docstring_summary":"These options are passed to the construction of the model.\n Be careful with these as they will be used during translation.","docstring_tokens":["These","options","are","passed","to","the","construction","of","the","model",".","Be","careful","with","these","as","they","will","be","used","during","translation","."],"function":"def model_opts(parser):\n \"\"\"\n These options are passed to the construction of the model.\n Be careful with these as they will be used during translation.\n \"\"\"\n\n # Embedding Options\n group = parser.add_argument_group('Model-Embeddings')\n group.add_argument('-src_word_vec_size', type=int, default=500,\n help='Word embedding size for src.')\n group.add_argument('-tgt_word_vec_size', type=int, default=500,\n help='Word embedding size for tgt.')\n group.add_argument('-word_vec_size', type=int, default=-1,\n help='Word embedding size for src and tgt.')\n\n group.add_argument('-share_decoder_embeddings', action='store_true',\n help=\"\"\"Use a shared weight matrix for the input and\n output word embeddings in the decoder.\"\"\")\n group.add_argument('-share_embeddings', action='store_true',\n help=\"\"\"Share the word embeddings between encoder\n and decoder. Need to use shared dictionary for this\n option.\"\"\")\n group.add_argument('-position_encoding', action='store_true',\n help=\"\"\"Use a sin to mark relative words positions.\n Necessary for non-RNN style models.\n \"\"\")\n\n group = parser.add_argument_group('Model-Embedding Features')\n group.add_argument('-feat_merge', type=str, default='concat',\n choices=['concat', 'sum', 'mlp'],\n help=\"\"\"Merge action for incorporating features embeddings.\n Options [concat|sum|mlp].\"\"\")\n group.add_argument('-feat_vec_size', type=int, default=-1,\n help=\"\"\"If specified, feature embedding sizes\n will be set to this. Otherwise, feat_vec_exponent\n will be used.\"\"\")\n group.add_argument('-feat_vec_exponent', type=float, default=0.7,\n help=\"\"\"If -feat_merge_size is not set, feature\n embedding sizes will be set to N^feat_vec_exponent\n where N is the number of values the feature takes.\"\"\")\n\n # Encoder-Decoder Options\n group = parser.add_argument_group('Model- Encoder-Decoder')\n group.add_argument('-model_type', default='text',\n help=\"\"\"Type of source model to use. Allows\n the system to incorporate non-text inputs.\n Options are [text|img|audio].\"\"\")\n\n group.add_argument('-encoder_type', type=str, default='rnn',\n choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],\n help=\"\"\"Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].\"\"\")\n group.add_argument('-decoder_type', type=str, default='rnn',\n choices=['rnn', 'transformer', 'cnn'],\n help=\"\"\"Type of decoder layer to use. Non-RNN layers\n are experimental. 
Options are\n [rnn|transformer|cnn].\"\"\")\n\n group.add_argument('-layers', type=int, default=-1,\n help='Number of layers in enc\/dec.')\n group.add_argument('-enc_layers', type=int, default=2,\n help='Number of layers in the encoder')\n group.add_argument('-dec_layers', type=int, default=2,\n help='Number of layers in the decoder')\n group.add_argument('-rnn_size', type=int, default=500,\n help='Size of rnn hidden states')\n group.add_argument('-cnn_kernel_width', type=int, default=3,\n help=\"\"\"Size of windows in the cnn, the kernel_size is\n (cnn_kernel_width, 1) in conv layer\"\"\")\n\n group.add_argument('-input_feed', type=int, default=1,\n help=\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\")\n group.add_argument('-bridge', action=\"store_true\",\n help=\"\"\"Have an additional layer between the last encoder\n state and the first decoder state\"\"\")\n group.add_argument('-rnn_type', type=str, default='LSTM',\n choices=['LSTM', 'GRU', 'SRU'],\n action=CheckSRU,\n help=\"\"\"The gate type to use in the RNNs\"\"\")\n # group.add_argument('-residual', action=\"store_true\",\n # help=\"Add residual connections between RNN layers.\")\n\n group.add_argument('-brnn', action=DeprecateAction,\n help=\"Deprecated, use `encoder_type`.\")\n\n group.add_argument('-context_gate', type=str, default=None,\n choices=['source', 'target', 'both'],\n help=\"\"\"Type of context gate to use.\n Do not select for no context gate.\"\"\")\n\n # Attention options\n group = parser.add_argument_group('Model- Attention')\n group.add_argument('-global_attention', type=str, default='general',\n choices=['dot', 'general', 'mlp'],\n help=\"\"\"The attention type to use:\n dotprod or general (Luong) or MLP (Bahdanau)\"\"\")\n group.add_argument('-global_attention_function', type=str,\n default=\"softmax\", choices=[\"softmax\", \"sparsemax\"])\n group.add_argument('-self_attn_type', type=str, default=\"scaled-dot\",\n help=\"\"\"Self attention type in Transformer decoder\n layer -- currently \"scaled-dot\" or \"average\" \"\"\")\n group.add_argument('-heads', type=int, default=8,\n help='Number of heads for transformer self-attention')\n group.add_argument('-transformer_ff', type=int, default=2048,\n help='Size of hidden transformer feed-forward')\n\n # Generator and loss options.\n group.add_argument('-copy_attn', action=\"store_true\",\n help='Train copy attention layer.')\n group.add_argument('-generator_function', default=\"log_softmax\",\n choices=[\"log_softmax\", \"sparsemax\"],\n help=\"\"\"Which function to use for generating\n probabilities over the target vocabulary (choices:\n log_softmax, sparsemax)\"\"\")\n group.add_argument('-copy_attn_force', action=\"store_true\",\n help='When available, train to copy.')\n group.add_argument('-reuse_copy_attn', action=\"store_true\",\n help=\"Reuse standard attention for copy\")\n group.add_argument('-copy_loss_by_seqlength', action=\"store_true\",\n help=\"Divide copy loss by length of sequence\")\n group.add_argument('-coverage_attn', action=\"store_true\",\n help='Train a coverage attention layer.')\n group.add_argument('-lambda_coverage', type=float, default=1,\n help='Lambda value for coverage.')","function_tokens":["def","model_opts","(","parser",")",":","# Embedding 
Options","group","=","parser",".","add_argument_group","(","'Model-Embeddings'",")","group",".","add_argument","(","'-src_word_vec_size'",",","type","=","int",",","default","=","500",",","help","=","'Word embedding size for src.'",")","group",".","add_argument","(","'-tgt_word_vec_size'",",","type","=","int",",","default","=","500",",","help","=","'Word embedding size for tgt.'",")","group",".","add_argument","(","'-word_vec_size'",",","type","=","int",",","default","=","-","1",",","help","=","'Word embedding size for src and tgt.'",")","group",".","add_argument","(","'-share_decoder_embeddings'",",","action","=","'store_true'",",","help","=","\"\"\"Use a shared weight matrix for the input and\n output word embeddings in the decoder.\"\"\"",")","group",".","add_argument","(","'-share_embeddings'",",","action","=","'store_true'",",","help","=","\"\"\"Share the word embeddings between encoder\n and decoder. Need to use shared dictionary for this\n option.\"\"\"",")","group",".","add_argument","(","'-position_encoding'",",","action","=","'store_true'",",","help","=","\"\"\"Use a sin to mark relative words positions.\n Necessary for non-RNN style models.\n \"\"\"",")","group","=","parser",".","add_argument_group","(","'Model-Embedding Features'",")","group",".","add_argument","(","'-feat_merge'",",","type","=","str",",","default","=","'concat'",",","choices","=","[","'concat'",",","'sum'",",","'mlp'","]",",","help","=","\"\"\"Merge action for incorporating features embeddings.\n Options [concat|sum|mlp].\"\"\"",")","group",".","add_argument","(","'-feat_vec_size'",",","type","=","int",",","default","=","-","1",",","help","=","\"\"\"If specified, feature embedding sizes\n will be set to this. Otherwise, feat_vec_exponent\n will be used.\"\"\"",")","group",".","add_argument","(","'-feat_vec_exponent'",",","type","=","float",",","default","=","0.7",",","help","=","\"\"\"If -feat_merge_size is not set, feature\n embedding sizes will be set to N^feat_vec_exponent\n where N is the number of values the feature takes.\"\"\"",")","# Encoder-Decoder Options","group","=","parser",".","add_argument_group","(","'Model- Encoder-Decoder'",")","group",".","add_argument","(","'-model_type'",",","default","=","'text'",",","help","=","\"\"\"Type of source model to use. Allows\n the system to incorporate non-text inputs.\n Options are [text|img|audio].\"\"\"",")","group",".","add_argument","(","'-encoder_type'",",","type","=","str",",","default","=","'rnn'",",","choices","=","[","'rnn'",",","'brnn'",",","'mean'",",","'transformer'",",","'cnn'","]",",","help","=","\"\"\"Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].\"\"\"",")","group",".","add_argument","(","'-decoder_type'",",","type","=","str",",","default","=","'rnn'",",","choices","=","[","'rnn'",",","'transformer'",",","'cnn'","]",",","help","=","\"\"\"Type of decoder layer to use. Non-RNN layers\n are experimental. 
Options are\n [rnn|transformer|cnn].\"\"\"",")","group",".","add_argument","(","'-layers'",",","type","=","int",",","default","=","-","1",",","help","=","'Number of layers in enc\/dec.'",")","group",".","add_argument","(","'-enc_layers'",",","type","=","int",",","default","=","2",",","help","=","'Number of layers in the encoder'",")","group",".","add_argument","(","'-dec_layers'",",","type","=","int",",","default","=","2",",","help","=","'Number of layers in the decoder'",")","group",".","add_argument","(","'-rnn_size'",",","type","=","int",",","default","=","500",",","help","=","'Size of rnn hidden states'",")","group",".","add_argument","(","'-cnn_kernel_width'",",","type","=","int",",","default","=","3",",","help","=","\"\"\"Size of windows in the cnn, the kernel_size is\n (cnn_kernel_width, 1) in conv layer\"\"\"",")","group",".","add_argument","(","'-input_feed'",",","type","=","int",",","default","=","1",",","help","=","\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\"",")","group",".","add_argument","(","'-bridge'",",","action","=","\"store_true\"",",","help","=","\"\"\"Have an additional layer between the last encoder\n state and the first decoder state\"\"\"",")","group",".","add_argument","(","'-rnn_type'",",","type","=","str",",","default","=","'LSTM'",",","choices","=","[","'LSTM'",",","'GRU'",",","'SRU'","]",",","action","=","CheckSRU",",","help","=","\"\"\"The gate type to use in the RNNs\"\"\"",")","# group.add_argument('-residual', action=\"store_true\",","# help=\"Add residual connections between RNN layers.\")","group",".","add_argument","(","'-brnn'",",","action","=","DeprecateAction",",","help","=","\"Deprecated, use `encoder_type`.\"",")","group",".","add_argument","(","'-context_gate'",",","type","=","str",",","default","=","None",",","choices","=","[","'source'",",","'target'",",","'both'","]",",","help","=","\"\"\"Type of context gate to use.\n Do not select for no context gate.\"\"\"",")","# Attention options","group","=","parser",".","add_argument_group","(","'Model- Attention'",")","group",".","add_argument","(","'-global_attention'",",","type","=","str",",","default","=","'general'",",","choices","=","[","'dot'",",","'general'",",","'mlp'","]",",","help","=","\"\"\"The attention type to use:\n dotprod or general (Luong) or MLP (Bahdanau)\"\"\"",")","group",".","add_argument","(","'-global_attention_function'",",","type","=","str",",","default","=","\"softmax\"",",","choices","=","[","\"softmax\"",",","\"sparsemax\"","]",")","group",".","add_argument","(","'-self_attn_type'",",","type","=","str",",","default","=","\"scaled-dot\"",",","help","=","\"\"\"Self attention type in Transformer decoder\n layer -- currently \"scaled-dot\" or \"average\" \"\"\"",")","group",".","add_argument","(","'-heads'",",","type","=","int",",","default","=","8",",","help","=","'Number of heads for transformer self-attention'",")","group",".","add_argument","(","'-transformer_ff'",",","type","=","int",",","default","=","2048",",","help","=","'Size of hidden transformer feed-forward'",")","# Generator and loss options.","group",".","add_argument","(","'-copy_attn'",",","action","=","\"store_true\"",",","help","=","'Train copy attention layer.'",")","group",".","add_argument","(","'-generator_function'",",","default","=","\"log_softmax\"",",","choices","=","[","\"log_softmax\"",",","\"sparsemax\"","]",",","help","=","\"\"\"Which function to use for generating\n probabilities over the target vocabulary 
(choices:\n log_softmax, sparsemax)\"\"\"",")","group",".","add_argument","(","'-copy_attn_force'",",","action","=","\"store_true\"",",","help","=","'When available, train to copy.'",")","group",".","add_argument","(","'-reuse_copy_attn'",",","action","=","\"store_true\"",",","help","=","\"Reuse standard attention for copy\"",")","group",".","add_argument","(","'-copy_loss_by_seqlength'",",","action","=","\"store_true\"",",","help","=","\"Divide copy loss by length of sequence\"",")","group",".","add_argument","(","'-coverage_attn'",",","action","=","\"store_true\"",",","help","=","'Train a coverage attention layer.'",")","group",".","add_argument","(","'-lambda_coverage'",",","type","=","float",",","default","=","1",",","help","=","'Lambda value for coverage.'",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/opts.py#L8-L134"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/opts.py","language":"python","identifier":"preprocess_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"Pre-procesing options","docstring_summary":"Pre-procesing options","docstring_tokens":["Pre","-","procesing","options"],"function":"def preprocess_opts(parser):\n \"\"\" Pre-procesing options \"\"\"\n # Data options\n group = parser.add_argument_group('Data')\n group.add_argument('-data_type', default=\"text\",\n help=\"\"\"Type of the source input.\n Options are [text|img].\"\"\")\n\n group.add_argument('-train_src', required=True,\n help=\"Path to the training source data\")\n group.add_argument('-train_tgt', required=True,\n help=\"Path to the training target data\")\n group.add_argument('-valid_src', required=True,\n help=\"Path to the validation source data\")\n group.add_argument('-valid_tgt', required=True,\n help=\"Path to the validation target data\")\n\n group.add_argument('-src_dir', default=\"\",\n help=\"Source directory for image or audio files.\")\n\n group.add_argument('-save_data', required=True,\n help=\"Output file for the prepared data\")\n\n group.add_argument('-max_shard_size', type=int, default=0,\n help=\"\"\"For text corpus of large volume, it will\n be divided into shards of this size to preprocess.\n If 0, the data will be handled as a whole. The unit\n is in bytes. Optimal value should be multiples of\n 64 bytes. A commonly used sharding value is 131072000.\n It is recommended to ensure the corpus is shuffled\n before sharding.\"\"\")\n\n group.add_argument('-shard_size', type=int, default=0,\n help=\"\"\"Divide src_corpus and tgt_corpus into\n smaller multiple src_copus and tgt corpus files, then\n build shards, each shard will have\n opt.shard_size samples except last shard.\n shard_size=0 means no segmentation\n shard_size>0 means segment dataset into multiple shards,\n each shard has shard_size samples\"\"\")\n\n # Dictionary options, for text corpus\n\n group = parser.add_argument_group('Vocab')\n group.add_argument('-src_vocab', default=\"\",\n help=\"\"\"Path to an existing source vocabulary. Format:\n one word per line.\"\"\")\n group.add_argument('-tgt_vocab', default=\"\",\n help=\"\"\"Path to an existing target vocabulary. 
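Per the -shard_size help above, a positive value segments the corpus into shards of shard_size examples each (the last one smaller), while 0 keeps the data whole. The arithmetic, as a hypothetical helper:

import math

def num_shards(n_examples, shard_size):
    # shard_size == 0 means "no segmentation" per the help text above.
    if shard_size <= 0:
        return 1
    return math.ceil(n_examples / shard_size)

# e.g. a ~45k-pair training corpus with shard_size=10000 -> 5 shards
assert num_shards(44972, 10000) == 5
assert num_shards(44972, 0) == 1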
Format:\n one word per line.\"\"\")\n group.add_argument('-features_vocabs_prefix', type=str, default='',\n help=\"Path prefix to existing features vocabularies\")\n group.add_argument('-src_vocab_size', type=int, default=50000,\n help=\"Size of the source vocabulary\")\n group.add_argument('-tgt_vocab_size', type=int, default=50000,\n help=\"Size of the target vocabulary\")\n\n group.add_argument('-src_words_min_frequency', type=int, default=0)\n group.add_argument('-tgt_words_min_frequency', type=int, default=0)\n\n group.add_argument('-dynamic_dict', action='store_true',\n help=\"Create dynamic dictionaries\")\n group.add_argument('-share_vocab', action='store_true',\n help=\"Share source and target vocabulary\")\n\n # Truncation options, for text corpus\n group = parser.add_argument_group('Pruning')\n group.add_argument('-src_seq_length', type=int, default=50,\n help=\"Maximum source sequence length\")\n group.add_argument('-src_seq_length_trunc', type=int, default=0,\n help=\"Truncate source sequence length.\")\n group.add_argument('-tgt_seq_length', type=int, default=50,\n help=\"Maximum target sequence length to keep.\")\n group.add_argument('-tgt_seq_length_trunc', type=int, default=0,\n help=\"Truncate target sequence length.\")\n group.add_argument('-lower', action='store_true', help='lowercase data')\n\n # Data processing options\n group = parser.add_argument_group('Random')\n group.add_argument('-shuffle', type=int, default=1,\n help=\"Shuffle data\")\n group.add_argument('-seed', type=int, default=3435,\n help=\"Random seed\")\n\n group = parser.add_argument_group('Logging')\n group.add_argument('-report_every', type=int, default=100000,\n help=\"Report status every this many sentences\")\n group.add_argument('-log_file', type=str, default=\"\",\n help=\"Output logs to a file under this path.\")\n\n # Options most relevant to speech\n group = parser.add_argument_group('Speech')\n group.add_argument('-sample_rate', type=int, default=16000,\n help=\"Sample rate.\")\n group.add_argument('-window_size', type=float, default=.02,\n help=\"Window size for spectrogram in seconds.\")\n group.add_argument('-window_stride', type=float, default=.01,\n help=\"Window stride for spectrogram in seconds.\")\n group.add_argument('-window', default='hamming',\n help=\"Window type for spectrogram generation.\")\n\n # Option most relevant to image input\n group.add_argument('-image_channel_size', type=int, default=3,\n choices=[3, 1],\n help=\"\"\"Using grayscale image can training\n model faster and smaller\"\"\")","function_tokens":["def","preprocess_opts","(","parser",")",":","# Data options","group","=","parser",".","add_argument_group","(","'Data'",")","group",".","add_argument","(","'-data_type'",",","default","=","\"text\"",",","help","=","\"\"\"Type of the source input.\n Options are [text|img].\"\"\"",")","group",".","add_argument","(","'-train_src'",",","required","=","True",",","help","=","\"Path to the training source data\"",")","group",".","add_argument","(","'-train_tgt'",",","required","=","True",",","help","=","\"Path to the training target data\"",")","group",".","add_argument","(","'-valid_src'",",","required","=","True",",","help","=","\"Path to the validation source data\"",")","group",".","add_argument","(","'-valid_tgt'",",","required","=","True",",","help","=","\"Path to the validation target data\"",")","group",".","add_argument","(","'-src_dir'",",","default","=","\"\"",",","help","=","\"Source directory for image or audio 
files.\"",")","group",".","add_argument","(","'-save_data'",",","required","=","True",",","help","=","\"Output file for the prepared data\"",")","group",".","add_argument","(","'-max_shard_size'",",","type","=","int",",","default","=","0",",","help","=","\"\"\"For text corpus of large volume, it will\n be divided into shards of this size to preprocess.\n If 0, the data will be handled as a whole. The unit\n is in bytes. Optimal value should be multiples of\n 64 bytes. A commonly used sharding value is 131072000.\n It is recommended to ensure the corpus is shuffled\n before sharding.\"\"\"",")","group",".","add_argument","(","'-shard_size'",",","type","=","int",",","default","=","0",",","help","=","\"\"\"Divide src_corpus and tgt_corpus into\n smaller multiple src_copus and tgt corpus files, then\n build shards, each shard will have\n opt.shard_size samples except last shard.\n shard_size=0 means no segmentation\n shard_size>0 means segment dataset into multiple shards,\n each shard has shard_size samples\"\"\"",")","# Dictionary options, for text corpus","group","=","parser",".","add_argument_group","(","'Vocab'",")","group",".","add_argument","(","'-src_vocab'",",","default","=","\"\"",",","help","=","\"\"\"Path to an existing source vocabulary. Format:\n one word per line.\"\"\"",")","group",".","add_argument","(","'-tgt_vocab'",",","default","=","\"\"",",","help","=","\"\"\"Path to an existing target vocabulary. Format:\n one word per line.\"\"\"",")","group",".","add_argument","(","'-features_vocabs_prefix'",",","type","=","str",",","default","=","''",",","help","=","\"Path prefix to existing features vocabularies\"",")","group",".","add_argument","(","'-src_vocab_size'",",","type","=","int",",","default","=","50000",",","help","=","\"Size of the source vocabulary\"",")","group",".","add_argument","(","'-tgt_vocab_size'",",","type","=","int",",","default","=","50000",",","help","=","\"Size of the target vocabulary\"",")","group",".","add_argument","(","'-src_words_min_frequency'",",","type","=","int",",","default","=","0",")","group",".","add_argument","(","'-tgt_words_min_frequency'",",","type","=","int",",","default","=","0",")","group",".","add_argument","(","'-dynamic_dict'",",","action","=","'store_true'",",","help","=","\"Create dynamic dictionaries\"",")","group",".","add_argument","(","'-share_vocab'",",","action","=","'store_true'",",","help","=","\"Share source and target vocabulary\"",")","# Truncation options, for text corpus","group","=","parser",".","add_argument_group","(","'Pruning'",")","group",".","add_argument","(","'-src_seq_length'",",","type","=","int",",","default","=","50",",","help","=","\"Maximum source sequence length\"",")","group",".","add_argument","(","'-src_seq_length_trunc'",",","type","=","int",",","default","=","0",",","help","=","\"Truncate source sequence length.\"",")","group",".","add_argument","(","'-tgt_seq_length'",",","type","=","int",",","default","=","50",",","help","=","\"Maximum target sequence length to keep.\"",")","group",".","add_argument","(","'-tgt_seq_length_trunc'",",","type","=","int",",","default","=","0",",","help","=","\"Truncate target sequence length.\"",")","group",".","add_argument","(","'-lower'",",","action","=","'store_true'",",","help","=","'lowercase data'",")","# Data processing options","group","=","parser",".","add_argument_group","(","'Random'",")","group",".","add_argument","(","'-shuffle'",",","type","=","int",",","default","=","1",",","help","=","\"Shuffle 
data\"",")","group",".","add_argument","(","'-seed'",",","type","=","int",",","default","=","3435",",","help","=","\"Random seed\"",")","group","=","parser",".","add_argument_group","(","'Logging'",")","group",".","add_argument","(","'-report_every'",",","type","=","int",",","default","=","100000",",","help","=","\"Report status every this many sentences\"",")","group",".","add_argument","(","'-log_file'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Output logs to a file under this path.\"",")","# Options most relevant to speech","group","=","parser",".","add_argument_group","(","'Speech'",")","group",".","add_argument","(","'-sample_rate'",",","type","=","int",",","default","=","16000",",","help","=","\"Sample rate.\"",")","group",".","add_argument","(","'-window_size'",",","type","=","float",",","default","=",".02",",","help","=","\"Window size for spectrogram in seconds.\"",")","group",".","add_argument","(","'-window_stride'",",","type","=","float",",","default","=",".01",",","help","=","\"Window stride for spectrogram in seconds.\"",")","group",".","add_argument","(","'-window'",",","default","=","'hamming'",",","help","=","\"Window type for spectrogram generation.\"",")","# Option most relevant to image input","group",".","add_argument","(","'-image_channel_size'",",","type","=","int",",","default","=","3",",","choices","=","[","3",",","1","]",",","help","=","\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/opts.py#L137-L242"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/opts.py","language":"python","identifier":"train_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"Training and saving options","docstring_summary":"Training and saving options","docstring_tokens":["Training","and","saving","options"],"function":"def train_opts(parser):\n \"\"\" Training and saving options \"\"\"\n\n group = parser.add_argument_group('General')\n group.add_argument('-data', required=True,\n help=\"\"\"Path prefix to the \".train.pt\" and\n \".valid.pt\" file path from preprocess.py\"\"\")\n\n group.add_argument('-save_model', default='model',\n help=\"\"\"Model filename (the model will be saved as\n _N.pt where N is the number\n of steps\"\"\")\n\n group.add_argument('-save_checkpoint_steps', type=int, default=5000,\n help=\"\"\"Save a checkpoint every X steps\"\"\")\n group.add_argument('-keep_checkpoint', type=int, default=-1,\n help=\"\"\"Keep X checkpoints (negative: keep all)\"\"\")\n\n # GPU\n group.add_argument('-gpuid', default=[], nargs='+', type=int,\n help=\"Deprecated see world_size and gpu_ranks.\")\n group.add_argument('-gpu_ranks', default=[], nargs='+', type=int,\n help=\"list of ranks of each process.\")\n group.add_argument('-world_size', default=1, type=int,\n help=\"total number of distributed processes.\")\n group.add_argument('-gpu_backend', default='nccl', nargs='+', type=str,\n help=\"Type of torch distributed backend\")\n group.add_argument('-gpu_verbose_level', default=0, type=int,\n help=\"Gives more info on each process per GPU.\")\n group.add_argument('-master_ip', default=\"localhost\", type=str,\n help=\"IP of master for torch.distributed training.\")\n group.add_argument('-master_port', default=10000, type=int,\n help=\"Port of master for torch.distributed training.\")\n\n 
group.add_argument('-seed', type=int, default=-1,\n help=\"\"\"Random seed used for the experiments\n reproducibility.\"\"\")\n\n # Init options\n group = parser.add_argument_group('Initialization')\n group.add_argument('-param_init', type=float, default=0.1,\n help=\"\"\"Parameters are initialized over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization\"\"\")\n group.add_argument('-param_init_glorot', action='store_true',\n help=\"\"\"Init parameters with xavier_uniform.\n Required for transfomer.\"\"\")\n\n group.add_argument('-train_from', default='', type=str,\n help=\"\"\"If training from a checkpoint then this is the\n path to the pretrained model's state_dict.\"\"\")\n\n # Pretrained word vectors\n group.add_argument('-pre_word_vecs_enc',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\")\n group.add_argument('-pre_word_vecs_dec',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\")\n # Fixed word vectors\n group.add_argument('-fix_word_vecs_enc',\n action='store_true',\n help=\"Fix word embeddings on the encoder side.\")\n group.add_argument('-fix_word_vecs_dec',\n action='store_true',\n help=\"Fix word embeddings on the decoder side.\")\n\n # Optimization options\n group = parser.add_argument_group('Optimization- Type')\n group.add_argument('-batch_size', type=int, default=64,\n help='Maximum batch size for training')\n group.add_argument('-batch_type', default='sents',\n choices=[\"sents\", \"tokens\"],\n help=\"\"\"Batch grouping for batch_size. Standard\n is sents. Tokens will do dynamic batching\"\"\")\n group.add_argument('-normalization', default='sents',\n choices=[\"sents\", \"tokens\"],\n help='Normalization method of the gradient.')\n group.add_argument('-accum_count', type=int, default=1,\n help=\"\"\"Accumulate gradient this many times.\n Approximately equivalent to updating\n batch_size * accum_count batches at once.\n Recommended for Transformer.\"\"\")\n group.add_argument('-valid_steps', type=int, default=10000,\n help='Perfom validation every X steps')\n group.add_argument('-valid_batch_size', type=int, default=32,\n help='Maximum batch size for validation')\n group.add_argument('-max_generator_batches', type=int, default=32,\n help=\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. 
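As the -accum_count help above notes, accumulation is approximately one update over batch_size * accum_count examples; with data parallelism the world_size factor multiplies in as well (that last factor is my gloss, not stated in the help text):

def effective_batch_size(batch_size, accum_count, world_size=1):
    # batch_size may be in sentences or tokens depending on -batch_type
    return batch_size * accum_count * world_size

# e.g. 64 sents x 4 accumulations x 2 processes ~ one 512-sentence update
assert effective_batch_size(64, 4, 2) == 512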
Higher is faster, but\n uses more memory.\"\"\")\n group.add_argument('-train_steps', type=int, default=100000,\n help='Number of training steps')\n group.add_argument('-epochs', type=int, default=0,\n help='Deprecated epochs see train_steps')\n group.add_argument('-optim', default='sgd',\n choices=['sgd', 'adagrad', 'adadelta', 'adam',\n 'sparseadam'],\n help=\"\"\"Optimization method.\"\"\")\n group.add_argument('-adagrad_accumulator_init', type=float, default=0,\n help=\"\"\"Initializes the accumulator values in adagrad.\n Mirrors the initial_accumulator_value option\n in the tensorflow adagrad (use 0.1 for their default).\n \"\"\")\n group.add_argument('-max_grad_norm', type=float, default=5,\n help=\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm\"\"\")\n group.add_argument('-dropout', type=float, default=0.3,\n help=\"Dropout probability; applied in LSTM stacks.\")\n group.add_argument('-truncated_decoder', type=int, default=0,\n help=\"\"\"Truncated bptt.\"\"\")\n group.add_argument('-adam_beta1', type=float, default=0.9,\n help=\"\"\"The beta1 parameter used by Adam.\n Almost without exception a value of 0.9 is used in\n the literature, seemingly giving good results,\n so we would discourage changing this value from\n the default without due consideration.\"\"\")\n group.add_argument('-adam_beta2', type=float, default=0.999,\n help=\"\"\"The beta2 parameter used by Adam.\n Typically a value of 0.999 is recommended, as this is\n the value suggested by the original paper describing\n Adam, and is also the value adopted in other frameworks\n such as Tensorflow and Kerras, i.e. see:\n https:\/\/www.tensorflow.org\/api_docs\/python\/tf\/train\/AdamOptimizer\n https:\/\/keras.io\/optimizers\/ .\n Whereas recently the paper \"Attention is All You Need\"\n suggested a value of 0.98 for beta2, this parameter may\n not work well for normal models \/ default\n baselines.\"\"\")\n group.add_argument('-label_smoothing', type=float, default=0.0,\n help=\"\"\"Label smoothing value epsilon.\n Probabilities of all non-true labels\n will be smoothed by epsilon \/ (vocab_size - 1).\n Set to zero to turn off label smoothing.\n For more detailed information, see:\n https:\/\/arxiv.org\/abs\/1512.00567\"\"\")\n # learning rate\n group = parser.add_argument_group('Optimization- Rate')\n group.add_argument('-learning_rate', type=float, default=1.0,\n help=\"\"\"Starting learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\")\n group.add_argument('-learning_rate_decay', type=float, default=0.5,\n help=\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) steps have gone past\n start_decay_steps\"\"\")\n group.add_argument('-start_decay_steps', type=int, default=50000,\n help=\"\"\"Start decaying every decay_steps after\n start_decay_steps\"\"\")\n group.add_argument('-decay_steps', type=int, default=10000,\n help=\"\"\"Decay every decay_steps\"\"\")\n\n group.add_argument('-decay_method', type=str, default=\"\",\n choices=['noam'], help=\"Use a custom decay rate.\")\n group.add_argument('-warmup_steps', type=int, default=4000,\n help=\"\"\"Number of warmup steps for custom decay.\"\"\")\n\n group = parser.add_argument_group('Logging')\n group.add_argument('-report_every', type=int, default=50,\n help=\"Print stats at this interval.\")\n group.add_argument('-log_file', type=str, default=\"\",\n help=\"Output logs to a file 
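Selecting -decay_method 'noam' with -warmup_steps gives the Transformer schedule of Vaswani et al. (2017): linear warmup for warmup_steps, then inverse-square-root decay, scaled by model_dim ** -0.5 (in OpenNMT-py the rnn_size plays the model-dim role; this sketch is illustrative, not the repository's optimizer code):

def noam_rate(step, warmup_steps, model_dim):
    # lr multiplier: warmup branch (step * warmup_steps ** -1.5) until
    # step == warmup_steps, inverse-sqrt decay (step ** -0.5) afterwards
    return model_dim ** -0.5 * min(step ** -0.5,
                                   step * warmup_steps ** -1.5)

# the rate peaks exactly at step == warmup_steps, e.g. with the
# default above (warmup_steps=4000) and a 512-dim model:
assert abs(noam_rate(4000, 4000, 512)
           - 512 ** -0.5 * 4000 ** -0.5) < 1e-12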
under this path.\")\n group.add_argument('-exp_host', type=str, default=\"\",\n help=\"Send logs to this crayon server.\")\n group.add_argument('-exp', type=str, default=\"\",\n help=\"Name of the experiment for logging.\")\n # Use TensorboardX for visualization during training\n group.add_argument('-tensorboard', action=\"store_true\",\n help=\"\"\"Use tensorboardX for visualization during training.\n Must have the library tensorboardX.\"\"\")\n group.add_argument(\"-tensorboard_log_dir\", type=str,\n default=\"runs\/onmt\",\n help=\"\"\"Log directory for Tensorboard.\n This is also the name of the run.\n \"\"\")\n\n group = parser.add_argument_group('Speech')\n # Options most relevant to speech\n group.add_argument('-sample_rate', type=int, default=16000,\n help=\"Sample rate.\")\n group.add_argument('-window_size', type=float, default=.02,\n help=\"Window size for spectrogram in seconds.\")\n\n # Option most relevant to image input\n group.add_argument('-image_channel_size', type=int, default=3,\n choices=[3, 1],\n help=\"\"\"Using grayscale image can training\n model faster and smaller\"\"\")","function_tokens":["def","train_opts","(","parser",")",":","group","=","parser",".","add_argument_group","(","'General'",")","group",".","add_argument","(","'-data'",",","required","=","True",",","help","=","\"\"\"Path prefix to the \".train.pt\" and\n \".valid.pt\" file path from preprocess.py\"\"\"",")","group",".","add_argument","(","'-save_model'",",","default","=","'model'",",","help","=","\"\"\"Model filename (the model will be saved as\n _N.pt where N is the number\n of steps\"\"\"",")","group",".","add_argument","(","'-save_checkpoint_steps'",",","type","=","int",",","default","=","5000",",","help","=","\"\"\"Save a checkpoint every X steps\"\"\"",")","group",".","add_argument","(","'-keep_checkpoint'",",","type","=","int",",","default","=","-","1",",","help","=","\"\"\"Keep X checkpoints (negative: keep all)\"\"\"",")","# GPU","group",".","add_argument","(","'-gpuid'",",","default","=","[","]",",","nargs","=","'+'",",","type","=","int",",","help","=","\"Deprecated see world_size and gpu_ranks.\"",")","group",".","add_argument","(","'-gpu_ranks'",",","default","=","[","]",",","nargs","=","'+'",",","type","=","int",",","help","=","\"list of ranks of each process.\"",")","group",".","add_argument","(","'-world_size'",",","default","=","1",",","type","=","int",",","help","=","\"total number of distributed processes.\"",")","group",".","add_argument","(","'-gpu_backend'",",","default","=","'nccl'",",","nargs","=","'+'",",","type","=","str",",","help","=","\"Type of torch distributed backend\"",")","group",".","add_argument","(","'-gpu_verbose_level'",",","default","=","0",",","type","=","int",",","help","=","\"Gives more info on each process per GPU.\"",")","group",".","add_argument","(","'-master_ip'",",","default","=","\"localhost\"",",","type","=","str",",","help","=","\"IP of master for torch.distributed training.\"",")","group",".","add_argument","(","'-master_port'",",","default","=","10000",",","type","=","int",",","help","=","\"Port of master for torch.distributed training.\"",")","group",".","add_argument","(","'-seed'",",","type","=","int",",","default","=","-","1",",","help","=","\"\"\"Random seed used for the experiments\n reproducibility.\"\"\"",")","# Init options","group","=","parser",".","add_argument_group","(","'Initialization'",")","group",".","add_argument","(","'-param_init'",",","type","=","float",",","default","=","0.1",",","help","=","\"\"\"Parameters are initialized 
over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization\"\"\"",")","group",".","add_argument","(","'-param_init_glorot'",",","action","=","'store_true'",",","help","=","\"\"\"Init parameters with xavier_uniform.\n Required for transfomer.\"\"\"",")","group",".","add_argument","(","'-train_from'",",","default","=","''",",","type","=","str",",","help","=","\"\"\"If training from a checkpoint then this is the\n path to the pretrained model's state_dict.\"\"\"",")","# Pretrained word vectors","group",".","add_argument","(","'-pre_word_vecs_enc'",",","help","=","\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\"",")","group",".","add_argument","(","'-pre_word_vecs_dec'",",","help","=","\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\"",")","# Fixed word vectors","group",".","add_argument","(","'-fix_word_vecs_enc'",",","action","=","'store_true'",",","help","=","\"Fix word embeddings on the encoder side.\"",")","group",".","add_argument","(","'-fix_word_vecs_dec'",",","action","=","'store_true'",",","help","=","\"Fix word embeddings on the decoder side.\"",")","# Optimization options","group","=","parser",".","add_argument_group","(","'Optimization- Type'",")","group",".","add_argument","(","'-batch_size'",",","type","=","int",",","default","=","64",",","help","=","'Maximum batch size for training'",")","group",".","add_argument","(","'-batch_type'",",","default","=","'sents'",",","choices","=","[","\"sents\"",",","\"tokens\"","]",",","help","=","\"\"\"Batch grouping for batch_size. Standard\n is sents. Tokens will do dynamic batching\"\"\"",")","group",".","add_argument","(","'-normalization'",",","default","=","'sents'",",","choices","=","[","\"sents\"",",","\"tokens\"","]",",","help","=","'Normalization method of the gradient.'",")","group",".","add_argument","(","'-accum_count'",",","type","=","int",",","default","=","1",",","help","=","\"\"\"Accumulate gradient this many times.\n Approximately equivalent to updating\n batch_size * accum_count batches at once.\n Recommended for Transformer.\"\"\"",")","group",".","add_argument","(","'-valid_steps'",",","type","=","int",",","default","=","10000",",","help","=","'Perfom validation every X steps'",")","group",".","add_argument","(","'-valid_batch_size'",",","type","=","int",",","default","=","32",",","help","=","'Maximum batch size for validation'",")","group",".","add_argument","(","'-max_generator_batches'",",","type","=","int",",","default","=","32",",","help","=","\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. 
Higher is faster, but\n uses more memory.\"\"\"",")","group",".","add_argument","(","'-train_steps'",",","type","=","int",",","default","=","100000",",","help","=","'Number of training steps'",")","group",".","add_argument","(","'-epochs'",",","type","=","int",",","default","=","0",",","help","=","'Deprecated epochs see train_steps'",")","group",".","add_argument","(","'-optim'",",","default","=","'sgd'",",","choices","=","[","'sgd'",",","'adagrad'",",","'adadelta'",",","'adam'",",","'sparseadam'","]",",","help","=","\"\"\"Optimization method.\"\"\"",")","group",".","add_argument","(","'-adagrad_accumulator_init'",",","type","=","float",",","default","=","0",",","help","=","\"\"\"Initializes the accumulator values in adagrad.\n Mirrors the initial_accumulator_value option\n in the tensorflow adagrad (use 0.1 for their default).\n \"\"\"",")","group",".","add_argument","(","'-max_grad_norm'",",","type","=","float",",","default","=","5",",","help","=","\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm\"\"\"",")","group",".","add_argument","(","'-dropout'",",","type","=","float",",","default","=","0.3",",","help","=","\"Dropout probability; applied in LSTM stacks.\"",")","group",".","add_argument","(","'-truncated_decoder'",",","type","=","int",",","default","=","0",",","help","=","\"\"\"Truncated bptt.\"\"\"",")","group",".","add_argument","(","'-adam_beta1'",",","type","=","float",",","default","=","0.9",",","help","=","\"\"\"The beta1 parameter used by Adam.\n Almost without exception a value of 0.9 is used in\n the literature, seemingly giving good results,\n so we would discourage changing this value from\n the default without due consideration.\"\"\"",")","group",".","add_argument","(","'-adam_beta2'",",","type","=","float",",","default","=","0.999",",","help","=","\"\"\"The beta2 parameter used by Adam.\n Typically a value of 0.999 is recommended, as this is\n the value suggested by the original paper describing\n Adam, and is also the value adopted in other frameworks\n such as Tensorflow and Kerras, i.e. 
see:\n https:\/\/www.tensorflow.org\/api_docs\/python\/tf\/train\/AdamOptimizer\n https:\/\/keras.io\/optimizers\/ .\n Whereas recently the paper \"Attention is All You Need\"\n suggested a value of 0.98 for beta2, this parameter may\n not work well for normal models \/ default\n baselines.\"\"\"",")","group",".","add_argument","(","'-label_smoothing'",",","type","=","float",",","default","=","0.0",",","help","=","\"\"\"Label smoothing value epsilon.\n Probabilities of all non-true labels\n will be smoothed by epsilon \/ (vocab_size - 1).\n Set to zero to turn off label smoothing.\n For more detailed information, see:\n https:\/\/arxiv.org\/abs\/1512.00567\"\"\"",")","# learning rate","group","=","parser",".","add_argument_group","(","'Optimization- Rate'",")","group",".","add_argument","(","'-learning_rate'",",","type","=","float",",","default","=","1.0",",","help","=","\"\"\"Starting learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\"",")","group",".","add_argument","(","'-learning_rate_decay'",",","type","=","float",",","default","=","0.5",",","help","=","\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) steps have gone past\n start_decay_steps\"\"\"",")","group",".","add_argument","(","'-start_decay_steps'",",","type","=","int",",","default","=","50000",",","help","=","\"\"\"Start decaying every decay_steps after\n start_decay_steps\"\"\"",")","group",".","add_argument","(","'-decay_steps'",",","type","=","int",",","default","=","10000",",","help","=","\"\"\"Decay every decay_steps\"\"\"",")","group",".","add_argument","(","'-decay_method'",",","type","=","str",",","default","=","\"\"",",","choices","=","[","'noam'","]",",","help","=","\"Use a custom decay rate.\"",")","group",".","add_argument","(","'-warmup_steps'",",","type","=","int",",","default","=","4000",",","help","=","\"\"\"Number of warmup steps for custom decay.\"\"\"",")","group","=","parser",".","add_argument_group","(","'Logging'",")","group",".","add_argument","(","'-report_every'",",","type","=","int",",","default","=","50",",","help","=","\"Print stats at this interval.\"",")","group",".","add_argument","(","'-log_file'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Output logs to a file under this path.\"",")","group",".","add_argument","(","'-exp_host'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Send logs to this crayon server.\"",")","group",".","add_argument","(","'-exp'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Name of the experiment for logging.\"",")","# Use TensorboardX for visualization during training","group",".","add_argument","(","'-tensorboard'",",","action","=","\"store_true\"",",","help","=","\"\"\"Use tensorboardX for visualization during training.\n Must have the library tensorboardX.\"\"\"",")","group",".","add_argument","(","\"-tensorboard_log_dir\"",",","type","=","str",",","default","=","\"runs\/onmt\"",",","help","=","\"\"\"Log directory for Tensorboard.\n This is also the name of the run.\n \"\"\"",")","group","=","parser",".","add_argument_group","(","'Speech'",")","# Options most relevant to speech","group",".","add_argument","(","'-sample_rate'",",","type","=","int",",","default","=","16000",",","help","=","\"Sample rate.\"",")","group",".","add_argument","(","'-window_size'",",","type","=","float",",","default","=",".02",",","help","=","\"Window size for spectrogram in seconds.\"",")","# Option most 
relevant to image input","group",".","add_argument","(","'-image_channel_size'",",","type","=","int",",","default","=","3",",","choices","=","[","3",",","1","]",",","help","=","\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/opts.py#L245-L436"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/opts.py","language":"python","identifier":"translate_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"Translation \/ inference options","docstring_summary":"Translation \/ inference options","docstring_tokens":["Translation","\/","inference","options"],"function":"def translate_opts(parser):\n \"\"\" Translation \/ inference options \"\"\"\n group = parser.add_argument_group('Model')\n group.add_argument('-model', dest='models', metavar='MODEL',\n nargs='+', type=str, default=[], required=True,\n help='Path to model .pt file(s). '\n 'Multiple models can be specified, '\n 'for ensemble decoding.')\n\n group = parser.add_argument_group('Data')\n group.add_argument('-data_type', default=\"text\",\n help=\"Type of the source input. Options: [text|img].\")\n\n group.add_argument('-src', required=True,\n help=\"\"\"Source sequence to decode (one line per\n sequence)\"\"\")\n group.add_argument('-src_dir', default=\"\",\n help='Source directory for image or audio files')\n group.add_argument('-tgt',\n help='True target sequence (optional)')\n group.add_argument('-output', default='pred.txt',\n help=\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\")\n group.add_argument('-report_bleu', action='store_true',\n help=\"\"\"Report bleu score after translation,\n call tools\/multi-bleu.perl on command line\"\"\")\n group.add_argument('-report_rouge', action='store_true',\n help=\"\"\"Report rouge 1\/2\/3\/L\/SU4 score after translation\n call tools\/test_rouge.py on command line\"\"\")\n\n # Options most relevant to summarization.\n group.add_argument('-dynamic_dict', action='store_true',\n help=\"Create dynamic dictionaries\")\n group.add_argument('-share_vocab', action='store_true',\n help=\"Share source and target vocabulary\")\n\n group = parser.add_argument_group('Beam')\n group.add_argument('-fast', action=\"store_true\",\n help=\"\"\"Use fast beam search (some features may not be\n supported!)\"\"\")\n group.add_argument('-beam_size', type=int, default=5,\n help='Beam size')\n group.add_argument('-min_length', type=int, default=0,\n help='Minimum prediction length')\n group.add_argument('-max_length', type=int, default=100,\n help='Maximum prediction length.')\n group.add_argument('-max_sent_length', action=DeprecateAction,\n help=\"Deprecated, use `-max_length` instead\")\n\n # Alpha and Beta values for Google Length + Coverage penalty\n # Described here: https:\/\/arxiv.org\/pdf\/1609.08144.pdf, Section 7\n group.add_argument('-stepwise_penalty', action='store_true',\n help=\"\"\"Apply penalty at every decoding step.\n Helpful for summary penalty.\"\"\")\n group.add_argument('-length_penalty', default='none',\n choices=['none', 'wu', 'avg'],\n help=\"\"\"Length Penalty to use.\"\"\")\n group.add_argument('-coverage_penalty', default='none',\n choices=['none', 'wu', 'summary'],\n help=\"\"\"Coverage Penalty to use.\"\"\")\n group.add_argument('-alpha', type=float, default=0.,\n help=\"\"\"Google NMT 
length penalty parameter\n (higher = longer generation)\"\"\")\n group.add_argument('-beta', type=float, default=-0.,\n help=\"\"\"Coverage penalty parameter\"\"\")\n group.add_argument('-block_ngram_repeat', type=int, default=0,\n help='Block repetition of ngrams during decoding.')\n group.add_argument('-ignore_when_blocking', nargs='+', type=str,\n default=[],\n help=\"\"\"Ignore these strings when blocking repeats.\n You want to block sentence delimiters.\"\"\")\n group.add_argument('-replace_unk', action=\"store_true\",\n help=\"\"\"Replace the generated UNK tokens with the\n source token that had highest attention weight. If\n phrase_table is provided, it will lookup the\n identified source token and give the corresponding\n target token. If it is not provided(or the identified\n source token does not exist in the table) then it\n will copy the source token\"\"\")\n\n group = parser.add_argument_group('Logging')\n group.add_argument('-verbose', action=\"store_true\",\n help='Print scores and predictions for each sentence')\n group.add_argument('-log_file', type=str, default=\"\",\n help=\"Output logs to a file under this path.\")\n group.add_argument('-attn_debug', action=\"store_true\",\n help='Print best attn for each word')\n group.add_argument('-dump_beam', type=str, default=\"\",\n help='File to dump beam information to.')\n group.add_argument('-n_best', type=int, default=1,\n help=\"\"\"If verbose is set, will output the n_best\n decoded sentences\"\"\")\n\n group = parser.add_argument_group('Efficiency')\n group.add_argument('-batch_size', type=int, default=30,\n help='Batch size')\n group.add_argument('-gpu', type=int, default=-1,\n help=\"Device to run on\")\n\n # Options most relevant to speech.\n group = parser.add_argument_group('Speech')\n group.add_argument('-sample_rate', type=int, default=16000,\n help=\"Sample rate.\")\n group.add_argument('-window_size', type=float, default=.02,\n help='Window size for spectrogram in seconds')\n group.add_argument('-window_stride', type=float, default=.01,\n help='Window stride for spectrogram in seconds')\n group.add_argument('-window', default='hamming',\n help='Window type for spectrogram generation')\n\n # Option most relevant to image input\n group.add_argument('-image_channel_size', type=int, default=3,\n choices=[3, 1],\n help=\"\"\"Using grayscale image can training\n model faster and smaller\"\"\")","function_tokens":["def","translate_opts","(","parser",")",":","group","=","parser",".","add_argument_group","(","'Model'",")","group",".","add_argument","(","'-model'",",","dest","=","'models'",",","metavar","=","'MODEL'",",","nargs","=","'+'",",","type","=","str",",","default","=","[","]",",","required","=","True",",","help","=","'Path to model .pt file(s). '","'Multiple models can be specified, '","'for ensemble decoding.'",")","group","=","parser",".","add_argument_group","(","'Data'",")","group",".","add_argument","(","'-data_type'",",","default","=","\"text\"",",","help","=","\"Type of the source input. 
Options: [text|img].\"",")","group",".","add_argument","(","'-src'",",","required","=","True",",","help","=","\"\"\"Source sequence to decode (one line per\n sequence)\"\"\"",")","group",".","add_argument","(","'-src_dir'",",","default","=","\"\"",",","help","=","'Source directory for image or audio files'",")","group",".","add_argument","(","'-tgt'",",","help","=","'True target sequence (optional)'",")","group",".","add_argument","(","'-output'",",","default","=","'pred.txt'",",","help","=","\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\"",")","group",".","add_argument","(","'-report_bleu'",",","action","=","'store_true'",",","help","=","\"\"\"Report bleu score after translation,\n call tools\/multi-bleu.perl on command line\"\"\"",")","group",".","add_argument","(","'-report_rouge'",",","action","=","'store_true'",",","help","=","\"\"\"Report rouge 1\/2\/3\/L\/SU4 score after translation\n call tools\/test_rouge.py on command line\"\"\"",")","# Options most relevant to summarization.","group",".","add_argument","(","'-dynamic_dict'",",","action","=","'store_true'",",","help","=","\"Create dynamic dictionaries\"",")","group",".","add_argument","(","'-share_vocab'",",","action","=","'store_true'",",","help","=","\"Share source and target vocabulary\"",")","group","=","parser",".","add_argument_group","(","'Beam'",")","group",".","add_argument","(","'-fast'",",","action","=","\"store_true\"",",","help","=","\"\"\"Use fast beam search (some features may not be\n supported!)\"\"\"",")","group",".","add_argument","(","'-beam_size'",",","type","=","int",",","default","=","5",",","help","=","'Beam size'",")","group",".","add_argument","(","'-min_length'",",","type","=","int",",","default","=","0",",","help","=","'Minimum prediction length'",")","group",".","add_argument","(","'-max_length'",",","type","=","int",",","default","=","100",",","help","=","'Maximum prediction length.'",")","group",".","add_argument","(","'-max_sent_length'",",","action","=","DeprecateAction",",","help","=","\"Deprecated, use `-max_length` instead\"",")","# Alpha and Beta values for Google Length + Coverage penalty","# Described here: https:\/\/arxiv.org\/pdf\/1609.08144.pdf, Section 7","group",".","add_argument","(","'-stepwise_penalty'",",","action","=","'store_true'",",","help","=","\"\"\"Apply penalty at every decoding step.\n Helpful for summary penalty.\"\"\"",")","group",".","add_argument","(","'-length_penalty'",",","default","=","'none'",",","choices","=","[","'none'",",","'wu'",",","'avg'","]",",","help","=","\"\"\"Length Penalty to use.\"\"\"",")","group",".","add_argument","(","'-coverage_penalty'",",","default","=","'none'",",","choices","=","[","'none'",",","'wu'",",","'summary'","]",",","help","=","\"\"\"Coverage Penalty to use.\"\"\"",")","group",".","add_argument","(","'-alpha'",",","type","=","float",",","default","=","0.",",","help","=","\"\"\"Google NMT length penalty parameter\n (higher = longer generation)\"\"\"",")","group",".","add_argument","(","'-beta'",",","type","=","float",",","default","=","-","0.",",","help","=","\"\"\"Coverage penalty parameter\"\"\"",")","group",".","add_argument","(","'-block_ngram_repeat'",",","type","=","int",",","default","=","0",",","help","=","'Block repetition of ngrams during decoding.'",")","group",".","add_argument","(","'-ignore_when_blocking'",",","nargs","=","'+'",",","type","=","str",",","default","=","[","]",",","help","=","\"\"\"Ignore these strings when blocking repeats.\n You want to block sentence 
delimiters.\"\"\"",")","group",".","add_argument","(","'-replace_unk'",",","action","=","\"store_true\"",",","help","=","\"\"\"Replace the generated UNK tokens with the\n source token that had highest attention weight. If\n phrase_table is provided, it will lookup the\n identified source token and give the corresponding\n target token. If it is not provided(or the identified\n source token does not exist in the table) then it\n will copy the source token\"\"\"",")","group","=","parser",".","add_argument_group","(","'Logging'",")","group",".","add_argument","(","'-verbose'",",","action","=","\"store_true\"",",","help","=","'Print scores and predictions for each sentence'",")","group",".","add_argument","(","'-log_file'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Output logs to a file under this path.\"",")","group",".","add_argument","(","'-attn_debug'",",","action","=","\"store_true\"",",","help","=","'Print best attn for each word'",")","group",".","add_argument","(","'-dump_beam'",",","type","=","str",",","default","=","\"\"",",","help","=","'File to dump beam information to.'",")","group",".","add_argument","(","'-n_best'",",","type","=","int",",","default","=","1",",","help","=","\"\"\"If verbose is set, will output the n_best\n decoded sentences\"\"\"",")","group","=","parser",".","add_argument_group","(","'Efficiency'",")","group",".","add_argument","(","'-batch_size'",",","type","=","int",",","default","=","30",",","help","=","'Batch size'",")","group",".","add_argument","(","'-gpu'",",","type","=","int",",","default","=","-","1",",","help","=","\"Device to run on\"",")","# Options most relevant to speech.","group","=","parser",".","add_argument_group","(","'Speech'",")","group",".","add_argument","(","'-sample_rate'",",","type","=","int",",","default","=","16000",",","help","=","\"Sample rate.\"",")","group",".","add_argument","(","'-window_size'",",","type","=","float",",","default","=",".02",",","help","=","'Window size for spectrogram in seconds'",")","group",".","add_argument","(","'-window_stride'",",","type","=","float",",","default","=",".01",",","help","=","'Window stride for spectrogram in seconds'",")","group",".","add_argument","(","'-window'",",","default","=","'hamming'",",","help","=","'Window type for spectrogram generation'",")","# Option most relevant to image input","group",".","add_argument","(","'-image_channel_size'",",","type","=","int",",","default","=","3",",","choices","=","[","3",",","1","]",",","help","=","\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/opts.py#L439-L553"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/opts.py","language":"python","identifier":"add_md_help_argument","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"md help parser","docstring_summary":"md help parser","docstring_tokens":["md","help","parser"],"function":"def add_md_help_argument(parser):\n \"\"\" md help parser \"\"\"\n parser.add_argument('-md', action=MarkdownHelpAction,\n help='print Markdown-formatted help text and exit.')","function_tokens":["def","add_md_help_argument","(","parser",")",":","parser",".","add_argument","(","'-md'",",","action","=","MarkdownHelpAction",",","help","=","'print Markdown-formatted help text and 
exit.'",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/opts.py#L556-L559"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/audio_encoder.py","language":"python","identifier":"AudioEncoder.load_pretrained_vectors","parameters":"(self, opt)","argument_list":"","return_statement":"","docstring":"Pass in needed options only when modify function definition.","docstring_summary":"Pass in needed options only when modify function definition.","docstring_tokens":["Pass","in","needed","options","only","when","modify","function","definition","."],"function":"def load_pretrained_vectors(self, opt):\n \"\"\" Pass in needed options only when modify function definition.\"\"\"\n pass","function_tokens":["def","load_pretrained_vectors","(","self",",","opt",")",":","pass"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/audio_encoder.py#L45-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/audio_encoder.py","language":"python","identifier":"AudioEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return hidden, output","docstring":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_summary":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","encoders",".","encoder",".","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`\"\n # (batch_size, 1, nfft, t)\n # layer 1\n src = self.batch_norm1(self.layer1(src[:, :, :, :]))\n\n # (batch_size, 32, nfft\/2, t\/2)\n src = F.hardtanh(src, 0, 20, inplace=True)\n\n # (batch_size, 32, nfft\/2\/2, t\/2)\n # layer 2\n src = self.batch_norm2(self.layer2(src))\n\n # (batch_size, 32, nfft\/2\/2, t\/2)\n src = F.hardtanh(src, 0, 20, inplace=True)\n\n batch_size = src.size(0)\n length = src.size(3)\n src = src.view(batch_size, -1, length)\n src = src.transpose(0, 2).transpose(1, 2)\n\n output, hidden = self.rnn(src)\n\n return hidden, output","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","# (batch_size, 1, nfft, t)","# layer 1","src","=","self",".","batch_norm1","(","self",".","layer1","(","src","[",":",",",":",",",":",",",":","]",")",")","# (batch_size, 32, nfft\/2, t\/2)","src","=","F",".","hardtanh","(","src",",","0",",","20",",","inplace","=","True",")","# (batch_size, 32, nfft\/2\/2, t\/2)","# layer 2","src","=","self",".","batch_norm2","(","self",".","layer2","(","src",")",")","# (batch_size, 32, nfft\/2\/2, t\/2)","src","=","F",".","hardtanh","(","src",",","0",",","20",",","inplace","=","True",")","batch_size","=","src",".","size","(","0",")","length","=","src",".","size","(","3",")","src","=","src",".","view","(","batch_size",",","-","1",",","length",")","src","=","src",".","transpose","(","0",",","2",")",".","transpose","(","1",",","2",")","output",",","hidden","=","self",".","rnn","(","src",")","return","hidden",",","output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/audio_encoder.py#L49-L72"} 
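The records above capture OpenNMT-py's `opts.py`, where every flag is registered on a named `argparse` argument group (`'Model'`, `'Data'`, `'Beam'`, `'Optimization- Rate'`, `'Logging'`, `'Speech'`, ...). As a minimal sketch of how that pattern is consumed — only `-beam_size` and `-max_length` and their defaults are copied from the records; the surrounding scaffolding is illustrative, not part of the dataset:

```python
# Minimal sketch of the argparse option-group pattern used by opts.py.
# Only -beam_size / -max_length (and their defaults) come from the records
# above; everything else here is illustrative, not OpenNMT-py itself.
import argparse


def translate_opts(parser):
    """Register an abridged subset of the translation options."""
    group = parser.add_argument_group('Beam')
    group.add_argument('-beam_size', type=int, default=5,
                       help='Beam size')
    group.add_argument('-max_length', type=int, default=100,
                       help='Maximum prediction length.')


parser = argparse.ArgumentParser(
    description='translate.py',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
translate_opts(parser)
opt = parser.parse_args(['-beam_size', '10'])
assert opt.beam_size == 10 and opt.max_length == 100
```

Grouping is purely cosmetic at parse time; its payoff is that `-h` (and the `-md` Markdown help action registered above) renders the flags section by section.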
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/mean_encoder.py","language":"python","identifier":"MeanEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return encoder_final, memory_bank","docstring":"See :obj:`EncoderBase.forward()`","docstring_summary":"See :obj:`EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n _, batch, emb_dim = emb.size()\n mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)\n memory_bank = emb\n encoder_final = (mean, mean)\n return encoder_final, memory_bank","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","_",",","batch",",","emb_dim","=","emb",".","size","(",")","mean","=","emb",".","mean","(","0",")",".","expand","(","self",".","num_layers",",","batch",",","emb_dim",")","memory_bank","=","emb","encoder_final","=","(","mean",",","mean",")","return","encoder_final",",","memory_bank"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/mean_encoder.py#L20-L29"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/transformer.py","language":"python","identifier":"TransformerEncoderLayer.forward","parameters":"(self, inputs, mask)","argument_list":"","return_statement":"return self.feed_forward(out)","docstring":"Transformer Encoder Layer definition.\n\n Args:\n inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`\n mask (`LongTensor`): `[batch_size x src_len x src_len]`\n\n Returns:\n (`FloatTensor`):\n\n * outputs `[batch_size x src_len x model_dim]`","docstring_summary":"Transformer Encoder Layer definition.","docstring_tokens":["Transformer","Encoder","Layer","definition","."],"function":"def forward(self, inputs, mask):\n \"\"\"\n Transformer Encoder Layer definition.\n\n Args:\n inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`\n mask (`LongTensor`): `[batch_size x src_len x src_len]`\n\n Returns:\n (`FloatTensor`):\n\n * outputs `[batch_size x src_len x model_dim]`\n \"\"\"\n input_norm = self.layer_norm(inputs)\n context, _ = self.self_attn(input_norm, input_norm, input_norm,\n mask=mask)\n out = self.dropout(context) + inputs\n return self.feed_forward(out)","function_tokens":["def","forward","(","self",",","inputs",",","mask",")",":","input_norm","=","self",".","layer_norm","(","inputs",")","context",",","_","=","self",".","self_attn","(","input_norm",",","input_norm",",","input_norm",",","mask","=","mask",")","out","=","self",".","dropout","(","context",")","+","inputs","return","self",".","feed_forward","(","out",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/transformer.py#L35-L52"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/transformer.py","language":"python","identifier":"TransformerEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return emb, 
out.transpose(0, 1).contiguous()","docstring":"See :obj:`EncoderBase.forward()`","docstring_summary":"See :obj:`EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"\"\" See :obj:`EncoderBase.forward()`\"\"\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n\n out = emb.transpose(0, 1).contiguous()\n words = src[:, :, 0].transpose(0, 1)\n w_batch, w_len = words.size()\n padding_idx = self.embeddings.word_padding_idx\n mask = words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(w_batch, w_len, w_len)\n # Run the forward pass of every layer of the tranformer.\n for i in range(self.num_layers):\n out = self.transformer[i](out, mask)\n out = self.layer_norm(out)\n\n return emb, out.transpose(0, 1).contiguous()","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","out","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","words","=","src","[",":",",",":",",","0","]",".","transpose","(","0",",","1",")","w_batch",",","w_len","=","words",".","size","(",")","padding_idx","=","self",".","embeddings",".","word_padding_idx","mask","=","words",".","data",".","eq","(","padding_idx",")",".","unsqueeze","(","1",")",".","expand","(","w_batch",",","w_len",",","w_len",")","# Run the forward pass of every layer of the tranformer.","for","i","in","range","(","self",".","num_layers",")",":","out","=","self",".","transformer","[","i","]","(","out",",","mask",")","out","=","self",".","layer_norm","(","out",")","return","emb",",","out",".","transpose","(","0",",","1",")",".","contiguous","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/transformer.py#L98-L115"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/encoder.py","language":"python","identifier":"EncoderBase.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"","docstring":"Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`\n\n\n Returns:\n (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):\n * final encoder state, used to initialize decoder\n * memory bank for attention, `[src_len x batch x hidden]`","docstring_summary":"Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`","docstring_tokens":["Args",":","src","(",":","obj",":","LongTensor",")",":","padded","sequences","of","sparse","indices","[","src_len","x","batch","x","nfeat","]","lengths","(",":","obj",":","LongTensor",")",":","length","of","each","sequence","[","batch","]"],"function":"def forward(self, src, lengths=None):\n \"\"\"\n Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`\n\n\n Returns:\n (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):\n * final encoder state, used to initialize decoder\n * memory bank for attention, `[src_len x batch x hidden]`\n \"\"\"\n raise 
NotImplementedError","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/encoder.py#L41-L54"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return encoder_final, memory_bank","docstring":"See :obj:`EncoderBase.forward()`","docstring_summary":"See :obj:`EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n # s_len, batch, emb_dim = emb.size()\n\n packed_emb = emb\n if lengths is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Tensor.\n lengths = lengths.view(-1).tolist()\n packed_emb = pack(emb, lengths)\n\n memory_bank, encoder_final = self.rnn(packed_emb)\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank = unpack(memory_bank)[0]\n\n if self.use_bridge:\n encoder_final = self._bridge(encoder_final)\n return encoder_final, memory_bank","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","# s_len, batch, emb_dim = emb.size()","packed_emb","=","emb","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","# Lengths data is wrapped inside a Tensor.","lengths","=","lengths",".","view","(","-","1",")",".","tolist","(",")","packed_emb","=","pack","(","emb",",","lengths",")","memory_bank",",","encoder_final","=","self",".","rnn","(","packed_emb",")","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","memory_bank","=","unpack","(","memory_bank",")","[","0","]","if","self",".","use_bridge",":","encoder_final","=","self",".","_bridge","(","encoder_final",")","return","encoder_final",",","memory_bank"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/rnn_encoder.py#L53-L73"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder._bridge","parameters":"(self, hidden)","argument_list":"","return_statement":"return outs","docstring":"Forward hidden state through bridge","docstring_summary":"Forward hidden state through bridge","docstring_tokens":["Forward","hidden","state","through","bridge"],"function":"def _bridge(self, hidden):\n \"\"\"\n Forward hidden state through bridge\n \"\"\"\n def bottle_hidden(linear, states):\n \"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"\n size = states.size()\n result = linear(states.view(-1, self.total_hidden_dim))\n return F.relu(result).view(size)\n\n if isinstance(hidden, tuple): # LSTM\n outs = tuple([bottle_hidden(layer, hidden[ix])\n for ix, layer in enumerate(self.bridge)])\n else:\n outs = bottle_hidden(self.bridge[0], hidden)\n return 
outs","function_tokens":["def","_bridge","(","self",",","hidden",")",":","def","bottle_hidden","(","linear",",","states",")",":","\"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"","size","=","states",".","size","(",")","result","=","linear","(","states",".","view","(","-","1",",","self",".","total_hidden_dim",")",")","return","F",".","relu","(","result",")",".","view","(","size",")","if","isinstance","(","hidden",",","tuple",")",":","# LSTM","outs","=","tuple","(","[","bottle_hidden","(","layer",",","hidden","[","ix","]",")","for","ix",",","layer","in","enumerate","(","self",".","bridge",")","]",")","else",":","outs","=","bottle_hidden","(","self",".","bridge","[","0","]",",","hidden",")","return","outs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/rnn_encoder.py#L90-L107"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/image_encoder.py","language":"python","identifier":"ImageEncoder.load_pretrained_vectors","parameters":"(self, opt)","argument_list":"","return_statement":"","docstring":"Pass in needed options only when modify function definition.","docstring_summary":"Pass in needed options only when modify function definition.","docstring_tokens":["Pass","in","needed","options","only","when","modify","function","definition","."],"function":"def load_pretrained_vectors(self, opt):\n \"\"\" Pass in needed options only when modify function definition.\"\"\"\n pass","function_tokens":["def","load_pretrained_vectors","(","self",",","opt",")",":","pass"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/image_encoder.py#L50-L52"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/image_encoder.py","language":"python","identifier":"ImageEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return hidden_t, out","docstring":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_summary":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","encoders",".","encoder",".","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`\"\n\n batch_size = src.size(0)\n # (batch_size, 64, imgH, imgW)\n # layer 1\n src = F.relu(self.layer1(src[:, :, :, :] - 0.5), True)\n\n # (batch_size, 64, imgH\/2, imgW\/2)\n src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))\n\n # (batch_size, 128, imgH\/2, imgW\/2)\n # layer 2\n src = F.relu(self.layer2(src), True)\n\n # (batch_size, 128, imgH\/2\/2, imgW\/2\/2)\n src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))\n\n # (batch_size, 256, imgH\/2\/2, imgW\/2\/2)\n # layer 3\n # batch norm 1\n src = F.relu(self.batch_norm1(self.layer3(src)), True)\n\n # (batch_size, 256, imgH\/2\/2, imgW\/2\/2)\n # layer4\n src = F.relu(self.layer4(src), True)\n\n # (batch_size, 256, imgH\/2\/2\/2, imgW\/2\/2)\n src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))\n\n # (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2)\n # layer 5\n # batch norm 2\n src = F.relu(self.batch_norm2(self.layer5(src)), True)\n\n # (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)\n src = F.max_pool2d(src, 
kernel_size=(2, 1), stride=(2, 1))\n\n # (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)\n src = F.relu(self.batch_norm3(self.layer6(src)), True)\n\n # # (batch_size, 512, H, W)\n all_outputs = []\n for row in range(src.size(2)):\n inp = src[:, :, row, :].transpose(0, 2) \\\n .transpose(1, 2)\n row_vec = torch.Tensor(batch_size).type_as(inp.data) \\\n .long().fill_(row)\n pos_emb = self.pos_lut(row_vec)\n with_pos = torch.cat(\n (pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)\n outputs, hidden_t = self.rnn(with_pos)\n all_outputs.append(outputs)\n out = torch.cat(all_outputs, 0)\n\n return hidden_t, out","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","batch_size","=","src",".","size","(","0",")","# (batch_size, 64, imgH, imgW)","# layer 1","src","=","F",".","relu","(","self",".","layer1","(","src","[",":",",",":",",",":",",",":","]","-","0.5",")",",","True",")","# (batch_size, 64, imgH\/2, imgW\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","2",",","2",")",",","stride","=","(","2",",","2",")",")","# (batch_size, 128, imgH\/2, imgW\/2)","# layer 2","src","=","F",".","relu","(","self",".","layer2","(","src",")",",","True",")","# (batch_size, 128, imgH\/2\/2, imgW\/2\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","2",",","2",")",",","stride","=","(","2",",","2",")",")","# (batch_size, 256, imgH\/2\/2, imgW\/2\/2)","# layer 3","# batch norm 1","src","=","F",".","relu","(","self",".","batch_norm1","(","self",".","layer3","(","src",")",")",",","True",")","# (batch_size, 256, imgH\/2\/2, imgW\/2\/2)","# layer4","src","=","F",".","relu","(","self",".","layer4","(","src",")",",","True",")","# (batch_size, 256, imgH\/2\/2\/2, imgW\/2\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","1",",","2",")",",","stride","=","(","1",",","2",")",")","# (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2)","# layer 5","# batch norm 2","src","=","F",".","relu","(","self",".","batch_norm2","(","self",".","layer5","(","src",")",")",",","True",")","# (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","2",",","1",")",",","stride","=","(","2",",","1",")",")","# (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)","src","=","F",".","relu","(","self",".","batch_norm3","(","self",".","layer6","(","src",")",")",",","True",")","# # (batch_size, 512, H, W)","all_outputs","=","[","]","for","row","in","range","(","src",".","size","(","2",")",")",":","inp","=","src","[",":",",",":",",","row",",",":","]",".","transpose","(","0",",","2",")",".","transpose","(","1",",","2",")","row_vec","=","torch",".","Tensor","(","batch_size",")",".","type_as","(","inp",".","data",")",".","long","(",")",".","fill_","(","row",")","pos_emb","=","self",".","pos_lut","(","row_vec",")","with_pos","=","torch",".","cat","(","(","pos_emb",".","view","(","1",",","pos_emb",".","size","(","0",")",",","pos_emb",".","size","(","1",")",")",",","inp",")",",","0",")","outputs",",","hidden_t","=","self",".","rnn","(","with_pos",")","all_outputs",".","append","(","outputs",")","out","=","torch",".","cat","(","all_outputs",",","0",")","return","hidden_t",",","out"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/image_encoder.py#L54-L109"} 
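One detail in the encoder records above is worth unpacking: `TransformerEncoder.forward` derives its self-attention mask from the word-level padding index before running the layer stack. Below is a self-contained sketch of just that mask construction, with made-up tensor values; shapes follow the record (`src` is `[src_len x batch x nfeat]`, the mask is `[batch x src_len x src_len]`), and the deprecated `.data` access from the original is dropped:

```python
# Sketch of the padding mask built in TransformerEncoder.forward above.
# src_len=3, batch=2, nfeat=1; the values are illustrative only.
import torch

padding_idx = 1
src = torch.tensor([[[4], [5]],
                    [[6], [1]],     # second sequence is padding from here on
                    [[7], [1]]])

words = src[:, :, 0].transpose(0, 1)          # [batch x src_len] word ids
w_batch, w_len = words.size()
mask = words.eq(padding_idx).unsqueeze(1) \
            .expand(w_batch, w_len, w_len)     # [batch x src_len x src_len]
print(mask[1])  # each row marks the key positions every query must ignore
```

Because the row dimension is produced by `expand`, every query position shares the same key-side padding pattern, which is exactly what per-batch padding masking requires.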
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/encoders\/cnn_encoder.py","language":"python","identifier":"CNNEncoder.forward","parameters":"(self, input, lengths=None, hidden=None)","argument_list":"","return_statement":"return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \\\n out.squeeze(3).transpose(0, 1).contiguous()","docstring":"See :obj:`onmt.modules.EncoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","EncoderBase",".","forward","()"],"function":"def forward(self, input, lengths=None, hidden=None):\n \"\"\" See :obj:`onmt.modules.EncoderBase.forward()`\"\"\"\n self._check_args(input, lengths, hidden)\n\n emb = self.embeddings(input)\n # s_len, batch, emb_dim = emb.size()\n\n emb = emb.transpose(0, 1).contiguous()\n emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)\n emb_remap = self.linear(emb_reshape)\n emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)\n emb_remap = shape_transform(emb_remap)\n out = self.cnn(emb_remap)\n\n return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \\\n out.squeeze(3).transpose(0, 1).contiguous()","function_tokens":["def","forward","(","self",",","input",",","lengths","=","None",",","hidden","=","None",")",":","self",".","_check_args","(","input",",","lengths",",","hidden",")","emb","=","self",".","embeddings","(","input",")","# s_len, batch, emb_dim = emb.size()","emb","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","emb_reshape","=","emb",".","view","(","emb",".","size","(","0",")","*","emb",".","size","(","1",")",",","-","1",")","emb_remap","=","self",".","linear","(","emb_reshape",")","emb_remap","=","emb_remap",".","view","(","emb",".","size","(","0",")",",","emb",".","size","(","1",")",",","-","1",")","emb_remap","=","shape_transform","(","emb_remap",")","out","=","self",".","cnn","(","emb_remap",")","return","emb_remap",".","squeeze","(","3",")",".","transpose","(","0",",","1",")",".","contiguous","(",")",",","out",".","squeeze","(","3",")",".","transpose","(","0",",","1",")",".","contiguous","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/encoders\/cnn_encoder.py#L28-L43"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/models\/sru.py","language":"python","identifier":"check_sru_requirement","parameters":"(abort=False)","argument_list":"","return_statement":"return True","docstring":"Return True if check pass; if check fails and abort is True,\n raise an Exception, othereise return False.","docstring_summary":"Return True if check pass; if check fails and abort is True,\n raise an Exception, othereise return False.","docstring_tokens":["Return","True","if","check","pass",";","if","check","fails","and","abort","is","True","raise","an","Exception","othereise","return","False","."],"function":"def check_sru_requirement(abort=False):\n \"\"\"\n Return True if check pass; if check fails and abort is True,\n raise an Exception, othereise return False.\n \"\"\"\n\n # Check 1.\n try:\n if platform.system() == 'Windows':\n subprocess.check_output('pip freeze | findstr cupy', shell=True)\n subprocess.check_output('pip freeze | findstr pynvrtc',\n shell=True)\n else: # Unix-like systems\n subprocess.check_output('pip freeze | grep -w cupy', shell=True)\n 
subprocess.check_output('pip freeze | grep -w pynvrtc',\n shell=True)\n except subprocess.CalledProcessError:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires 'cupy' and 'pynvrtc' \"\n \"python packages installed.\")\n\n # Check 2.\n if torch.cuda.is_available() is False:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires pytorch built with cuda.\")\n\n # Check 3.\n pattern = re.compile(\".*cuda\/lib.*\")\n ld_path = os.getenv('LD_LIBRARY_PATH', \"\")\n if re.match(pattern, ld_path) is None:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires setting cuda lib path, e.g. \"\n \"export LD_LIBRARY_PATH=\/usr\/local\/cuda\/lib64.\")\n\n return True","function_tokens":["def","check_sru_requirement","(","abort","=","False",")",":","# Check 1.","try",":","if","platform",".","system","(",")","==","'Windows'",":","subprocess",".","check_output","(","'pip freeze | findstr cupy'",",","shell","=","True",")","subprocess",".","check_output","(","'pip freeze | findstr pynvrtc'",",","shell","=","True",")","else",":","# Unix-like systems","subprocess",".","check_output","(","'pip freeze | grep -w cupy'",",","shell","=","True",")","subprocess",".","check_output","(","'pip freeze | grep -w pynvrtc'",",","shell","=","True",")","except","subprocess",".","CalledProcessError",":","if","not","abort",":","return","False","raise","AssertionError","(","\"Using SRU requires 'cupy' and 'pynvrtc' \"","\"python packages installed.\"",")","# Check 2.","if","torch",".","cuda",".","is_available","(",")","is","False",":","if","not","abort",":","return","False","raise","AssertionError","(","\"Using SRU requires pytorch built with cuda.\"",")","# Check 3.","pattern","=","re",".","compile","(","\".*cuda\/lib.*\"",")","ld_path","=","os",".","getenv","(","'LD_LIBRARY_PATH'",",","\"\"",")","if","re",".","match","(","pattern",",","ld_path",")","is","None",":","if","not","abort",":","return","False","raise","AssertionError","(","\"Using SRU requires setting cuda lib path, e.g. \"","\"export LD_LIBRARY_PATH=\/usr\/local\/cuda\/lib64.\"",")","return","True"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/models\/sru.py#L32-L69"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/models\/model.py","language":"python","identifier":"NMTModel.forward","parameters":"(self, src, tgt, lengths, dec_state=None)","argument_list":"","return_statement":"return decoder_outputs, attns, dec_state","docstring":"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. 
however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state","docstring_summary":"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.","docstring_tokens":["Forward","propagate","a","src","and","tgt","pair","for","training",".","Possible","initialized","with","a","beginning","decoder","state","."],"function":"def forward(self, src, tgt, lengths, dec_state=None):\n \"\"\"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state\n \"\"\"\n tgt = tgt[:-1] # exclude last target from inputs\n\n enc_final, memory_bank = self.encoder(src, lengths)\n enc_state = \\\n self.decoder.init_decoder_state(src, memory_bank, enc_final)\n decoder_outputs, dec_state, attns = \\\n self.decoder(tgt, memory_bank,\n enc_state if dec_state is None\n else dec_state,\n memory_lengths=lengths)\n if self.multigpu:\n # Not yet supported on multi-gpu\n dec_state = None\n attns = None\n return decoder_outputs, attns, dec_state","function_tokens":["def","forward","(","self",",","src",",","tgt",",","lengths",",","dec_state","=","None",")",":","tgt","=","tgt","[",":","-","1","]","# exclude last target from inputs","enc_final",",","memory_bank","=","self",".","encoder","(","src",",","lengths",")","enc_state","=","self",".","decoder",".","init_decoder_state","(","src",",","memory_bank",",","enc_final",")","decoder_outputs",",","dec_state",",","attns","=","self",".","decoder","(","tgt",",","memory_bank",",","enc_state","if","dec_state","is","None","else","dec_state",",","memory_lengths","=","lengths",")","if","self",".","multigpu",":","# Not yet supported on multi-gpu","dec_state","=","None","attns","=","None","return","decoder_outputs",",","attns",",","dec_state"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/models\/model.py#L22-L57"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/models\/model_saver.py","language":"python","identifier":"ModelSaverBase.maybe_save","parameters":"(self, step)","argument_list":"","return_statement":"","docstring":"Main entry point for model saver\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic","docstring_summary":"Main entry point for model saver\n It wraps the `_save` method with checks and 
apply `keep_checkpoint`\n related logic","docstring_tokens":["Main","entry","point","for","model","saver","It","wraps","the","_save","method","with","checks","and","apply","keep_checkpoint","related","logic"],"function":"def maybe_save(self, step):\n \"\"\"\n Main entry point for model saver\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic\n \"\"\"\n if self.keep_checkpoint == 0:\n return\n\n if step % self.save_checkpoint_steps != 0:\n return\n\n chkpt, chkpt_name = self._save(step)\n\n if self.keep_checkpoint > 0:\n if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:\n todel = self.checkpoint_queue.popleft()\n self._rm_checkpoint(todel)\n self.checkpoint_queue.append(chkpt_name)","function_tokens":["def","maybe_save","(","self",",","step",")",":","if","self",".","keep_checkpoint","==","0",":","return","if","step","%","self",".","save_checkpoint_steps","!=","0",":","return","chkpt",",","chkpt_name","=","self",".","_save","(","step",")","if","self",".","keep_checkpoint",">","0",":","if","len","(","self",".","checkpoint_queue",")","==","self",".","checkpoint_queue",".","maxlen",":","todel","=","self",".","checkpoint_queue",".","popleft","(",")","self",".","_rm_checkpoint","(","todel",")","self",".","checkpoint_queue",".","append","(","chkpt_name",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/models\/model_saver.py#L43-L61"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/models\/model_saver.py","language":"python","identifier":"ModelSaverBase._save","parameters":"(self, step)","argument_list":"","return_statement":"","docstring":"Save a resumable checkpoint.\n\n Args:\n step (int): step number\n\n Returns:\n checkpoint: the saved object\n checkpoint_name: name (or path) of the saved checkpoint","docstring_summary":"Save a resumable checkpoint.","docstring_tokens":["Save","a","resumable","checkpoint","."],"function":"def _save(self, step):\n \"\"\" Save a resumable checkpoint.\n\n Args:\n step (int): step number\n\n Returns:\n checkpoint: the saved object\n checkpoint_name: name (or path) of the saved checkpoint\n \"\"\"\n raise NotImplementedError()","function_tokens":["def","_save","(","self",",","step",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/models\/model_saver.py#L63-L73"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/models\/model_saver.py","language":"python","identifier":"ModelSaverBase._rm_checkpoint","parameters":"(self, name)","argument_list":"","return_statement":"","docstring":"Remove a checkpoint\n\n Args:\n name(str): name that indentifies the checkpoint\n (it may be a filepath)","docstring_summary":"Remove a checkpoint","docstring_tokens":["Remove","a","checkpoint"],"function":"def _rm_checkpoint(self, name):\n \"\"\"\n Remove a checkpoint\n\n Args:\n name(str): name that indentifies the checkpoint\n (it may be a filepath)\n \"\"\"\n raise NotImplementedError()","function_tokens":["def","_rm_checkpoint","(","self",",","name",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/models\/model_saver.py#L75-L83"} 
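The `ModelSaverBase` records above implement checkpoint rotation: `maybe_save` fires every `save_checkpoint_steps` steps and, when `keep_checkpoint > 0`, evicts the oldest checkpoint from a bounded `collections.deque` before appending the new one. A hedged, dependency-free sketch of that logic follows; the class and file names are illustrative, and it assumes a positive `keep_checkpoint` (the original simply skips rotation for other values):

```python
# Illustrative re-creation of ModelSaverBase.maybe_save's rotation logic.
from collections import deque


class TinySaver:
    def __init__(self, keep_checkpoint, save_checkpoint_steps):
        self.keep_checkpoint = keep_checkpoint
        self.save_checkpoint_steps = save_checkpoint_steps
        self.checkpoint_queue = deque(maxlen=keep_checkpoint)

    def _save(self, step):
        # Stand-in for the abstract _save: returns (checkpoint, name).
        name = 'model_step_%d.pt' % step
        print('saved', name)
        return None, name

    def _rm_checkpoint(self, name):
        print('removed', name)

    def maybe_save(self, step):
        if self.keep_checkpoint == 0 or step % self.save_checkpoint_steps != 0:
            return
        _, name = self._save(step)
        if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
            # Evict explicitly so the file is deleted, not just forgotten.
            self._rm_checkpoint(self.checkpoint_queue.popleft())
        self.checkpoint_queue.append(name)


saver = TinySaver(keep_checkpoint=2, save_checkpoint_steps=100)
for s in range(100, 501, 100):
    saver.maybe_save(s)   # only the two most recent checkpoints survive
```

The explicit `popleft` before `append` matters: a `deque` with `maxlen` would drop the oldest entry silently on append, leaving the stale `.pt` file on disk.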
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/cnn_factory.py","language":"python","identifier":"shape_transform","parameters":"(x)","argument_list":"","return_statement":"return torch.unsqueeze(torch.transpose(x, 1, 2), 3)","docstring":"Tranform the size of the tensors to fit for conv input.","docstring_summary":"Tranform the size of the tensors to fit for conv input.","docstring_tokens":["Tranform","the","size","of","the","tensors","to","fit","for","conv","input","."],"function":"def shape_transform(x):\n \"\"\" Tranform the size of the tensors to fit for conv input. \"\"\"\n return torch.unsqueeze(torch.transpose(x, 1, 2), 3)","function_tokens":["def","shape_transform","(","x",")",":","return","torch",".","unsqueeze","(","torch",".","transpose","(","x",",","1",",","2",")",",","3",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/cnn_factory.py#L14-L16"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"build_optim","parameters":"(model, opt, checkpoint)","argument_list":"","return_statement":"return optim","docstring":"Build optimizer","docstring_summary":"Build optimizer","docstring_tokens":["Build","optimizer"],"function":"def build_optim(model, opt, checkpoint):\n \"\"\" Build optimizer \"\"\"\n saved_optimizer_state_dict = None\n\n if opt.train_from:\n optim = checkpoint['optim']\n # We need to save a copy of optim.optimizer.state_dict() for setting\n # the, optimizer state later on in Stage 2 in this method, since\n # the method optim.set_parameters(model.parameters()) will overwrite\n # optim.optimizer, and with ith the values stored in\n # optim.optimizer.state_dict()\n saved_optimizer_state_dict = optim.optimizer.state_dict()\n else:\n optim = Optimizer(\n opt.optim, opt.learning_rate, opt.max_grad_norm,\n lr_decay=opt.learning_rate_decay,\n start_decay_steps=opt.start_decay_steps,\n decay_steps=opt.decay_steps,\n beta1=opt.adam_beta1,\n beta2=opt.adam_beta2,\n adagrad_accum=opt.adagrad_accumulator_init,\n decay_method=opt.decay_method,\n warmup_steps=opt.warmup_steps,\n model_size=opt.rnn_size)\n\n # Stage 1:\n # Essentially optim.set_parameters (re-)creates and optimizer using\n # model.paramters() as parameters that will be stored in the\n # optim.optimizer.param_groups field of the torch optimizer class.\n # Importantly, this method does not yet load the optimizer state, as\n # essentially it builds a new optimizer with empty optimizer state and\n # parameters from the model.\n optim.set_parameters(model.named_parameters())\n\n if opt.train_from:\n # Stage 2: In this stage, which is only performed when loading an\n # optimizer from a checkpoint, we load the saved_optimizer_state_dict\n # into the re-created optimizer, to set the optim.optimizer.state\n # field, which was previously empty. 
For this, we use the optimizer\n # state saved in the \"saved_optimizer_state_dict\" variable for\n # this purpose.\n # See also: https:\/\/github.com\/pytorch\/pytorch\/issues\/2830\n optim.optimizer.load_state_dict(saved_optimizer_state_dict)\n # Convert back the state values to cuda type if applicable\n if use_gpu(opt):\n for state in optim.optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()\n\n # We want to make sure that indeed we have a non-empty optimizer state\n # when we loaded an existing model. This should be at least the case\n # for Adam, which saves \"exp_avg\" and \"exp_avg_sq\" state\n # (Exponential moving average of gradient and squared gradient values)\n if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):\n raise RuntimeError(\n \"Error: loaded Adam optimizer from existing model\" +\n \" but optimizer state is empty\")\n\n return optim","function_tokens":["def","build_optim","(","model",",","opt",",","checkpoint",")",":","saved_optimizer_state_dict","=","None","if","opt",".","train_from",":","optim","=","checkpoint","[","'optim'","]","# We need to save a copy of optim.optimizer.state_dict() for setting","# the, optimizer state later on in Stage 2 in this method, since","# the method optim.set_parameters(model.parameters()) will overwrite","# optim.optimizer, and with ith the values stored in","# optim.optimizer.state_dict()","saved_optimizer_state_dict","=","optim",".","optimizer",".","state_dict","(",")","else",":","optim","=","Optimizer","(","opt",".","optim",",","opt",".","learning_rate",",","opt",".","max_grad_norm",",","lr_decay","=","opt",".","learning_rate_decay",",","start_decay_steps","=","opt",".","start_decay_steps",",","decay_steps","=","opt",".","decay_steps",",","beta1","=","opt",".","adam_beta1",",","beta2","=","opt",".","adam_beta2",",","adagrad_accum","=","opt",".","adagrad_accumulator_init",",","decay_method","=","opt",".","decay_method",",","warmup_steps","=","opt",".","warmup_steps",",","model_size","=","opt",".","rnn_size",")","# Stage 1:","# Essentially optim.set_parameters (re-)creates and optimizer using","# model.paramters() as parameters that will be stored in the","# optim.optimizer.param_groups field of the torch optimizer class.","# Importantly, this method does not yet load the optimizer state, as","# essentially it builds a new optimizer with empty optimizer state and","# parameters from the model.","optim",".","set_parameters","(","model",".","named_parameters","(",")",")","if","opt",".","train_from",":","# Stage 2: In this stage, which is only performed when loading an","# optimizer from a checkpoint, we load the saved_optimizer_state_dict","# into the re-created optimizer, to set the optim.optimizer.state","# field, which was previously empty. For this, we use the optimizer","# state saved in the \"saved_optimizer_state_dict\" variable for","# this purpose.","# See also: https:\/\/github.com\/pytorch\/pytorch\/issues\/2830","optim",".","optimizer",".","load_state_dict","(","saved_optimizer_state_dict",")","# Convert back the state values to cuda type if applicable","if","use_gpu","(","opt",")",":","for","state","in","optim",".","optimizer",".","state",".","values","(",")",":","for","k",",","v","in","state",".","items","(",")",":","if","torch",".","is_tensor","(","v",")",":","state","[","k","]","=","v",".","cuda","(",")","# We want to make sure that indeed we have a non-empty optimizer state","# when we loaded an existing model. 
This should be at least the case","# for Adam, which saves \"exp_avg\" and \"exp_avg_sq\" state","# (Exponential moving average of gradient and squared gradient values)","if","(","optim",".","method","==","'adam'",")","and","(","len","(","optim",".","optimizer",".","state",")","<","1",")",":","raise","RuntimeError","(","\"Error: loaded Adam optimizer from existing model\"","+","\" but optimizer state is empty\"",")","return","optim"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L9-L68"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.__init__","parameters":"(self, op)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def __init__(self, op):\n \"\"\" ? \"\"\"\n self.optimizers = op","function_tokens":["def","__init__","(","self",",","op",")",":","self",".","optimizers","=","op"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L74-L76"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.zero_grad","parameters":"(self)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def zero_grad(self):\n \"\"\" ? \"\"\"\n for op in self.optimizers:\n op.zero_grad()","function_tokens":["def","zero_grad","(","self",")",":","for","op","in","self",".","optimizers",":","op",".","zero_grad","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L78-L81"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.step","parameters":"(self)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def step(self):\n \"\"\" ? \"\"\"\n for op in self.optimizers:\n op.step()","function_tokens":["def","step","(","self",")",":","for","op","in","self",".","optimizers",":","op",".","step","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L83-L86"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.state","parameters":"(self)","argument_list":"","return_statement":"return {k: v for op in self.optimizers for k, v in op.state.items()}","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def state(self):\n \"\"\" ? 
\"\"\"\n return {k: v for op in self.optimizers for k, v in op.state.items()}","function_tokens":["def","state","(","self",")",":","return","{","k",":","v","for","op","in","self",".","optimizers","for","k",",","v","in","op",".","state",".","items","(",")","}"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L89-L91"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.state_dict","parameters":"(self)","argument_list":"","return_statement":"return [op.state_dict() for op in self.optimizers]","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def state_dict(self):\n \"\"\" ? \"\"\"\n return [op.state_dict() for op in self.optimizers]","function_tokens":["def","state_dict","(","self",")",":","return","[","op",".","state_dict","(",")","for","op","in","self",".","optimizers","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L93-L95"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.load_state_dict","parameters":"(self, state_dicts)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def load_state_dict(self, state_dicts):\n \"\"\" ? \"\"\"\n assert len(state_dicts) == len(self.optimizers)\n for i in range(len(state_dicts)):\n self.optimizers[i].load_state_dict(state_dicts[i])","function_tokens":["def","load_state_dict","(","self",",","state_dicts",")",":","assert","len","(","state_dicts",")","==","len","(","self",".","optimizers",")","for","i","in","range","(","len","(","state_dicts",")",")",":","self",".","optimizers","[","i","]",".","load_state_dict","(","state_dicts","[","i","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L97-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"Optimizer.set_parameters","parameters":"(self, params)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def set_parameters(self, params):\n \"\"\" ? 
\"\"\"\n self.params = []\n self.sparse_params = []\n for k, p in params:\n if p.requires_grad:\n if self.method != 'sparseadam' or \"embed\" not in k:\n self.params.append(p)\n else:\n self.sparse_params.append(p)\n if self.method == 'sgd':\n self.optimizer = optim.SGD(self.params, lr=self.learning_rate)\n elif self.method == 'adagrad':\n self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)\n for group in self.optimizer.param_groups:\n for p in group['params']:\n self.optimizer.state[p]['sum'] = self.optimizer\\\n .state[p]['sum'].fill_(self.adagrad_accum)\n elif self.method == 'adadelta':\n self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)\n elif self.method == 'adam':\n self.optimizer = optim.Adam(self.params, lr=self.learning_rate,\n betas=self.betas, eps=1e-9)\n elif self.method == 'sparseadam':\n self.optimizer = MultipleOptimizer(\n [optim.Adam(self.params, lr=self.learning_rate,\n betas=self.betas, eps=1e-8),\n optim.SparseAdam(self.sparse_params, lr=self.learning_rate,\n betas=self.betas, eps=1e-8)])\n else:\n raise RuntimeError(\"Invalid optim method: \" + self.method)","function_tokens":["def","set_parameters","(","self",",","params",")",":","self",".","params","=","[","]","self",".","sparse_params","=","[","]","for","k",",","p","in","params",":","if","p",".","requires_grad",":","if","self",".","method","!=","'sparseadam'","or","\"embed\"","not","in","k",":","self",".","params",".","append","(","p",")","else",":","self",".","sparse_params",".","append","(","p",")","if","self",".","method","==","'sgd'",":","self",".","optimizer","=","optim",".","SGD","(","self",".","params",",","lr","=","self",".","learning_rate",")","elif","self",".","method","==","'adagrad'",":","self",".","optimizer","=","optim",".","Adagrad","(","self",".","params",",","lr","=","self",".","learning_rate",")","for","group","in","self",".","optimizer",".","param_groups",":","for","p","in","group","[","'params'","]",":","self",".","optimizer",".","state","[","p","]","[","'sum'","]","=","self",".","optimizer",".","state","[","p","]","[","'sum'","]",".","fill_","(","self",".","adagrad_accum",")","elif","self",".","method","==","'adadelta'",":","self",".","optimizer","=","optim",".","Adadelta","(","self",".","params",",","lr","=","self",".","learning_rate",")","elif","self",".","method","==","'adam'",":","self",".","optimizer","=","optim",".","Adam","(","self",".","params",",","lr","=","self",".","learning_rate",",","betas","=","self",".","betas",",","eps","=","1e-9",")","elif","self",".","method","==","'sparseadam'",":","self",".","optimizer","=","MultipleOptimizer","(","[","optim",".","Adam","(","self",".","params",",","lr","=","self",".","learning_rate",",","betas","=","self",".","betas",",","eps","=","1e-8",")",",","optim",".","SparseAdam","(","self",".","sparse_params",",","lr","=","self",".","learning_rate",",","betas","=","self",".","betas",",","eps","=","1e-8",")","]",")","else",":","raise","RuntimeError","(","\"Invalid optim method: \"","+","self",".","method",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L158-L188"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py","language":"python","identifier":"Optimizer.step","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Update the model parameters based on current gradients.\n\n Optionally, will 
employ gradient modification or update learning\n rate.","docstring_summary":"Update the model parameters based on current gradients.","docstring_tokens":["Update","the","model","parameters","based","on","current","gradients","."],"function":"def step(self):\n \"\"\"Update the model parameters based on current gradients.\n\n Optionally, will employ gradient modification or update learning\n rate.\n \"\"\"\n self._step += 1\n\n # Decay method used in tensor2tensor.\n if self.decay_method == \"noam\":\n self._set_rate(\n self.original_lr *\n (self.model_size ** (-0.5) *\n min(self._step ** (-0.5),\n self._step * self.warmup_steps**(-1.5))))\n # Decay based on start_decay_steps every decay_steps\n else:\n if ((self.start_decay_steps is not None) and (\n self._step >= self.start_decay_steps)):\n self.start_decay = True\n if self.start_decay:\n if ((self._step - self.start_decay_steps)\n % self.decay_steps == 0):\n self.learning_rate = self.learning_rate * self.lr_decay\n\n if self.method != 'sparseadam':\n self.optimizer.param_groups[0]['lr'] = self.learning_rate\n\n if self.max_grad_norm:\n clip_grad_norm_(self.params, self.max_grad_norm)\n self.optimizer.step()","function_tokens":["def","step","(","self",")",":","self",".","_step","+=","1","# Decay method used in tensor2tensor.","if","self",".","decay_method","==","\"noam\"",":","self",".","_set_rate","(","self",".","original_lr","*","(","self",".","model_size","**","(","-","0.5",")","*","min","(","self",".","_step","**","(","-","0.5",")",",","self",".","_step","*","self",".","warmup_steps","**","(","-","1.5",")",")",")",")","# Decay based on start_decay_steps every decay_steps","else",":","if","(","(","self",".","start_decay_steps","is","not","None",")","and","(","self",".","_step",">=","self",".","start_decay_steps",")",")",":","self",".","start_decay","=","True","if","self",".","start_decay",":","if","(","(","self",".","_step","-","self",".","start_decay_steps",")","%","self",".","decay_steps","==","0",")",":","self",".","learning_rate","=","self",".","learning_rate","*","self",".","lr_decay","if","self",".","method","!=","'sparseadam'",":","self",".","optimizer",".","param_groups","[","0","]","[","'lr'","]","=","self",".","learning_rate","if","self",".","max_grad_norm",":","clip_grad_norm_","(","self",".","params",",","self",".","max_grad_norm",")","self",".","optimizer",".","step","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/optimizers.py#L198-L228"}
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.all_gather_stats","parameters":"(stat, max_size=4096)","argument_list":"","return_statement":"return stats[0]","docstring":"Gather a `Statistics` object across multiple processes\/nodes\n\n Args:\n stat(:obj:Statistics): the statistics object to gather\n across all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n `Statistics`, the updated stats object","docstring_summary":"Gather a `Statistics` object across multiple processes\/nodes","docstring_tokens":["Gather","a","Statistics","object","across","multiple","processes","\/","nodes"],"function":"def all_gather_stats(stat, max_size=4096):\n \"\"\"\n Gather a `Statistics` object across multiple processes\/nodes\n\n Args:\n stat(:obj:Statistics): the statistics object to gather\n across all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n `Statistics`, the updated stats object\n \"\"\"\n stats = Statistics.all_gather_stats_list([stat], max_size=max_size)\n return stats[0]","function_tokens":["def","all_gather_stats","(","stat",",","max_size","=","4096",")",":","stats","=","Statistics",".","all_gather_stats_list","(","[","stat","]",",","max_size","=","max_size",")","return","stats","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L30-L43"}
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.all_gather_stats_list","parameters":"(stat_list, max_size=4096)","argument_list":"","return_statement":"return our_stats","docstring":"Gather a `Statistics` list across all processes\/nodes\n\n Args:\n stat_list(list([`Statistics`])): list of statistics objects to\n gather across all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n our_stats(list([`Statistics`])): list of updated stats","docstring_summary":"Gather a `Statistics` list across all processes\/nodes","docstring_tokens":["Gather","a","Statistics","list","across","all","processes","\/","nodes"],"function":"def all_gather_stats_list(stat_list, max_size=4096):\n \"\"\"\n Gather a `Statistics` list across all processes\/nodes\n\n Args:\n stat_list(list([`Statistics`])): list of statistics objects to\n gather across all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n our_stats(list([`Statistics`])): list of updated stats\n \"\"\"\n # Get a list of world_size lists with len(stat_list) Statistics objects\n all_stats = all_gather_list(stat_list, max_size=max_size)\n\n our_rank = get_rank()\n our_stats = all_stats[our_rank]\n for other_rank, stats in enumerate(all_stats):\n if other_rank == our_rank:\n continue\n for i, stat in enumerate(stats):\n our_stats[i].update(stat, update_n_src_words=True)\n return our_stats","function_tokens":["def","all_gather_stats_list","(","stat_list",",","max_size","=","4096",")",":","# Get a list of world_size lists with len(stat_list) Statistics objects","all_stats","=","all_gather_list","(","stat_list",",","max_size","=","max_size",")","our_rank","=","get_rank","(",")","our_stats","=","all_stats","[","our_rank","]","for","other_rank",",","stats","in","enumerate","(","all_stats",")",":","if","other_rank","==","our_rank",":","continue","for","i",",","stat","in","enumerate","(","stats",")",":","our_stats","[","i","]",".","update","(","stat",",","update_n_src_words","=","True",")","return","our_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L46-L68"}
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.update","parameters":"(self, stat, update_n_src_words=False)","argument_list":"","return_statement":"","docstring":"Update statistics by summing values with another `Statistics` object\n\n Args:\n stat: another statistic object\n update_n_src_words(bool): whether to update (sum) `n_src_words`\n or not","docstring_summary":"Update statistics by summing values with another `Statistics` object","docstring_tokens":["Update","statistics","by","summing","values","with","another","Statistics","object"],"function":"def update(self, stat, update_n_src_words=False):\n \"\"\"\n Update statistics by summing values with another `Statistics` object\n\n Args:\n stat: another statistic object\n update_n_src_words(bool): whether to update (sum) `n_src_words`\n or not\n\n \"\"\"\n self.loss += stat.loss\n self.n_words += stat.n_words\n self.n_correct += stat.n_correct\n\n if update_n_src_words:\n self.n_src_words += stat.n_src_words","function_tokens":["def","update","(","self",",","stat",",","update_n_src_words","=","False",")",":","self",".","loss","+=","stat",".","loss","self",".","n_words","+=","stat",".","n_words","self",".","n_correct","+=","stat",".","n_correct","if","update_n_src_words",":","self",".","n_src_words","+=","stat",".","n_src_words"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L70-L85"}
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.accuracy","parameters":"(self)","argument_list":"","return_statement":"return 100 * (self.n_correct \/ self.n_words)","docstring":"compute accuracy","docstring_summary":"compute accuracy","docstring_tokens":["compute","accuracy"],"function":"def accuracy(self):\n \"\"\" compute accuracy \"\"\"\n return 100 * (self.n_correct \/ self.n_words)","function_tokens":["def","accuracy","(","self",")",":","return","100","*","(","self",".","n_correct","\/","self",".","n_words",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L87-L89"}
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.xent","parameters":"(self)","argument_list":"","return_statement":"return self.loss \/ self.n_words","docstring":"compute cross entropy","docstring_summary":"compute cross entropy","docstring_tokens":["compute","cross","entropy"],"function":"def xent(self):\n \"\"\" compute cross entropy \"\"\"\n return self.loss \/ self.n_words","function_tokens":["def","xent","(","self",")",":","return","self",".","loss","\/","self",".","n_words"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L91-L93"}
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.ppl","parameters":"(self)","argument_list":"","return_statement":"return math.exp(min(self.loss \/ self.n_words, 100))","docstring":"compute perplexity","docstring_summary":"compute perplexity","docstring_tokens":["compute","perplexity"],"function":"def ppl(self):\n \"\"\" compute perplexity \"\"\"\n return math.exp(min(self.loss \/ self.n_words, 100))","function_tokens":["def","ppl","(","self",")",":","return","math",".","exp","(","min","(","self",".","loss","\/","self",".","n_words",",","100",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L95-L97"}
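The three `Statistics` properties in the records above (`accuracy`, `xent`, `ppl`) are simple ratios over the counters that `update()` accumulates (`loss`, `n_words`, `n_correct`). A minimal, self-contained sketch of that arithmetic, using made-up counter values rather than anything from this repository:

```python
import math

# Hypothetical counters, as a Statistics object might hold them after a few
# update() calls; the numbers below are illustrative only.
loss = 5406.0        # summed token-level loss
n_words = 2000       # non-padding target tokens seen
n_correct = 1187     # predictions that matched the target

accuracy = 100 * (n_correct / n_words)     # per-token accuracy, in percent
xent = loss / n_words                      # average cross-entropy per token
ppl = math.exp(min(loss / n_words, 100))   # perplexity = exp(xent); min() caps the
                                           # exponent so exp() cannot overflow

print("acc: %6.2f; ppl: %5.2f; xent: %4.2f" % (accuracy, ppl, xent))
```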
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.elapsed_time","parameters":"(self)","argument_list":"","return_statement":"return time.time() - self.start_time","docstring":"compute elapsed time","docstring_summary":"compute elapsed time","docstring_tokens":["compute","elapsed","time"],"function":"def elapsed_time(self):\n \"\"\" compute elapsed time \"\"\"\n return time.time() - self.start_time","function_tokens":["def","elapsed_time","(","self",")",":","return","time",".","time","(",")","-","self",".","start_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L99-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.output","parameters":"(self, step, num_steps, learning_rate, start)","argument_list":"","return_statement":"","docstring":"Write out statistics to stdout.\n\n Args:\n step (int): current step\n n_batch (int): total batches\n start (int): start time of step.","docstring_summary":"Write out statistics to stdout.","docstring_tokens":["Write","out","statistics","to","stdout","."],"function":"def output(self, step, num_steps, learning_rate, start):\n \"\"\"Write out statistics to stdout.\n\n Args:\n step (int): current step\n n_batch (int): total batches\n start (int): start time of step.\n \"\"\"\n t = self.elapsed_time()\n logger.info(\n (\"Step %2d\/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \" +\n \"lr: %7.5f; %3.0f\/%3.0f tok\/s; %6.0f sec\")\n % (step, num_steps,\n self.accuracy(),\n self.ppl(),\n self.xent(),\n learning_rate,\n self.n_src_words \/ (t + 1e-5),\n self.n_words \/ (t + 1e-5),\n time.time() - start))\n sys.stdout.flush()","function_tokens":["def","output","(","self",",","step",",","num_steps",",","learning_rate",",","start",")",":","t","=","self",".","elapsed_time","(",")","logger",".","info","(","(","\"Step %2d\/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \"","+","\"lr: %7.5f; %3.0f\/%3.0f tok\/s; %6.0f sec\"",")","%","(","step",",","num_steps",",","self",".","accuracy","(",")",",","self",".","ppl","(",")",",","self",".","xent","(",")",",","learning_rate",",","self",".","n_src_words","\/","(","t","+","1e-5",")",",","self",".","n_words","\/","(","t","+","1e-5",")",",","time",".","time","(",")","-","start",")",")","sys",".","stdout",".","flush","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L103-L123"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.log_tensorboard","parameters":"(self, prefix, writer, learning_rate, step)","argument_list":"","return_statement":"","docstring":"display statistics to tensorboard","docstring_summary":"display statistics to tensorboard","docstring_tokens":["display","statistics","to","tensorboard"],"function":"def log_tensorboard(self, prefix, writer, learning_rate, step):\n \"\"\" display statistics to tensorboard \"\"\"\n t = self.elapsed_time()\n writer.add_scalar(prefix + \"\/xent\", self.xent(), step)\n writer.add_scalar(prefix + \"\/ppl\", self.ppl(), step)\n 
writer.add_scalar(prefix + \"\/accuracy\", self.accuracy(), step)\n writer.add_scalar(prefix + \"\/tgtper\", self.n_words \/ t, step)\n writer.add_scalar(prefix + \"\/lr\", learning_rate, step)","function_tokens":["def","log_tensorboard","(","self",",","prefix",",","writer",",","learning_rate",",","step",")",":","t","=","self",".","elapsed_time","(",")","writer",".","add_scalar","(","prefix","+","\"\/xent\"",",","self",".","xent","(",")",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/ppl\"",",","self",".","ppl","(",")",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/accuracy\"",",","self",".","accuracy","(",")",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/tgtper\"",",","self",".","n_words","\/","t",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/lr\"",",","learning_rate",",","step",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/statistics.py#L125-L132"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/rnn_factory.py","language":"python","identifier":"rnn_factory","parameters":"(rnn_type, **kwargs)","argument_list":"","return_statement":"return rnn, no_pack_padded_seq","docstring":"rnn factory, Use pytorch version when available.","docstring_summary":"rnn factory, Use pytorch version when available.","docstring_tokens":["rnn","factory","Use","pytorch","version","when","available","."],"function":"def rnn_factory(rnn_type, **kwargs):\n \"\"\" rnn factory, Use pytorch version when available. \"\"\"\n no_pack_padded_seq = False\n if rnn_type == \"SRU\":\n # SRU doesn't support PackedSequence.\n no_pack_padded_seq = True\n rnn = onmt.models.sru.SRU(**kwargs)\n else:\n rnn = getattr(nn, rnn_type)(**kwargs)\n return rnn, no_pack_padded_seq","function_tokens":["def","rnn_factory","(","rnn_type",",","*","*","kwargs",")",":","no_pack_padded_seq","=","False","if","rnn_type","==","\"SRU\"",":","# SRU doesn't support PackedSequence.","no_pack_padded_seq","=","True","rnn","=","onmt",".","models",".","sru",".","SRU","(","*","*","kwargs",")","else",":","rnn","=","getattr","(","nn",",","rnn_type",")","(","*","*","kwargs",")","return","rnn",",","no_pack_padded_seq"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/rnn_factory.py#L10-L19"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/distributed.py","language":"python","identifier":"all_reduce_and_rescale_tensors","parameters":"(tensors, rescale_denom,\n buffer_size=10485760)","argument_list":"","return_statement":"","docstring":"All-reduce and rescale tensors in chunks of the specified size.\n\n Args:\n tensors: list of Tensors to all-reduce\n rescale_denom: denominator for rescaling summed Tensors\n buffer_size: all-reduce chunk size in bytes","docstring_summary":"All-reduce and rescale tensors in chunks of the specified size.","docstring_tokens":["All","-","reduce","and","rescale","tensors","in","chunks","of","the","specified","size","."],"function":"def all_reduce_and_rescale_tensors(tensors, rescale_denom,\n buffer_size=10485760):\n \"\"\"All-reduce and rescale tensors in chunks of the specified size.\n\n Args:\n tensors: list of Tensors to all-reduce\n rescale_denom: denominator for rescaling summed Tensors\n buffer_size: all-reduce 
chunk size in bytes\n \"\"\"\n # buffer size in bytes, determine equiv. # of elements based on data type\n buffer_t = tensors[0].new(\n math.ceil(buffer_size \/ tensors[0].element_size())).zero_()\n buffer = []\n\n def all_reduce_buffer():\n # copy tensors into buffer_t\n offset = 0\n for t in buffer:\n numel = t.numel()\n buffer_t[offset:offset+numel].copy_(t.view(-1))\n offset += numel\n\n # all-reduce and rescale\n torch.distributed.all_reduce(buffer_t[:offset])\n buffer_t.div_(rescale_denom)\n\n # copy all-reduced buffer back into tensors\n offset = 0\n for t in buffer:\n numel = t.numel()\n t.view(-1).copy_(buffer_t[offset:offset+numel])\n offset += numel\n\n filled = 0\n for t in tensors:\n sz = t.numel() * t.element_size()\n if sz > buffer_size:\n # tensor is bigger than buffer, all-reduce and rescale directly\n torch.distributed.all_reduce(t)\n t.div_(rescale_denom)\n elif filled + sz > buffer_size:\n # buffer is full, all-reduce and replace buffer with grad\n all_reduce_buffer()\n buffer = [t]\n filled = sz\n else:\n # add tensor to buffer\n buffer.append(t)\n filled += sz\n\n if len(buffer) > 0:\n all_reduce_buffer()","function_tokens":["def","all_reduce_and_rescale_tensors","(","tensors",",","rescale_denom",",","buffer_size","=","10485760",")",":","# buffer size in bytes, determine equiv. # of elements based on data type","buffer_t","=","tensors","[","0","]",".","new","(","math",".","ceil","(","buffer_size","\/","tensors","[","0","]",".","element_size","(",")",")",")",".","zero_","(",")","buffer","=","[","]","def","all_reduce_buffer","(",")",":","# copy tensors into buffer_t","offset","=","0","for","t","in","buffer",":","numel","=","t",".","numel","(",")","buffer_t","[","offset",":","offset","+","numel","]",".","copy_","(","t",".","view","(","-","1",")",")","offset","+=","numel","# all-reduce and rescale","torch",".","distributed",".","all_reduce","(","buffer_t","[",":","offset","]",")","buffer_t",".","div_","(","rescale_denom",")","# copy all-reduced buffer back into tensors","offset","=","0","for","t","in","buffer",":","numel","=","t",".","numel","(",")","t",".","view","(","-","1",")",".","copy_","(","buffer_t","[","offset",":","offset","+","numel","]",")","offset","+=","numel","filled","=","0","for","t","in","tensors",":","sz","=","t",".","numel","(",")","*","t",".","element_size","(",")","if","sz",">","buffer_size",":","# tensor is bigger than buffer, all-reduce and rescale directly","torch",".","distributed",".","all_reduce","(","t",")","t",".","div_","(","rescale_denom",")","elif","filled","+","sz",">","buffer_size",":","# buffer is full, all-reduce and replace buffer with grad","all_reduce_buffer","(",")","buffer","=","[","t","]","filled","=","sz","else",":","# add tensor to buffer","buffer",".","append","(","t",")","filled","+=","sz","if","len","(","buffer",")",">","0",":","all_reduce_buffer","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/distributed.py#L35-L86"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/distributed.py","language":"python","identifier":"all_gather_list","parameters":"(data, max_size=4096)","argument_list":"","return_statement":"return results","docstring":"Gathers arbitrary data from all nodes into a list.","docstring_summary":"Gathers arbitrary data from all nodes into a 
list.","docstring_tokens":["Gathers","arbitrary","data","from","all","nodes","into","a","list","."],"function":"def all_gather_list(data, max_size=4096):\n \"\"\"Gathers arbitrary data from all nodes into a list.\"\"\"\n world_size = torch.distributed.get_world_size()\n if not hasattr(all_gather_list, '_in_buffer') or \\\n max_size != all_gather_list._in_buffer.size():\n all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)\n all_gather_list._out_buffers = [\n torch.cuda.ByteTensor(max_size)\n for i in range(world_size)\n ]\n in_buffer = all_gather_list._in_buffer\n out_buffers = all_gather_list._out_buffers\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n if enc_size + 2 > max_size:\n raise ValueError(\n 'encoded data exceeds max_size: {}'.format(enc_size + 2))\n assert max_size < 255*256\n in_buffer[0] = enc_size \/\/ 255 # this encoding works for max_size < 65k\n in_buffer[1] = enc_size % 255\n in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))\n\n torch.distributed.all_gather(out_buffers, in_buffer.cuda())\n\n results = []\n for i in range(world_size):\n out_buffer = out_buffers[i]\n size = (255 * out_buffer[0].item()) + out_buffer[1].item()\n\n bytes_list = bytes(out_buffer[2:size+2].tolist())\n result = pickle.loads(bytes_list)\n results.append(result)\n return results","function_tokens":["def","all_gather_list","(","data",",","max_size","=","4096",")",":","world_size","=","torch",".","distributed",".","get_world_size","(",")","if","not","hasattr","(","all_gather_list",",","'_in_buffer'",")","or","max_size","!=","all_gather_list",".","_in_buffer",".","size","(",")",":","all_gather_list",".","_in_buffer","=","torch",".","cuda",".","ByteTensor","(","max_size",")","all_gather_list",".","_out_buffers","=","[","torch",".","cuda",".","ByteTensor","(","max_size",")","for","i","in","range","(","world_size",")","]","in_buffer","=","all_gather_list",".","_in_buffer","out_buffers","=","all_gather_list",".","_out_buffers","enc","=","pickle",".","dumps","(","data",")","enc_size","=","len","(","enc",")","if","enc_size","+","2",">","max_size",":","raise","ValueError","(","'encoded data exceeds max_size: {}'",".","format","(","enc_size","+","2",")",")","assert","max_size","<","255","*","256","in_buffer","[","0","]","=","enc_size","\/\/","255","# this encoding works for max_size < 65k","in_buffer","[","1","]","=","enc_size","%","255","in_buffer","[","2",":","enc_size","+","2","]","=","torch",".","ByteTensor","(","list","(","enc",")",")","torch",".","distributed",".","all_gather","(","out_buffers",",","in_buffer",".","cuda","(",")",")","results","=","[","]","for","i","in","range","(","world_size",")",":","out_buffer","=","out_buffers","[","i","]","size","=","(","255","*","out_buffer","[","0","]",".","item","(",")",")","+","out_buffer","[","1","]",".","item","(",")","bytes_list","=","bytes","(","out_buffer","[","2",":","size","+","2","]",".","tolist","(",")",")","result","=","pickle",".","loads","(","bytes_list",")","results",".","append","(","result",")","return","results"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/distributed.py#L89-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"build_loss_compute","parameters":"(model, tgt_vocab, opt, train=True)","argument_list":"","return_statement":"return compute","docstring":"This returns user-defined LossCompute 
object, which is used to\n compute loss in train\/validate process. You can implement your\n own *LossCompute class, by subclassing LossComputeBase.","docstring_summary":"This returns user-defined LossCompute object, which is used to\n compute loss in train\/validate process. You can implement your\n own *LossCompute class, by subclassing LossComputeBase.","docstring_tokens":["This","returns","user","-","defined","LossCompute","object","which","is","used","to","compute","loss","in","train","\/","validate","process",".","You","can","implement","your","own","*","LossCompute","class","by","subclassing","LossComputeBase","."],"function":"def build_loss_compute(model, tgt_vocab, opt, train=True):\n \"\"\"\n This returns user-defined LossCompute object, which is used to\n compute loss in train\/validate process. You can implement your\n own *LossCompute class, by subclassing LossComputeBase.\n \"\"\"\n device = torch.device(\"cuda\" if onmt.utils.misc.use_gpu(opt) else \"cpu\")\n\n if opt.copy_attn:\n compute = onmt.modules.CopyGeneratorLossCompute(\n model.generator, tgt_vocab, opt.copy_attn_force,\n opt.copy_loss_by_seqlength)\n else:\n compute = NMTLossCompute(\n model.generator, tgt_vocab,\n label_smoothing=opt.label_smoothing if train else 0.0)\n compute.to(device)\n\n return compute","function_tokens":["def","build_loss_compute","(","model",",","tgt_vocab",",","opt",",","train","=","True",")",":","device","=","torch",".","device","(","\"cuda\"","if","onmt",".","utils",".","misc",".","use_gpu","(","opt",")","else","\"cpu\"",")","if","opt",".","copy_attn",":","compute","=","onmt",".","modules",".","CopyGeneratorLossCompute","(","model",".","generator",",","tgt_vocab",",","opt",".","copy_attn_force",",","opt",".","copy_loss_by_seqlength",")","else",":","compute","=","NMTLossCompute","(","model",".","generator",",","tgt_vocab",",","label_smoothing","=","opt",".","label_smoothing","if","train","else","0.0",")","compute",".","to","(","device",")","return","compute"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L17-L35"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"filter_shard_state","parameters":"(state, shard_size=None)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def filter_shard_state(state, shard_size=None):\n \"\"\" ? 
\"\"\"\n for k, v in state.items():\n if shard_size is None:\n yield k, v\n\n if v is not None:\n v_split = []\n if isinstance(v, torch.Tensor):\n for v_chunk in torch.split(v, shard_size):\n v_chunk = v_chunk.data.clone()\n v_chunk.requires_grad = v.requires_grad\n v_split.append(v_chunk)\n yield k, (v, v_split)","function_tokens":["def","filter_shard_state","(","state",",","shard_size","=","None",")",":","for","k",",","v","in","state",".","items","(",")",":","if","shard_size","is","None",":","yield","k",",","v","if","v","is","not","None",":","v_split","=","[","]","if","isinstance","(","v",",","torch",".","Tensor",")",":","for","v_chunk","in","torch",".","split","(","v",",","shard_size",")",":","v_chunk","=","v_chunk",".","data",".","clone","(",")","v_chunk",".","requires_grad","=","v",".","requires_grad","v_split",".","append","(","v_chunk",")","yield","k",",","(","v",",","v_split",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L252-L265"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"shards","parameters":"(state, shard_size, eval_only=False)","argument_list":"","return_statement":"","docstring":"Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.","docstring_summary":"Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.","docstring_tokens":["Args",":","state",":","A","dictionary","which","corresponds","to","the","output","of","*","LossCompute",".","_make_shard_state","()",".","The","values","for","those","keys","are","Tensor","-","like","or","None",".","shard_size",":","The","maximum","size","of","the","shards","yielded","by","the","model",".","eval_only",":","If","True","only","yield","the","state","nothing","else",".","Otherwise","yield","shards","."],"function":"def shards(state, shard_size, eval_only=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). 
The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval_only:\n yield filter_shard_state(state)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state, shard_size))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, [v_chunk for v_chunk in v_split])\n for k, (_, v_split) in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = []\n for k, (v, v_split) in non_none.items():\n if isinstance(v, torch.Tensor) and state[k].requires_grad:\n variables.extend(zip(torch.split(state[k], shard_size),\n [v_chunk.grad for v_chunk in v_split]))\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads)","function_tokens":["def","shards","(","state",",","shard_size",",","eval_only","=","False",")",":","if","eval_only",":","yield","filter_shard_state","(","state",")","else",":","# non_none: the subdict of the state dictionary where the values","# are not None.","non_none","=","dict","(","filter_shard_state","(","state",",","shard_size",")",")","# Now, the iteration:","# state is a dictionary of sequences of tensor-like but we","# want a sequence of dictionaries of tensors.","# First, unzip the dictionary into a sequence of keys and a","# sequence of tensor-like sequences.","keys",",","values","=","zip","(","*","(","(","k",",","[","v_chunk","for","v_chunk","in","v_split","]",")","for","k",",","(","_",",","v_split",")","in","non_none",".","items","(",")",")",")","# Now, yield a dictionary for each shard. The keys are always","# the same. values is a sequence of length #keys where each","# element is a sequence of length #shards. 
We want to iterate","# over the shards, not over the keys: therefore, the values need","# to be re-zipped by shard and then each shard can be paired","# with the keys.","for","shard_tensors","in","zip","(","*","values",")",":","yield","dict","(","zip","(","keys",",","shard_tensors",")",")","# Assumed backprop'd","variables","=","[","]","for","k",",","(","v",",","v_split",")","in","non_none",".","items","(",")",":","if","isinstance","(","v",",","torch",".","Tensor",")","and","state","[","k","]",".","requires_grad",":","variables",".","extend","(","zip","(","torch",".","split","(","state","[","k","]",",","shard_size",")",",","[","v_chunk",".","grad","for","v_chunk","in","v_split","]",")",")","inputs",",","grads","=","zip","(","*","variables",")","torch",".","autograd",".","backward","(","inputs",",","grads",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L268-L315"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase._make_shard_state","parameters":"(self, batch, output, range_, attns=None)","argument_list":"","return_statement":"return NotImplementedError","docstring":"Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.","docstring_summary":"Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.","docstring_tokens":["Make","shard","state","dictionary","for","shards","()","to","return","iterable","shards","for","efficient","loss","computation",".","Subclass","must","define","this","method","to","match","its","own","_compute_loss","()","interface",".","Args",":","batch",":","the","current","batch",".","output",":","the","predict","output","from","the","model",".","range_",":","the","range","of","examples","for","computing","the","whole","batch","or","a","trunc","of","it?","attns",":","the","attns","dictionary","returned","from","the","model","."],"function":"def _make_shard_state(self, batch, output, range_, attns=None):\n \"\"\"\n Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. 
Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.\n \"\"\"\n return NotImplementedError","function_tokens":["def","_make_shard_state","(","self",",","batch",",","output",",","range_",",","attns","=","None",")",":","return","NotImplementedError"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L64-L76"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase._compute_loss","parameters":"(self, batch, output, target, **kwargs)","argument_list":"","return_statement":"return NotImplementedError","docstring":"Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.","docstring_summary":"Compute the loss. Subclass must define this method.","docstring_tokens":["Compute","the","loss",".","Subclass","must","define","this","method","."],"function":"def _compute_loss(self, batch, output, target, **kwargs):\n \"\"\"\n Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.\n \"\"\"\n return NotImplementedError","function_tokens":["def","_compute_loss","(","self",",","batch",",","output",",","target",",","*","*","kwargs",")",":","return","NotImplementedError"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L78-L89"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase.monolithic_compute_loss","parameters":"(self, batch, output, attns)","argument_list":"","return_statement":"return batch_stats","docstring":"Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n Returns:\n :obj:`onmt.utils.Statistics`: loss statistics","docstring_summary":"Compute the forward loss for the batch.","docstring_tokens":["Compute","the","forward","loss","for","the","batch","."],"function":"def monolithic_compute_loss(self, batch, output, attns):\n \"\"\"\n Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n Returns:\n :obj:`onmt.utils.Statistics`: loss statistics\n \"\"\"\n range_ = (0, batch.tgt.size(0))\n shard_state = self._make_shard_state(batch, output, range_, attns)\n _, batch_stats = self._compute_loss(batch, **shard_state)\n\n return 
batch_stats","function_tokens":["def","monolithic_compute_loss","(","self",",","batch",",","output",",","attns",")",":","range_","=","(","0",",","batch",".","tgt",".","size","(","0",")",")","shard_state","=","self",".","_make_shard_state","(","batch",",","output",",","range_",",","attns",")","_",",","batch_stats","=","self",".","_compute_loss","(","batch",",","*","*","shard_state",")","return","batch_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L91-L109"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase.sharded_compute_loss","parameters":"(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization)","argument_list":"","return_statement":"return batch_stats","docstring":"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.utils.Statistics`: validation loss statistics","docstring_summary":"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.","docstring_tokens":["Compute","the","forward","loss","and","backpropagate",".","Computation","is","done","with","shards","and","optionally","truncation","for","memory","efficiency","."],"function":"def sharded_compute_loss(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization):\n \"\"\"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. 
Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.utils.Statistics`: validation loss statistics\n\n \"\"\"\n batch_stats = onmt.utils.Statistics()\n range_ = (cur_trunc, cur_trunc + trunc_size)\n shard_state = self._make_shard_state(batch, output, range_, attns)\n for shard in shards(shard_state, shard_size):\n loss, stats = self._compute_loss(batch, **shard)\n loss.div(float(normalization)).backward()\n batch_stats.update(stats)\n\n return batch_stats","function_tokens":["def","sharded_compute_loss","(","self",",","batch",",","output",",","attns",",","cur_trunc",",","trunc_size",",","shard_size",",","normalization",")",":","batch_stats","=","onmt",".","utils",".","Statistics","(",")","range_","=","(","cur_trunc",",","cur_trunc","+","trunc_size",")","shard_state","=","self",".","_make_shard_state","(","batch",",","output",",","range_",",","attns",")","for","shard","in","shards","(","shard_state",",","shard_size",")",":","loss",",","stats","=","self",".","_compute_loss","(","batch",",","*","*","shard",")","loss",".","div","(","float","(","normalization",")",")",".","backward","(",")","batch_stats",".","update","(","stats",")","return","batch_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L111-L149"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase._stats","parameters":"(self, loss, scores, target)","argument_list":"","return_statement":"return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)","docstring":"Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.","docstring_summary":"Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets","docstring_tokens":["Args",":","loss","(",":","obj",":","FloatTensor",")",":","the","loss","computed","by","the","loss","criterion",".","scores","(",":","obj",":","FloatTensor",")",":","a","score","for","each","possible","output","target","(",":","obj",":","FloatTensor",")",":","true","targets"],"function":"def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target) \\\n .masked_select(non_padding) \\\n .sum() \\\n .item()\n num_non_padding = non_padding.sum().item()\n return 
onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)","function_tokens":["def","_stats","(","self",",","loss",",","scores",",","target",")",":","pred","=","scores",".","max","(","1",")","[","1","]","non_padding","=","target",".","ne","(","self",".","padding_idx",")","num_correct","=","pred",".","eq","(","target",")",".","masked_select","(","non_padding",")",".","sum","(",")",".","item","(",")","num_non_padding","=","non_padding",".","sum","(",")",".","item","(",")","return","onmt",".","utils",".","Statistics","(","loss",".","item","(",")",",","num_non_padding",",","num_correct",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L151-L168"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py","language":"python","identifier":"LabelSmoothingLoss.forward","parameters":"(self, output, target)","argument_list":"","return_statement":"return F.kl_div(output, model_prob, reduction='sum')","docstring":"output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size","docstring_summary":"output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size","docstring_tokens":["output","(","FloatTensor",")",":","batch_size","x","n_classes","target","(","LongTensor",")",":","batch_size"],"function":"def forward(self, output, target):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n model_prob = self.one_hot.repeat(target.size(0), 1)\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)\n\n return F.kl_div(output, model_prob, reduction='sum')","function_tokens":["def","forward","(","self",",","output",",","target",")",":","model_prob","=","self",".","one_hot",".","repeat","(","target",".","size","(","0",")",",","1",")","model_prob",".","scatter_","(","1",",","target",".","unsqueeze","(","1",")",",","self",".","confidence",")","model_prob",".","masked_fill_","(","(","target","==","self",".","padding_idx",")",".","unsqueeze","(","1",")",",","0",")","return","F",".","kl_div","(","output",",","model_prob",",","reduction","=","'sum'",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/loss.py#L195-L204"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py","language":"python","identifier":"aeq","parameters":"(*args)","argument_list":"","return_statement":"","docstring":"Assert all arguments have the same value","docstring_summary":"Assert all arguments have the same value","docstring_tokens":["Assert","all","arguments","have","the","same","value"],"function":"def aeq(*args):\n \"\"\"\n Assert all arguments have the same value\n \"\"\"\n arguments = (arg for arg in args)\n first = next(arguments)\n assert all(arg == first for arg in arguments), \\\n \"Not all arguments have the same value: \" + str(args)","function_tokens":["def","aeq","(","*","args",")",":","arguments","=","(","arg","for","arg","in","args",")","first","=","next","(","arguments",")","assert","all","(","arg","==","first","for","arg","in","arguments",")",",","\"Not all arguments have the same value: 
\"","+","str","(","args",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py#L6-L13"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py","language":"python","identifier":"sequence_mask","parameters":"(lengths, max_len=None)","argument_list":"","return_statement":"return (torch.arange(0, max_len)\n .type_as(lengths)\n .repeat(batch_size, 1)\n .lt(lengths.unsqueeze(1)))","docstring":"Creates a boolean mask from sequence lengths.","docstring_summary":"Creates a boolean mask from sequence lengths.","docstring_tokens":["Creates","a","boolean","mask","from","sequence","lengths","."],"function":"def sequence_mask(lengths, max_len=None):\n \"\"\"\n Creates a boolean mask from sequence lengths.\n \"\"\"\n batch_size = lengths.numel()\n max_len = max_len or lengths.max()\n return (torch.arange(0, max_len)\n .type_as(lengths)\n .repeat(batch_size, 1)\n .lt(lengths.unsqueeze(1)))","function_tokens":["def","sequence_mask","(","lengths",",","max_len","=","None",")",":","batch_size","=","lengths",".","numel","(",")","max_len","=","max_len","or","lengths",".","max","(",")","return","(","torch",".","arange","(","0",",","max_len",")",".","type_as","(","lengths",")",".","repeat","(","batch_size",",","1",")",".","lt","(","lengths",".","unsqueeze","(","1",")",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py#L16-L25"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py","language":"python","identifier":"tile","parameters":"(x, count, dim=0)","argument_list":"","return_statement":"return x","docstring":"Tiles x on dimension dim count times.","docstring_summary":"Tiles x on dimension dim count times.","docstring_tokens":["Tiles","x","on","dimension","dim","count","times","."],"function":"def tile(x, count, dim=0):\n \"\"\"\n Tiles x on dimension dim count times.\n \"\"\"\n perm = list(range(len(x.size())))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.size())\n out_size[0] *= count\n batch = x.size(0)\n x = x.view(batch, -1) \\\n .transpose(0, 1) \\\n .repeat(count, 1) \\\n .transpose(0, 1) \\\n .contiguous() \\\n .view(*out_size)\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x","function_tokens":["def","tile","(","x",",","count",",","dim","=","0",")",":","perm","=","list","(","range","(","len","(","x",".","size","(",")",")",")",")","if","dim","!=","0",":","perm","[","0","]",",","perm","[","dim","]","=","perm","[","dim","]",",","perm","[","0","]","x","=","x",".","permute","(","perm",")",".","contiguous","(",")","out_size","=","list","(","x",".","size","(",")",")","out_size","[","0","]","*=","count","batch","=","x",".","size","(","0",")","x","=","x",".","view","(","batch",",","-","1",")",".","transpose","(","0",",","1",")",".","repeat","(","count",",","1",")",".","transpose","(","0",",","1",")",".","contiguous","(",")",".","view","(","*","out_size",")","if","dim","!=","0",":","x","=","x",".","permute","(","perm",")",".","contiguous","(",")","return","x"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py#L28-L47"} 
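Because `sequence_mask` and `tile` (the two `misc.py` helpers above) recur throughout batched decoding, a small worked example may help; the tensor values below are illustrative and not taken from the repository:

```python
import torch

lengths = torch.tensor([1, 3, 2])

# sequence_mask: row i is True at the first lengths[i] positions.
max_len = int(lengths.max())
mask = (torch.arange(0, max_len)
        .type_as(lengths)
        .repeat(lengths.numel(), 1)
        .lt(lengths.unsqueeze(1)))
# mask -> [[True, False, False],
#          [True, True,  True ],
#          [True, True,  False]]

# tile(x, count, dim=0) repeats each slice `count` times while keeping the
# copies of one slice adjacent, e.g. expanding a batch to batch * beam_size
# for beam search. Inlined here for count=3 on a 2x2 tensor:
x = torch.tensor([[1, 2], [3, 4]])
tiled = (x.view(x.size(0), -1)
          .transpose(0, 1)
          .repeat(3, 1)
          .transpose(0, 1)
          .contiguous()
          .view(3 * x.size(0), x.size(1)))
# tiled -> [[1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]
```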
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py","language":"python","identifier":"use_gpu","parameters":"(opt)","argument_list":"","return_statement":"return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \\\n (hasattr(opt, 'gpu') and opt.gpu > -1)","docstring":"Creates a boolean if gpu used","docstring_summary":"Creates a boolean if gpu used","docstring_tokens":["Creates","a","boolean","if","gpu","used"],"function":"def use_gpu(opt):\n \"\"\"\n Creates a boolean if gpu used\n \"\"\"\n return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \\\n (hasattr(opt, 'gpu') and opt.gpu > -1)","function_tokens":["def","use_gpu","(","opt",")",":","return","(","hasattr","(","opt",",","'gpu_ranks'",")","and","len","(","opt",".","gpu_ranks",")",">","0",")","or","(","hasattr","(","opt",",","'gpu'",")","and","opt",".","gpu",">","-","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/misc.py#L50-L55"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase.__init__","parameters":"(self, report_every, start_time=-1.)","argument_list":"","return_statement":"","docstring":"Args:\n report_every(int): Report status every this many sentences\n start_time(float): manually set report start time. Negative values\n means that you will need to set it later or use `start()`","docstring_summary":"Args:\n report_every(int): Report status every this many sentences\n start_time(float): manually set report start time. Negative values\n means that you will need to set it later or use `start()`","docstring_tokens":["Args",":","report_every","(","int",")",":","Report","status","every","this","many","sentences","start_time","(","float",")",":","manually","set","report","start","time",".","Negative","values","means","that","you","will","need","to","set","it","later","or","use","start","()"],"function":"def __init__(self, report_every, start_time=-1.):\n \"\"\"\n Args:\n report_every(int): Report status every this many sentences\n start_time(float): manually set report start time. 
Negative values\n means that you will need to set it later or use `start()`\n \"\"\"\n self.report_every = report_every\n self.progress_step = 0\n self.start_time = start_time","function_tokens":["def","__init__","(","self",",","report_every",",","start_time","=","-","1.",")",":","self",".","report_every","=","report_every","self",".","progress_step","=","0","self",".","start_time","=","start_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L33-L42"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase.report_training","parameters":"(self, step, num_steps, learning_rate,\n report_stats, multigpu=False)","argument_list":"","return_statement":"return onmt.utils.Statistics()","docstring":"This is the user-defined batch-level training progress\n report function.\n\n Args:\n step(int): current step count.\n num_steps(int): total number of batches.\n learning_rate(float): current learning rate.\n report_stats(Statistics): old Statistics instance.\n Returns:\n report_stats(Statistics): updated Statistics instance.","docstring_summary":"This is the user-defined batch-level training progress\n report function.","docstring_tokens":["This","is","the","user","-","defined","batch","-","level","training","progress","report","function","."],"function":"def report_training(self, step, num_steps, learning_rate,\n report_stats, multigpu=False):\n \"\"\"\n This is the user-defined batch-level training progress\n report function.\n\n Args:\n step(int): current step count.\n num_steps(int): total number of batches.\n learning_rate(float): current learning rate.\n report_stats(Statistics): old Statistics instance.\n Returns:\n report_stats(Statistics): updated Statistics instance.\n \"\"\"\n if self.start_time < 0:\n raise ValueError(\"\"\"ReportMgr needs to be started\n (set 'start_time' or use 'start()'\"\"\")\n\n if multigpu:\n report_stats = onmt.utils.Statistics.all_gather_stats(report_stats)\n\n if step % self.report_every == 0:\n self._report_training(\n step, num_steps, learning_rate, report_stats)\n self.progress_step += 1\n return onmt.utils.Statistics()","function_tokens":["def","report_training","(","self",",","step",",","num_steps",",","learning_rate",",","report_stats",",","multigpu","=","False",")",":","if","self",".","start_time","<","0",":","raise","ValueError","(","\"\"\"ReportMgr needs to be started\n (set 'start_time' or use 'start()'\"\"\"",")","if","multigpu",":","report_stats","=","onmt",".","utils",".","Statistics",".","all_gather_stats","(","report_stats",")","if","step","%","self",".","report_every","==","0",":","self",".","_report_training","(","step",",","num_steps",",","learning_rate",",","report_stats",")","self",".","progress_step","+=","1","return","onmt",".","utils",".","Statistics","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L50-L75"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase._report_training","parameters":"(self, *args, **kwargs)","argument_list":"","return_statement":"","docstring":"To be overridden","docstring_summary":"To be 
overridden","docstring_tokens":["To","be","overridden"],"function":"def _report_training(self, *args, **kwargs):\n \"\"\" To be overridden \"\"\"\n raise NotImplementedError()","function_tokens":["def","_report_training","(","self",",","*","args",",","*","*","kwargs",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L77-L79"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase.report_step","parameters":"(self, lr, step, train_stats=None, valid_stats=None)","argument_list":"","return_statement":"","docstring":"Report stats of a step\n\n Args:\n train_stats(Statistics): training stats\n valid_stats(Statistics): validation stats\n lr(float): current learning rate","docstring_summary":"Report stats of a step","docstring_tokens":["Report","stats","of","a","step"],"function":"def report_step(self, lr, step, train_stats=None, valid_stats=None):\n \"\"\"\n Report stats of a step\n\n Args:\n train_stats(Statistics): training stats\n valid_stats(Statistics): validation stats\n lr(float): current learning rate\n \"\"\"\n self._report_step(\n lr, step, train_stats=train_stats, valid_stats=valid_stats)","function_tokens":["def","report_step","(","self",",","lr",",","step",",","train_stats","=","None",",","valid_stats","=","None",")",":","self",".","_report_step","(","lr",",","step",",","train_stats","=","train_stats",",","valid_stats","=","valid_stats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L81-L91"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgr.__init__","parameters":"(self, report_every, start_time=-1., tensorboard_writer=None)","argument_list":"","return_statement":"","docstring":"A report manager that writes statistics on standard output as well as\n (optionally) TensorBoard\n\n Args:\n report_every(int): Report status every this many sentences\n tensorboard_writer(:obj:`tensorboard.SummaryWriter`):\n The TensorBoard Summary writer to use or None","docstring_summary":"A report manager that writes statistics on standard output as well as\n (optionally) TensorBoard","docstring_tokens":["A","report","manager","that","writes","statistics","on","standard","output","as","well","as","(","optionally",")","TensorBoard"],"function":"def __init__(self, report_every, start_time=-1., tensorboard_writer=None):\n \"\"\"\n A report manager that writes statistics on standard output as well as\n (optionally) TensorBoard\n\n Args:\n report_every(int): Report status every this many sentences\n tensorboard_writer(:obj:`tensorboard.SummaryWriter`):\n The TensorBoard Summary writer to use or None\n \"\"\"\n super(ReportMgr, self).__init__(report_every, start_time)\n self.tensorboard_writer = 
tensorboard_writer","function_tokens":["def","__init__","(","self",",","report_every",",","start_time","=","-","1.",",","tensorboard_writer","=","None",")",":","super","(","ReportMgr",",","self",")",".","__init__","(","report_every",",","start_time",")","self",".","tensorboard_writer","=","tensorboard_writer"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L98-L109"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgr._report_training","parameters":"(self, step, num_steps, learning_rate,\n report_stats)","argument_list":"","return_statement":"return report_stats","docstring":"See base class method `ReportMgrBase.report_training`.","docstring_summary":"See base class method `ReportMgrBase.report_training`.","docstring_tokens":["See","base","class","method","ReportMgrBase",".","report_training","."],"function":"def _report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n See base class method `ReportMgrBase.report_training`.\n \"\"\"\n report_stats.output(step, num_steps,\n learning_rate, self.start_time)\n\n # Log the progress using the number of batches on the x-axis.\n self.maybe_log_tensorboard(report_stats,\n \"progress\",\n learning_rate,\n self.progress_step)\n report_stats = onmt.utils.Statistics()\n\n return report_stats","function_tokens":["def","_report_training","(","self",",","step",",","num_steps",",","learning_rate",",","report_stats",")",":","report_stats",".","output","(","step",",","num_steps",",","learning_rate",",","self",".","start_time",")","# Log the progress using the number of batches on the x-axis.","self",".","maybe_log_tensorboard","(","report_stats",",","\"progress\"",",","learning_rate",",","self",".","progress_step",")","report_stats","=","onmt",".","utils",".","Statistics","(",")","return","report_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L116-L131"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgr._report_step","parameters":"(self, lr, step, train_stats=None, valid_stats=None)","argument_list":"","return_statement":"","docstring":"See base class method `ReportMgrBase.report_step`.","docstring_summary":"See base class method `ReportMgrBase.report_step`.","docstring_tokens":["See","base","class","method","ReportMgrBase",".","report_step","."],"function":"def _report_step(self, lr, step, train_stats=None, valid_stats=None):\n \"\"\"\n See base class method `ReportMgrBase.report_step`.\n \"\"\"\n if train_stats is not None:\n self.log('Train perplexity: %g' % train_stats.ppl())\n self.log('Train accuracy: %g' % train_stats.accuracy())\n\n self.maybe_log_tensorboard(train_stats,\n \"train\",\n lr,\n step)\n\n if valid_stats is not None:\n self.log('Validation perplexity: %g' % valid_stats.ppl())\n self.log('Validation accuracy: %g' % valid_stats.accuracy())\n\n self.maybe_log_tensorboard(valid_stats,\n \"valid\",\n lr,\n 
step)","function_tokens":["def","_report_step","(","self",",","lr",",","step",",","train_stats","=","None",",","valid_stats","=","None",")",":","if","train_stats","is","not","None",":","self",".","log","(","'Train perplexity: %g'","%","train_stats",".","ppl","(",")",")","self",".","log","(","'Train accuracy: %g'","%","train_stats",".","accuracy","(",")",")","self",".","maybe_log_tensorboard","(","train_stats",",","\"train\"",",","lr",",","step",")","if","valid_stats","is","not","None",":","self",".","log","(","'Validation perplexity: %g'","%","valid_stats",".","ppl","(",")",")","self",".","log","(","'Validation accuracy: %g'","%","valid_stats",".","accuracy","(",")",")","self",".","maybe_log_tensorboard","(","valid_stats",",","\"valid\"",",","lr",",","step",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/utils\/report_manager.py#L133-L153"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/multi_headed_attn.py","language":"python","identifier":"MultiHeadedAttention.forward","parameters":"(self, key, value, query, mask=None,\n layer_cache=None, type=None)","argument_list":"","return_statement":"return output, top_attn","docstring":"Compute the context vector and the attention vectors.\n\n Args:\n key (`FloatTensor`): set of `key_len`\n key vectors `[batch, key_len, dim]`\n value (`FloatTensor`): set of `key_len`\n value vectors `[batch, key_len, dim]`\n query (`FloatTensor`): set of `query_len`\n query vectors `[batch, query_len, dim]`\n mask: binary mask indicating which keys have\n non-zero attention `[batch, query_len, key_len]`\n Returns:\n (`FloatTensor`, `FloatTensor`) :\n\n * output context vectors `[batch, query_len, dim]`\n * one of the attention vectors `[batch, query_len, key_len]`","docstring_summary":"Compute the context vector and the attention vectors.","docstring_tokens":["Compute","the","context","vector","and","the","attention","vectors","."],"function":"def forward(self, key, value, query, mask=None,\n layer_cache=None, type=None):\n \"\"\"\n Compute the context vector and the attention vectors.\n\n Args:\n key (`FloatTensor`): set of `key_len`\n key vectors `[batch, key_len, dim]`\n value (`FloatTensor`): set of `key_len`\n value vectors `[batch, key_len, dim]`\n query (`FloatTensor`): set of `query_len`\n query vectors `[batch, query_len, dim]`\n mask: binary mask indicating which keys have\n non-zero attention `[batch, query_len, key_len]`\n Returns:\n (`FloatTensor`, `FloatTensor`) :\n\n * output context vectors `[batch, query_len, dim]`\n * one of the attention vectors `[batch, query_len, key_len]`\n \"\"\"\n\n # CHECKS\n # batch, k_len, d = key.size()\n # batch_, k_len_, d_ = value.size()\n # aeq(batch, batch_)\n # aeq(k_len, k_len_)\n # aeq(d, d_)\n # batch_, q_len, d_ = query.size()\n # aeq(batch, batch_)\n # aeq(d, d_)\n # aeq(self.model_dim % 8, 0)\n # if mask is not None:\n # batch_, q_len_, k_len_ = mask.size()\n # aeq(batch_, batch)\n # aeq(k_len_, k_len)\n # aeq(q_len_ == q_len)\n # END CHECKS\n\n batch_size = key.size(0)\n dim_per_head = self.dim_per_head\n head_count = self.head_count\n key_len = key.size(1)\n query_len = query.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(batch_size, -1, head_count, dim_per_head) \\\n .transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous() \\\n .view(batch_size, -1, 
head_count * dim_per_head)\n\n # 1) Project key, value, and query.\n if layer_cache is not None:\n if type == \"self\":\n query, key, value = self.linear_query(query),\\\n self.linear_keys(query),\\\n self.linear_values(query)\n\n key = shape(key)\n value = shape(value)\n\n if layer_cache is not None:\n device = key.device\n if layer_cache[\"self_keys\"] is not None:\n key = torch.cat(\n (layer_cache[\"self_keys\"].to(device), key),\n dim=2)\n if layer_cache[\"self_values\"] is not None:\n value = torch.cat(\n (layer_cache[\"self_values\"].to(device), value),\n dim=2)\n layer_cache[\"self_keys\"] = key\n layer_cache[\"self_values\"] = value\n elif type == \"context\":\n query = self.linear_query(query)\n if layer_cache is not None:\n if layer_cache[\"memory_keys\"] is None:\n key, value = self.linear_keys(key),\\\n self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key, value = layer_cache[\"memory_keys\"],\\\n layer_cache[\"memory_values\"]\n layer_cache[\"memory_keys\"] = key\n layer_cache[\"memory_values\"] = value\n else:\n key, value = self.linear_keys(key),\\\n self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key = self.linear_keys(key)\n value = self.linear_values(value)\n query = self.linear_query(query)\n key = shape(key)\n value = shape(value)\n\n query = shape(query)\n\n key_len = key.size(2)\n query_len = query.size(2)\n\n # 2) Calculate and scale scores.\n query = query \/ math.sqrt(dim_per_head)\n scores = torch.matmul(query, key.transpose(2, 3))\n\n if mask is not None:\n mask = mask.unsqueeze(1).expand_as(scores)\n scores = scores.masked_fill(mask, -1e18)\n\n # 3) Apply attention dropout and compute context vectors.\n attn = self.softmax(scores)\n drop_attn = self.dropout(attn)\n context = unshape(torch.matmul(drop_attn, value))\n\n output = self.final_linear(context)\n # CHECK\n # batch_, q_len_, d_ = output.size()\n # aeq(q_len, q_len_)\n # aeq(batch, batch_)\n # aeq(d, d_)\n\n # Return one attn\n top_attn = attn \\\n .view(batch_size, head_count,\n query_len, key_len)[:, 0, :, :] \\\n .contiguous()\n\n return output, top_attn","function_tokens":["def","forward","(","self",",","key",",","value",",","query",",","mask","=","None",",","layer_cache","=","None",",","type","=","None",")",":","# CHECKS","# batch, k_len, d = key.size()","# batch_, k_len_, d_ = value.size()","# aeq(batch, batch_)","# aeq(k_len, k_len_)","# aeq(d, d_)","# batch_, q_len, d_ = query.size()","# aeq(batch, batch_)","# aeq(d, d_)","# aeq(self.model_dim % 8, 0)","# if mask is not None:","# batch_, q_len_, k_len_ = mask.size()","# aeq(batch_, batch)","# aeq(k_len_, k_len)","# aeq(q_len_ == q_len)","# END CHECKS","batch_size","=","key",".","size","(","0",")","dim_per_head","=","self",".","dim_per_head","head_count","=","self",".","head_count","key_len","=","key",".","size","(","1",")","query_len","=","query",".","size","(","1",")","def","shape","(","x",")",":","\"\"\" projection \"\"\"","return","x",".","view","(","batch_size",",","-","1",",","head_count",",","dim_per_head",")",".","transpose","(","1",",","2",")","def","unshape","(","x",")",":","\"\"\" compute context \"\"\"","return","x",".","transpose","(","1",",","2",")",".","contiguous","(",")",".","view","(","batch_size",",","-","1",",","head_count","*","dim_per_head",")","# 1) Project key, value, and 
query.","if","layer_cache","is","not","None",":","if","type","==","\"self\"",":","query",",","key",",","value","=","self",".","linear_query","(","query",")",",","self",".","linear_keys","(","query",")",",","self",".","linear_values","(","query",")","key","=","shape","(","key",")","value","=","shape","(","value",")","if","layer_cache","is","not","None",":","device","=","key",".","device","if","layer_cache","[","\"self_keys\"","]","is","not","None",":","key","=","torch",".","cat","(","(","layer_cache","[","\"self_keys\"","]",".","to","(","device",")",",","key",")",",","dim","=","2",")","if","layer_cache","[","\"self_values\"","]","is","not","None",":","value","=","torch",".","cat","(","(","layer_cache","[","\"self_values\"","]",".","to","(","device",")",",","value",")",",","dim","=","2",")","layer_cache","[","\"self_keys\"","]","=","key","layer_cache","[","\"self_values\"","]","=","value","elif","type","==","\"context\"",":","query","=","self",".","linear_query","(","query",")","if","layer_cache","is","not","None",":","if","layer_cache","[","\"memory_keys\"","]","is","None",":","key",",","value","=","self",".","linear_keys","(","key",")",",","self",".","linear_values","(","value",")","key","=","shape","(","key",")","value","=","shape","(","value",")","else",":","key",",","value","=","layer_cache","[","\"memory_keys\"","]",",","layer_cache","[","\"memory_values\"","]","layer_cache","[","\"memory_keys\"","]","=","key","layer_cache","[","\"memory_values\"","]","=","value","else",":","key",",","value","=","self",".","linear_keys","(","key",")",",","self",".","linear_values","(","value",")","key","=","shape","(","key",")","value","=","shape","(","value",")","else",":","key","=","self",".","linear_keys","(","key",")","value","=","self",".","linear_values","(","value",")","query","=","self",".","linear_query","(","query",")","key","=","shape","(","key",")","value","=","shape","(","value",")","query","=","shape","(","query",")","key_len","=","key",".","size","(","2",")","query_len","=","query",".","size","(","2",")","# 2) Calculate and scale scores.","query","=","query","\/","math",".","sqrt","(","dim_per_head",")","scores","=","torch",".","matmul","(","query",",","key",".","transpose","(","2",",","3",")",")","if","mask","is","not","None",":","mask","=","mask",".","unsqueeze","(","1",")",".","expand_as","(","scores",")","scores","=","scores",".","masked_fill","(","mask",",","-","1e18",")","# 3) Apply attention dropout and compute context vectors.","attn","=","self",".","softmax","(","scores",")","drop_attn","=","self",".","dropout","(","attn",")","context","=","unshape","(","torch",".","matmul","(","drop_attn",",","value",")",")","output","=","self",".","final_linear","(","context",")","# CHECK","# batch_, q_len_, d_ = output.size()","# aeq(q_len, q_len_)","# aeq(batch, batch_)","# aeq(d, d_)","# Return one attn","top_attn","=","attn",".","view","(","batch_size",",","head_count",",","query_len",",","key_len",")","[",":",",","0",",",":",",",":","]",".","contiguous","(",")","return","output",",","top_attn"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/multi_headed_attn.py#L69-L201"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/position_ffn.py","language":"python","identifier":"PositionwiseFeedForward.forward","parameters":"(self, x)","argument_list":"","return_statement":"return output + x","docstring":"Layer 
definition.\n\n Args:\n input: [ batch_size, input_len, model_dim ]\n\n\n Returns:\n output: [ batch_size, input_len, model_dim ]","docstring_summary":"Layer definition.","docstring_tokens":["Layer","definition","."],"function":"def forward(self, x):\n \"\"\"\n Layer definition.\n\n Args:\n input: [ batch_size, input_len, model_dim ]\n\n\n Returns:\n output: [ batch_size, input_len, model_dim ]\n \"\"\"\n inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))\n output = self.dropout_2(self.w_2(inter))\n return output + x","function_tokens":["def","forward","(","self",",","x",")",":","inter","=","self",".","dropout_1","(","self",".","relu","(","self",".","w_1","(","self",".","layer_norm","(","x",")",")",")",")","output","=","self",".","dropout_2","(","self",".","w_2","(","inter",")",")","return","output","+","x"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/position_ffn.py#L29-L42"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/sparse_activations.py","language":"python","identifier":"threshold_and_support","parameters":"(z, dim=0)","argument_list":"","return_statement":"return tau_z, k_z","docstring":"z: any dimension\n dim: dimension along which to apply the sparsemax","docstring_summary":"z: any dimension\n dim: dimension along which to apply the sparsemax","docstring_tokens":["z",":","any","dimension","dim",":","dimension","along","which","to","apply","the","sparsemax"],"function":"def threshold_and_support(z, dim=0):\n \"\"\"\n z: any dimension\n dim: dimension along which to apply the sparsemax\n \"\"\"\n sorted_z, _ = torch.sort(z, descending=True, dim=dim)\n z_sum = sorted_z.cumsum(dim) - 1 # sort of a misnomer\n k = torch.arange(1, sorted_z.size(dim) + 1, device=z.device).float().view(\n torch.Size([-1] + [1] * (z.dim() - 1))\n ).transpose(0, dim)\n support = k * sorted_z > z_sum\n\n k_z_indices = support.sum(dim=dim).unsqueeze(dim)\n k_z = k_z_indices.float()\n tau_z = z_sum.gather(dim, k_z_indices - 1) \/ k_z\n return tau_z, k_z","function_tokens":["def","threshold_and_support","(","z",",","dim","=","0",")",":","sorted_z",",","_","=","torch",".","sort","(","z",",","descending","=","True",",","dim","=","dim",")","z_sum","=","sorted_z",".","cumsum","(","dim",")","-","1","# sort of a misnomer","k","=","torch",".","arange","(","1",",","sorted_z",".","size","(","dim",")","+","1",",","device","=","z",".","device",")",".","float","(",")",".","view","(","torch",".","Size","(","[","-","1","]","+","[","1","]","*","(","z",".","dim","(",")","-","1",")",")",")",".","transpose","(","0",",","dim",")","support","=","k","*","sorted_z",">","z_sum","k_z_indices","=","support",".","sum","(","dim","=","dim",")",".","unsqueeze","(","dim",")","k_z","=","k_z_indices",".","float","(",")","tau_z","=","z_sum",".","gather","(","dim",",","k_z_indices","-","1",")","\/","k_z","return","tau_z",",","k_z"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/sparse_activations.py#L11-L26"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/sparse_activations.py","language":"python","identifier":"SparsemaxFunction.forward","parameters":"(ctx, input, dim=0)","argument_list":"","return_statement":"return output","docstring":"input (FloatTensor): any 
shape\n returns (FloatTensor): same shape with sparsemax computed on given dim","docstring_summary":"input (FloatTensor): any shape\n returns (FloatTensor): same shape with sparsemax computed on given dim","docstring_tokens":["input","(","FloatTensor",")",":","any","shape","returns","(","FloatTensor",")",":","same","shape","with","sparsemax","computed","on","given","dim"],"function":"def forward(ctx, input, dim=0):\n \"\"\"\n input (FloatTensor): any shape\n returns (FloatTensor): same shape with sparsemax computed on given dim\n \"\"\"\n ctx.dim = dim\n tau_z, k_z = threshold_and_support(input, dim=dim)\n output = torch.clamp(input - tau_z, min=0)\n ctx.save_for_backward(k_z, output)\n return output","function_tokens":["def","forward","(","ctx",",","input",",","dim","=","0",")",":","ctx",".","dim","=","dim","tau_z",",","k_z","=","threshold_and_support","(","input",",","dim","=","dim",")","output","=","torch",".","clamp","(","input","-","tau_z",",","min","=","0",")","ctx",".","save_for_backward","(","k_z",",","output",")","return","output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/sparse_activations.py#L32-L41"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/global_attention.py","language":"python","identifier":"GlobalAttention.score","parameters":"(self, h_t, h_s)","argument_list":"","return_statement":"","docstring":"Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`\n\n Returns:\n :obj:`FloatTensor`:\n raw attention scores (unnormalized) for each src index\n `[batch x tgt_len x src_len]`","docstring_summary":"Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`","docstring_tokens":["Args",":","h_t","(","FloatTensor",")",":","sequence","of","queries","[","batch","x","tgt_len","x","dim","]","h_s","(","FloatTensor",")",":","sequence","of","sources","[","batch","x","src_len","x","dim","]"],"function":"def score(self, h_t, h_s):\n \"\"\"\n Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`\n\n Returns:\n :obj:`FloatTensor`:\n raw attention scores (unnormalized) for each src index\n `[batch x tgt_len x src_len]`\n\n \"\"\"\n\n # Check input sizes\n src_batch, src_len, src_dim = h_s.size()\n tgt_batch, tgt_len, tgt_dim = h_t.size()\n aeq(src_batch, tgt_batch)\n aeq(src_dim, tgt_dim)\n aeq(self.dim, src_dim)\n\n if self.attn_type in [\"general\", \"dot\"]:\n if self.attn_type == \"general\":\n h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)\n h_t_ = self.linear_in(h_t_)\n h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)\n h_s_ = h_s.transpose(1, 2)\n # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)\n return torch.bmm(h_t, h_s_)\n else:\n dim = self.dim\n wq = self.linear_query(h_t.view(-1, dim))\n wq = wq.view(tgt_batch, tgt_len, 1, dim)\n wq = wq.expand(tgt_batch, tgt_len, src_len, dim)\n\n uh = self.linear_context(h_s.contiguous().view(-1, dim))\n uh = uh.view(src_batch, 1, src_len, dim)\n uh = uh.expand(src_batch, tgt_len, src_len, dim)\n\n # (batch, t_len, s_len, d)\n wquh = torch.tanh(wq + uh)\n\n return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, 
src_len)","function_tokens":["def","score","(","self",",","h_t",",","h_s",")",":","# Check input sizes","src_batch",",","src_len",",","src_dim","=","h_s",".","size","(",")","tgt_batch",",","tgt_len",",","tgt_dim","=","h_t",".","size","(",")","aeq","(","src_batch",",","tgt_batch",")","aeq","(","src_dim",",","tgt_dim",")","aeq","(","self",".","dim",",","src_dim",")","if","self",".","attn_type","in","[","\"general\"",",","\"dot\"","]",":","if","self",".","attn_type","==","\"general\"",":","h_t_","=","h_t",".","view","(","tgt_batch","*","tgt_len",",","tgt_dim",")","h_t_","=","self",".","linear_in","(","h_t_",")","h_t","=","h_t_",".","view","(","tgt_batch",",","tgt_len",",","tgt_dim",")","h_s_","=","h_s",".","transpose","(","1",",","2",")","# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)","return","torch",".","bmm","(","h_t",",","h_s_",")","else",":","dim","=","self",".","dim","wq","=","self",".","linear_query","(","h_t",".","view","(","-","1",",","dim",")",")","wq","=","wq",".","view","(","tgt_batch",",","tgt_len",",","1",",","dim",")","wq","=","wq",".","expand","(","tgt_batch",",","tgt_len",",","src_len",",","dim",")","uh","=","self",".","linear_context","(","h_s",".","contiguous","(",")",".","view","(","-","1",",","dim",")",")","uh","=","uh",".","view","(","src_batch",",","1",",","src_len",",","dim",")","uh","=","uh",".","expand","(","src_batch",",","tgt_len",",","src_len",",","dim",")","# (batch, t_len, s_len, d)","wquh","=","torch",".","tanh","(","wq","+","uh",")","return","self",".","v","(","wquh",".","view","(","-","1",",","dim",")",")",".","view","(","tgt_batch",",","tgt_len",",","src_len",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/global_attention.py#L95-L136"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/global_attention.py","language":"python","identifier":"GlobalAttention.forward","parameters":"(self, source, memory_bank, memory_lengths=None, coverage=None)","argument_list":"","return_statement":"return attn_h, align_vectors","docstring":"Args:\n source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`\n memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`\n memory_lengths (`LongTensor`): the source context lengths `[batch]`\n coverage (`FloatTensor`): None (not supported yet)\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * Computed vector `[tgt_len x batch x dim]`\n * Attention distributions for each query\n `[tgt_len x batch x src_len]`","docstring_summary":"","docstring_tokens":[],"function":"def forward(self, source, memory_bank, memory_lengths=None, coverage=None):\n \"\"\"\n\n Args:\n source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`\n memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`\n memory_lengths (`LongTensor`): the source context lengths `[batch]`\n coverage (`FloatTensor`): None (not supported yet)\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * Computed vector `[tgt_len x batch x dim]`\n * Attention distributions for each query\n `[tgt_len x batch x src_len]`\n \"\"\"\n\n # one step input\n if source.dim() == 2:\n one_step = True\n source = source.unsqueeze(1)\n else:\n one_step = False\n\n batch, source_l, dim = memory_bank.size()\n batch_, target_l, dim_ = source.size()\n aeq(batch, batch_)\n aeq(dim, dim_)\n aeq(self.dim, dim)\n if coverage is not None:\n batch_, source_l_ = 
coverage.size()\n aeq(batch, batch_)\n aeq(source_l, source_l_)\n\n if coverage is not None:\n cover = coverage.view(-1).unsqueeze(1)\n memory_bank += self.linear_cover(cover).view_as(memory_bank)\n memory_bank = torch.tanh(memory_bank)\n\n # compute attention scores, as in Luong et al.\n align = self.score(source, memory_bank)\n\n if memory_lengths is not None:\n mask = sequence_mask(memory_lengths, max_len=align.size(-1))\n mask = mask.unsqueeze(1) # Make it broadcastable.\n align.masked_fill_(1 - mask, -float('inf'))\n\n # Softmax or sparsemax to normalize attention weights\n if self.attn_func == \"softmax\":\n align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)\n else:\n align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)\n align_vectors = align_vectors.view(batch, target_l, source_l)\n\n # each context vector c_t is the weighted average\n # over all the source hidden states\n c = torch.bmm(align_vectors, memory_bank)\n\n # concatenate\n concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)\n attn_h = self.linear_out(concat_c).view(batch, target_l, dim)\n if self.attn_type in [\"general\", \"dot\"]:\n attn_h = torch.tanh(attn_h)\n\n if one_step:\n attn_h = attn_h.squeeze(1)\n align_vectors = align_vectors.squeeze(1)\n\n # Check output sizes\n batch_, dim_ = attn_h.size()\n aeq(batch, batch_)\n aeq(dim, dim_)\n batch_, source_l_ = align_vectors.size()\n aeq(batch, batch_)\n aeq(source_l, source_l_)\n\n else:\n attn_h = attn_h.transpose(0, 1).contiguous()\n align_vectors = align_vectors.transpose(0, 1).contiguous()\n # Check output sizes\n target_l_, batch_, dim_ = attn_h.size()\n aeq(target_l, target_l_)\n aeq(batch, batch_)\n aeq(dim, dim_)\n target_l_, batch_, source_l_ = align_vectors.size()\n aeq(target_l, target_l_)\n aeq(batch, batch_)\n aeq(source_l, source_l_)\n\n return attn_h, align_vectors","function_tokens":["def","forward","(","self",",","source",",","memory_bank",",","memory_lengths","=","None",",","coverage","=","None",")",":","# one step input","if","source",".","dim","(",")","==","2",":","one_step","=","True","source","=","source",".","unsqueeze","(","1",")","else",":","one_step","=","False","batch",",","source_l",",","dim","=","memory_bank",".","size","(",")","batch_",",","target_l",",","dim_","=","source",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","dim",",","dim_",")","aeq","(","self",".","dim",",","dim",")","if","coverage","is","not","None",":","batch_",",","source_l_","=","coverage",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","source_l",",","source_l_",")","if","coverage","is","not","None",":","cover","=","coverage",".","view","(","-","1",")",".","unsqueeze","(","1",")","memory_bank","+=","self",".","linear_cover","(","cover",")",".","view_as","(","memory_bank",")","memory_bank","=","torch",".","tanh","(","memory_bank",")","# compute attention scores, as in Luong et al.","align","=","self",".","score","(","source",",","memory_bank",")","if","memory_lengths","is","not","None",":","mask","=","sequence_mask","(","memory_lengths",",","max_len","=","align",".","size","(","-","1",")",")","mask","=","mask",".","unsqueeze","(","1",")","# Make it broadcastable.","align",".","masked_fill_","(","1","-","mask",",","-","float","(","'inf'",")",")","# Softmax or sparsemax to normalize attention 
weights","if","self",".","attn_func","==","\"softmax\"",":","align_vectors","=","F",".","softmax","(","align",".","view","(","batch","*","target_l",",","source_l",")",",","-","1",")","else",":","align_vectors","=","sparsemax","(","align",".","view","(","batch","*","target_l",",","source_l",")",",","-","1",")","align_vectors","=","align_vectors",".","view","(","batch",",","target_l",",","source_l",")","# each context vector c_t is the weighted average","# over all the source hidden states","c","=","torch",".","bmm","(","align_vectors",",","memory_bank",")","# concatenate","concat_c","=","torch",".","cat","(","[","c",",","source","]",",","2",")",".","view","(","batch","*","target_l",",","dim","*","2",")","attn_h","=","self",".","linear_out","(","concat_c",")",".","view","(","batch",",","target_l",",","dim",")","if","self",".","attn_type","in","[","\"general\"",",","\"dot\"","]",":","attn_h","=","torch",".","tanh","(","attn_h",")","if","one_step",":","attn_h","=","attn_h",".","squeeze","(","1",")","align_vectors","=","align_vectors",".","squeeze","(","1",")","# Check output sizes","batch_",",","dim_","=","attn_h",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","dim",",","dim_",")","batch_",",","source_l_","=","align_vectors",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","source_l",",","source_l_",")","else",":","attn_h","=","attn_h",".","transpose","(","0",",","1",")",".","contiguous","(",")","align_vectors","=","align_vectors",".","transpose","(","0",",","1",")",".","contiguous","(",")","# Check output sizes","target_l_",",","batch_",",","dim_","=","attn_h",".","size","(",")","aeq","(","target_l",",","target_l_",")","aeq","(","batch",",","batch_",")","aeq","(","dim",",","dim_",")","target_l_",",","batch_",",","source_l_","=","align_vectors",".","size","(",")","aeq","(","target_l",",","target_l_",")","aeq","(","batch",",","batch_",")","aeq","(","source_l",",","source_l_",")","return","attn_h",",","align_vectors"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/global_attention.py#L138-L227"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/gate.py","language":"python","identifier":"context_gate_factory","parameters":"(gate_type, embeddings_size, decoder_size,\n attention_size, output_size)","argument_list":"","return_statement":"return gate_types[gate_type](embeddings_size, decoder_size, attention_size,\n output_size)","docstring":"Returns the correct ContextGate class","docstring_summary":"Returns the correct ContextGate class","docstring_tokens":["Returns","the","correct","ContextGate","class"],"function":"def context_gate_factory(gate_type, embeddings_size, decoder_size,\n attention_size, output_size):\n \"\"\"Returns the correct ContextGate class\"\"\"\n\n gate_types = {'source': SourceContextGate,\n 'target': TargetContextGate,\n 'both': BothContextGate}\n\n assert gate_type in gate_types, \"Not valid ContextGate type: {0}\".format(\n gate_type)\n return gate_types[gate_type](embeddings_size, decoder_size, attention_size,\n output_size)","function_tokens":["def","context_gate_factory","(","gate_type",",","embeddings_size",",","decoder_size",",","attention_size",",","output_size",")",":","gate_types","=","{","'source'",":","SourceContextGate",",","'target'",":","TargetContextGate",",","'both'",":","BothContextGate","}","assert","gate_type","in","gate_types",",","\"Not valid 
ContextGate type: {0}\"",".","format","(","gate_type",")","return","gate_types","[","gate_type","]","(","embeddings_size",",","decoder_size",",","attention_size",",","output_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/gate.py#L6-L17"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/average_attn.py","language":"python","identifier":"AverageAttention.cumulative_average_mask","parameters":"(self, batch_size, inputs_len)","argument_list":"","return_statement":"return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)","docstring":"Builds the mask to compute the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Figure 3\n\n Args:\n batch_size (int): batch size\n inputs_len (int): length of the inputs\n\n Returns:\n (`FloatTensor`):\n\n * A Tensor of shape `[batch_size x input_len x input_len]`","docstring_summary":"Builds the mask to compute the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Figure 3","docstring_tokens":["Builds","the","mask","to","compute","the","cumulative","average","as","described","in","https",":","\/\/","arxiv",".","org","\/","abs","\/","1805",".","00631","--","Figure","3"],"function":"def cumulative_average_mask(self, batch_size, inputs_len):\n \"\"\"\n Builds the mask to compute the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Figure 3\n\n Args:\n batch_size (int): batch size\n inputs_len (int): length of the inputs\n\n Returns:\n (`FloatTensor`):\n\n * A Tensor of shape `[batch_size x input_len x input_len]`\n \"\"\"\n\n triangle = torch.tril(torch.ones(inputs_len, inputs_len))\n weights = torch.ones(1, inputs_len) \/ torch.arange(\n 1, inputs_len + 1, dtype=torch.float)\n mask = triangle * weights.transpose(0, 1)\n\n return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)","function_tokens":["def","cumulative_average_mask","(","self",",","batch_size",",","inputs_len",")",":","triangle","=","torch",".","tril","(","torch",".","ones","(","inputs_len",",","inputs_len",")",")","weights","=","torch",".","ones","(","1",",","inputs_len",")","\/","torch",".","arange","(","1",",","inputs_len","+","1",",","dtype","=","torch",".","float",")","mask","=","triangle","*","weights",".","transpose","(","0",",","1",")","return","mask",".","unsqueeze","(","0",")",".","expand","(","batch_size",",","inputs_len",",","inputs_len",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/average_attn.py#L31-L51"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/average_attn.py","language":"python","identifier":"AverageAttention.cumulative_average","parameters":"(self, inputs, mask_or_step,\n layer_cache=None, step=None)","argument_list":"","return_statement":"","docstring":"Computes the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Equations (1) (5) (6)\n\n Args:\n inputs (`FloatTensor`): sequence to average\n `[batch_size x input_len x dimension]`\n mask_or_step: if cache is set, this is assumed\n to be the current step of the\n dynamic decoding. 
Otherwise, it is the mask matrix\n used to compute the cumulative average.\n cache: a dictionary containing the cumulative average\n of the previous step.","docstring_summary":"Computes the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Equations (1) (5) (6)","docstring_tokens":["Computes","the","cumulative","average","as","described","in","https",":","\/\/","arxiv",".","org","\/","abs","\/","1805",".","00631","--","Equations","(","1",")","(","5",")","(","6",")"],"function":"def cumulative_average(self, inputs, mask_or_step,\n layer_cache=None, step=None):\n \"\"\"\n Computes the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Equations (1) (5) (6)\n\n Args:\n inputs (`FloatTensor`): sequence to average\n `[batch_size x input_len x dimension]`\n mask_or_step: if cache is set, this is assumed\n to be the current step of the\n dynamic decoding. Otherwise, it is the mask matrix\n used to compute the cumulative average.\n cache: a dictionary containing the cumulative average\n of the previous step.\n \"\"\"\n if layer_cache is not None:\n step = mask_or_step\n device = inputs.device\n average_attention = (inputs + step *\n layer_cache[\"prev_g\"].to(device)) \/ (step + 1)\n layer_cache[\"prev_g\"] = average_attention\n return average_attention\n else:\n mask = mask_or_step\n return torch.matmul(mask, inputs)","function_tokens":["def","cumulative_average","(","self",",","inputs",",","mask_or_step",",","layer_cache","=","None",",","step","=","None",")",":","if","layer_cache","is","not","None",":","step","=","mask_or_step","device","=","inputs",".","device","average_attention","=","(","inputs","+","step","*","layer_cache","[","\"prev_g\"","]",".","to","(","device",")",")","\/","(","step","+","1",")","layer_cache","[","\"prev_g\"","]","=","average_attention","return","average_attention","else",":","mask","=","mask_or_step","return","torch",".","matmul","(","mask",",","inputs",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/average_attn.py#L53-L78"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/average_attn.py","language":"python","identifier":"AverageAttention.forward","parameters":"(self, inputs, mask=None, layer_cache=None, step=None)","argument_list":"","return_statement":"return gating_outputs, average_outputs","docstring":"Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * gating_outputs `[batch_size x 1 x model_dim]`\n * average_outputs average attention `[batch_size x 1 x model_dim]`","docstring_summary":"Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`","docstring_tokens":["Args",":","inputs","(","FloatTensor",")",":","[","batch_size","x","input_len","x","model_dim","]"],"function":"def forward(self, inputs, mask=None, layer_cache=None, step=None):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * gating_outputs `[batch_size x 1 x model_dim]`\n * average_outputs average attention `[batch_size x 1 x model_dim]`\n \"\"\"\n batch_size = inputs.size(0)\n inputs_len = inputs.size(1)\n\n device = inputs.device\n average_outputs = self.cumulative_average(\n inputs, self.cumulative_average_mask(batch_size,\n inputs_len).to(device).float()\n if layer_cache is None else step, 
layer_cache=layer_cache)\n average_outputs = self.average_layer(average_outputs)\n gating_outputs = self.gating_layer(torch.cat((inputs,\n average_outputs), -1))\n input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)\n gating_outputs = torch.sigmoid(input_gate) * inputs + \\\n torch.sigmoid(forget_gate) * average_outputs\n\n return gating_outputs, average_outputs","function_tokens":["def","forward","(","self",",","inputs",",","mask","=","None",",","layer_cache","=","None",",","step","=","None",")",":","batch_size","=","inputs",".","size","(","0",")","inputs_len","=","inputs",".","size","(","1",")","device","=","inputs",".","device","average_outputs","=","self",".","cumulative_average","(","inputs",",","self",".","cumulative_average_mask","(","batch_size",",","inputs_len",")",".","to","(","device",")",".","float","(",")","if","layer_cache","is","None","else","step",",","layer_cache","=","layer_cache",")","average_outputs","=","self",".","average_layer","(","average_outputs",")","gating_outputs","=","self",".","gating_layer","(","torch",".","cat","(","(","inputs",",","average_outputs",")",",","-","1",")",")","input_gate",",","forget_gate","=","torch",".","chunk","(","gating_outputs",",","2",",","dim","=","2",")","gating_outputs","=","torch",".","sigmoid","(","input_gate",")","*","inputs","+","torch",".","sigmoid","(","forget_gate",")","*","average_outputs","return","gating_outputs",",","average_outputs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/average_attn.py#L80-L106"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/copy_generator.py","language":"python","identifier":"CopyGenerator.forward","parameters":"(self, hidden, attn, src_map)","argument_list":"","return_statement":"return torch.cat([out_prob, copy_prob], 1)","docstring":"Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by copying\n source words.\n\n Args:\n hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`\n attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`\n src_map (`FloatTensor`):\n A sparse indicator matrix mapping each source word to\n its index in the \"extended\" vocab containing.\n `[src_len, batch, extra_words]`","docstring_summary":"Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by copying\n source words.","docstring_tokens":["Compute","a","distribution","over","the","target","dictionary","extended","by","the","dynamic","dictionary","implied","by","copying","source","words","."],"function":"def forward(self, hidden, attn, src_map):\n \"\"\"\n Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by copying\n source words.\n\n Args:\n hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`\n attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`\n src_map (`FloatTensor`):\n A sparse indicator matrix mapping each source word to\n its index in the \"extended\" vocab containing.\n `[src_len, batch, extra_words]`\n \"\"\"\n # CHECKS\n batch_by_tlen, _ = hidden.size()\n batch_by_tlen_, slen = attn.size()\n slen_, batch, cvocab = src_map.size()\n aeq(batch_by_tlen, batch_by_tlen_)\n aeq(slen, slen_)\n\n # Original probabilities.\n logits = self.linear(hidden)\n logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')\n prob = 
self.softmax(logits)\n\n # Probability of copying p(z=1) batch.\n p_copy = self.sigmoid(self.linear_copy(hidden))\n # Probability of not copying: p_{word}(w) * (1 - p(z))\n out_prob = torch.mul(prob, 1 - p_copy.expand_as(prob))\n mul_attn = torch.mul(attn, p_copy.expand_as(attn))\n copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)\n .transpose(0, 1),\n src_map.transpose(0, 1)).transpose(0, 1)\n copy_prob = copy_prob.contiguous().view(-1, cvocab)\n return torch.cat([out_prob, copy_prob], 1)","function_tokens":["def","forward","(","self",",","hidden",",","attn",",","src_map",")",":","# CHECKS","batch_by_tlen",",","_","=","hidden",".","size","(",")","batch_by_tlen_",",","slen","=","attn",".","size","(",")","slen_",",","batch",",","cvocab","=","src_map",".","size","(",")","aeq","(","batch_by_tlen",",","batch_by_tlen_",")","aeq","(","slen",",","slen_",")","# Original probabilities.","logits","=","self",".","linear","(","hidden",")","logits","[",":",",","self",".","tgt_dict",".","stoi","[","inputters",".","PAD_WORD","]","]","=","-","float","(","'inf'",")","prob","=","self",".","softmax","(","logits",")","# Probability of copying p(z=1) batch.","p_copy","=","self",".","sigmoid","(","self",".","linear_copy","(","hidden",")",")","# Probability of not copying: p_{word}(w) * (1 - p(z))","out_prob","=","torch",".","mul","(","prob",",","1","-","p_copy",".","expand_as","(","prob",")",")","mul_attn","=","torch",".","mul","(","attn",",","p_copy",".","expand_as","(","attn",")",")","copy_prob","=","torch",".","bmm","(","mul_attn",".","view","(","-","1",",","batch",",","slen",")",".","transpose","(","0",",","1",")",",","src_map",".","transpose","(","0",",","1",")",")",".","transpose","(","0",",","1",")","copy_prob","=","copy_prob",".","contiguous","(",")",".","view","(","-","1",",","cvocab",")","return","torch",".","cat","(","[","out_prob",",","copy_prob","]",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/copy_generator.py#L71-L106"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/copy_generator.py","language":"python","identifier":"CopyGeneratorLossCompute._make_shard_state","parameters":"(self, batch, output, range_, attns)","argument_list":"","return_statement":"return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n \"copy_attn\": attns.get(\"copy\"),\n \"align\": batch.alignment[range_[0] + 1: range_[1]]\n }","docstring":"See base class for args description.","docstring_summary":"See base class for args description.","docstring_tokens":["See","base","class","for","args","description","."],"function":"def _make_shard_state(self, batch, output, range_, attns):\n \"\"\" See base class for args description. 
\"\"\"\n if getattr(batch, \"alignment\", None) is None:\n raise AssertionError(\"using -copy_attn you need to pass in \"\n \"-dynamic_dict during preprocess stage.\")\n\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n \"copy_attn\": attns.get(\"copy\"),\n \"align\": batch.alignment[range_[0] + 1: range_[1]]\n }","function_tokens":["def","_make_shard_state","(","self",",","batch",",","output",",","range_",",","attns",")",":","if","getattr","(","batch",",","\"alignment\"",",","None",")","is","None",":","raise","AssertionError","(","\"using -copy_attn you need to pass in \"","\"-dynamic_dict during preprocess stage.\"",")","return","{","\"output\"",":","output",",","\"target\"",":","batch",".","tgt","[","range_","[","0","]","+","1",":","range_","[","1","]","]",",","\"copy_attn\"",":","attns",".","get","(","\"copy\"",")",",","\"align\"",":","batch",".","alignment","[","range_","[","0","]","+","1",":","range_","[","1","]","]","}"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/copy_generator.py#L163-L174"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/copy_generator.py","language":"python","identifier":"CopyGeneratorLossCompute._compute_loss","parameters":"(self, batch, output, target, copy_attn, align)","argument_list":"","return_statement":"return loss, stats","docstring":"Compute the loss. The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.","docstring_summary":"Compute the loss. The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.","docstring_tokens":["Compute","the","loss",".","The","args","must","match","self",".","_make_shard_state","()",".","Args",":","batch",":","the","current","batch",".","output",":","the","predict","output","from","the","model",".","target",":","the","validate","target","to","compare","output","with",".","copy_attn",":","the","copy","attention","value",".","align",":","the","align","info","."],"function":"def _compute_loss(self, batch, output, target, copy_attn, align):\n \"\"\"\n Compute the loss. 
The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.\n \"\"\"\n target = target.view(-1)\n align = align.view(-1)\n scores = self.generator(self._bottle(output),\n self._bottle(copy_attn),\n batch.src_map)\n loss = self.criterion(scores, align, target)\n scores_data = scores.data.clone()\n scores_data = inputters.TextDataset.collapse_copy_scores(\n self._unbottle(scores_data, batch.batch_size),\n batch, self.tgt_vocab, batch.dataset.src_vocabs)\n scores_data = self._bottle(scores_data)\n\n # Correct target copy token instead of \n # tgt[i] = align[i] + len(tgt_vocab)\n # for i such that tgt[i] == 0 and align[i] != 0\n target_data = target.data.clone()\n correct_mask = target_data.eq(0) * align.data.ne(0)\n correct_copy = (align.data + len(self.tgt_vocab)) * correct_mask.long()\n target_data = target_data + correct_copy\n\n # Compute sum of perplexities for stats\n loss_data = loss.sum().data.clone()\n stats = self._stats(loss_data, scores_data, target_data)\n\n if self.normalize_by_length:\n # Compute Loss as NLL divided by seq length\n # Compute Sequence Lengths\n pad_ix = batch.dataset.fields['tgt'].vocab.stoi[inputters.PAD_WORD]\n tgt_lens = batch.tgt.ne(pad_ix).float().sum(0)\n # Compute Total Loss per sequence in batch\n loss = loss.view(-1, batch.batch_size).sum(0)\n # Divide by length of each sequence and sum\n loss = torch.div(loss, tgt_lens).sum()\n else:\n loss = loss.sum()\n\n return loss, stats","function_tokens":["def","_compute_loss","(","self",",","batch",",","output",",","target",",","copy_attn",",","align",")",":","target","=","target",".","view","(","-","1",")","align","=","align",".","view","(","-","1",")","scores","=","self",".","generator","(","self",".","_bottle","(","output",")",",","self",".","_bottle","(","copy_attn",")",",","batch",".","src_map",")","loss","=","self",".","criterion","(","scores",",","align",",","target",")","scores_data","=","scores",".","data",".","clone","(",")","scores_data","=","inputters",".","TextDataset",".","collapse_copy_scores","(","self",".","_unbottle","(","scores_data",",","batch",".","batch_size",")",",","batch",",","self",".","tgt_vocab",",","batch",".","dataset",".","src_vocabs",")","scores_data","=","self",".","_bottle","(","scores_data",")","# Correct target copy token instead of ","# tgt[i] = align[i] + len(tgt_vocab)","# for i such that tgt[i] == 0 and align[i] != 0","target_data","=","target",".","data",".","clone","(",")","correct_mask","=","target_data",".","eq","(","0",")","*","align",".","data",".","ne","(","0",")","correct_copy","=","(","align",".","data","+","len","(","self",".","tgt_vocab",")",")","*","correct_mask",".","long","(",")","target_data","=","target_data","+","correct_copy","# Compute sum of perplexities for stats","loss_data","=","loss",".","sum","(",")",".","data",".","clone","(",")","stats","=","self",".","_stats","(","loss_data",",","scores_data",",","target_data",")","if","self",".","normalize_by_length",":","# Compute Loss as NLL divided by seq length","# Compute Sequence Lengths","pad_ix","=","batch",".","dataset",".","fields","[","'tgt'","]",".","vocab",".","stoi","[","inputters",".","PAD_WORD","]","tgt_lens","=","batch",".","tgt",".","ne","(","pad_ix",")",".","float","(",")",".","sum","(","0",")","# Compute Total Loss per sequence in 
batch","loss","=","loss",".","view","(","-","1",",","batch",".","batch_size",")",".","sum","(","0",")","# Divide by length of each sequence and sum","loss","=","torch",".","div","(","loss",",","tgt_lens",")",".","sum","(",")","else",":","loss","=","loss",".","sum","(",")","return","loss",",","stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/copy_generator.py#L176-L222"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.word_lut","parameters":"(self)","argument_list":"","return_statement":"return self.make_embedding[0][0]","docstring":"word look-up table","docstring_summary":"word look-up table","docstring_tokens":["word","look","-","up","table"],"function":"def word_lut(self):\n \"\"\" word look-up table \"\"\"\n return self.make_embedding[0][0]","function_tokens":["def","word_lut","(","self",")",":","return","self",".","make_embedding","[","0","]","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py#L160-L162"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.emb_luts","parameters":"(self)","argument_list":"","return_statement":"return self.make_embedding[0]","docstring":"embedding look-up table","docstring_summary":"embedding look-up table","docstring_tokens":["embedding","look","-","up","table"],"function":"def emb_luts(self):\n \"\"\" embedding look-up table \"\"\"\n return self.make_embedding[0]","function_tokens":["def","emb_luts","(","self",")",":","return","self",".","make_embedding","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py#L165-L167"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.load_pretrained_vectors","parameters":"(self, emb_file, fixed)","argument_list":"","return_statement":"","docstring":"Load in pretrained embeddings.\n\n Args:\n emb_file (str) : path to torch serialized embeddings\n fixed (bool) : if true, embeddings are not updated","docstring_summary":"Load in pretrained embeddings.","docstring_tokens":["Load","in","pretrained","embeddings","."],"function":"def load_pretrained_vectors(self, emb_file, fixed):\n \"\"\"Load in pretrained embeddings.\n\n Args:\n emb_file (str) : path to torch serialized embeddings\n fixed (bool) : if true, embeddings are not updated\n \"\"\"\n if emb_file:\n pretrained = torch.load(emb_file)\n pretrained_vec_size = pretrained.size(1)\n if self.word_vec_size > pretrained_vec_size:\n self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained\n elif self.word_vec_size < pretrained_vec_size:\n self.word_lut.weight.data \\\n .copy_(pretrained[:, :self.word_vec_size])\n else:\n self.word_lut.weight.data.copy_(pretrained)\n if fixed:\n self.word_lut.weight.requires_grad = 
False","function_tokens":["def","load_pretrained_vectors","(","self",",","emb_file",",","fixed",")",":","if","emb_file",":","pretrained","=","torch",".","load","(","emb_file",")","pretrained_vec_size","=","pretrained",".","size","(","1",")","if","self",".","word_vec_size",">","pretrained_vec_size",":","self",".","word_lut",".","weight",".","data","[",":",",",":","pretrained_vec_size","]","=","pretrained","elif","self",".","word_vec_size","<","pretrained_vec_size",":","self",".","word_lut",".","weight",".","data",".","copy_","(","pretrained","[",":",",",":","self",".","word_vec_size","]",")","else",":","self",".","word_lut",".","weight",".","data",".","copy_","(","pretrained",")","if","fixed",":","self",".","word_lut",".","weight",".","requires_grad","=","False"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py#L169-L187"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.forward","parameters":"(self, source, step=None)","argument_list":"","return_statement":"return source","docstring":"Computes the embeddings for words and features.\n\n Args:\n source (`LongTensor`): index tensor `[len x batch x nfeat]`\n Return:\n `FloatTensor`: word embeddings `[len x batch x embedding_size]`","docstring_summary":"Computes the embeddings for words and features.","docstring_tokens":["Computes","the","embeddings","for","words","and","features","."],"function":"def forward(self, source, step=None):\n \"\"\"\n Computes the embeddings for words and features.\n\n Args:\n source (`LongTensor`): index tensor `[len x batch x nfeat]`\n Return:\n `FloatTensor`: word embeddings `[len x batch x embedding_size]`\n \"\"\"\n if self.position_encoding:\n for i, module in enumerate(self.make_embedding._modules.values()):\n if i == len(self.make_embedding._modules.values()) - 1:\n source = module(source, step=step)\n else:\n source = module(source)\n else:\n source = self.make_embedding(source)\n\n return source","function_tokens":["def","forward","(","self",",","source",",","step","=","None",")",":","if","self",".","position_encoding",":","for","i",",","module","in","enumerate","(","self",".","make_embedding",".","_modules",".","values","(",")",")",":","if","i","==","len","(","self",".","make_embedding",".","_modules",".","values","(",")",")","-","1",":","source","=","module","(","source",",","step","=","step",")","else",":","source","=","module","(","source",")","else",":","source","=","self",".","make_embedding","(","source",")","return","source"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/embeddings.py#L189-L207"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/sparse_losses.py","language":"python","identifier":"SparsemaxLossFunction.forward","parameters":"(ctx, input, target)","argument_list":"","return_statement":"return torch.clamp(x \/ 2 - z_k + 0.5, min=0.0)","docstring":"input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target classes","docstring_summary":"input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target 
classes","docstring_tokens":["input","(","FloatTensor",")",":","n","x","num_classes","target","(","LongTensor",")",":","n","the","indices","of","the","target","classes"],"function":"def forward(ctx, input, target):\n \"\"\"\n input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target classes\n \"\"\"\n input_batch, classes = input.size()\n target_batch = target.size(0)\n aeq(input_batch, target_batch)\n\n z_k = input.gather(1, target.unsqueeze(1)).squeeze()\n tau_z, support_size = threshold_and_support(input, dim=1)\n support = input > tau_z\n x = torch.where(\n support, input**2 - tau_z**2,\n torch.tensor(0.0, device=input.device)\n ).sum(dim=1)\n ctx.save_for_backward(input, target, tau_z)\n # clamping necessary because of numerical errors: loss should be lower\n # bounded by zero, but negative values near zero are possible without\n # the clamp\n return torch.clamp(x \/ 2 - z_k + 0.5, min=0.0)","function_tokens":["def","forward","(","ctx",",","input",",","target",")",":","input_batch",",","classes","=","input",".","size","(",")","target_batch","=","target",".","size","(","0",")","aeq","(","input_batch",",","target_batch",")","z_k","=","input",".","gather","(","1",",","target",".","unsqueeze","(","1",")",")",".","squeeze","(",")","tau_z",",","support_size","=","threshold_and_support","(","input",",","dim","=","1",")","support","=","input",">","tau_z","x","=","torch",".","where","(","support",",","input","**","2","-","tau_z","**","2",",","torch",".","tensor","(","0.0",",","device","=","input",".","device",")",")",".","sum","(","dim","=","1",")","ctx",".","save_for_backward","(","input",",","target",",","tau_z",")","# clamping necessary because of numerical errors: loss should be lower","# bounded by zero, but negative values near zero are possible without","# the clamp","return","torch",".","clamp","(","x","\/","2","-","z_k","+","0.5",",","min","=","0.0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/sparse_losses.py#L11-L31"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/conv_multi_step_attention.py","language":"python","identifier":"seq_linear","parameters":"(linear, x)","argument_list":"","return_statement":"return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)","docstring":"linear transform for 3-d tensor","docstring_summary":"linear transform for 3-d tensor","docstring_tokens":["linear","transform","for","3","-","d","tensor"],"function":"def seq_linear(linear, x):\n \"\"\" linear transform for 3-d tensor \"\"\"\n batch, hidden_size, length, _ = x.size()\n h = linear(torch.transpose(x, 1, 2).contiguous().view(\n batch * length, hidden_size))\n return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)","function_tokens":["def","seq_linear","(","linear",",","x",")",":","batch",",","hidden_size",",","length",",","_","=","x",".","size","(",")","h","=","linear","(","torch",".","transpose","(","x",",","1",",","2",")",".","contiguous","(",")",".","view","(","batch","*","length",",","hidden_size",")",")","return","torch",".","transpose","(","h",".","view","(","batch",",","length",",","hidden_size",",","1",")",",","1",",","2",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/conv_multi_step_attention.py#L11-L16"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/conv_multi_step_attention.py","language":"python","identifier":"ConvMultiStepAttention.apply_mask","parameters":"(self, mask)","argument_list":"","return_statement":"","docstring":"Apply mask","docstring_summary":"Apply mask","docstring_tokens":["Apply","mask"],"function":"def apply_mask(self, mask):\n \"\"\" Apply mask \"\"\"\n self.mask = mask","function_tokens":["def","apply_mask","(","self",",","mask",")",":","self",".","mask","=","mask"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/conv_multi_step_attention.py#L34-L36"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/conv_multi_step_attention.py","language":"python","identifier":"ConvMultiStepAttention.forward","parameters":"(self, base_target_emb, input_from_dec, encoder_out_top,\n encoder_out_combine)","argument_list":"","return_statement":"return context_output, attn","docstring":"Args:\n base_target_emb: target emb tensor\n input: output of decode conv\n encoder_out_t: the key matrix for calculation of attetion weight,\n which is the top output of encode conv\n encoder_out_combine:\n the value matrix for the attention-weighted sum,\n which is the combination of base emb and top output of encode","docstring_summary":"Args:\n base_target_emb: target emb tensor\n input: output of decode conv\n encoder_out_t: the key matrix for calculation of attetion weight,\n which is the top output of encode conv\n encoder_out_combine:\n the value matrix for the attention-weighted sum,\n which is the combination of base emb and top output of encode","docstring_tokens":["Args",":","base_target_emb",":","target","emb","tensor","input",":","output","of","decode","conv","encoder_out_t",":","the","key","matrix","for","calculation","of","attetion","weight","which","is","the","top","output","of","encode","conv","encoder_out_combine",":","the","value","matrix","for","the","attention","-","weighted","sum","which","is","the","combination","of","base","emb","and","top","output","of","encode"],"function":"def forward(self, base_target_emb, input_from_dec, encoder_out_top,\n encoder_out_combine):\n \"\"\"\n Args:\n base_target_emb: target emb tensor\n input: output of decode conv\n encoder_out_t: the key matrix for calculation of attetion weight,\n which is the top output of encode conv\n encoder_out_combine:\n the value matrix for the attention-weighted sum,\n which is the combination of base emb and top output of encode\n\n \"\"\"\n # checks\n # batch, channel, height, width = base_target_emb.size()\n batch, _, height, _ = base_target_emb.size()\n # batch_, channel_, height_, width_ = input_from_dec.size()\n batch_, _, height_, _ = input_from_dec.size()\n aeq(batch, batch_)\n aeq(height, height_)\n\n # enc_batch, enc_channel, enc_height = encoder_out_top.size()\n enc_batch, _, enc_height = encoder_out_top.size()\n # enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()\n enc_batch_, _, enc_height_ = encoder_out_combine.size()\n\n aeq(enc_batch, enc_batch_)\n aeq(enc_height, enc_height_)\n\n preatt = seq_linear(self.linear_in, input_from_dec)\n target = (base_target_emb + preatt) * SCALE_WEIGHT\n target = torch.squeeze(target, 3)\n target = torch.transpose(target, 1, 2)\n pre_attn = torch.bmm(target, encoder_out_top)\n\n if self.mask 
is not None:\n pre_attn.data.masked_fill_(self.mask, -float('inf'))\n\n pre_attn = pre_attn.transpose(0, 2)\n attn = F.softmax(pre_attn, dim=-1)\n attn = attn.transpose(0, 2).contiguous()\n context_output = torch.bmm(\n attn, torch.transpose(encoder_out_combine, 1, 2))\n context_output = torch.transpose(\n torch.unsqueeze(context_output, 3), 1, 2)\n return context_output, attn","function_tokens":["def","forward","(","self",",","base_target_emb",",","input_from_dec",",","encoder_out_top",",","encoder_out_combine",")",":","# checks","# batch, channel, height, width = base_target_emb.size()","batch",",","_",",","height",",","_","=","base_target_emb",".","size","(",")","# batch_, channel_, height_, width_ = input_from_dec.size()","batch_",",","_",",","height_",",","_","=","input_from_dec",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","height",",","height_",")","# enc_batch, enc_channel, enc_height = encoder_out_top.size()","enc_batch",",","_",",","enc_height","=","encoder_out_top",".","size","(",")","# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()","enc_batch_",",","_",",","enc_height_","=","encoder_out_combine",".","size","(",")","aeq","(","enc_batch",",","enc_batch_",")","aeq","(","enc_height",",","enc_height_",")","preatt","=","seq_linear","(","self",".","linear_in",",","input_from_dec",")","target","=","(","base_target_emb","+","preatt",")","*","SCALE_WEIGHT","target","=","torch",".","squeeze","(","target",",","3",")","target","=","torch",".","transpose","(","target",",","1",",","2",")","pre_attn","=","torch",".","bmm","(","target",",","encoder_out_top",")","if","self",".","mask","is","not","None",":","pre_attn",".","data",".","masked_fill_","(","self",".","mask",",","-","float","(","'inf'",")",")","pre_attn","=","pre_attn",".","transpose","(","0",",","2",")","attn","=","F",".","softmax","(","pre_attn",",","dim","=","-","1",")","attn","=","attn",".","transpose","(","0",",","2",")",".","contiguous","(",")","context_output","=","torch",".","bmm","(","attn",",","torch",".","transpose","(","encoder_out_combine",",","1",",","2",")",")","context_output","=","torch",".","transpose","(","torch",".","unsqueeze","(","context_output",",","3",")",",","1",",","2",")","return","context_output",",","attn"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/conv_multi_step_attention.py#L38-L83"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/weight_norm.py","language":"python","identifier":"get_var_maybe_avg","parameters":"(namespace, var_name, training, polyak_decay)","argument_list":"","return_statement":"","docstring":"utility for retrieving polyak averaged params\n Update average","docstring_summary":"utility for retrieving polyak averaged params\n Update average","docstring_tokens":["utility","for","retrieving","polyak","averaged","params","Update","average"],"function":"def get_var_maybe_avg(namespace, var_name, training, polyak_decay):\n \"\"\" utility for retrieving polyak averaged params\n Update average\n \"\"\"\n v = getattr(namespace, var_name)\n v_avg = getattr(namespace, var_name + '_avg')\n v_avg -= (1 - polyak_decay) * (v_avg - v.data)\n\n if training:\n return v\n else:\n return 
v_avg","function_tokens":["def","get_var_maybe_avg","(","namespace",",","var_name",",","training",",","polyak_decay",")",":","v","=","getattr","(","namespace",",","var_name",")","v_avg","=","getattr","(","namespace",",","var_name","+","'_avg'",")","v_avg","-=","(","1","-","polyak_decay",")","*","(","v_avg","-","v",".","data",")","if","training",":","return","v","else",":","return","v_avg"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/weight_norm.py#L8-L19"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/modules\/weight_norm.py","language":"python","identifier":"get_vars_maybe_avg","parameters":"(namespace, var_names, training, polyak_decay)","argument_list":"","return_statement":"return vars","docstring":"utility for retrieving polyak averaged params","docstring_summary":"utility for retrieving polyak averaged params","docstring_tokens":["utility","for","retrieving","polyak","averaged","params"],"function":"def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):\n \"\"\" utility for retrieving polyak averaged params \"\"\"\n vars = []\n for vn in var_names:\n vars.append(get_var_maybe_avg(\n namespace, vn, training, polyak_decay))\n return vars","function_tokens":["def","get_vars_maybe_avg","(","namespace",",","var_names",",","training",",","polyak_decay",")",":","vars","=","[","]","for","vn","in","var_names",":","vars",".","append","(","get_var_maybe_avg","(","namespace",",","vn",",","training",",","polyak_decay",")",")","return","vars"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/modules\/weight_norm.py#L22-L28"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase.__reduce_ex__","parameters":"(self, proto)","argument_list":"","return_statement":"return super(DatasetBase, self).__reduce_ex__()","docstring":"This is a hack. Something is broken with torch pickle.","docstring_summary":"This is a hack. Something is broken with torch pickle.","docstring_tokens":["This","is","a","hack",".","Something","is","broken","with","torch","pickle","."],"function":"def __reduce_ex__(self, proto):\n \"This is a hack. 
Something is broken with torch pickle.\"\n return super(DatasetBase, self).__reduce_ex__()","function_tokens":["def","__reduce_ex__","(","self",",","proto",")",":","return","super","(","DatasetBase",",","self",")",".","__reduce_ex__","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py#L37-L39"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase.load_fields","parameters":"(self, vocab_dict)","argument_list":"","return_statement":"","docstring":"Load fields from vocab.pt, and set the `fields` attribute.\n\n Args:\n vocab_dict (dict): a dict of loaded vocab from vocab.pt file.","docstring_summary":"Load fields from vocab.pt, and set the `fields` attribute.","docstring_tokens":["Load","fields","from","vocab",".","pt","and","set","the","fields","attribute","."],"function":"def load_fields(self, vocab_dict):\n \"\"\" Load fields from vocab.pt, and set the `fields` attribute.\n\n Args:\n vocab_dict (dict): a dict of loaded vocab from vocab.pt file.\n \"\"\"\n fields = onmt.inputters.inputter.load_fields_from_vocab(\n vocab_dict.items(), self.data_type)\n self.fields = dict([(k, f) for (k, f) in fields.items()\n if k in self.examples[0].__dict__])","function_tokens":["def","load_fields","(","self",",","vocab_dict",")",":","fields","=","onmt",".","inputters",".","inputter",".","load_fields_from_vocab","(","vocab_dict",".","items","(",")",",","self",".","data_type",")","self",".","fields","=","dict","(","[","(","k",",","f",")","for","(","k",",","f",")","in","fields",".","items","(",")","if","k","in","self",".","examples","[","0","]",".","__dict__","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py#L41-L50"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase.extract_text_features","parameters":"(tokens)","argument_list":"","return_statement":"return tuple(words), features, n_feats - 1","docstring":"Args:\n tokens: A list of tokens, where each token consists of a word,\n optionally followed by u\"\uffe8\"-delimited features.\n Returns:\n A sequence of words, a sequence of features, and num of features.","docstring_summary":"Args:\n tokens: A list of tokens, where each token consists of a word,\n optionally followed by u\"\uffe8\"-delimited features.\n Returns:\n A sequence of words, a sequence of features, and num of features.","docstring_tokens":["Args",":","tokens",":","A","list","of","tokens","where","each","token","consists","of","a","word","optionally","followed","by","u","\uffe8","-","delimited","features",".","Returns",":","A","sequence","of","words","a","sequence","of","features","and","num","of","features","."],"function":"def extract_text_features(tokens):\n \"\"\"\n Args:\n tokens: A list of tokens, where each token consists of a word,\n optionally followed by u\"\uffe8\"-delimited features.\n Returns:\n A sequence of words, a sequence of features, and num of features.\n \"\"\"\n if not tokens:\n return [], [], -1\n\n specials = [PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD]\n words = []\n features = []\n n_feats = None\n for token in tokens:\n split_token = 
token.split(u\"\uffe8\")\n assert all([special != split_token[0] for special in specials]), \\\n \"Dataset cannot contain Special Tokens\"\n\n if split_token[0]:\n words += [split_token[0]]\n features += [split_token[1:]]\n\n if n_feats is None:\n n_feats = len(split_token)\n else:\n assert len(split_token) == n_feats, \\\n \"all words must have the same number of features\"\n features = list(zip(*features))\n return tuple(words), features, n_feats - 1","function_tokens":["def","extract_text_features","(","tokens",")",":","if","not","tokens",":","return","[","]",",","[","]",",","-","1","specials","=","[","PAD_WORD",",","UNK_WORD",",","BOS_WORD",",","EOS_WORD","]","words","=","[","]","features","=","[","]","n_feats","=","None","for","token","in","tokens",":","split_token","=","token",".","split","(","u\"\uffe8\")","","assert","all","(","[","special","!=","split_token","[","0","]","for","special","in","specials","]",")",",","\"Dataset cannot contain Special Tokens\"","if","split_token","[","0","]",":","words","+=","[","split_token","[","0","]","]","features","+=","[","split_token","[","1",":","]","]","if","n_feats","is","None",":","n_feats","=","len","(","split_token",")","else",":","assert","len","(","split_token",")","==","n_feats",",","\"all words must have the same number of features\"","features","=","list","(","zip","(","*","features",")",")","return","tuple","(","words",")",",","features",",","n_feats","-","1"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py#L53-L83"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase._join_dicts","parameters":"(self, *args)","argument_list":"","return_statement":"return dict(chain(*[d.items() for d in args]))","docstring":"Args:\n dictionaries with disjoint keys.\n\n Returns:\n a single dictionary that has the union of these keys.","docstring_summary":"Args:\n dictionaries with disjoint keys.","docstring_tokens":["Args",":","dictionaries","with","disjoint","keys","."],"function":"def _join_dicts(self, *args):\n \"\"\"\n Args:\n dictionaries with disjoint keys.\n\n Returns:\n a single dictionary that has the union of these keys.\n \"\"\"\n return dict(chain(*[d.items() for d in args]))","function_tokens":["def","_join_dicts","(","self",",","*","args",")",":","return","dict","(","chain","(","*","[","d",".","items","(",")","for","d","in","args","]",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py#L87-L95"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase._peek","parameters":"(self, seq)","argument_list":"","return_statement":"return first, chain([first], seq)","docstring":"Args:\n seq: an iterator.\n\n Returns:\n the first thing returned by calling next() on the iterator\n and an iterator created by re-chaining that value to the beginning\n of the iterator.","docstring_summary":"Args:\n seq: an iterator.","docstring_tokens":["Args",":","seq",":","an","iterator","."],"function":"def _peek(self, seq):\n \"\"\"\n Args:\n seq: an iterator.\n\n Returns:\n the first thing returned by calling next() on the iterator\n and an iterator 
created by re-chaining that value to the beginning\n of the iterator.\n \"\"\"\n first = next(seq)\n return first, chain([first], seq)","function_tokens":["def","_peek","(","self",",","seq",")",":","first","=","next","(","seq",")","return","first",",","chain","(","[","first","]",",","seq",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py#L97-L108"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase._construct_example_fromlist","parameters":"(self, data, fields)","argument_list":"","return_statement":"return ex","docstring":"Args:\n data: the data to be set as the value of the attributes of\n the to-be-created `Example`, associating with respective\n `Field` objects with same key.\n fields: a dict of `torchtext.data.Field` objects. The keys\n are attributes of the to-be-created `Example`.\n\n Returns:\n the created `Example` object.","docstring_summary":"Args:\n data: the data to be set as the value of the attributes of\n the to-be-created `Example`, associating with respective\n `Field` objects with same key.\n fields: a dict of `torchtext.data.Field` objects. The keys\n are attributes of the to-be-created `Example`.","docstring_tokens":["Args",":","data",":","the","data","to","be","set","as","the","value","of","the","attributes","of","the","to","-","be","-","created","Example","associating","with","respective","Field","objects","with","same","key",".","fields",":","a","dict","of","torchtext",".","data",".","Field","objects",".","The","keys","are","attributes","of","the","to","-","be","-","created","Example","."],"function":"def _construct_example_fromlist(self, data, fields):\n \"\"\"\n Args:\n data: the data to be set as the value of the attributes of\n the to-be-created `Example`, associating with respective\n `Field` objects with same key.\n fields: a dict of `torchtext.data.Field` objects. 
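`DatasetBase._peek` is the standard trick for inspecting the head of a one-shot iterator without losing it: consume one element, then chain it back onto the front. A standalone sketch of the same pattern:

    from itertools import chain

    def peek(seq):
        # Pull the first element, then rebuild an iterator that yields it again.
        first = next(seq)
        return first, chain([first], seq)

    it = iter(range(3))
    first, it = peek(it)
    print(first, list(it))  # 0 [0, 1, 2]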
The keys\n are attributes of the to-be-created `Example`.\n\n Returns:\n the created `Example` object.\n \"\"\"\n ex = torchtext.data.Example()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val))\n else:\n setattr(ex, name, val)\n return ex","function_tokens":["def","_construct_example_fromlist","(","self",",","data",",","fields",")",":","ex","=","torchtext",".","data",".","Example","(",")","for","(","name",",","field",")",",","val","in","zip","(","fields",",","data",")",":","if","field","is","not","None",":","setattr","(","ex",",","name",",","field",".","preprocess","(","val",")",")","else",":","setattr","(","ex",",","name",",","val",")","return","ex"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/dataset_base.py#L110-L128"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.sort_key","parameters":"(self, ex)","argument_list":"","return_statement":"return len(ex.src)","docstring":"Sort using length of source sentences.","docstring_summary":"Sort using length of source sentences.","docstring_tokens":["Sort","using","length","of","source","sentences","."],"function":"def sort_key(self, ex):\n \"\"\" Sort using length of source sentences. \"\"\"\n # Default to a balanced sort, prioritizing tgt len match.\n # TODO: make this configurable.\n if hasattr(ex, \"tgt\"):\n return len(ex.src), len(ex.tgt)\n return len(ex.src)","function_tokens":["def","sort_key","(","self",",","ex",")",":","# Default to a balanced sort, prioritizing tgt len match.","# TODO: make this configurable.","if","hasattr","(","ex",",","\"tgt\"",")",":","return","len","(","ex",".","src",")",",","len","(","ex",".","tgt",")","return","len","(","ex",".","src",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L97-L103"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.collapse_copy_scores","parameters":"(scores, batch, tgt_vocab, src_vocabs)","argument_list":"","return_statement":"return scores","docstring":"Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.","docstring_summary":"Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.","docstring_tokens":["Given","scores","from","an","expanded","dictionary","corresponeding","to","a","batch","sums","together","copies","with","a","dictionary","word","when","it","is","ambigious","."],"function":"def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs):\n \"\"\"\n Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.\n \"\"\"\n offset = len(tgt_vocab)\n for b in range(batch.batch_size):\n blank = []\n fill = []\n index = batch.indices.data[b]\n src_vocab = src_vocabs[index]\n for i in range(1, len(src_vocab)):\n sw = src_vocab.itos[i]\n ti = tgt_vocab.stoi[sw]\n if ti != 0:\n blank.append(offset + i)\n fill.append(ti)\n if blank:\n blank = 
torch.Tensor(blank).type_as(batch.indices.data)\n fill = torch.Tensor(fill).type_as(batch.indices.data)\n scores[:, b].index_add_(1, fill,\n scores[:, b].index_select(1, blank))\n scores[:, b].index_fill_(1, blank, 1e-10)\n return scores","function_tokens":["def","collapse_copy_scores","(","scores",",","batch",",","tgt_vocab",",","src_vocabs",")",":","offset","=","len","(","tgt_vocab",")","for","b","in","range","(","batch",".","batch_size",")",":","blank","=","[","]","fill","=","[","]","index","=","batch",".","indices",".","data","[","b","]","src_vocab","=","src_vocabs","[","index","]","for","i","in","range","(","1",",","len","(","src_vocab",")",")",":","sw","=","src_vocab",".","itos","[","i","]","ti","=","tgt_vocab",".","stoi","[","sw","]","if","ti","!=","0",":","blank",".","append","(","offset","+","i",")","fill",".","append","(","ti",")","if","blank",":","blank","=","torch",".","Tensor","(","blank",")",".","type_as","(","batch",".","indices",".","data",")","fill","=","torch",".","Tensor","(","fill",")",".","type_as","(","batch",".","indices",".","data",")","scores","[",":",",","b","]",".","index_add_","(","1",",","fill",",","scores","[",":",",","b","]",".","index_select","(","1",",","blank",")",")","scores","[",":",",","b","]",".","index_fill_","(","1",",","blank",",","1e-10",")","return","scores"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L106-L130"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.make_text_examples_nfeats_tpl","parameters":"(text_iter, text_path, truncate, side)","argument_list":"","return_statement":"return (examples_iter, num_feats)","docstring":"Args:\n text_iter(iterator): an iterator (or None) that we can loop over\n to read examples.\n It may be an openned file, a string list etc...\n text_path(str): path to file or None\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Returns:\n (example_dict iterator, num_feats) tuple.","docstring_summary":"Args:\n text_iter(iterator): an iterator (or None) that we can loop over\n to read examples.\n It may be an openned file, a string list etc...\n text_path(str): path to file or None\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".","docstring_tokens":["Args",":","text_iter","(","iterator",")",":","an","iterator","(","or","None",")","that","we","can","loop","over","to","read","examples",".","It","may","be","an","openned","file","a","string","list","etc","...","text_path","(","str",")",":","path","to","file","or","None","path","(","str",")",":","location","of","a","src","or","tgt","file",".","truncate","(","int",")",":","maximum","sequence","length","(","0","for","unlimited",")",".","side","(","str",")",":","src","or","tgt","."],"function":"def make_text_examples_nfeats_tpl(text_iter, text_path, truncate, side):\n \"\"\"\n Args:\n text_iter(iterator): an iterator (or None) that we can loop over\n to read examples.\n It may be an openned file, a string list etc...\n text_path(str): path to file or None\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Returns:\n (example_dict iterator, 
num_feats) tuple.\n \"\"\"\n assert side in ['src', 'tgt']\n\n if text_iter is None:\n if text_path is not None:\n text_iter = TextDataset.make_text_iterator_from_file(text_path)\n else:\n return (None, 0)\n\n # All examples have same number of features, so we peek first one\n # to get the num_feats.\n examples_nfeats_iter = \\\n TextDataset.make_examples(text_iter, truncate, side)\n\n first_ex = next(examples_nfeats_iter)\n num_feats = first_ex[1]\n\n # Chain back the first element - we only want to peek it.\n examples_nfeats_iter = chain([first_ex], examples_nfeats_iter)\n examples_iter = (ex for ex, nfeats in examples_nfeats_iter)\n\n return (examples_iter, num_feats)","function_tokens":["def","make_text_examples_nfeats_tpl","(","text_iter",",","text_path",",","truncate",",","side",")",":","assert","side","in","[","'src'",",","'tgt'","]","if","text_iter","is","None",":","if","text_path","is","not","None",":","text_iter","=","TextDataset",".","make_text_iterator_from_file","(","text_path",")","else",":","return","(","None",",","0",")","# All examples have same number of features, so we peek first one","# to get the num_feats.","examples_nfeats_iter","=","TextDataset",".","make_examples","(","text_iter",",","truncate",",","side",")","first_ex","=","next","(","examples_nfeats_iter",")","num_feats","=","first_ex","[","1","]","# Chain back the first element - we only want to peek it.","examples_nfeats_iter","=","chain","(","[","first_ex","]",",","examples_nfeats_iter",")","examples_iter","=","(","ex","for","ex",",","nfeats","in","examples_nfeats_iter",")","return","(","examples_iter",",","num_feats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L133-L167"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.make_examples","parameters":"(text_iter, truncate, side)","argument_list":"","return_statement":"","docstring":"Args:\n text_iter (iterator): iterator of text sequences\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Yields:\n (word, features, nfeat) triples for each line.","docstring_summary":"Args:\n text_iter (iterator): iterator of text sequences\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".","docstring_tokens":["Args",":","text_iter","(","iterator",")",":","iterator","of","text","sequences","truncate","(","int",")",":","maximum","sequence","length","(","0","for","unlimited",")",".","side","(","str",")",":","src","or","tgt","."],"function":"def make_examples(text_iter, truncate, side):\n \"\"\"\n Args:\n text_iter (iterator): iterator of text sequences\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Yields:\n (word, features, nfeat) triples for each line.\n \"\"\"\n for i, line in enumerate(text_iter):\n line = line.strip().split()\n if truncate:\n line = line[:truncate]\n\n words, feats, n_feats = \\\n TextDataset.extract_text_features(line)\n\n example_dict = {side: words, \"indices\": i}\n if feats:\n prefix = side + \"_feat_\"\n example_dict.update((prefix + str(j), f)\n for j, f in enumerate(feats))\n yield example_dict, 
n_feats","function_tokens":["def","make_examples","(","text_iter",",","truncate",",","side",")",":","for","i",",","line","in","enumerate","(","text_iter",")",":","line","=","line",".","strip","(",")",".","split","(",")","if","truncate",":","line","=","line","[",":","truncate","]","words",",","feats",",","n_feats","=","TextDataset",".","extract_text_features","(","line",")","example_dict","=","{","side",":","words",",","\"indices\"",":","i","}","if","feats",":","prefix","=","side","+","\"_feat_\"","example_dict",".","update","(","(","prefix","+","str","(","j",")",",","f",")","for","j",",","f","in","enumerate","(","feats",")",")","yield","example_dict",",","n_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L170-L193"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.get_fields","parameters":"(n_src_features, n_tgt_features)","argument_list":"","return_statement":"return fields","docstring":"Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.","docstring_summary":"Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","n_src_features","(","int",")",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features","(","int",")",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n fields = {}\n\n fields[\"src\"] = torchtext.data.Field(\n pad_token=PAD_WORD,\n include_lengths=True)\n\n for j in range(n_src_features):\n fields[\"src_feat_\" + str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\" + str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n def make_src(data, vocab):\n \"\"\" ? \"\"\"\n #pdb.set_trace()\n src_size = max([t.size(0) for t in data])\n \n src_vocab_size = int(max([t.max() for t in data])) + 1\n \n try:\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n except:\n\n print(src_size)\n print(len(data))\n print(src_vocab_size)\n\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab):\n \"\"\" ? 
\"\"\"\n #pdb.set_trace()\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n sequential=False)\n\n return fields","function_tokens":["def","get_fields","(","n_src_features",",","n_tgt_features",")",":","fields","=","{","}","fields","[","\"src\"","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",",","include_lengths","=","True",")","for","j","in","range","(","n_src_features",")",":","fields","[","\"src_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",")","fields","[","\"tgt\"","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","for","j","in","range","(","n_tgt_features",")",":","fields","[","\"tgt_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","def","make_src","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","#pdb.set_trace()","src_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","src_vocab_size","=","int","(","max","(","[","t",".","max","(",")","for","t","in","data","]",")",")","+","1","try",":","alignment","=","torch",".","zeros","(","src_size",",","len","(","data",")",",","src_vocab_size",")","except",":","print","(","src_size",")","print","(","len","(","data",")",")","print","(","src_vocab_size",")","for","i",",","sent","in","enumerate","(","data",")",":","for","j",",","t","in","enumerate","(","sent",")",":","alignment","[","j",",","i",",","t","]","=","1","return","alignment","fields","[","\"src_map\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_src",",","sequential","=","False",")","def","make_tgt","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","#pdb.set_trace()","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","tgt_size",",","len","(","data",")",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[",":","sent",".","size","(","0",")",",","i","]","=","sent","return","alignment","fields","[","\"alignment\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_tgt",",","sequential","=","False",")","fields","[","\"indices\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","sequential","=","False",")","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L202-L274"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.get_num_features","parameters":"(corpus_file, side)","argument_list":"","return_statement":"return num_feats","docstring":"Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.","docstring_summary":"Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.","docstring_tokens":["Peek","one","line","and","get","number","of","features","of","it",".","(","All","lines","must","have","same","number","of","features",")",".","For","text","corpus","both","sides","are","in","text","form","thus","it","works","the","same","."],"function":"def get_num_features(corpus_file, side):\n \"\"\"\n Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = TextDataset.extract_text_features(f_line)\n\n return num_feats","function_tokens":["def","get_num_features","(","corpus_file",",","side",")",":","with","codecs",".","open","(","corpus_file",",","\"r\"",",","\"utf-8\"",")","as","cf",":","f_line","=","cf",".","readline","(",")",".","strip","(",")",".","split","(",")","_",",","_",",","num_feats","=","TextDataset",".","extract_text_features","(","f_line",")","return","num_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L277-L295"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.__init__","parameters":"(self, corpus_path, line_truncate, side, shard_size,\n assoc_iter=None)","argument_list":"","return_statement":"","docstring":"Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum 
length of a line to read.\n 0 for unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.","docstring_summary":"Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum length of a line to read.\n 0 for unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.","docstring_tokens":["Args",":","corpus_path",":","the","corpus","file","path",".","line_truncate",":","the","maximum","length","of","a","line","to","read",".","0","for","unlimited",".","side",":","src","or","tgt",".","shard_size",":","the","shard","size","0","means","not","sharding","the","file",".","assoc_iter",":","if","not","None","it","is","the","associate","iterator","that","this","iterator","should","align","its","step","with","."],"function":"def __init__(self, corpus_path, line_truncate, side, shard_size,\n assoc_iter=None):\n \"\"\"\n Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum length of a line to read.\n 0 for unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.\n \"\"\"\n try:\n # The codecs module seems to have bugs with seek()\/tell(),\n # so we use io.open().\n self.corpus = io.open(corpus_path, \"r\", encoding=\"utf-8\")\n except IOError:\n sys.stderr.write(\"Failed to open corpus file: %s\" % corpus_path)\n sys.exit(1)\n\n self.line_truncate = line_truncate\n self.side = side\n self.shard_size = shard_size\n self.assoc_iter = assoc_iter\n self.last_pos = 0\n self.line_index = -1\n self.eof = False","function_tokens":["def","__init__","(","self",",","corpus_path",",","line_truncate",",","side",",","shard_size",",","assoc_iter","=","None",")",":","try",":","# The codecs module seems to have bugs with seek()\/tell(),","# so we use io.open().","self",".","corpus","=","io",".","open","(","corpus_path",",","\"r\"",",","encoding","=","\"utf-8\"",")","except","IOError",":","sys",".","stderr",".","write","(","\"Failed to open corpus file: %s\"","%","corpus_path",")","sys",".","exit","(","1",")","self",".","line_truncate","=","line_truncate","self",".","side","=","side","self",".","shard_size","=","shard_size","self",".","assoc_iter","=","assoc_iter","self",".","last_pos","=","0","self",".","line_index","=","-","1","self",".","eof","=","False"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L326-L352"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.__iter__","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates `self.shard_size`.","docstring_summary":"Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates 
`self.shard_size`.","docstring_tokens":["Iterator","of","(","example_dict","nfeats",")",".","On","each","call","it","iterates","over","as","many","(","example_dict","nfeats",")","tuples","until","this","shard","s","size","equals","to","or","approximates","self",".","shard_size","."],"function":"def __iter__(self):\n \"\"\"\n Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates `self.shard_size`.\n \"\"\"\n iteration_index = -1\n if self.assoc_iter is not None:\n # We have associate iterator, just yields tuples\n # util we run parallel with it.\n while self.line_index < self.assoc_iter.line_index:\n line = self.corpus.readline()\n if line == '':\n raise AssertionError(\n \"Two corpuses must have same number of lines!\")\n\n self.line_index += 1\n iteration_index += 1\n yield self._example_dict_iter(line, iteration_index)\n\n if self.assoc_iter.eof:\n self.eof = True\n self.corpus.close()\n else:\n # Yield tuples util this shard's size reaches the threshold.\n self.corpus.seek(self.last_pos)\n while True:\n if self.shard_size != 0 and self.line_index % 64 == 0:\n # This part of check is time consuming on Py2 (but\n # it is quite fast on Py3, weird!). So we don't bother\n # to check for very line. Instead we chekc every 64\n # lines. Thus we are not dividing exactly per\n # `shard_size`, but it is not too much difference.\n cur_pos = self.corpus.tell()\n if cur_pos >= self.last_pos + self.shard_size:\n self.last_pos = cur_pos\n return\n\n line = self.corpus.readline()\n if line == '':\n self.eof = True\n self.corpus.close()\n return\n\n self.line_index += 1\n iteration_index += 1\n yield self._example_dict_iter(line, iteration_index)","function_tokens":["def","__iter__","(","self",")",":","iteration_index","=","-","1","if","self",".","assoc_iter","is","not","None",":","# We have associate iterator, just yields tuples","# util we run parallel with it.","while","self",".","line_index","<","self",".","assoc_iter",".","line_index",":","line","=","self",".","corpus",".","readline","(",")","if","line","==","''",":","raise","AssertionError","(","\"Two corpuses must have same number of lines!\"",")","self",".","line_index","+=","1","iteration_index","+=","1","yield","self",".","_example_dict_iter","(","line",",","iteration_index",")","if","self",".","assoc_iter",".","eof",":","self",".","eof","=","True","self",".","corpus",".","close","(",")","else",":","# Yield tuples util this shard's size reaches the threshold.","self",".","corpus",".","seek","(","self",".","last_pos",")","while","True",":","if","self",".","shard_size","!=","0","and","self",".","line_index","%","64","==","0",":","# This part of check is time consuming on Py2 (but","# it is quite fast on Py3, weird!). So we don't bother","# to check for very line. Instead we chekc every 64","# lines. 
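The shard boundary test in `ShardedTextCorpusIterator.__iter__` leans on `tell()`: every 64 lines it checks whether the byte offset has passed `last_pos + shard_size`, so shards break on line boundaries near, not exactly at, the requested size. A self-contained sketch of that pattern (the function and its return value are illustrative, not part of the library):

    import io

    def shard_line_counts(path, shard_size):
        # Count how many lines land in each roughly shard_size-byte shard,
        # probing the file offset only every 64 lines, as the iterator does.
        counts, n, last_pos = [], 0, 0
        with io.open(path, "r", encoding="utf-8") as f:
            while True:
                if shard_size and n % 64 == 0:
                    cur = f.tell()
                    if cur >= last_pos + shard_size:
                        counts.append(n)
                        last_pos, n = cur, 0
                if f.readline() == "":
                    break
                n += 1
        if n:
            counts.append(n)
        return counts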
Thus we are not dividing exactly per","# `shard_size`, but it is not too much difference.","cur_pos","=","self",".","corpus",".","tell","(",")","if","cur_pos",">=","self",".","last_pos","+","self",".","shard_size",":","self",".","last_pos","=","cur_pos","return","line","=","self",".","corpus",".","readline","(",")","if","line","==","''",":","self",".","eof","=","True","self",".","corpus",".","close","(",")","return","self",".","line_index","+=","1","iteration_index","+=","1","yield","self",".","_example_dict_iter","(","line",",","iteration_index",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L354-L400"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.hit_end","parameters":"(self)","argument_list":"","return_statement":"return self.eof","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def hit_end(self):\n \"\"\" ? \"\"\"\n return self.eof","function_tokens":["def","hit_end","(","self",")",":","return","self",".","eof"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L402-L404"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.num_feats","parameters":"(self)","argument_list":"","return_statement":"return self.n_feats","docstring":"We peek the first line and seek back to\n the beginning of the file.","docstring_summary":"We peek the first line and seek back to\n the beginning of the file.","docstring_tokens":["We","peek","the","first","line","and","seek","back","to","the","beginning","of","the","file","."],"function":"def num_feats(self):\n \"\"\"\n We peek the first line and seek back to\n the beginning of the file.\n \"\"\"\n saved_pos = self.corpus.tell()\n\n line = self.corpus.readline().split()\n if self.line_truncate:\n line = line[:self.line_truncate]\n _, _, self.n_feats = TextDataset.extract_text_features(line)\n\n self.corpus.seek(saved_pos)\n\n return self.n_feats","function_tokens":["def","num_feats","(","self",")",":","saved_pos","=","self",".","corpus",".","tell","(",")","line","=","self",".","corpus",".","readline","(",")",".","split","(",")","if","self",".","line_truncate",":","line","=","line","[",":","self",".","line_truncate","]","_",",","_",",","self",".","n_feats","=","TextDataset",".","extract_text_features","(","line",")","self",".","corpus",".","seek","(","saved_pos",")","return","self",".","n_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/text_dataset.py#L407-L421"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.sort_key","parameters":"(self, ex)","argument_list":"","return_statement":"return (ex.src.size(2), ex.src.size(1))","docstring":"Sort using the size of the image: (width, height).","docstring_summary":"Sort using the size of the image: (width, 
height).","docstring_tokens":["Sort","using","the","size","of","the","image",":","(","width","height",")","."],"function":"def sort_key(self, ex):\n \"\"\" Sort using the size of the image: (width, height).\"\"\"\n return (ex.src.size(2), ex.src.size(1))","function_tokens":["def","sort_key","(","self",",","ex",")",":","return","(","ex",".","src",".","size","(","2",")",",","ex",".","src",".","size","(","1",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py#L80-L82"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.make_image_examples_nfeats_tpl","parameters":"(img_iter, img_path, img_dir,\n image_channel_size=3)","argument_list":"","return_statement":"return (examples_iter, num_feats)","docstring":"Note: one of img_iter and img_path must be not None\n Args:\n img_iter(iterator): an iterator that yields pairs (img, filename)\n (or None)\n img_path(str): location of a src file containing image paths\n (or None)\n src_dir (str): location of source images\n\n Returns:\n (example_dict iterator, num_feats) tuple","docstring_summary":"Note: one of img_iter and img_path must be not None\n Args:\n img_iter(iterator): an iterator that yields pairs (img, filename)\n (or None)\n img_path(str): location of a src file containing image paths\n (or None)\n src_dir (str): location of source images","docstring_tokens":["Note",":","one","of","img_iter","and","img_path","must","be","not","None","Args",":","img_iter","(","iterator",")",":","an","iterator","that","yields","pairs","(","img","filename",")","(","or","None",")","img_path","(","str",")",":","location","of","a","src","file","containing","image","paths","(","or","None",")","src_dir","(","str",")",":","location","of","source","images"],"function":"def make_image_examples_nfeats_tpl(img_iter, img_path, img_dir,\n image_channel_size=3):\n \"\"\"\n Note: one of img_iter and img_path must be not None\n Args:\n img_iter(iterator): an iterator that yields pairs (img, filename)\n (or None)\n img_path(str): location of a src file containing image paths\n (or None)\n src_dir (str): location of source images\n\n Returns:\n (example_dict iterator, num_feats) tuple\n \"\"\"\n if img_iter is None:\n if img_path is not None:\n img_iter = ImageDataset. 
\\\n make_img_iterator_from_file(img_path,\n img_dir,\n image_channel_size)\n else:\n raise ValueError(\"\"\"One of 'img_iter' and 'img_path'\n must be not None\"\"\")\n examples_iter = ImageDataset.make_examples(img_iter, img_dir, 'src')\n num_feats = 0 # Source side(img) has no features.\n\n return (examples_iter, num_feats)","function_tokens":["def","make_image_examples_nfeats_tpl","(","img_iter",",","img_path",",","img_dir",",","image_channel_size","=","3",")",":","if","img_iter","is","None",":","if","img_path","is","not","None",":","img_iter","=","ImageDataset",".","make_img_iterator_from_file","(","img_path",",","img_dir",",","image_channel_size",")","else",":","raise","ValueError","(","\"\"\"One of 'img_iter' and 'img_path'\n must be not None\"\"\"",")","examples_iter","=","ImageDataset",".","make_examples","(","img_iter",",","img_dir",",","'src'",")","num_feats","=","0","# Source side(img) has no features.","return","(","examples_iter",",","num_feats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py#L85-L111"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.make_examples","parameters":"(img_iter, src_dir, side, truncate=None)","argument_list":"","return_statement":"","docstring":"Args:\n path (str): location of a src file containing image paths\n src_dir (str): location of source images\n side (str): 'src' or 'tgt'\n truncate: maximum img size ((0,0) or None for unlimited)\n\n Yields:\n a dictionary containing image data, path and index for each line.","docstring_summary":"Args:\n path (str): location of a src file containing image paths\n src_dir (str): location of source images\n side (str): 'src' or 'tgt'\n truncate: maximum img size ((0,0) or None for unlimited)","docstring_tokens":["Args",":","path","(","str",")",":","location","of","a","src","file","containing","image","paths","src_dir","(","str",")",":","location","of","source","images","side","(","str",")",":","src","or","tgt","truncate",":","maximum","img","size","((","0","0",")","or","None","for","unlimited",")"],"function":"def make_examples(img_iter, src_dir, side, truncate=None):\n \"\"\"\n Args:\n path (str): location of a src file containing image paths\n src_dir (str): location of source images\n side (str): 'src' or 'tgt'\n truncate: maximum img size ((0,0) or None for unlimited)\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n assert (src_dir is not None) and os.path.exists(src_dir), \\\n 'src_dir must be a valid directory if data_type is img'\n\n for index, (img, filename) in enumerate(img_iter):\n if truncate and truncate != (0, 0):\n if not (img.size(1) <= truncate[0]\n and img.size(2) <= truncate[1]):\n continue\n\n example_dict = {side: img,\n side + '_path': filename,\n 'indices': index}\n yield example_dict","function_tokens":["def","make_examples","(","img_iter",",","src_dir",",","side",",","truncate","=","None",")",":","assert","(","src_dir","is","not","None",")","and","os",".","path",".","exists","(","src_dir",")",",","'src_dir must be a valid directory if data_type is 
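make_examples filters rather than crops: any image exceeding the truncate bounds is skipped entirely, and the surviving dict keys ('src', 'src_path', 'indices') are what the Field objects later consume. A self-contained sketch with random tensors standing in for loaded images (the src_dir assertion is omitted here):

import torch

def make_examples(img_iter, side='src', truncate=None):
    for index, (img, filename) in enumerate(img_iter):
        if truncate and truncate != (0, 0):
            # skip images larger than (height, width); do not crop
            if not (img.size(1) <= truncate[0] and img.size(2) <= truncate[1]):
                continue
        yield {side: img, side + '_path': filename, 'indices': index}

imgs = [(torch.rand(3, 32, 32), 'a.png'),
        (torch.rand(3, 512, 512), 'b.png'),
        (torch.rand(3, 48, 40), 'c.png')]
kept = list(make_examples(iter(imgs), truncate=(64, 64)))
print([ex['src_path'] for ex in kept])  # ['a.png', 'c.png'] -- b.png is dropped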
img'","for","index",",","(","img",",","filename",")","in","enumerate","(","img_iter",")",":","if","truncate","and","truncate","!=","(","0",",","0",")",":","if","not","(","img",".","size","(","1",")","<=","truncate","[","0","]","and","img",".","size","(","2",")","<=","truncate","[","1","]",")",":","continue","example_dict","=","{","side",":","img",",","side","+","'_path'",":","filename",",","'indices'",":","index","}","yield","example_dict"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py#L114-L137"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.make_img_iterator_from_file","parameters":"(path, src_dir, image_channel_size=3)","argument_list":"","return_statement":"","docstring":"Args:\n path(str):\n src_dir(str):\n\n Yields:\n img: and image tensor\n filename(str): the image filename","docstring_summary":"Args:\n path(str):\n src_dir(str):","docstring_tokens":["Args",":","path","(","str",")",":","src_dir","(","str",")",":"],"function":"def make_img_iterator_from_file(path, src_dir, image_channel_size=3):\n \"\"\"\n Args:\n path(str):\n src_dir(str):\n\n Yields:\n img: and image tensor\n filename(str): the image filename\n \"\"\"\n from PIL import Image\n from torchvision import transforms\n\n with codecs.open(path, \"r\", \"utf-8\") as corpus_file:\n for line in corpus_file:\n filename = line.strip()\n img_path = os.path.join(src_dir, filename)\n if not os.path.exists(img_path):\n img_path = line\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % (line.strip())\n\n if (image_channel_size == 1):\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = transforms.ToTensor()(Image.open(img_path))\n\n yield img, filename","function_tokens":["def","make_img_iterator_from_file","(","path",",","src_dir",",","image_channel_size","=","3",")",":","from","PIL","import","Image","from","torchvision","import","transforms","with","codecs",".","open","(","path",",","\"r\"",",","\"utf-8\"",")","as","corpus_file",":","for","line","in","corpus_file",":","filename","=","line",".","strip","(",")","img_path","=","os",".","path",".","join","(","src_dir",",","filename",")","if","not","os",".","path",".","exists","(","img_path",")",":","img_path","=","line","assert","os",".","path",".","exists","(","img_path",")",",","'img path %s not found'","%","(","line",".","strip","(",")",")","if","(","image_channel_size","==","1",")",":","img","=","transforms",".","ToTensor","(",")","(","Image",".","fromarray","(","cv2",".","imread","(","img_path",",","0",")",")",")","else",":","img","=","transforms",".","ToTensor","(",")","(","Image",".","open","(","img_path",")",")","yield","img",",","filename"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py#L140-L169"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.get_fields","parameters":"(n_src_features, n_tgt_features)","argument_list":"","return_statement":"return fields","docstring":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number 
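make_img_iterator_from_file mixes two loaders: cv2 for the single-channel case and PIL otherwise. A trimmed sketch that stays inside PIL, using convert('L') as a presumed equivalent of cv2.imread(path, 0) for grayscale:

import os
from PIL import Image
from torchvision import transforms

def iter_images(path, src_dir, image_channel_size=3):
    to_tensor = transforms.ToTensor()
    with open(path, encoding='utf-8') as corpus_file:
        for line in corpus_file:
            filename = line.strip()
            img_path = os.path.join(src_dir, filename)
            assert os.path.exists(img_path), 'img path %s not found' % filename
            img = Image.open(img_path)
            if image_channel_size == 1:
                img = img.convert('L')  # grayscale, one channel
            yield to_tensor(img), filename

Note that the original falls back to the raw line (newline included) when the joined path does not exist, which looks unintended; the sketch simply asserts instead.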
of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.","docstring_summary":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","n_src_features",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n fields = {}\n\n def make_img(data, vocab):\n \"\"\" ? \"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n fields[\"src\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_img, sequential=False)\n\n for j in range(n_src_features):\n fields[\"src_feat_\" + str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\" + str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n def make_src(data, vocab):\n \"\"\" ? \"\"\"\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab):\n \"\"\" ? \"\"\"\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n sequential=False)\n\n return fields","function_tokens":["def","get_fields","(","n_src_features",",","n_tgt_features",")",":","fields","=","{","}","def","make_img","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","c","=","data","[","0","]",".","size","(","0",")","h","=","max","(","[","t",".","size","(","1",")","for","t","in","data","]",")","w","=","max","(","[","t",".","size","(","2",")","for","t","in","data","]",")","imgs","=","torch",".","zeros","(","len","(","data",")",",","c",",","h",",","w",")",".","fill_","(","1",")","for","i",",","img","in","enumerate","(","data",")",":","imgs","[","i",",",":",",","0",":","img",".","size","(","1",")",",","0",":","img",".","size","(","2",")","]","=","img","return","imgs","fields","[","\"src\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_img",",","sequential","=","False",")","for","j","in","range","(","n_src_features",")",":","fields","[","\"src_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",")","fields","[","\"tgt\"","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","for","j","in","range","(","n_tgt_features",")",":","fields","[","\"tgt_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","def","make_src","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","src_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","src_vocab_size","=","max","(","[","t",".","max","(",")","for","t","in","data","]",")","+","1","alignment","=","torch",".","zeros","(","src_size",",","len","(","data",")",",","src_vocab_size",")","for","i",",","sent","in","enumerate","(","data",")",":","for","j",",","t","in","enumerate","(","sent",")",":","alignment","[","j",",","i",",","t","]","=","1","return","alignment","fields","[","\"src_map\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_src",",","sequential","=","False",")","def","make_tgt","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","tgt_size",",","len","(","data",")",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[",":","sent",".","size","(","0",")",",","i","]","=","sent","return","alignment","fields","[","\"alignment\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_tgt",",","sequential","=","False",")","fields","[","\"indices\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","sequential","=","False",")","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py#L172-L243"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.get_num_features","parameters":"(corpus_file, side)","argument_list":"","return_statement":"return num_feats","docstring":"For image corpus, source side is in form of image, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.","docstring_summary":"For image corpus, source side is in form of image, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.","docstring_tokens":["For","image","corpus","source","side","is","in","form","of","image","thus","no","feature",";","while","target","side","is","in","form","of","text","thus","we","can","extract","its","text","features","."],"function":"def get_num_features(corpus_file, side):\n \"\"\"\n For image corpus, source side is in form of image, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n if side == 'src':\n num_feats = 0\n else:\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = ImageDataset.extract_text_features(f_line)\n\n return num_feats","function_tokens":["def","get_num_features","(","corpus_file",",","side",")",":","if","side","==","'src'",":","num_feats","=","0","else",":","with","codecs",".","open","(","corpus_file",",","\"r\"",",","\"utf-8\"",")","as","cf",":","f_line","=","cf",".","readline","(",")",".","strip","(",")",".","split","(",")","_",",","_",",","num_feats","=","ImageDataset",".","extract_text_features","(","f_line",")","return","num_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/image_dataset.py#L246-L266"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.sort_key","parameters":"(self, ex)","argument_list":"","return_statement":"return ex.src.size(1)","docstring":"Sort using duration time of the sound spectrogram.","docstring_summary":"Sort using duration time of the sound 
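The make_img closure in get_fields above is the batch collator for images: pad every image to the batch's maximum height and width with ones (white) and copy each image into the top-left corner. Isolated below, minus the unused vocab argument:

import torch

def make_img(data):
    c = data[0].size(0)
    h = max(t.size(1) for t in data)
    w = max(t.size(2) for t in data)
    imgs = torch.full((len(data), c, h, w), 1.0)  # pad value 1 = white
    for i, img in enumerate(data):
        imgs[i, :, :img.size(1), :img.size(2)] = img
    return imgs

batch = make_img([torch.rand(3, 20, 30), torch.rand(3, 25, 10)])
print(batch.shape)  # torch.Size([2, 3, 25, 30])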
spectrogram.","docstring_tokens":["Sort","using","duration","time","of","the","sound","spectrogram","."],"function":"def sort_key(self, ex):\n \"\"\" Sort using duration time of the sound spectrogram. \"\"\"\n return ex.src.size(1)","function_tokens":["def","sort_key","(","self",",","ex",")",":","return","ex",".","src",".","size","(","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py#L90-L92"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.make_audio_examples_nfeats_tpl","parameters":"(path, audio_dir,\n sample_rate, window_size,\n window_stride, window,\n normalize_audio, truncate=None)","argument_list":"","return_statement":"return (examples_iter, num_feats)","docstring":"Args:\n path (str): location of a src file containing audio paths.\n audio_dir (str): location of source audio files.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Returns:\n (example_dict iterator, num_feats) tuple","docstring_summary":"Args:\n path (str): location of a src file containing audio paths.\n audio_dir (str): location of source audio files.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).","docstring_tokens":["Args",":","path","(","str",")",":","location","of","a","src","file","containing","audio","paths",".","audio_dir","(","str",")",":","location","of","source","audio","files",".","sample_rate","(","int",")",":","sample_rate",".","window_size","(","float",")",":","window","size","for","spectrogram","in","seconds",".","window_stride","(","float",")",":","window","stride","for","spectrogram","in","seconds",".","window","(","str",")",":","window","type","for","spectrogram","generation",".","normalize_audio","(","bool",")",":","subtract","spectrogram","by","mean","and","divide","by","std","or","not",".","truncate","(","int",")",":","maximum","audio","length","(","0","or","None","for","unlimited",")","."],"function":"def make_audio_examples_nfeats_tpl(path, audio_dir,\n sample_rate, window_size,\n window_stride, window,\n normalize_audio, truncate=None):\n \"\"\"\n Args:\n path (str): location of a src file containing audio paths.\n audio_dir (str): location of source audio files.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Returns:\n (example_dict iterator, num_feats) tuple\n \"\"\"\n examples_iter = AudioDataset.read_audio_file(\n path, audio_dir, \"src\", sample_rate,\n 
window_size, window_stride, window,\n normalize_audio, truncate)\n num_feats = 0 # Source side(audio) has no features.\n\n return (examples_iter, num_feats)","function_tokens":["def","make_audio_examples_nfeats_tpl","(","path",",","audio_dir",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","truncate","=","None",")",":","examples_iter","=","AudioDataset",".","read_audio_file","(","path",",","audio_dir",",","\"src\"",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","truncate",")","num_feats","=","0","# Source side(audio) has no features.","return","(","examples_iter",",","num_feats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py#L95-L120"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.read_audio_file","parameters":"(path, src_dir, side, sample_rate, window_size,\n window_stride, window, normalize_audio,\n truncate=None)","argument_list":"","return_statement":"","docstring":"Args:\n path (str): location of a src file containing audio paths.\n src_dir (str): location of source audio files.\n side (str): 'src' or 'tgt'.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Yields:\n a dictionary containing audio data for each line.","docstring_summary":"Args:\n path (str): location of a src file containing audio paths.\n src_dir (str): location of source audio files.\n side (str): 'src' or 'tgt'.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).","docstring_tokens":["Args",":","path","(","str",")",":","location","of","a","src","file","containing","audio","paths",".","src_dir","(","str",")",":","location","of","source","audio","files",".","side","(","str",")",":","src","or","tgt",".","sample_rate","(","int",")",":","sample_rate",".","window_size","(","float",")",":","window","size","for","spectrogram","in","seconds",".","window_stride","(","float",")",":","window","stride","for","spectrogram","in","seconds",".","window","(","str",")",":","window","type","for","spectrogram","generation",".","normalize_audio","(","bool",")",":","subtract","spectrogram","by","mean","and","divide","by","std","or","not",".","truncate","(","int",")",":","maximum","audio","length","(","0","or","None","for","unlimited",")","."],"function":"def read_audio_file(path, src_dir, side, sample_rate, window_size,\n window_stride, window, normalize_audio,\n truncate=None):\n \"\"\"\n Args:\n path (str): location of a src file containing audio paths.\n src_dir (str): location of source audio files.\n side (str): 'src' or 'tgt'.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride 
(float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Yields:\n a dictionary containing audio data for each line.\n \"\"\"\n assert (src_dir is not None) and os.path.exists(src_dir),\\\n \"src_dir must be a valid directory if data_type is audio\"\n\n import torchaudio\n import librosa\n import numpy as np\n\n with codecs.open(path, \"r\", \"utf-8\") as corpus_file:\n index = 0\n for line in corpus_file:\n audio_path = os.path.join(src_dir, line.strip())\n if not os.path.exists(audio_path):\n audio_path = line\n\n assert os.path.exists(audio_path), \\\n 'audio path %s not found' % (line.strip())\n\n sound, sample_rate = torchaudio.load(audio_path)\n if truncate and truncate > 0:\n if sound.size(0) > truncate:\n continue\n\n assert sample_rate == sample_rate, \\\n 'Sample rate of %s != -sample_rate (%d vs %d)' \\\n % (audio_path, sample_rate, sample_rate)\n\n sound = sound.numpy()\n if len(sound.shape) > 1:\n if sound.shape[1] == 1:\n sound = sound.squeeze()\n else:\n sound = sound.mean(axis=1) # average multiple channels\n\n n_fft = int(sample_rate * window_size)\n win_length = n_fft\n hop_length = int(sample_rate * window_stride)\n # STFT\n d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=window)\n spect, _ = librosa.magphase(d)\n spect = np.log1p(spect)\n spect = torch.FloatTensor(spect)\n if normalize_audio:\n mean = spect.mean()\n std = spect.std()\n spect.add_(-mean)\n spect.div_(std)\n\n example_dict = {side: spect,\n side + '_path': line.strip(),\n 'indices': index}\n index += 1\n\n yield example_dict","function_tokens":["def","read_audio_file","(","path",",","src_dir",",","side",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","truncate","=","None",")",":","assert","(","src_dir","is","not","None",")","and","os",".","path",".","exists","(","src_dir",")",",","\"src_dir must be a valid directory if data_type is audio\"","import","torchaudio","import","librosa","import","numpy","as","np","with","codecs",".","open","(","path",",","\"r\"",",","\"utf-8\"",")","as","corpus_file",":","index","=","0","for","line","in","corpus_file",":","audio_path","=","os",".","path",".","join","(","src_dir",",","line",".","strip","(",")",")","if","not","os",".","path",".","exists","(","audio_path",")",":","audio_path","=","line","assert","os",".","path",".","exists","(","audio_path",")",",","'audio path %s not found'","%","(","line",".","strip","(",")",")","sound",",","sample_rate","=","torchaudio",".","load","(","audio_path",")","if","truncate","and","truncate",">","0",":","if","sound",".","size","(","0",")",">","truncate",":","continue","assert","sample_rate","==","sample_rate",",","'Sample rate of %s != -sample_rate (%d vs %d)'","%","(","audio_path",",","sample_rate",",","sample_rate",")","sound","=","sound",".","numpy","(",")","if","len","(","sound",".","shape",")",">","1",":","if","sound",".","shape","[","1","]","==","1",":","sound","=","sound",".","squeeze","(",")","else",":","sound","=","sound",".","mean","(","axis","=","1",")","# average multiple channels","n_fft","=","int","(","sample_rate","*","window_size",")","win_length","=","n_fft","hop_length","=","int","(","sample_rate","*","window_stride",")","# 
STFT","d","=","librosa",".","stft","(","sound",",","n_fft","=","n_fft",",","hop_length","=","hop_length",",","win_length","=","win_length",",","window","=","window",")","spect",",","_","=","librosa",".","magphase","(","d",")","spect","=","np",".","log1p","(","spect",")","spect","=","torch",".","FloatTensor","(","spect",")","if","normalize_audio",":","mean","=","spect",".","mean","(",")","std","=","spect",".","std","(",")","spect",".","add_","(","-","mean",")","spect",".","div_","(","std",")","example_dict","=","{","side",":","spect",",","side","+","'_path'",":","line",".","strip","(",")",",","'indices'",":","index","}","index","+=","1","yield","example_dict"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py#L123-L195"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.get_fields","parameters":"(n_src_features, n_tgt_features)","argument_list":"","return_statement":"return fields","docstring":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.","docstring_summary":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","n_src_features",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n fields = {}\n\n def make_audio(data, vocab):\n \"\"\" ? \"\"\"\n nfft = data[0].size(0)\n t = max([t.size(1) for t in data])\n sounds = torch.zeros(len(data), 1, nfft, t)\n for i, spect in enumerate(data):\n sounds[i, :, :, 0:spect.size(1)] = spect\n return sounds\n\n fields[\"src\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_audio, sequential=False)\n\n for j in range(n_src_features):\n fields[\"src_feat_\" + str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\" + str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n def make_src(data, vocab):\n \"\"\" ? 
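Two things in read_audio_file deserve a note. First, `sound, sample_rate = torchaudio.load(audio_path)` overwrites the sample_rate argument, so the following `assert sample_rate == sample_rate` can never fail; it was presumably meant to compare the loaded rate against the expected one. Second, the channel handling assumes the older (frames, channels) layout. A sketch of the per-file pipeline with the assert written as presumably intended, against recent torchaudio's (channels, frames) layout:

import numpy as np
import torch
import torchaudio
import librosa

def load_spectrogram(audio_path, expected_rate,
                     window_size=0.02, window_stride=0.01, window='hamming'):
    sound, rate = torchaudio.load(audio_path)
    assert rate == expected_rate, \
        'Sample rate of %s is %d, expected %d' % (audio_path, rate, expected_rate)
    sound = sound.numpy()
    if sound.ndim > 1:
        sound = sound.mean(axis=0)  # average channels (channels-first layout)
    n_fft = int(rate * window_size)
    hop_length = int(rate * window_stride)
    d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                     win_length=n_fft, window=window)
    spect = torch.from_numpy(np.log1p(np.abs(d)))  # log-compressed magnitude
    return (spect - spect.mean()) / spect.std()    # the normalize_audio branch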
\"\"\"\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab):\n \"\"\" ? \"\"\"\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n sequential=False)\n\n return fields","function_tokens":["def","get_fields","(","n_src_features",",","n_tgt_features",")",":","fields","=","{","}","def","make_audio","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","nfft","=","data","[","0","]",".","size","(","0",")","t","=","max","(","[","t",".","size","(","1",")","for","t","in","data","]",")","sounds","=","torch",".","zeros","(","len","(","data",")",",","1",",","nfft",",","t",")","for","i",",","spect","in","enumerate","(","data",")",":","sounds","[","i",",",":",",",":",",","0",":","spect",".","size","(","1",")","]","=","spect","return","sounds","fields","[","\"src\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_audio",",","sequential","=","False",")","for","j","in","range","(","n_src_features",")",":","fields","[","\"src_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",")","fields","[","\"tgt\"","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","for","j","in","range","(","n_tgt_features",")",":","fields","[","\"tgt_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","def","make_src","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","src_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","src_vocab_size","=","max","(","[","t",".","max","(",")","for","t","in","data","]",")","+","1","alignment","=","torch",".","zeros","(","src_size",",","len","(","data",")",",","src_vocab_size",")","for","i",",","sent","in","enumerate","(","data",")",":","for","j",",","t","in","enumerate","(","sent",")",":","alignment","[","j",",","i",",","t","]","=","1","return","alignment","fields","[","\"src_map\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_src",",","sequential","=","False",")","def","make_tgt","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","tgt_size",",","len","(","data",")",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[",":","sent",".","size","(","0",")",",","i","]","=","sent","return","alignment","fields","[","\"alignment\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_tgt",",","sequential","=","False",")","fields","[","\"indices\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","sequential","=","False",")","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py#L198-L268"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.get_num_features","parameters":"(corpus_file, side)","argument_list":"","return_statement":"return num_feats","docstring":"For audio corpus, source side is in form of audio, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.","docstring_summary":"For audio corpus, source side is in form of audio, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.","docstring_tokens":["For","audio","corpus","source","side","is","in","form","of","audio","thus","no","feature",";","while","target","side","is","in","form","of","text","thus","we","can","extract","its","text","features","."],"function":"def get_num_features(corpus_file, side):\n \"\"\"\n For audio corpus, source side is in form of audio, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n if side == 'src':\n num_feats = 0\n else:\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = AudioDataset.extract_text_features(f_line)\n\n return num_feats","function_tokens":["def","get_num_features","(","corpus_file",",","side",")",":","if","side","==","'src'",":","num_feats","=","0","else",":","with","codecs",".","open","(","corpus_file",",","\"r\"",",","\"utf-8\"",")","as","cf",":","f_line","=","cf",".","readline","(",")",".","strip","(",")",".","split","(",")","_",",","_",",","num_feats","=","AudioDataset",".","extract_text_features","(","f_line",")","return","num_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/audio_dataset.py#L271-L291"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"get_fields","parameters":"(data_type, n_src_features, n_tgt_features)","argument_list":"","return_statement":"","docstring":"Args:\n data_type: type of the source input. 
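make_src, identical in the image and audio get_fields (and a candidate for factoring out), builds the copy-attention source map: a (src_len, batch, dyn_vocab) tensor with a 1 wherever token j of example i has dynamic-vocabulary index t. In isolation:

import torch

def make_src(data):
    src_size = max(t.size(0) for t in data)
    src_vocab_size = int(max(t.max() for t in data)) + 1
    alignment = torch.zeros(src_size, len(data), src_vocab_size)
    for i, sent in enumerate(data):
        for j, t in enumerate(sent):
            alignment[j, i, t] = 1  # one-hot over the dynamic vocabulary
    return alignment

src_maps = [torch.tensor([1, 2, 3]), torch.tensor([1, 1])]
print(make_src(src_maps).shape)  # torch.Size([3, 2, 4])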
Options are [text|img|audio].\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values are the\n corresponding Field objects.","docstring_summary":"Args:\n data_type: type of the source input. Options are [text|img|audio].\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","data_type",":","type","of","the","source","input",".","Options","are","[","text|img|audio","]",".","n_src_features",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(data_type, n_src_features, n_tgt_features):\n \"\"\"\n Args:\n data_type: type of the source input. Options are [text|img|audio].\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values are the\n corresponding Field objects.\n \"\"\"\n if data_type == 'text':\n return TextDataset.get_fields(n_src_features, n_tgt_features)\n elif data_type == 'img':\n return ImageDataset.get_fields(n_src_features, n_tgt_features)\n elif data_type == 'audio':\n return AudioDataset.get_fields(n_src_features, n_tgt_features)\n else:\n raise ValueError(\"Data type not implemented\")","function_tokens":["def","get_fields","(","data_type",",","n_src_features",",","n_tgt_features",")",":","if","data_type","==","'text'",":","return","TextDataset",".","get_fields","(","n_src_features",",","n_tgt_features",")","elif","data_type","==","'img'",":","return","ImageDataset",".","get_fields","(","n_src_features",",","n_tgt_features",")","elif","data_type","==","'audio'",":","return","AudioDataset",".","get_fields","(","n_src_features",",","n_tgt_features",")","else",":","raise","ValueError","(","\"Data type not implemented\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L36-L56"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"load_fields_from_vocab","parameters":"(vocab, data_type=\"text\")","argument_list":"","return_statement":"return fields","docstring":"Load Field objects from `vocab.pt` file.","docstring_summary":"Load Field objects from `vocab.pt` file.","docstring_tokens":["Load","Field","objects","from","vocab",".","pt","file","."],"function":"def load_fields_from_vocab(vocab, data_type=\"text\"):\n \"\"\"\n Load Field objects from `vocab.pt` file.\n \"\"\"\n vocab = dict(vocab)\n n_src_features = len(collect_features(vocab, 'src'))\n n_tgt_features = len(collect_features(vocab, 'tgt'))\n fields = get_fields(data_type, n_src_features, n_tgt_features)\n for k, v in vocab.items():\n # Hack. 
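Typical use of the get_fields dispatcher, assuming a text corpus with no extra word features:

from onmt.inputters.inputter import get_fields

fields = get_fields('text', n_src_features=0, n_tgt_features=0)
# expected to include keys such as 'src', 'tgt', 'src_map', 'alignment', 'indices'
print(sorted(fields))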
Can't pickle defaultdict :(\n v.stoi = defaultdict(lambda: 0, v.stoi)\n fields[k].vocab = v\n return fields","function_tokens":["def","load_fields_from_vocab","(","vocab",",","data_type","=","\"text\"",")",":","vocab","=","dict","(","vocab",")","n_src_features","=","len","(","collect_features","(","vocab",",","'src'",")",")","n_tgt_features","=","len","(","collect_features","(","vocab",",","'tgt'",")",")","fields","=","get_fields","(","data_type",",","n_src_features",",","n_tgt_features",")","for","k",",","v","in","vocab",".","items","(",")",":","# Hack. Can't pickle defaultdict :(","v",".","stoi","=","defaultdict","(","lambda",":","0",",","v",".","stoi",")","fields","[","k","]",".","vocab","=","v","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L59-L71"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"save_fields_to_vocab","parameters":"(fields)","argument_list":"","return_statement":"return vocab","docstring":"Save Vocab objects in Field objects to `vocab.pt` file.","docstring_summary":"Save Vocab objects in Field objects to `vocab.pt` file.","docstring_tokens":["Save","Vocab","objects","in","Field","objects","to","vocab",".","pt","file","."],"function":"def save_fields_to_vocab(fields):\n \"\"\"\n Save Vocab objects in Field objects to `vocab.pt` file.\n \"\"\"\n vocab = []\n for k, f in fields.items():\n if f is not None and 'vocab' in f.__dict__:\n f.vocab.stoi = f.vocab.stoi\n vocab.append((k, f.vocab))\n return vocab","function_tokens":["def","save_fields_to_vocab","(","fields",")",":","vocab","=","[","]","for","k",",","f","in","fields",".","items","(",")",":","if","f","is","not","None","and","'vocab'","in","f",".","__dict__",":","f",".","vocab",".","stoi","=","f",".","vocab",".","stoi","vocab",".","append","(","(","k",",","f",".","vocab",")",")","return","vocab"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L74-L83"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"merge_vocabs","parameters":"(vocabs, vocab_size=None)","argument_list":"","return_statement":"return torchtext.vocab.Vocab(merged,\n specials=[UNK_WORD, PAD_WORD,\n BOS_WORD, EOS_WORD],\n max_size=vocab_size)","docstring":"Merge individual vocabularies (assumed to be generated from disjoint\n documents) into a larger vocabulary.\n\n Args:\n vocabs: `torchtext.vocab.Vocab` vocabularies to be merged\n vocab_size: `int` the final vocabulary size. `None` for no limit.\n Return:\n `torchtext.vocab.Vocab`","docstring_summary":"Merge individual vocabularies (assumed to be generated from disjoint\n documents) into a larger vocabulary.","docstring_tokens":["Merge","individual","vocabularies","(","assumed","to","be","generated","from","disjoint","documents",")","into","a","larger","vocabulary","."],"function":"def merge_vocabs(vocabs, vocab_size=None):\n \"\"\"\n Merge individual vocabularies (assumed to be generated from disjoint\n documents) into a larger vocabulary.\n\n Args:\n vocabs: `torchtext.vocab.Vocab` vocabularies to be merged\n vocab_size: `int` the final vocabulary size. 
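The "Can't pickle defaultdict" hack exists because vocab.pt stores plain (name, vocab) pairs; the default-to-index-0 (<unk>) lookup has to be reattached on load. The restored behaviour in two lines:

from collections import defaultdict

stoi = defaultdict(lambda: 0, {'<unk>': 0, '<blank>': 1, 'hello': 2})
print(stoi['hello'], stoi['never-seen-before'])  # 2 0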
`None` for no limit.\n Return:\n `torchtext.vocab.Vocab`\n \"\"\"\n merged = sum([vocab.freqs for vocab in vocabs], Counter())\n return torchtext.vocab.Vocab(merged,\n specials=[UNK_WORD, PAD_WORD,\n BOS_WORD, EOS_WORD],\n max_size=vocab_size)","function_tokens":["def","merge_vocabs","(","vocabs",",","vocab_size","=","None",")",":","merged","=","sum","(","[","vocab",".","freqs","for","vocab","in","vocabs","]",",","Counter","(",")",")","return","torchtext",".","vocab",".","Vocab","(","merged",",","specials","=","[","UNK_WORD",",","PAD_WORD",",","BOS_WORD",",","EOS_WORD","]",",","max_size","=","vocab_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L86-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"get_num_features","parameters":"(data_type, corpus_file, side)","argument_list":"","return_statement":"","docstring":"Args:\n data_type (str): type of the source input.\n Options are [text|img|audio].\n corpus_file (str): file path to get the features.\n side (str): for source or for target.\n\n Returns:\n number of features on `side`.","docstring_summary":"Args:\n data_type (str): type of the source input.\n Options are [text|img|audio].\n corpus_file (str): file path to get the features.\n side (str): for source or for target.","docstring_tokens":["Args",":","data_type","(","str",")",":","type","of","the","source","input",".","Options","are","[","text|img|audio","]",".","corpus_file","(","str",")",":","file","path","to","get","the","features",".","side","(","str",")",":","for","source","or","for","target","."],"function":"def get_num_features(data_type, corpus_file, side):\n \"\"\"\n Args:\n data_type (str): type of the source input.\n Options are [text|img|audio].\n corpus_file (str): file path to get the features.\n side (str): for source or for target.\n\n Returns:\n number of features on `side`.\n \"\"\"\n assert side in [\"src\", \"tgt\"]\n\n if data_type == 'text':\n return TextDataset.get_num_features(corpus_file, side)\n elif data_type == 'img':\n return ImageDataset.get_num_features(corpus_file, side)\n elif data_type == 'audio':\n return AudioDataset.get_num_features(corpus_file, side)\n else:\n raise ValueError(\"Data type not implemented\")","function_tokens":["def","get_num_features","(","data_type",",","corpus_file",",","side",")",":","assert","side","in","[","\"src\"",",","\"tgt\"","]","if","data_type","==","'text'",":","return","TextDataset",".","get_num_features","(","corpus_file",",","side",")","elif","data_type","==","'img'",":","return","ImageDataset",".","get_num_features","(","corpus_file",",","side",")","elif","data_type","==","'audio'",":","return","AudioDataset",".","get_num_features","(","corpus_file",",","side",")","else",":","raise","ValueError","(","\"Data type not implemented\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L104-L124"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"make_features","parameters":"(batch, side, data_type='text')","argument_list":"","return_statement":"","docstring":"Args:\n batch (Tensor): a batch of source or target data.\n side 
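The merge itself is just Counter addition; torchtext then rebuilds one Vocab from the summed frequencies, capped at vocab_size. Shown with raw Counters to keep the example self-contained:

from collections import Counter

freqs = [Counter({'the': 5, 'cat': 2}), Counter({'the': 3, 'dog': 4})]
merged = sum(freqs, Counter())
print(merged.most_common())  # [('the', 8), ('dog', 4), ('cat', 2)]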
(str): for source or for target.\n data_type (str): type of the source input.\n Options are [text|img|audio].\n Returns:\n A sequence of src\/tgt tensors with optional feature tensors\n of size (len x batch).","docstring_summary":"Args:\n batch (Tensor): a batch of source or target data.\n side (str): for source or for target.\n data_type (str): type of the source input.\n Options are [text|img|audio].\n Returns:\n A sequence of src\/tgt tensors with optional feature tensors\n of size (len x batch).","docstring_tokens":["Args",":","batch","(","Tensor",")",":","a","batch","of","source","or","target","data",".","side","(","str",")",":","for","source","or","for","target",".","data_type","(","str",")",":","type","of","the","source","input",".","Options","are","[","text|img|audio","]",".","Returns",":","A","sequence","of","src","\/","tgt","tensors","with","optional","feature","tensors","of","size","(","len","x","batch",")","."],"function":"def make_features(batch, side, data_type='text'):\n \"\"\"\n Args:\n batch (Tensor): a batch of source or target data.\n side (str): for source or for target.\n data_type (str): type of the source input.\n Options are [text|img|audio].\n Returns:\n A sequence of src\/tgt tensors with optional feature tensors\n of size (len x batch).\n \"\"\"\n assert side in ['src', 'tgt']\n if isinstance(batch.__dict__[side], tuple):\n data = batch.__dict__[side][0]\n else:\n data = batch.__dict__[side]\n\n feat_start = side + \"_feat_\"\n keys = sorted([k for k in batch.__dict__ if feat_start in k])\n features = [batch.__dict__[k] for k in keys]\n levels = [data] + features\n\n if data_type == 'text':\n return torch.cat([level.unsqueeze(2) for level in levels], 2)\n else:\n return levels[0]","function_tokens":["def","make_features","(","batch",",","side",",","data_type","=","'text'",")",":","assert","side","in","[","'src'",",","'tgt'","]","if","isinstance","(","batch",".","__dict__","[","side","]",",","tuple",")",":","data","=","batch",".","__dict__","[","side","]","[","0","]","else",":","data","=","batch",".","__dict__","[","side","]","feat_start","=","side","+","\"_feat_\"","keys","=","sorted","(","[","k","for","k","in","batch",".","__dict__","if","feat_start","in","k","]",")","features","=","[","batch",".","__dict__","[","k","]","for","k","in","keys","]","levels","=","[","data","]","+","features","if","data_type","==","'text'",":","return","torch",".","cat","(","[","level",".","unsqueeze","(","2",")","for","level","in","levels","]",",","2",")","else",":","return","levels","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L127-L152"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"collect_features","parameters":"(fields, side=\"src\")","argument_list":"","return_statement":"return feats","docstring":"Collect features from Field object.","docstring_summary":"Collect features from Field object.","docstring_tokens":["Collect","features","from","Field","object","."],"function":"def collect_features(fields, side=\"src\"):\n \"\"\"\n Collect features from Field object.\n \"\"\"\n assert side in [\"src\", \"tgt\"]\n feats = []\n for j in count():\n key = side + \"_feat_\" + str(j)\n if key not in fields:\n break\n feats.append(key)\n return 
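For text, make_features stacks the token tensor with every src_feat_*/tgt_feat_* tensor along a new third axis, yielding (len, batch, 1 + n_feats); for img and audio it returns the data tensor unchanged. With stand-in tensors:

import torch

tokens = torch.randint(0, 100, (7, 4))   # (len, batch) token ids
feats = [torch.randint(0, 10, (7, 4))]   # one feature stream
levels = [tokens] + feats
combined = torch.cat([level.unsqueeze(2) for level in levels], 2)
print(combined.shape)  # torch.Size([7, 4, 2])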
feats","function_tokens":["def","collect_features","(","fields",",","side","=","\"src\"",")",":","assert","side","in","[","\"src\"",",","\"tgt\"","]","feats","=","[","]","for","j","in","count","(",")",":","key","=","side","+","\"_feat_\"","+","str","(","j",")","if","key","not","in","fields",":","break","feats",".","append","(","key",")","return","feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L155-L166"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"collect_feature_vocabs","parameters":"(fields, side)","argument_list":"","return_statement":"return feature_vocabs","docstring":"Collect feature Vocab objects from Field object.","docstring_summary":"Collect feature Vocab objects from Field object.","docstring_tokens":["Collect","feature","Vocab","objects","from","Field","object","."],"function":"def collect_feature_vocabs(fields, side):\n \"\"\"\n Collect feature Vocab objects from Field object.\n \"\"\"\n assert side in ['src', 'tgt']\n feature_vocabs = []\n for j in count():\n key = side + \"_feat_\" + str(j)\n if key not in fields:\n break\n feature_vocabs.append(fields[key].vocab)\n return feature_vocabs","function_tokens":["def","collect_feature_vocabs","(","fields",",","side",")",":","assert","side","in","[","'src'",",","'tgt'","]","feature_vocabs","=","[","]","for","j","in","count","(",")",":","key","=","side","+","\"_feat_\"","+","str","(","j",")","if","key","not","in","fields",":","break","feature_vocabs",".","append","(","fields","[","key","]",".","vocab",")","return","feature_vocabs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L169-L180"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"build_dataset","parameters":"(fields, data_type, src_data_iter=None, src_path=None,\n src_dir=None, tgt_data_iter=None, tgt_path=None,\n src_seq_length=0, tgt_seq_length=0,\n src_seq_length_trunc=0, tgt_seq_length_trunc=0,\n dynamic_dict=True, sample_rate=0,\n window_size=0, window_stride=0, window=None,\n normalize_audio=True, use_filter_pred=True,\n image_channel_size=3)","argument_list":"","return_statement":"return dataset","docstring":"Build src\/tgt examples iterator from corpus files, also extract\n number of features.","docstring_summary":"Build src\/tgt examples iterator from corpus files, also extract\n number of features.","docstring_tokens":["Build","src","\/","tgt","examples","iterator","from","corpus","files","also","extract","number","of","features","."],"function":"def build_dataset(fields, data_type, src_data_iter=None, src_path=None,\n src_dir=None, tgt_data_iter=None, tgt_path=None,\n src_seq_length=0, tgt_seq_length=0,\n src_seq_length_trunc=0, tgt_seq_length_trunc=0,\n dynamic_dict=True, sample_rate=0,\n window_size=0, window_stride=0, window=None,\n normalize_audio=True, use_filter_pred=True,\n image_channel_size=3):\n \"\"\"\n Build src\/tgt examples iterator from corpus files, also extract\n number of features.\n \"\"\"\n\n def _make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,\n src_seq_length_trunc, sample_rate,\n window_size, window_stride,\n window, 
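Both collect_* helpers probe for consecutively numbered keys and stop at the first gap, so feature fields must be numbered densely from 0. The scan in isolation:

from itertools import count

fields = {'src': None, 'src_feat_0': None, 'src_feat_1': None}
feats = []
for j in count():
    key = 'src_feat_' + str(j)
    if key not in fields:
        break  # first missing index ends the scan
    feats.append(key)
print(feats)  # ['src_feat_0', 'src_feat_1']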
normalize_audio,\n image_channel_size=3):\n \"\"\"\n Process the corpus into (example_dict iterator, num_feats) tuple\n on source side for different 'data_type'.\n \"\"\"\n\n if data_type == 'text':\n src_examples_iter, num_src_feats = \\\n TextDataset.make_text_examples_nfeats_tpl(\n src_data_iter, src_path, src_seq_length_trunc, \"src\")\n\n elif data_type == 'img':\n src_examples_iter, num_src_feats = \\\n ImageDataset.make_image_examples_nfeats_tpl(\n src_data_iter, src_path, src_dir, image_channel_size)\n\n elif data_type == 'audio':\n if src_data_iter:\n raise ValueError(\"\"\"Data iterator for AudioDataset isn't\n implemented\"\"\")\n\n if src_path is None:\n raise ValueError(\"AudioDataset requires a non None path\")\n src_examples_iter, num_src_feats = \\\n AudioDataset.make_audio_examples_nfeats_tpl(\n src_path, src_dir, sample_rate,\n window_size, window_stride, window,\n normalize_audio)\n\n return src_examples_iter, num_src_feats\n\n src_examples_iter, num_src_feats = \\\n _make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,\n src_seq_length_trunc, sample_rate,\n window_size, window_stride,\n window, normalize_audio,\n image_channel_size=image_channel_size)\n\n # For all data types, the tgt side corpus is in form of text.\n tgt_examples_iter, num_tgt_feats = \\\n TextDataset.make_text_examples_nfeats_tpl(\n tgt_data_iter, tgt_path, tgt_seq_length_trunc, \"tgt\")\n\n if data_type == 'text':\n dataset = TextDataset(fields, src_examples_iter, tgt_examples_iter,\n num_src_feats, num_tgt_feats,\n src_seq_length=src_seq_length,\n tgt_seq_length=tgt_seq_length,\n dynamic_dict=dynamic_dict,\n use_filter_pred=use_filter_pred)\n\n elif data_type == 'img':\n dataset = ImageDataset(fields, src_examples_iter, tgt_examples_iter,\n num_src_feats, num_tgt_feats,\n tgt_seq_length=tgt_seq_length,\n use_filter_pred=use_filter_pred,\n image_channel_size=image_channel_size)\n\n elif data_type == 'audio':\n dataset = AudioDataset(fields, src_examples_iter, tgt_examples_iter,\n num_src_feats, num_tgt_feats,\n tgt_seq_length=tgt_seq_length,\n sample_rate=sample_rate,\n window_size=window_size,\n window_stride=window_stride,\n window=window,\n normalize_audio=normalize_audio,\n use_filter_pred=use_filter_pred)\n\n return dataset","function_tokens":["def","build_dataset","(","fields",",","data_type",",","src_data_iter","=","None",",","src_path","=","None",",","src_dir","=","None",",","tgt_data_iter","=","None",",","tgt_path","=","None",",","src_seq_length","=","0",",","tgt_seq_length","=","0",",","src_seq_length_trunc","=","0",",","tgt_seq_length_trunc","=","0",",","dynamic_dict","=","True",",","sample_rate","=","0",",","window_size","=","0",",","window_stride","=","0",",","window","=","None",",","normalize_audio","=","True",",","use_filter_pred","=","True",",","image_channel_size","=","3",")",":","def","_make_examples_nfeats_tpl","(","data_type",",","src_data_iter",",","src_path",",","src_dir",",","src_seq_length_trunc",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","image_channel_size","=","3",")",":","\"\"\"\n Process the corpus into (example_dict iterator, num_feats) tuple\n on source side for different 'data_type'.\n 
\"\"\"","if","data_type","==","'text'",":","src_examples_iter",",","num_src_feats","=","TextDataset",".","make_text_examples_nfeats_tpl","(","src_data_iter",",","src_path",",","src_seq_length_trunc",",","\"src\"",")","elif","data_type","==","'img'",":","src_examples_iter",",","num_src_feats","=","ImageDataset",".","make_image_examples_nfeats_tpl","(","src_data_iter",",","src_path",",","src_dir",",","image_channel_size",")","elif","data_type","==","'audio'",":","if","src_data_iter",":","raise","ValueError","(","\"\"\"Data iterator for AudioDataset isn't\n implemented\"\"\"",")","if","src_path","is","None",":","raise","ValueError","(","\"AudioDataset requires a non None path\"",")","src_examples_iter",",","num_src_feats","=","AudioDataset",".","make_audio_examples_nfeats_tpl","(","src_path",",","src_dir",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",")","return","src_examples_iter",",","num_src_feats","src_examples_iter",",","num_src_feats","=","_make_examples_nfeats_tpl","(","data_type",",","src_data_iter",",","src_path",",","src_dir",",","src_seq_length_trunc",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","image_channel_size","=","image_channel_size",")","# For all data types, the tgt side corpus is in form of text.","tgt_examples_iter",",","num_tgt_feats","=","TextDataset",".","make_text_examples_nfeats_tpl","(","tgt_data_iter",",","tgt_path",",","tgt_seq_length_trunc",",","\"tgt\"",")","if","data_type","==","'text'",":","dataset","=","TextDataset","(","fields",",","src_examples_iter",",","tgt_examples_iter",",","num_src_feats",",","num_tgt_feats",",","src_seq_length","=","src_seq_length",",","tgt_seq_length","=","tgt_seq_length",",","dynamic_dict","=","dynamic_dict",",","use_filter_pred","=","use_filter_pred",")","elif","data_type","==","'img'",":","dataset","=","ImageDataset","(","fields",",","src_examples_iter",",","tgt_examples_iter",",","num_src_feats",",","num_tgt_feats",",","tgt_seq_length","=","tgt_seq_length",",","use_filter_pred","=","use_filter_pred",",","image_channel_size","=","image_channel_size",")","elif","data_type","==","'audio'",":","dataset","=","AudioDataset","(","fields",",","src_examples_iter",",","tgt_examples_iter",",","num_src_feats",",","num_tgt_feats",",","tgt_seq_length","=","tgt_seq_length",",","sample_rate","=","sample_rate",",","window_size","=","window_size",",","window_stride","=","window_stride",",","window","=","window",",","normalize_audio","=","normalize_audio",",","use_filter_pred","=","use_filter_pred",")","return","dataset"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L183-L269"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"build_vocab","parameters":"(train_dataset_files, fields, data_type, share_vocab,\n src_vocab_path, src_vocab_size, src_words_min_frequency,\n tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency)","argument_list":"","return_statement":"return fields","docstring":"Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict): fields to build vocab for.\n data_type: \"text\", \"img\" or \"audio\"?\n share_vocab(bool): share source and target vocabulary?\n src_vocab_path(string): Path to src vocabulary file.\n src_vocab_size(int): size of the source vocabulary.\n 
src_words_min_frequency(int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path(string): Path to tgt vocabulary file.\n tgt_vocab_size(int): size of the target vocabulary.\n tgt_words_min_frequency(int): the minimum frequency needed to\n include a target word in the vocabulary.\n\n Returns:\n Dict of Fields","docstring_summary":"Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict): fields to build vocab for.\n data_type: \"text\", \"img\" or \"audio\"?\n share_vocab(bool): share source and target vocabulary?\n src_vocab_path(string): Path to src vocabulary file.\n src_vocab_size(int): size of the source vocabulary.\n src_words_min_frequency(int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path(string): Path to tgt vocabulary file.\n tgt_vocab_size(int): size of the target vocabulary.\n tgt_words_min_frequency(int): the minimum frequency needed to\n include a target word in the vocabulary.","docstring_tokens":["Args",":","train_dataset_files",":","a","list","of","train","dataset","pt","file",".","fields","(","dict",")",":","fields","to","build","vocab","for",".","data_type",":","text","img","or","audio","?","share_vocab","(","bool",")",":","share","source","and","target","vocabulary?","src_vocab_path","(","string",")",":","Path","to","src","vocabulary","file",".","src_vocab_size","(","int",")",":","size","of","the","source","vocabulary",".","src_words_min_frequency","(","int",")",":","the","minimum","frequency","needed","to","include","a","source","word","in","the","vocabulary",".","tgt_vocab_path","(","string",")",":","Path","to","tgt","vocabulary","file",".","tgt_vocab_size","(","int",")",":","size","of","the","target","vocabulary",".","tgt_words_min_frequency","(","int",")",":","the","minimum","frequency","needed","to","include","a","target","word","in","the","vocabulary","."],"function":"def build_vocab(train_dataset_files, fields, data_type, share_vocab,\n src_vocab_path, src_vocab_size, src_words_min_frequency,\n tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency):\n \"\"\"\n Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict): fields to build vocab for.\n data_type: \"text\", \"img\" or \"audio\"?\n share_vocab(bool): share source and target vocabulary?\n src_vocab_path(string): Path to src vocabulary file.\n src_vocab_size(int): size of the source vocabulary.\n src_words_min_frequency(int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path(string): Path to tgt vocabulary file.\n tgt_vocab_size(int): size of the target vocabulary.\n tgt_words_min_frequency(int): the minimum frequency needed to\n include a target word in the vocabulary.\n\n Returns:\n Dict of Fields\n \"\"\"\n counter = {}\n\n # Prop src from field to get lower memory using when training with image\n if data_type == 'img':\n fields.pop(\"src\")\n\n for k in fields:\n counter[k] = Counter()\n\n # Load vocabulary\n src_vocab = load_vocabulary(src_vocab_path, tag=\"source\")\n tgt_vocab = load_vocabulary(tgt_vocab_path, tag=\"target\")\n\n for index, path in enumerate(train_dataset_files):\n dataset = torch.load(path)\n logger.info(\" * reloading %s.\" % path)\n for ex in dataset.examples:\n for k in fields:\n val = getattr(ex, k, None)\n if val is not None and not fields[k].sequential:\n val = [val]\n elif k == 'src' and src_vocab:\n val = [item for item in val if item in src_vocab]\n elif k == 'tgt' and tgt_vocab:\n val = [item for item 
in val if item in tgt_vocab]\n counter[k].update(val)\n\n # Drop the none-using from memory but keep the last\n if (index < len(train_dataset_files) - 1):\n dataset.examples = None\n gc.collect()\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n _build_field_vocab(fields[\"tgt\"], counter[\"tgt\"],\n max_size=tgt_vocab_size,\n min_freq=tgt_words_min_frequency)\n logger.info(\" * tgt vocab size: %d.\" % len(fields[\"tgt\"].vocab))\n\n # All datasets have same num of n_tgt_features,\n # getting the last one is OK.\n for j in range(dataset.n_tgt_feats):\n key = \"tgt_feat_\" + str(j)\n _build_field_vocab(fields[key], counter[key])\n logger.info(\" * %s vocab size: %d.\" % (key,\n len(fields[key].vocab)))\n\n if data_type == 'text':\n _build_field_vocab(fields[\"src\"], counter[\"src\"],\n max_size=src_vocab_size,\n min_freq=src_words_min_frequency)\n logger.info(\" * src vocab size: %d.\" % len(fields[\"src\"].vocab))\n\n # All datasets have same num of n_src_features,\n # getting the last one is OK.\n for j in range(dataset.n_src_feats):\n key = \"src_feat_\" + str(j)\n _build_field_vocab(fields[key], counter[key])\n logger.info(\" * %s vocab size: %d.\" %\n (key, len(fields[key].vocab)))\n\n # Merge the input and output vocabularies.\n if share_vocab:\n # `tgt_vocab_size` is ignored when sharing vocabularies\n logger.info(\" * merging src and tgt vocab...\")\n merged_vocab = merge_vocabs(\n [fields[\"src\"].vocab, fields[\"tgt\"].vocab],\n vocab_size=src_vocab_size)\n fields[\"src\"].vocab = merged_vocab\n fields[\"tgt\"].vocab = merged_vocab\n\n return fields","function_tokens":["def","build_vocab","(","train_dataset_files",",","fields",",","data_type",",","share_vocab",",","src_vocab_path",",","src_vocab_size",",","src_words_min_frequency",",","tgt_vocab_path",",","tgt_vocab_size",",","tgt_words_min_frequency",")",":","counter","=","{","}","# Prop src from field to get lower memory using when training with image","if","data_type","==","'img'",":","fields",".","pop","(","\"src\"",")","for","k","in","fields",":","counter","[","k","]","=","Counter","(",")","# Load vocabulary","src_vocab","=","load_vocabulary","(","src_vocab_path",",","tag","=","\"source\"",")","tgt_vocab","=","load_vocabulary","(","tgt_vocab_path",",","tag","=","\"target\"",")","for","index",",","path","in","enumerate","(","train_dataset_files",")",":","dataset","=","torch",".","load","(","path",")","logger",".","info","(","\" * reloading %s.\"","%","path",")","for","ex","in","dataset",".","examples",":","for","k","in","fields",":","val","=","getattr","(","ex",",","k",",","None",")","if","val","is","not","None","and","not","fields","[","k","]",".","sequential",":","val","=","[","val","]","elif","k","==","'src'","and","src_vocab",":","val","=","[","item","for","item","in","val","if","item","in","src_vocab","]","elif","k","==","'tgt'","and","tgt_vocab",":","val","=","[","item","for","item","in","val","if","item","in","tgt_vocab","]","counter","[","k","]",".","update","(","val",")","# Drop the none-using from memory but keep the last","if","(","index","<","len","(","train_dataset_files",")","-","1",")",":","dataset",".","examples","=","None","gc",".","collect","(",")","del","dataset",".","examples","gc",".","collect","(",")","del","dataset","gc",".","collect","(",")","_build_field_vocab","(","fields","[","\"tgt\"","]",",","counter","[","\"tgt\"","]",",","max_size","=","tgt_vocab_size",",","min_freq","=","tgt_words_min_frequency",")","logger",".","info","(","\" * tgt vocab size: 
%d.\"","%","len","(","fields","[","\"tgt\"","]",".","vocab",")",")","# All datasets have same num of n_tgt_features,","# getting the last one is OK.","for","j","in","range","(","dataset",".","n_tgt_feats",")",":","key","=","\"tgt_feat_\"","+","str","(","j",")","_build_field_vocab","(","fields","[","key","]",",","counter","[","key","]",")","logger",".","info","(","\" * %s vocab size: %d.\"","%","(","key",",","len","(","fields","[","key","]",".","vocab",")",")",")","if","data_type","==","'text'",":","_build_field_vocab","(","fields","[","\"src\"","]",",","counter","[","\"src\"","]",",","max_size","=","src_vocab_size",",","min_freq","=","src_words_min_frequency",")","logger",".","info","(","\" * src vocab size: %d.\"","%","len","(","fields","[","\"src\"","]",".","vocab",")",")","# All datasets have same num of n_src_features,","# getting the last one is OK.","for","j","in","range","(","dataset",".","n_src_feats",")",":","key","=","\"src_feat_\"","+","str","(","j",")","_build_field_vocab","(","fields","[","key","]",",","counter","[","key","]",")","logger",".","info","(","\" * %s vocab size: %d.\"","%","(","key",",","len","(","fields","[","key","]",".","vocab",")",")",")","# Merge the input and output vocabularies.","if","share_vocab",":","# `tgt_vocab_size` is ignored when sharing vocabularies","logger",".","info","(","\" * merging src and tgt vocab...\"",")","merged_vocab","=","merge_vocabs","(","[","fields","[","\"src\"","]",".","vocab",",","fields","[","\"tgt\"","]",".","vocab","]",",","vocab_size","=","src_vocab_size",")","fields","[","\"src\"","]",".","vocab","=","merged_vocab","fields","[","\"tgt\"","]",".","vocab","=","merged_vocab","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L280-L374"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"load_vocabulary","parameters":"(vocabulary_path, tag=\"\")","argument_list":"","return_statement":"return vocabulary","docstring":"Loads a vocabulary from the given path.\n :param vocabulary_path: path to load vocabulary from\n :param tag: tag for vocabulary (only used for logging)\n :return: vocabulary or None if path is null","docstring_summary":"Loads a vocabulary from the given path.\n :param vocabulary_path: path to load vocabulary from\n :param tag: tag for vocabulary (only used for logging)\n :return: vocabulary or None if path is null","docstring_tokens":["Loads","a","vocabulary","from","the","given","path",".",":","param","vocabulary_path",":","path","to","load","vocabulary","from",":","param","tag",":","tag","for","vocabulary","(","only","used","for","logging",")",":","return",":","vocabulary","or","None","if","path","is","null"],"function":"def load_vocabulary(vocabulary_path, tag=\"\"):\n \"\"\"\n Loads a vocabulary from the given path.\n :param vocabulary_path: path to load vocabulary from\n :param tag: tag for vocabulary (only used for logging)\n :return: vocabulary or None if path is null\n \"\"\"\n vocabulary = None\n if vocabulary_path:\n vocabulary = set([])\n logger.info(\"Loading {} vocabulary from {}\".format(tag,\n vocabulary_path))\n\n if not os.path.exists(vocabulary_path):\n raise RuntimeError(\n \"{} vocabulary not found at {}!\".format(tag, vocabulary_path))\n else:\n with open(vocabulary_path) as f:\n for line in f:\n if len(line.strip()) == 0:\n continue\n word = 
line.strip().split()[0]\n vocabulary.add(word)\n return vocabulary","function_tokens":["def","load_vocabulary","(","vocabulary_path",",","tag","=","\"\"",")",":","vocabulary","=","None","if","vocabulary_path",":","vocabulary","=","set","(","[","]",")","logger",".","info","(","\"Loading {} vocabulary from {}\"",".","format","(","tag",",","vocabulary_path",")",")","if","not","os",".","path",".","exists","(","vocabulary_path",")",":","raise","RuntimeError","(","\"{} vocabulary not found at {}!\"",".","format","(","tag",",","vocabulary_path",")",")","else",":","with","open","(","vocabulary_path",")","as","f",":","for","line","in","f",":","if","len","(","line",".","strip","(",")",")","==","0",":","continue","word","=","line",".","strip","(",")",".","split","(",")","[","0","]","vocabulary",".","add","(","word",")","return","vocabulary"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L377-L400"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"build_dataset_iter","parameters":"(datasets, fields, opt, is_train=True)","argument_list":"","return_statement":"return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,\n device, is_train)","docstring":"This returns user-defined train\/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.","docstring_summary":"This returns user-defined train\/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.","docstring_tokens":["This","returns","user","-","defined","train","\/","validate","data","iterator","for","the","trainer","to","iterate","over",".","We","implement","simple","ordered","iterator","strategy","here","but","more","sophisticated","strategy","like","curriculum","learning","is","ok","too","."],"function":"def build_dataset_iter(datasets, fields, opt, is_train=True):\n \"\"\"\n This returns user-defined train\/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.\n \"\"\"\n batch_size = opt.batch_size if is_train else opt.valid_batch_size\n if is_train and opt.batch_type == \"tokens\":\n def batch_size_fn(new, count, sofar):\n \"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src\/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"\n # Maintains the longest src and tgt length in the current batch\n global max_src_in_batch, max_tgt_in_batch\n # Reset current longest length at a new batch (count=1)\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n # Src: w1 ... wN \n max_src_in_batch = max(max_src_in_batch, len(new.src) + 2)\n # Tgt: w1 ... 
wN \n max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt) + 1)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n else:\n batch_size_fn = None\n\n if opt.gpu_ranks:\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,\n device, is_train)","function_tokens":["def","build_dataset_iter","(","datasets",",","fields",",","opt",",","is_train","=","True",")",":","batch_size","=","opt",".","batch_size","if","is_train","else","opt",".","valid_batch_size","if","is_train","and","opt",".","batch_type","==","\"tokens\"",":","def","batch_size_fn","(","new",",","count",",","sofar",")",":","\"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src\/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"","# Maintains the longest src and tgt length in the current batch","global","max_src_in_batch",",","max_tgt_in_batch","# Reset current longest length at a new batch (count=1)","if","count","==","1",":","max_src_in_batch","=","0","max_tgt_in_batch","=","0","# Src: w1 ... wN ","max_src_in_batch","=","max","(","max_src_in_batch",",","len","(","new",".","src",")","+","2",")","# Tgt: w1 ... wN ","max_tgt_in_batch","=","max","(","max_tgt_in_batch",",","len","(","new",".","tgt",")","+","1",")","src_elements","=","count","*","max_src_in_batch","tgt_elements","=","count","*","max_tgt_in_batch","return","max","(","src_elements",",","tgt_elements",")","else",":","batch_size_fn","=","None","if","opt",".","gpu_ranks",":","device","=","\"cuda\"","else",":","device","=","\"cpu\"","return","DatasetLazyIter","(","datasets",",","fields",",","batch_size",",","batch_size_fn",",","device",",","is_train",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L491-L527"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"lazily_load_dataset","parameters":"(corpus_type, opt)","argument_list":"","return_statement":"","docstring":"Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.","docstring_summary":"Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.","docstring_tokens":["Dataset","generator",".","Don","t","do","extra","stuff","here","like","printing","because","they","will","be","postponed","to","the","first","loading","time","."],"function":"def lazily_load_dataset(corpus_type, opt):\n \"\"\"\n Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.\n \"\"\"\n assert corpus_type in [\"train\", \"valid\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(opt.data + '.' 
+ corpus_type + '.[0-9]*.pt'))\n if pts:\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = opt.data + '.' + corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)","function_tokens":["def","lazily_load_dataset","(","corpus_type",",","opt",")",":","assert","corpus_type","in","[","\"train\"",",","\"valid\"","]","def","_lazy_dataset_loader","(","pt_file",",","corpus_type",")",":","dataset","=","torch",".","load","(","pt_file",")","logger",".","info","(","'Loading %s dataset from %s, number of examples: %d'","%","(","corpus_type",",","pt_file",",","len","(","dataset",")",")",")","return","dataset","# Sort the glob output by file name (by increasing indexes).","pts","=","sorted","(","glob",".","glob","(","opt",".","data","+","'.'","+","corpus_type","+","'.[0-9]*.pt'",")",")","if","pts",":","for","pt","in","pts",":","yield","_lazy_dataset_loader","(","pt",",","corpus_type",")","else",":","# Only one inputters.*Dataset, simple!","pt","=","opt",".","data","+","'.'","+","corpus_type","+","'.pt'","yield","_lazy_dataset_loader","(","pt",",","corpus_type",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L530-L556"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py","language":"python","identifier":"OrderedIterator.create_batches","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Create batches","docstring_summary":"Create batches","docstring_tokens":["Create","batches"],"function":"def create_batches(self):\n \"\"\" Create batches \"\"\"\n if self.train:\n def _pool(data, random_shuffler):\n for p in torchtext.data.batch(data, self.batch_size * 100):\n p_batch = torchtext.data.batch(\n sorted(p, key=self.sort_key),\n self.batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n\n self.batches = _pool(self.data(), self.random_shuffler)\n else:\n self.batches = []\n for b in torchtext.data.batch(self.data(), self.batch_size,\n self.batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))","function_tokens":["def","create_batches","(","self",")",":","if","self",".","train",":","def","_pool","(","data",",","random_shuffler",")",":","for","p","in","torchtext",".","data",".","batch","(","data",",","self",".","batch_size","*","100",")",":","p_batch","=","torchtext",".","data",".","batch","(","sorted","(","p",",","key","=","self",".","sort_key",")",",","self",".","batch_size",",","self",".","batch_size_fn",")","for","b","in","random_shuffler","(","list","(","p_batch",")",")",":","yield","b","self",".","batches","=","_pool","(","self",".","data","(",")",",","self",".","random_shuffler",")","else",":","self",".","batches","=","[","]","for","b","in","torchtext",".","data",".","batch","(","self",".","data","(",")",",","self",".","batch_size",",","self",".","batch_size_fn",")",":","self",".","batches",".","append","(","sorted","(","b",",","key","=","self",".","sort_key",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/inputters\/inputter.py#L406-L422"} 
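The `build_dataset_iter` record above implements token-level batching: a batch is closed as soon as `count * longest-sequence-so-far` (i.e. the padded token count, including the BOS/EOS bookkeeping of `+2` on src and `+1` on tgt) would exceed the token budget. The sketch below re-derives that rule outside torchtext on toy data; `Example` and `token_batches` are illustrative names for this sketch, not identifiers from the repository.

```python
# Minimal standalone re-derivation of the token-batching rule from the
# build_dataset_iter record above. A batch is emitted once
# count * max-sequence-length (the padded token count) would exceed the
# budget. `Example` and `token_batches` are illustrative names only.

class Example:
    def __init__(self, src, tgt):
        self.src = src  # list of source tokens
        self.tgt = tgt  # list of target tokens

def token_batches(examples, budget):
    """Yield batches whose padded src/tgt token counts stay <= budget."""
    batch, max_src, max_tgt = [], 0, 0
    for ex in examples:
        # +2 on src mirrors the <bos>/<eos> markers, +1 on tgt mirrors
        # the single marker counted in the original batch_size_fn.
        new_src = max(max_src, len(ex.src) + 2)
        new_tgt = max(max_tgt, len(ex.tgt) + 1)
        n = len(batch) + 1
        if batch and max(n * new_src, n * new_tgt) > budget:
            yield batch
            batch = []
            new_src, new_tgt = len(ex.src) + 2, len(ex.tgt) + 1
        batch.append(ex)
        max_src, max_tgt = new_src, new_tgt
    if batch:
        yield batch

data = [Example(["w"] * n, ["w"] * (n // 2)) for n in (5, 7, 30, 4, 4)]
for b in token_batches(data, budget=64):
    padded = len(b) * max(len(e.src) + 2 for e in b)
    print(len(b), "examples ->", padded, "padded src tokens")
```

Tracking only the running maxima (rather than re-scanning the batch) is what lets the original version run as a stateful `batch_size_fn` callback inside torchtext, at the cost of the module-level `max_src_in_batch`/`max_tgt_in_batch` globals.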
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.start","parameters":"(self, config_file)","argument_list":"","return_statement":"","docstring":"Read the config file and pre-\/load the models","docstring_summary":"Read the config file and pre-\/load the models","docstring_tokens":["Read","the","config","file","and","pre","-","\/","load","the","models"],"function":"def start(self, config_file):\n \"\"\"Read the config file and pre-\/load the models\n \"\"\"\n self.config_file = config_file\n with open(self.config_file) as f:\n self.confs = json.load(f)\n\n self.models_root = self.confs.get('models_root', '.\/available_models')\n for i, conf in enumerate(self.confs[\"models\"]):\n if \"models\" not in conf:\n if \"model\" in conf:\n # backwards compatibility for confs\n conf[\"models\"] = [conf[\"model\"]]\n else:\n raise ValueError(\"\"\"Incorrect config file: missing 'models'\n parameter for model #%d\"\"\" % i)\n kwargs = {'timeout': conf.get('timeout', None),\n 'load': conf.get('load', None),\n 'tokenizer_opt': conf.get('tokenizer', None),\n 'on_timeout': conf.get('on_timeout', None),\n 'model_root': conf.get('model_root', self.models_root)\n }\n kwargs = {k: v for (k, v) in kwargs.items() if v is not None}\n model_id = conf.get(\"id\", None)\n opt = conf[\"opt\"]\n opt[\"models\"] = conf[\"models\"]\n self.preload_model(opt, model_id=model_id, **kwargs)","function_tokens":["def","start","(","self",",","config_file",")",":","self",".","config_file","=","config_file","with","open","(","self",".","config_file",")","as","f",":","self",".","confs","=","json",".","load","(","f",")","self",".","models_root","=","self",".","confs",".","get","(","'models_root'",",","'.\/available_models'",")","for","i",",","conf","in","enumerate","(","self",".","confs","[","\"models\"","]",")",":","if","\"models\"","not","in","conf",":","if","\"model\"","in","conf",":","# backwards compatibility for confs","conf","[","\"models\"","]","=","[","conf","[","\"model\"","]","]","else",":","raise","ValueError","(","\"\"\"Incorrect config file: missing 'models'\n parameter for model #%d\"\"\"","%","i",")","kwargs","=","{","'timeout'",":","conf",".","get","(","'timeout'",",","None",")",",","'load'",":","conf",".","get","(","'load'",",","None",")",",","'tokenizer_opt'",":","conf",".","get","(","'tokenizer'",",","None",")",",","'on_timeout'",":","conf",".","get","(","'on_timeout'",",","None",")",",","'model_root'",":","conf",".","get","(","'model_root'",",","self",".","models_root",")","}","kwargs","=","{","k",":","v","for","(","k",",","v",")","in","kwargs",".","items","(",")","if","v","is","not","None","}","model_id","=","conf",".","get","(","\"id\"",",","None",")","opt","=","conf","[","\"opt\"","]","opt","[","\"models\"","]","=","conf","[","\"models\"","]","self",".","preload_model","(","opt",",","model_id","=","model_id",",","*","*","kwargs",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L54-L80"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.clone_model","parameters":"(self, model_id, opt, timeout=-1)","argument_list":"","return_statement":"","docstring":"Clone a model 
`model_id`.\n Different options may be passed. If `opt` is None, it will use the\n same set of options","docstring_summary":"Clone a model `model_id`.\n Different options may be passed. If `opt` is None, it will use the\n same set of options","docstring_tokens":["Clone","a","model","model_id",".","Different","options","may","be","passed",".","If","opt","is","None","it","will","use","the","same","set","of","options"],"function":"def clone_model(self, model_id, opt, timeout=-1):\n \"\"\"Clone a model `model_id`.\n Different options may be passed. If `opt` is None, it will use the\n same set of options\n \"\"\"\n if model_id in self.models:\n if opt is None:\n opt = self.models[model_id].user_opt\n opt[\"models\"] = self.models[model_id].opt.models\n return self.load_model(opt, timeout)\n else:\n raise ServerModelError(\"No such model '%s'\" % str(model_id))","function_tokens":["def","clone_model","(","self",",","model_id",",","opt",",","timeout","=","-","1",")",":","if","model_id","in","self",".","models",":","if","opt","is","None",":","opt","=","self",".","models","[","model_id","]",".","user_opt","opt","[","\"models\"","]","=","self",".","models","[","model_id","]",".","opt",".","models","return","self",".","load_model","(","opt",",","timeout",")","else",":","raise","ServerModelError","(","\"No such model '%s'\"","%","str","(","model_id",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L82-L93"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.load_model","parameters":"(self, opt, model_id=None, **model_kwargs)","argument_list":"","return_statement":"return model_id, load_time","docstring":"Loading a model given a set of options","docstring_summary":"Loading a model given a set of options","docstring_tokens":["Loading","a","model","given","a","set","of","options"],"function":"def load_model(self, opt, model_id=None, **model_kwargs):\n \"\"\"Loading a model given a set of options\n \"\"\"\n model_id = self.preload_model(opt, model_id=model_id, **model_kwargs)\n load_time = self.models[model_id].load_time\n\n return model_id, load_time","function_tokens":["def","load_model","(","self",",","opt",",","model_id","=","None",",","*","*","model_kwargs",")",":","model_id","=","self",".","preload_model","(","opt",",","model_id","=","model_id",",","*","*","model_kwargs",")","load_time","=","self",".","models","[","model_id","]",".","load_time","return","model_id",",","load_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L95-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.preload_model","parameters":"(self, opt, model_id=None, **model_kwargs)","argument_list":"","return_statement":"return model_id","docstring":"Preloading the model: updating internal datastructure\n It will effectively load the model if `load` is set","docstring_summary":"Preloading the model: updating internal datastructure\n It will effectively load the model if `load` is 
set","docstring_tokens":["Preloading","the","model",":","updating","internal","datastructure","It","will","effectively","load","the","model","if","load","is","set"],"function":"def preload_model(self, opt, model_id=None, **model_kwargs):\n \"\"\"Preloading the model: updating internal datastructure\n It will effectively load the model if `load` is set\n \"\"\"\n if model_id is not None:\n if model_id in self.models.keys():\n raise ValueError(\"Model ID %d already exists\" % model_id)\n else:\n model_id = self.next_id\n while model_id in self.models.keys():\n model_id += 1\n self.next_id = model_id + 1\n print(\"Pre-loading model %d\" % model_id)\n model = ServerModel(opt, model_id, **model_kwargs)\n self.models[model_id] = model\n\n return model_id","function_tokens":["def","preload_model","(","self",",","opt",",","model_id","=","None",",","*","*","model_kwargs",")",":","if","model_id","is","not","None",":","if","model_id","in","self",".","models",".","keys","(",")",":","raise","ValueError","(","\"Model ID %d already exists\"","%","model_id",")","else",":","model_id","=","self",".","next_id","while","model_id","in","self",".","models",".","keys","(",")",":","model_id","+=","1","self",".","next_id","=","model_id","+","1","print","(","\"Pre-loading model %d\"","%","model_id",")","model","=","ServerModel","(","opt",",","model_id",",","*","*","model_kwargs",")","self",".","models","[","model_id","]","=","model","return","model_id"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L103-L119"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.run","parameters":"(self, inputs)","argument_list":"","return_statement":"","docstring":"Translate `inputs`\n We keep the same format as the Lua version i.e.\n [{\"id\": model_id, \"src\": \"sequence to translate\"},{ ...}]\n\n We use inputs[0][\"id\"] as the model id","docstring_summary":"Translate `inputs`\n We keep the same format as the Lua version i.e.\n [{\"id\": model_id, \"src\": \"sequence to translate\"},{ ...}]","docstring_tokens":["Translate","inputs","We","keep","the","same","format","as","the","Lua","version","i",".","e",".","[","{","id",":","model_id","src",":","sequence","to","translate","}","{","...","}","]"],"function":"def run(self, inputs):\n \"\"\"Translate `inputs`\n We keep the same format as the Lua version i.e.\n [{\"id\": model_id, \"src\": \"sequence to translate\"},{ ...}]\n\n We use inputs[0][\"id\"] as the model id\n \"\"\"\n model_id = inputs[0].get(\"id\", 0)\n if model_id in self.models and self.models[model_id] is not None:\n return self.models[model_id].run(inputs)\n else:\n print(\"Error No such model '%s'\" % str(model_id))\n raise ServerModelError(\"No such model '%s'\" % str(model_id))","function_tokens":["def","run","(","self",",","inputs",")",":","model_id","=","inputs","[","0","]",".","get","(","\"id\"",",","0",")","if","model_id","in","self",".","models","and","self",".","models","[","model_id","]","is","not","None",":","return","self",".","models","[","model_id","]",".","run","(","inputs",")","else",":","print","(","\"Error No such model '%s'\"","%","str","(","model_id",")",")","raise","ServerModelError","(","\"No such model 
'%s'\"","%","str","(","model_id",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L121-L133"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.unload_model","parameters":"(self, model_id)","argument_list":"","return_statement":"","docstring":"Manually unload a model.\n It will free the memory and cancel the timer","docstring_summary":"Manually unload a model.\n It will free the memory and cancel the timer","docstring_tokens":["Manually","unload","a","model",".","It","will","free","the","memory","and","cancel","the","timer"],"function":"def unload_model(self, model_id):\n \"\"\"Manually unload a model.\n It will free the memory and cancel the timer\n \"\"\"\n if model_id in self.models and self.models[model_id] is not None:\n self.models[model_id].unload()\n else:\n raise ServerModelError(\"No such model '%s'\" % str(model_id))","function_tokens":["def","unload_model","(","self",",","model_id",")",":","if","model_id","in","self",".","models","and","self",".","models","[","model_id","]","is","not","None",":","self",".","models","[","model_id","]",".","unload","(",")","else",":","raise","ServerModelError","(","\"No such model '%s'\"","%","str","(","model_id",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L135-L142"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.list_models","parameters":"(self)","argument_list":"","return_statement":"return models","docstring":"Return the list of available models","docstring_summary":"Return the list of available models","docstring_tokens":["Return","the","list","of","available","models"],"function":"def list_models(self):\n \"\"\"Return the list of available models\n \"\"\"\n models = []\n for _, model in self.models.items():\n models += [model.to_dict()]\n return models","function_tokens":["def","list_models","(","self",")",":","models","=","[","]","for","_",",","model","in","self",".","models",".","items","(",")",":","models","+=","[","model",".","to_dict","(",")","]","return","models"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L144-L150"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.__init__","parameters":"(self, opt, model_id, tokenizer_opt=None, load=False,\n timeout=-1, on_timeout=\"to_cpu\", model_root=\".\/\")","argument_list":"","return_statement":"","docstring":"Args:\n opt: (dict) options for the Translator\n model_id: (int) model id\n tokenizer_opt: (dict) options for the tokenizer or None\n load: (bool) whether to load the model during __init__\n timeout: (int) seconds before running `do_timeout`\n Negative values means no timeout\n on_timeout: (str) in [\"to_cpu\", \"unload\"] set what to do on\n timeout (see function `do_timeout`)\n model_root: (str) path to the model directory\n it must 
contain the model and tokenizer file","docstring_summary":"Args:\n            opt: (dict) options for the Translator\n            model_id: (int) model id\n            tokenizer_opt: (dict) options for the tokenizer or None\n            load: (bool) whether to load the model during __init__\n            timeout: (int) seconds before running `do_timeout`\n                     Negative values means no timeout\n            on_timeout: (str) in [\"to_cpu\", \"unload\"] set what to do on\n                timeout (see function `do_timeout`)\n            model_root: (str) path to the model directory\n                it must contain the model and tokenizer file","docstring_tokens":["Args",":","opt",":","(","dict",")","options","for","the","Translator","model_id",":","(","int",")","model","id","tokenizer_opt",":","(","dict",")","options","for","the","tokenizer","or","None","load",":","(","bool",")","whether","to","load","the","model","during","__init__","timeout",":","(","int",")","seconds","before","running","do_timeout","Negative","values","means","no","timeout","on_timeout",":","(","str",")","in","[","to_cpu","unload","]","set","what","to","do","on","timeout","(","see","function","do_timeout",")","model_root",":","(","str",")","path","to","the","model","directory","it","must","contain","the","model","and","tokenizer","file"],"function":"def __init__(self, opt, model_id, tokenizer_opt=None, load=False,\n                 timeout=-1, on_timeout=\"to_cpu\", model_root=\".\/\"):\n        \"\"\"\n        Args:\n            opt: (dict) options for the Translator\n            model_id: (int) model id\n            tokenizer_opt: (dict) options for the tokenizer or None\n            load: (bool) whether to load the model during __init__\n            timeout: (int) seconds before running `do_timeout`\n                Negative values means no timeout\n            on_timeout: (str) in [\"to_cpu\", \"unload\"] set what to do on\n                timeout (see function `do_timeout`)\n            model_root: (str) path to the model directory\n                it must contain the model and tokenizer file\n\n        \"\"\"\n        self.model_root = model_root\n        self.opt = self.parse_opt(opt)\n        if self.opt.n_best > 1:\n            raise ValueError(\"Values of n_best > 1 are not supported\")\n\n        self.model_id = model_id\n        self.tokenizer_opt = tokenizer_opt\n        self.timeout = timeout\n        self.on_timeout = on_timeout\n\n        self.unload_timer = None\n        self.user_opt = opt\n        self.tokenizer = None\n        self.logger = init_logger(self.opt.log_file)\n        self.loading_lock = threading.Event()\n        self.loading_lock.set()\n\n        if load:\n            self.load()","function_tokens":["def","__init__","(","self",",","opt",",","model_id",",","tokenizer_opt","=","None",",","load","=","False",",","timeout","=","-","1",",","on_timeout","=","\"to_cpu\"",",","model_root","=","\".\/\"",")",":","self",".","model_root","=","model_root","self",".","opt","=","self",".","parse_opt","(","opt",")","if","self",".","opt",".","n_best",">","1",":","raise","ValueError","(","\"Values of n_best > 1 are not supported\"",")","self",".","model_id","=","model_id","self",".","tokenizer_opt","=","tokenizer_opt","self",".","timeout","=","timeout","self",".","on_timeout","=","on_timeout","self",".","unload_timer","=","None","self",".","user_opt","=","opt","self",".","tokenizer","=","None","self",".","logger","=","init_logger","(","self",".","opt",".","log_file",")","self",".","loading_lock","=","threading",".","Event","(",")","self",".","loading_lock",".","set","(",")","if","load",":","self",".","load","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L154-L188"}
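The `TranslationServer.start` and `preload_model` records above share a small config-handling pattern: a legacy `"model"` key is rewritten to `"models"` for backwards compatibility, `None`-valued kwargs are filtered out so the model's own defaults apply, and unset model ids are auto-incremented past any ids taken explicitly. A minimal sketch of just that pattern, with the hypothetical `StubModel`/`MiniServer` standing in for the real `ServerModel`/`TranslationServer`:

```python
# Condensed sketch of the config handling shown in the
# TranslationServer.start / preload_model records above. `StubModel`
# and `MiniServer` are placeholders, not classes from the repository.
import json

class StubModel:
    def __init__(self, opt, model_id, **kwargs):
        self.opt, self.model_id, self.kwargs = opt, model_id, kwargs

class MiniServer:
    def __init__(self):
        self.models, self.next_id = {}, 0

    def start(self, config_text):
        confs = json.loads(config_text)
        root = confs.get('models_root', './available_models')
        for i, conf in enumerate(confs["models"]):
            if "models" not in conf:
                if "model" in conf:            # backwards compatibility
                    conf["models"] = [conf["model"]]
                else:
                    raise ValueError("missing 'models' for model #%d" % i)
            kwargs = {'timeout': conf.get('timeout'),
                      'load': conf.get('load'),
                      'model_root': conf.get('model_root', root)}
            # Drop None values so StubModel's own defaults would apply.
            kwargs = {k: v for k, v in kwargs.items() if v is not None}
            self.preload(conf["opt"], conf.get("id"), **kwargs)

    def preload(self, opt, model_id=None, **kwargs):
        if model_id is not None:
            if model_id in self.models:
                raise ValueError("Model ID %d already exists" % model_id)
        else:
            model_id = self.next_id
            while model_id in self.models:     # skip explicitly taken ids
                model_id += 1
            self.next_id = model_id + 1
        self.models[model_id] = StubModel(opt, model_id, **kwargs)
        return model_id

srv = MiniServer()
srv.start('{"models": [{"model": "m1.pt", "opt": {}}, '
          '{"models": ["a.pt", "b.pt"], "opt": {}, "timeout": 600}]}')
print(sorted(srv.models), srv.models[1].kwargs)
# -> [0, 1] {'timeout': 600, 'model_root': './available_models'}
```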
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.parse_opt","parameters":"(self, opt)","argument_list":"","return_statement":"return opt","docstring":"Parse the option set passed by the user using `onmt.opts`\n Args:\n opt: (dict) options passed by the user\n\n Returns:\n opt: (Namespace) full set of options for the Translator","docstring_summary":"Parse the option set passed by the user using `onmt.opts`\n Args:\n opt: (dict) options passed by the user","docstring_tokens":["Parse","the","option","set","passed","by","the","user","using","onmt",".","opts","Args",":","opt",":","(","dict",")","options","passed","by","the","user"],"function":"def parse_opt(self, opt):\n \"\"\"Parse the option set passed by the user using `onmt.opts`\n Args:\n opt: (dict) options passed by the user\n\n Returns:\n opt: (Namespace) full set of options for the Translator\n \"\"\"\n prec_argv = sys.argv\n sys.argv = sys.argv[:1]\n parser = argparse.ArgumentParser()\n onmt.opts.translate_opts(parser)\n\n models = opt['models']\n if not isinstance(models, (list, tuple)):\n models = [models]\n opt['models'] = [os.path.join(self.model_root, model)\n for model in models]\n opt['src'] = \"dummy_src\"\n\n for (k, v) in opt.items():\n if k == 'models':\n sys.argv += ['-model']\n sys.argv += [str(model) for model in v]\n elif type(v) == bool:\n sys.argv += ['-%s' % k]\n else:\n sys.argv += ['-%s' % k, str(v)]\n\n opt = parser.parse_args()\n opt.cuda = opt.gpu > -1\n\n sys.argv = prec_argv\n return opt","function_tokens":["def","parse_opt","(","self",",","opt",")",":","prec_argv","=","sys",".","argv","sys",".","argv","=","sys",".","argv","[",":","1","]","parser","=","argparse",".","ArgumentParser","(",")","onmt",".","opts",".","translate_opts","(","parser",")","models","=","opt","[","'models'","]","if","not","isinstance","(","models",",","(","list",",","tuple",")",")",":","models","=","[","models","]","opt","[","'models'","]","=","[","os",".","path",".","join","(","self",".","model_root",",","model",")","for","model","in","models","]","opt","[","'src'","]","=","\"dummy_src\"","for","(","k",",","v",")","in","opt",".","items","(",")",":","if","k","==","'models'",":","sys",".","argv","+=","[","'-model'","]","sys",".","argv","+=","[","str","(","model",")","for","model","in","v","]","elif","type","(","v",")","==","bool",":","sys",".","argv","+=","[","'-%s'","%","k","]","else",":","sys",".","argv","+=","[","'-%s'","%","k",",","str","(","v",")","]","opt","=","parser",".","parse_args","(",")","opt",".","cuda","=","opt",".","gpu",">","-","1","sys",".","argv","=","prec_argv","return","opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L190-L223"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.run","parameters":"(self, inputs)","argument_list":"","return_statement":"return results, scores, self.opt.n_best, timer.times","docstring":"Translate `inputs` using this model\n\n Args:\n inputs: [{\"src\": \"...\"},{\"src\": ...}]\n\n Returns:\n result: (list) translations\n times: (dict) containing times","docstring_summary":"Translate `inputs` using this 
model","docstring_tokens":["Translate","inputs","using","this","model"],"function":"def run(self, inputs):\n \"\"\"Translate `inputs` using this model\n\n Args:\n inputs: [{\"src\": \"...\"},{\"src\": ...}]\n\n Returns:\n result: (list) translations\n times: (dict) containing times\n \"\"\"\n self.stop_unload_timer()\n\n timer = Timer()\n timer.start()\n self.logger.info(\"Running translation using %d\" % self.model_id)\n\n if not self.loading_lock.is_set():\n self.logger.info(\n \"Model #%d is being loaded by another thread, waiting\"\n % self.model_id)\n if not self.loading_lock.wait(timeout=30):\n raise ServerModelError(\"Model %d loading timeout\"\n % self.model_id)\n\n else:\n if not self.loaded:\n self.load()\n timer.tick(name=\"load\")\n elif self.opt.cuda:\n self.to_gpu()\n timer.tick(name=\"to_gpu\")\n\n texts = []\n head_spaces = []\n tail_spaces = []\n sslength = []\n for i, inp in enumerate(inputs):\n src = inp['src']\n if src.strip() == \"\":\n head_spaces.append(src)\n texts.append(\"\")\n tail_spaces.append(\"\")\n else:\n whitespaces_before, whitespaces_after = \"\", \"\"\n match_before = re.search(r'^\\s+', src)\n match_after = re.search(r'\\s+$', src)\n if match_before is not None:\n whitespaces_before = match_before.group(0)\n if match_after is not None:\n whitespaces_after = match_after.group(0)\n head_spaces.append(whitespaces_before)\n tok = self.maybe_tokenize(src.strip())\n texts.append(tok)\n sslength.append(len(tok.split()))\n tail_spaces.append(whitespaces_after)\n\n empty_indices = [i for i, x in enumerate(texts) if x == \"\"]\n texts_to_translate = [x for x in texts if x != \"\"]\n\n scores = []\n predictions = []\n if len(texts_to_translate) > 0:\n try:\n scores, predictions = self.translator.translate(\n src_data_iter=texts_to_translate,\n batch_size=self.opt.batch_size)\n except RuntimeError as e:\n raise ServerModelError(\"Runtime Error: %s\" % str(e))\n\n timer.tick(name=\"translation\")\n self.logger.info(\"\"\"Using model #%d\\t%d inputs\n \\ttranslation time: %f\"\"\" % (self.model_id, len(texts),\n timer.times['translation']))\n self.reset_unload_timer()\n\n # NOTE: translator returns lists of `n_best` list\n # we can ignore that (i.e. 
flatten lists) only because\n # we restrict `n_best=1`\n def flatten_list(_list): return sum(_list, [])\n results = flatten_list(predictions)\n scores = [score_tensor.item()\n for score_tensor in flatten_list(scores)]\n\n results = [self.maybe_detokenize(item)\n for item in results]\n\n # build back results with empty texts\n for i in empty_indices:\n results.insert(i, \"\")\n scores.insert(i, 0)\n\n results = [\"\".join(items)\n for items in zip(head_spaces, results, tail_spaces)]\n\n self.logger.info(\"Translation Results: %d\", len(results))\n\n return results, scores, self.opt.n_best, timer.times","function_tokens":["def","run","(","self",",","inputs",")",":","self",".","stop_unload_timer","(",")","timer","=","Timer","(",")","timer",".","start","(",")","self",".","logger",".","info","(","\"Running translation using %d\"","%","self",".","model_id",")","if","not","self",".","loading_lock",".","is_set","(",")",":","self",".","logger",".","info","(","\"Model #%d is being loaded by another thread, waiting\"","%","self",".","model_id",")","if","not","self",".","loading_lock",".","wait","(","timeout","=","30",")",":","raise","ServerModelError","(","\"Model %d loading timeout\"","%","self",".","model_id",")","else",":","if","not","self",".","loaded",":","self",".","load","(",")","timer",".","tick","(","name","=","\"load\"",")","elif","self",".","opt",".","cuda",":","self",".","to_gpu","(",")","timer",".","tick","(","name","=","\"to_gpu\"",")","texts","=","[","]","head_spaces","=","[","]","tail_spaces","=","[","]","sslength","=","[","]","for","i",",","inp","in","enumerate","(","inputs",")",":","src","=","inp","[","'src'","]","if","src",".","strip","(",")","==","\"\"",":","head_spaces",".","append","(","src",")","texts",".","append","(","\"\"",")","tail_spaces",".","append","(","\"\"",")","else",":","whitespaces_before",",","whitespaces_after","=","\"\"",",","\"\"","match_before","=","re",".","search","(","r'^\\s+'",",","src",")","match_after","=","re",".","search","(","r'\\s+$'",",","src",")","if","match_before","is","not","None",":","whitespaces_before","=","match_before",".","group","(","0",")","if","match_after","is","not","None",":","whitespaces_after","=","match_after",".","group","(","0",")","head_spaces",".","append","(","whitespaces_before",")","tok","=","self",".","maybe_tokenize","(","src",".","strip","(",")",")","texts",".","append","(","tok",")","sslength",".","append","(","len","(","tok",".","split","(",")",")",")","tail_spaces",".","append","(","whitespaces_after",")","empty_indices","=","[","i","for","i",",","x","in","enumerate","(","texts",")","if","x","==","\"\"","]","texts_to_translate","=","[","x","for","x","in","texts","if","x","!=","\"\"","]","scores","=","[","]","predictions","=","[","]","if","len","(","texts_to_translate",")",">","0",":","try",":","scores",",","predictions","=","self",".","translator",".","translate","(","src_data_iter","=","texts_to_translate",",","batch_size","=","self",".","opt",".","batch_size",")","except","RuntimeError","as","e",":","raise","ServerModelError","(","\"Runtime Error: %s\"","%","str","(","e",")",")","timer",".","tick","(","name","=","\"translation\"",")","self",".","logger",".","info","(","\"\"\"Using model #%d\\t%d inputs\n \\ttranslation time: %f\"\"\"","%","(","self",".","model_id",",","len","(","texts",")",",","timer",".","times","[","'translation'","]",")",")","self",".","reset_unload_timer","(",")","# NOTE: translator returns lists of `n_best` list","# we can ignore that (i.e. 
flatten lists) only because","# we restrict `n_best=1`","def","flatten_list","(","_list",")",":","return","sum","(","_list",",","[","]",")","results","=","flatten_list","(","predictions",")","scores","=","[","score_tensor",".","item","(",")","for","score_tensor","in","flatten_list","(","scores",")","]","results","=","[","self",".","maybe_detokenize","(","item",")","for","item","in","results","]","# build back results with empty texts","for","i","in","empty_indices",":","results",".","insert","(","i",",","\"\"",")","scores",".","insert","(","i",",","0",")","results","=","[","\"\"",".","join","(","items",")","for","items","in","zip","(","head_spaces",",","results",",","tail_spaces",")","]","self",".","logger",".","info","(","\"Translation Results: %d\"",",","len","(","results",")",")","return","results",",","scores",",","self",".","opt",".","n_best",",","timer",".","times"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L286-L382"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.do_timeout","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Timeout function that free GPU memory by moving the model to CPU\n or unloading it; depending on `self.on_timemout` value","docstring_summary":"Timeout function that free GPU memory by moving the model to CPU\n or unloading it; depending on `self.on_timemout` value","docstring_tokens":["Timeout","function","that","free","GPU","memory","by","moving","the","model","to","CPU","or","unloading","it",";","depending","on","self",".","on_timemout","value"],"function":"def do_timeout(self):\n \"\"\"Timeout function that free GPU memory by moving the model to CPU\n or unloading it; depending on `self.on_timemout` value\n \"\"\"\n if self.on_timeout == \"unload\":\n self.logger.info(\"Timeout: unloading model %d\" % self.model_id)\n self.unload()\n if self.on_timeout == \"to_cpu\":\n self.logger.info(\"Timeout: sending model %d to CPU\"\n % self.model_id)\n self.to_cpu()","function_tokens":["def","do_timeout","(","self",")",":","if","self",".","on_timeout","==","\"unload\"",":","self",".","logger",".","info","(","\"Timeout: unloading model %d\"","%","self",".","model_id",")","self",".","unload","(",")","if","self",".","on_timeout","==","\"to_cpu\"",":","self",".","logger",".","info","(","\"Timeout: sending model %d to CPU\"","%","self",".","model_id",")","self",".","to_cpu","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L384-L394"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.to_cpu","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Move the model to CPU and clear CUDA cache","docstring_summary":"Move the model to CPU and clear CUDA cache","docstring_tokens":["Move","the","model","to","CPU","and","clear","CUDA","cache"],"function":"def to_cpu(self):\n \"\"\"Move the model to CPU and clear CUDA cache\n \"\"\"\n self.translator.model.cpu()\n if self.opt.cuda:\n 
torch.cuda.empty_cache()","function_tokens":["def","to_cpu","(","self",")",":","self",".","translator",".","model",".","cpu","(",")","if","self",".","opt",".","cuda",":","torch",".","cuda",".","empty_cache","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L428-L433"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.to_gpu","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Move the model to GPU","docstring_summary":"Move the model to GPU","docstring_tokens":["Move","the","model","to","GPU"],"function":"def to_gpu(self):\n \"\"\"Move the model to GPU\n \"\"\"\n torch.cuda.set_device(self.opt.gpu)\n self.translator.model.cuda()","function_tokens":["def","to_gpu","(","self",")",":","torch",".","cuda",".","set_device","(","self",".","opt",".","gpu",")","self",".","translator",".","model",".","cuda","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L435-L439"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.maybe_tokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return sequence","docstring":"Tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`","docstring_summary":"Tokenize the sequence (or not)","docstring_tokens":["Tokenize","the","sequence","(","or","not",")"],"function":"def maybe_tokenize(self, sequence):\n \"\"\"Tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`\n \"\"\"\n if self.tokenizer_opt is not None:\n return self.tokenize(sequence)\n return sequence","function_tokens":["def","maybe_tokenize","(","self",",","sequence",")",":","if","self",".","tokenizer_opt","is","not","None",":","return","self",".","tokenize","(","sequence",")","return","sequence"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L441-L448"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.tokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return tok","docstring":"Tokenize a single sequence\n\n Args:\n sequence: (str) the sequence to tokenize\n\n Returns:\n tok: (str) the tokenized sequence","docstring_summary":"Tokenize a single sequence","docstring_tokens":["Tokenize","a","single","sequence"],"function":"def tokenize(self, sequence):\n \"\"\"Tokenize a single sequence\n\n Args:\n sequence: (str) the sequence to tokenize\n\n Returns:\n tok: (str) the tokenized sequence\n\n \"\"\"\n if self.tokenizer is None:\n raise ValueError(\"No tokenizer loaded\")\n\n if self.tokenizer_opt[\"type\"] == \"sentencepiece\":\n tok = self.tokenizer.EncodeAsPieces(sequence)\n tok = \" \".join(tok)\n elif self.tokenizer_opt[\"type\"] == \"pyonmttok\":\n tok, _ = self.tokenizer.tokenize(sequence)\n tok = \" \".join(tok)\n return 
tok","function_tokens":["def","tokenize","(","self",",","sequence",")",":","if","self",".","tokenizer","is","None",":","raise","ValueError","(","\"No tokenizer loaded\"",")","if","self",".","tokenizer_opt","[","\"type\"","]","==","\"sentencepiece\"",":","tok","=","self",".","tokenizer",".","EncodeAsPieces","(","sequence",")","tok","=","\" \"",".","join","(","tok",")","elif","self",".","tokenizer_opt","[","\"type\"","]","==","\"pyonmttok\"",":","tok",",","_","=","self",".","tokenizer",".","tokenize","(","sequence",")","tok","=","\" \"",".","join","(","tok",")","return","tok"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L450-L469"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.maybe_detokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return sequence","docstring":"De-tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`","docstring_summary":"De-tokenize the sequence (or not)","docstring_tokens":["De","-","tokenize","the","sequence","(","or","not",")"],"function":"def maybe_detokenize(self, sequence):\n \"\"\"De-tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`\n \"\"\"\n if self.tokenizer_opt is not None and ''.join(sequence.split()) != '':\n return self.detokenize(sequence)\n return sequence","function_tokens":["def","maybe_detokenize","(","self",",","sequence",")",":","if","self",".","tokenizer_opt","is","not","None","and","''",".","join","(","sequence",".","split","(",")",")","!=","''",":","return","self",".","detokenize","(","sequence",")","return","sequence"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L471-L478"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.detokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return detok","docstring":"Detokenize a single sequence\n\n Same args\/returns as `tokenize`","docstring_summary":"Detokenize a single sequence","docstring_tokens":["Detokenize","a","single","sequence"],"function":"def detokenize(self, sequence):\n \"\"\"Detokenize a single sequence\n\n Same args\/returns as `tokenize`\n \"\"\"\n if self.tokenizer is None:\n raise ValueError(\"No tokenizer loaded\")\n\n if self.tokenizer_opt[\"type\"] == \"sentencepiece\":\n detok = self.tokenizer.DecodePieces(sequence.split())\n elif self.tokenizer_opt[\"type\"] == \"pyonmttok\":\n detok = self.tokenizer.detokenize(sequence.split())\n\n return detok","function_tokens":["def","detokenize","(","self",",","sequence",")",":","if","self",".","tokenizer","is","None",":","raise","ValueError","(","\"No tokenizer 
loaded\"",")","if","self",".","tokenizer_opt","[","\"type\"","]","==","\"sentencepiece\"",":","detok","=","self",".","tokenizer",".","DecodePieces","(","sequence",".","split","(",")",")","elif","self",".","tokenizer_opt","[","\"type\"","]","==","\"pyonmttok\"",":","detok","=","self",".","tokenizer",".","detokenize","(","sequence",".","split","(",")",")","return","detok"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation_server.py#L480-L493"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.coverage_wu","parameters":"(self, beam, cov, beta=0.)","argument_list":"","return_statement":"return beta * penalty","docstring":"NMT coverage re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_summary":"NMT coverage re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_tokens":["NMT","coverage","re","-","ranking","score","from","Google","s","Neural","Machine","Translation","System",":","cite",":","wu2016google","."],"function":"def coverage_wu(self, beam, cov, beta=0.):\n \"\"\"\n NMT coverage re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.\n \"\"\"\n penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)\n return beta * penalty","function_tokens":["def","coverage_wu","(","self",",","beam",",","cov",",","beta","=","0.",")",":","penalty","=","-","torch",".","min","(","cov",",","cov",".","clone","(",")",".","fill_","(","1.0",")",")",".","log","(",")",".","sum","(","1",")","return","beta","*","penalty"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py#L38-L44"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.coverage_summary","parameters":"(self, beam, cov, beta=0.)","argument_list":"","return_statement":"return beta * penalty","docstring":"Our summary penalty.","docstring_summary":"Our summary penalty.","docstring_tokens":["Our","summary","penalty","."],"function":"def coverage_summary(self, beam, cov, beta=0.):\n \"\"\"\n Our summary penalty.\n \"\"\"\n penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(1)\n penalty -= cov.size(1)\n return beta * penalty","function_tokens":["def","coverage_summary","(","self",",","beam",",","cov",",","beta","=","0.",")",":","penalty","=","torch",".","max","(","cov",",","cov",".","clone","(",")",".","fill_","(","1.0",")",")",".","sum","(","1",")","penalty","-=","cov",".","size","(","1",")","return","beta","*","penalty"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py#L46-L52"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.coverage_none","parameters":"(self, beam, cov, beta=0.)","argument_list":"","return_statement":"return beam.scores.clone().fill_(0.0)","docstring":"returns zero as penalty","docstring_summary":"returns zero as 
penalty","docstring_tokens":["returns","zero","as","penalty"],"function":"def coverage_none(self, beam, cov, beta=0.):\n \"\"\"\n returns zero as penalty\n \"\"\"\n return beam.scores.clone().fill_(0.0)","function_tokens":["def","coverage_none","(","self",",","beam",",","cov",",","beta","=","0.",")",":","return","beam",".","scores",".","clone","(",")",".","fill_","(","0.0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py#L54-L58"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.length_wu","parameters":"(self, beam, logprobs, alpha=0.)","argument_list":"","return_statement":"return (logprobs \/ modifier)","docstring":"NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_summary":"NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_tokens":["NMT","length","re","-","ranking","score","from","Google","s","Neural","Machine","Translation","System",":","cite",":","wu2016google","."],"function":"def length_wu(self, beam, logprobs, alpha=0.):\n \"\"\"\n NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.\n \"\"\"\n\n modifier = (((5 + len(beam.next_ys)) ** alpha) \/\n ((5 + 1) ** alpha))\n return (logprobs \/ modifier)","function_tokens":["def","length_wu","(","self",",","beam",",","logprobs",",","alpha","=","0.",")",":","modifier","=","(","(","(","5","+","len","(","beam",".","next_ys",")",")","**","alpha",")","\/","(","(","5","+","1",")","**","alpha",")",")","return","(","logprobs","\/","modifier",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py#L60-L68"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.length_average","parameters":"(self, beam, logprobs, alpha=0.)","argument_list":"","return_statement":"return logprobs \/ len(beam.next_ys)","docstring":"Returns the average probability of tokens in a sequence.","docstring_summary":"Returns the average probability of tokens in a sequence.","docstring_tokens":["Returns","the","average","probability","of","tokens","in","a","sequence","."],"function":"def length_average(self, beam, logprobs, alpha=0.):\n \"\"\"\n Returns the average probability of tokens in a sequence.\n \"\"\"\n return logprobs \/ len(beam.next_ys)","function_tokens":["def","length_average","(","self",",","beam",",","logprobs",",","alpha","=","0.",")",":","return","logprobs","\/","len","(","beam",".","next_ys",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py#L70-L74"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.length_none","parameters":"(self, beam, logprobs, alpha=0., beta=0.)","argument_list":"","return_statement":"return logprobs","docstring":"Returns unmodified scores.","docstring_summary":"Returns unmodified 
scores.","docstring_tokens":["Returns","unmodified","scores","."],"function":"def length_none(self, beam, logprobs, alpha=0., beta=0.):\n \"\"\"\n Returns unmodified scores.\n \"\"\"\n return logprobs","function_tokens":["def","length_none","(","self",",","beam",",","logprobs",",","alpha","=","0.",",","beta","=","0.",")",":","return","logprobs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/penalties.py#L76-L80"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translator.py","language":"python","identifier":"Translator.translate","parameters":"(self,\n src_path=None,\n src_data_iter=None,\n tgt_path=None,\n tgt_data_iter=None,\n src_dir=None,\n batch_size=None,\n attn_debug=False)","argument_list":"","return_statement":"return all_scores, all_predictions","docstring":"Translate content of `src_data_iter` (if not None) or `src_path`\n and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.\n\n Note: batch_size must not be None\n Note: one of ('src_path', 'src_data_iter') must not be None\n\n Args:\n src_path (str): filepath of source data\n src_data_iter (iterator): an interator generating source data\n e.g. it may be a list or an openned file\n tgt_path (str): filepath of target data\n tgt_data_iter (iterator): an interator generating target data\n src_dir (str): source directory path\n (used for Audio and Image datasets)\n batch_size (int): size of examples per mini-batch\n attn_debug (bool): enables the attention logging\n\n Returns:\n (`list`, `list`)\n\n * all_scores is a list of `batch_size` lists of `n_best` scores\n * all_predictions is a list of `batch_size` lists\n of `n_best` predictions","docstring_summary":"Translate content of `src_data_iter` (if not None) or `src_path`\n and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.","docstring_tokens":["Translate","content","of","src_data_iter","(","if","not","None",")","or","src_path","and","get","gold","scores","if","one","of","tgt_data_iter","or","tgt_path","is","set","."],"function":"def translate(self,\n src_path=None,\n src_data_iter=None,\n tgt_path=None,\n tgt_data_iter=None,\n src_dir=None,\n batch_size=None,\n attn_debug=False):\n \"\"\"\n Translate content of `src_data_iter` (if not None) or `src_path`\n and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.\n\n Note: batch_size must not be None\n Note: one of ('src_path', 'src_data_iter') must not be None\n\n Args:\n src_path (str): filepath of source data\n src_data_iter (iterator): an interator generating source data\n e.g. it may be a list or an openned file\n tgt_path (str): filepath of target data\n tgt_data_iter (iterator): an interator generating target data\n src_dir (str): source directory path\n (used for Audio and Image datasets)\n batch_size (int): size of examples per mini-batch\n attn_debug (bool): enables the attention logging\n\n Returns:\n (`list`, `list`)\n\n * all_scores is a list of `batch_size` lists of `n_best` scores\n * all_predictions is a list of `batch_size` lists\n of `n_best` predictions\n \"\"\"\n\n assert src_data_iter is not None or src_path is not None\n\n if batch_size is None:\n raise ValueError(\"batch_size must be set\")\n data = inputters. 
\\\n build_dataset(self.fields,\n self.data_type,\n src_path=src_path,\n src_data_iter=src_data_iter,\n tgt_path=tgt_path,\n tgt_data_iter=tgt_data_iter,\n src_dir=src_dir,\n sample_rate=self.sample_rate,\n window_size=self.window_size,\n window_stride=self.window_stride,\n window=self.window,\n use_filter_pred=self.use_filter_pred,\n image_channel_size=self.image_channel_size)\n\n if self.cuda:\n cur_device = \"cuda\"\n else:\n cur_device = \"cpu\"\n\n data_iter = inputters.OrderedIterator(\n dataset=data, device=cur_device,\n batch_size=batch_size, train=False, sort=False,\n sort_within_batch=True, shuffle=False)\n\n builder = onmt.translate.TranslationBuilder(\n data, self.fields,\n self.n_best, self.replace_unk, tgt_path)\n\n # Statistics\n counter = count(1)\n pred_score_total, pred_words_total = 0, 0\n gold_score_total, gold_words_total = 0, 0\n\n all_scores = []\n all_predictions = []\n\n for batch in data_iter:\n batch_data = self.translate_batch(batch, data, fast=self.fast)\n # import pdb;pdb.set_trace()\n translations = builder.from_batch(batch_data)\n\n for trans in translations:\n all_scores += [trans.pred_scores[:self.n_best]]\n pred_score_total += trans.pred_scores[0]\n pred_words_total += len(trans.pred_sents[0])\n if tgt_path is not None:\n gold_score_total += trans.gold_score\n gold_words_total += len(trans.gold_sent) + 1\n\n n_best_preds = [\" \".join(pred)\n for pred in trans.pred_sents[:self.n_best]]\n all_predictions += [n_best_preds]\n self.out_file.write('\\n'.join(n_best_preds) + '\\n')\n self.out_file.flush()\n\n if self.verbose:\n sent_number = next(counter)\n output = trans.log(sent_number)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n # Debug attention.\n if attn_debug:\n srcs = trans.src_raw\n preds = trans.pred_sents[0]\n preds.append('<\/s>')\n attns = trans.attns[0].tolist()\n header_format = \"{:>10.10} \" + \"{:>10.7} \" * len(srcs)\n row_format = \"{:>10.10} \" + \"{:>10.7f} \" * len(srcs)\n output = header_format.format(\"\", *trans.src_raw) + '\\n'\n for word, row in zip(preds, attns):\n max_index = row.index(max(row))\n row_format = row_format.replace(\n \"{:>10.7f} \", \"{:*>10.7f} \", max_index + 1)\n row_format = row_format.replace(\n \"{:*>10.7f} \", \"{:>10.7f} \", max_index)\n output += row_format.format(word, *row) + '\\n'\n row_format = \"{:>10.10} \" + \"{:>10.7f} \" * len(srcs)\n os.write(1, output.encode('utf-8'))\n\n #TODO change back\n #if self.report_score:\n # msg = self._report_score('PRED', pred_score_total,\n # pred_words_total)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n # if tgt_path is not None:\n # msg = self._report_score('GOLD', gold_score_total,\n # gold_words_total)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n # if self.report_bleu:\n # msg = self._report_bleu(tgt_path)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n # if self.report_rouge:\n # msg = self._report_rouge(tgt_path)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n\n if self.dump_beam:\n import json\n json.dump(self.translator.beam_accum,\n codecs.open(self.dump_beam, 'w', 'utf-8'))\n return all_scores, 
all_predictions","function_tokens":["def","translate","(","self",",","src_path","=","None",",","src_data_iter","=","None",",","tgt_path","=","None",",","tgt_data_iter","=","None",",","src_dir","=","None",",","batch_size","=","None",",","attn_debug","=","False",")",":","assert","src_data_iter","is","not","None","or","src_path","is","not","None","if","batch_size","is","None",":","raise","ValueError","(","\"batch_size must be set\"",")","data","=","inputters",".","build_dataset","(","self",".","fields",",","self",".","data_type",",","src_path","=","src_path",",","src_data_iter","=","src_data_iter",",","tgt_path","=","tgt_path",",","tgt_data_iter","=","tgt_data_iter",",","src_dir","=","src_dir",",","sample_rate","=","self",".","sample_rate",",","window_size","=","self",".","window_size",",","window_stride","=","self",".","window_stride",",","window","=","self",".","window",",","use_filter_pred","=","self",".","use_filter_pred",",","image_channel_size","=","self",".","image_channel_size",")","if","self",".","cuda",":","cur_device","=","\"cuda\"","else",":","cur_device","=","\"cpu\"","data_iter","=","inputters",".","OrderedIterator","(","dataset","=","data",",","device","=","cur_device",",","batch_size","=","batch_size",",","train","=","False",",","sort","=","False",",","sort_within_batch","=","True",",","shuffle","=","False",")","builder","=","onmt",".","translate",".","TranslationBuilder","(","data",",","self",".","fields",",","self",".","n_best",",","self",".","replace_unk",",","tgt_path",")","# Statistics","counter","=","count","(","1",")","pred_score_total",",","pred_words_total","=","0",",","0","gold_score_total",",","gold_words_total","=","0",",","0","all_scores","=","[","]","all_predictions","=","[","]","for","batch","in","data_iter",":","batch_data","=","self",".","translate_batch","(","batch",",","data",",","fast","=","self",".","fast",")","# import pdb;pdb.set_trace()","translations","=","builder",".","from_batch","(","batch_data",")","for","trans","in","translations",":","all_scores","+=","[","trans",".","pred_scores","[",":","self",".","n_best","]","]","pred_score_total","+=","trans",".","pred_scores","[","0","]","pred_words_total","+=","len","(","trans",".","pred_sents","[","0","]",")","if","tgt_path","is","not","None",":","gold_score_total","+=","trans",".","gold_score","gold_words_total","+=","len","(","trans",".","gold_sent",")","+","1","n_best_preds","=","[","\" \"",".","join","(","pred",")","for","pred","in","trans",".","pred_sents","[",":","self",".","n_best","]","]","all_predictions","+=","[","n_best_preds","]","self",".","out_file",".","write","(","'\\n'",".","join","(","n_best_preds",")","+","'\\n'",")","self",".","out_file",".","flush","(",")","if","self",".","verbose",":","sent_number","=","next","(","counter",")","output","=","trans",".","log","(","sent_number",")","if","self",".","logger",":","self",".","logger",".","info","(","output",")","else",":","os",".","write","(","1",",","output",".","encode","(","'utf-8'",")",")","# Debug attention.","if","attn_debug",":","srcs","=","trans",".","src_raw","preds","=","trans",".","pred_sents","[","0","]","preds",".","append","(","'<\/s>'",")","attns","=","trans",".","attns","[","0","]",".","tolist","(",")","header_format","=","\"{:>10.10} \"","+","\"{:>10.7} \"","*","len","(","srcs",")","row_format","=","\"{:>10.10} \"","+","\"{:>10.7f} 
\"","*","len","(","srcs",")","output","=","header_format",".","format","(","\"\"",",","*","trans",".","src_raw",")","+","'\\n'","for","word",",","row","in","zip","(","preds",",","attns",")",":","max_index","=","row",".","index","(","max","(","row",")",")","row_format","=","row_format",".","replace","(","\"{:>10.7f} \"",",","\"{:*>10.7f} \"",",","max_index","+","1",")","row_format","=","row_format",".","replace","(","\"{:*>10.7f} \"",",","\"{:>10.7f} \"",",","max_index",")","output","+=","row_format",".","format","(","word",",","*","row",")","+","'\\n'","row_format","=","\"{:>10.10} \"","+","\"{:>10.7f} \"","*","len","(","srcs",")","os",".","write","(","1",",","output",".","encode","(","'utf-8'",")",")","#TODO change back","#if self.report_score:","# msg = self._report_score('PRED', pred_score_total,","# pred_words_total)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","# if tgt_path is not None:","# msg = self._report_score('GOLD', gold_score_total,","# gold_words_total)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","# if self.report_bleu:","# msg = self._report_bleu(tgt_path)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","# if self.report_rouge:","# msg = self._report_rouge(tgt_path)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","if","self",".","dump_beam",":","import","json","json",".","dump","(","self",".","translator",".","beam_accum",",","codecs",".","open","(","self",".","dump_beam",",","'w'",",","'utf-8'",")",")","return","all_scores",",","all_predictions"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translator.py#L150-L303"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translator.py","language":"python","identifier":"Translator.translate_batch","parameters":"(self, batch, data, fast=False)","argument_list":"","return_statement":"","docstring":"Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.","docstring_summary":"Translate a batch of sentences.","docstring_tokens":["Translate","a","batch","of","sentences","."],"function":"def translate_batch(self, batch, data, fast=False):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.\n \"\"\"\n with torch.no_grad():\n if fast:\n return self._fast_translate_batch(\n batch,\n data,\n self.max_length,\n min_length=self.min_length,\n n_best=self.n_best,\n return_attention=self.replace_unk)\n else:\n return self._translate_batch(batch, 
data)","function_tokens":["def","translate_batch","(","self",",","batch",",","data",",","fast","=","False",")",":","with","torch",".","no_grad","(",")",":","if","fast",":","return","self",".","_fast_translate_batch","(","batch",",","data",",","self",".","max_length",",","min_length","=","self",".","min_length",",","n_best","=","self",".","n_best",",","return_attention","=","self",".","replace_unk",")","else",":","return","self",".","_translate_batch","(","batch",",","data",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translator.py#L305-L329"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.get_current_state","parameters":"(self)","argument_list":"","return_statement":"return self.next_ys[-1]","docstring":"Get the outputs for the current timestep.","docstring_summary":"Get the outputs for the current timestep.","docstring_tokens":["Get","the","outputs","for","the","current","timestep","."],"function":"def get_current_state(self):\n \"Get the outputs for the current timestep.\"\n return self.next_ys[-1]","function_tokens":["def","get_current_state","(","self",")",":","return","self",".","next_ys","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L66-L68"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.get_current_origin","parameters":"(self)","argument_list":"","return_statement":"return self.prev_ks[-1]","docstring":"Get the backpointers for the current timestep.","docstring_summary":"Get the backpointers for the current timestep.","docstring_tokens":["Get","the","backpointers","for","the","current","timestep","."],"function":"def get_current_origin(self):\n \"Get the backpointers for the current timestep.\"\n return self.prev_ks[-1]","function_tokens":["def","get_current_origin","(","self",")",":","return","self",".","prev_ks","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L70-L72"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.advance","parameters":"(self, word_probs, attn_out)","argument_list":"","return_statement":"","docstring":"Given prob over words for every last beam `wordLk` and attention\n `attn_out`: Compute and update the beam search.\n\n Parameters:\n\n * `word_probs`- probs of advancing from the last step (K x words)\n * `attn_out`- attention at the last step\n\n Returns: True if beam search is complete.","docstring_summary":"Given prob over words for every last beam `wordLk` and attention\n `attn_out`: Compute and update the beam search.","docstring_tokens":["Given","prob","over","words","for","every","last","beam","wordLk","and","attention","attn_out",":","Compute","and","update","the","beam","search","."],"function":"def advance(self, word_probs, attn_out):\n \"\"\"\n Given prob over words for every last beam `wordLk` and attention\n `attn_out`: Compute and update the beam search.\n\n Parameters:\n\n * `word_probs`- probs of 
advancing from the last step (K x words)\n * `attn_out`- attention at the last step\n\n Returns: True if beam search is complete.\n \"\"\"\n num_words = word_probs.size(1)\n if self.stepwise_penalty:\n self.global_scorer.update_score(self, attn_out)\n # force the output to be longer than self.min_length\n cur_len = len(self.next_ys)\n if cur_len < self.min_length:\n for k in range(len(word_probs)):\n word_probs[k][self._eos] = -1e20\n # Sum the previous scores.\n if len(self.prev_ks) > 0:\n beam_scores = word_probs + \\\n self.scores.unsqueeze(1).expand_as(word_probs)\n # Don't let EOS have children.\n for i in range(self.next_ys[-1].size(0)):\n if self.next_ys[-1][i] == self._eos:\n beam_scores[i] = -1e20\n\n # Block ngram repeats\n if self.block_ngram_repeat > 0:\n ngrams = []\n le = len(self.next_ys)\n for j in range(self.next_ys[-1].size(0)):\n hyp, _ = self.get_hyp(le - 1, j)\n ngrams = set()\n fail = False\n gram = []\n for i in range(le - 1):\n # Last n tokens, n = block_ngram_repeat\n gram = (gram +\n [hyp[i].item()])[-self.block_ngram_repeat:]\n # Skip the blocking if it is in the exclusion list\n if set(gram) & self.exclusion_tokens:\n continue\n if tuple(gram) in ngrams:\n fail = True\n ngrams.add(tuple(gram))\n if fail:\n beam_scores[j] = -10e20\n else:\n beam_scores = word_probs[0]\n flat_beam_scores = beam_scores.view(-1)\n best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,\n True, True)\n\n self.all_scores.append(self.scores)\n self.scores = best_scores\n\n # best_scores_id is flattened beam x word array, so calculate which\n # word and beam each score came from\n prev_k = best_scores_id \/ num_words\n self.prev_ks.append(prev_k)\n self.next_ys.append((best_scores_id - prev_k * num_words))\n self.attn.append(attn_out.index_select(0, prev_k))\n self.global_scorer.update_global_state(self)\n\n for i in range(self.next_ys[-1].size(0)):\n if self.next_ys[-1][i] == self._eos:\n global_scores = self.global_scorer.score(self, self.scores)\n s = global_scores[i]\n self.finished.append((s, len(self.next_ys) - 1, i))\n\n # End condition is when top-of-beam is EOS and no global score.\n if self.next_ys[-1][0] == self._eos:\n self.all_scores.append(self.scores)\n self.eos_top = True","function_tokens":["def","advance","(","self",",","word_probs",",","attn_out",")",":","num_words","=","word_probs",".","size","(","1",")","if","self",".","stepwise_penalty",":","self",".","global_scorer",".","update_score","(","self",",","attn_out",")","# force the output to be longer than self.min_length","cur_len","=","len","(","self",".","next_ys",")","if","cur_len","<","self",".","min_length",":","for","k","in","range","(","len","(","word_probs",")",")",":","word_probs","[","k","]","[","self",".","_eos","]","=","-","1e20","# Sum the previous scores.","if","len","(","self",".","prev_ks",")",">","0",":","beam_scores","=","word_probs","+","self",".","scores",".","unsqueeze","(","1",")",".","expand_as","(","word_probs",")","# Don't let EOS have children.","for","i","in","range","(","self",".","next_ys","[","-","1","]",".","size","(","0",")",")",":","if","self",".","next_ys","[","-","1","]","[","i","]","==","self",".","_eos",":","beam_scores","[","i","]","=","-","1e20","# Block ngram 
repeats","if","self",".","block_ngram_repeat",">","0",":","ngrams","=","[","]","le","=","len","(","self",".","next_ys",")","for","j","in","range","(","self",".","next_ys","[","-","1","]",".","size","(","0",")",")",":","hyp",",","_","=","self",".","get_hyp","(","le","-","1",",","j",")","ngrams","=","set","(",")","fail","=","False","gram","=","[","]","for","i","in","range","(","le","-","1",")",":","# Last n tokens, n = block_ngram_repeat","gram","=","(","gram","+","[","hyp","[","i","]",".","item","(",")","]",")","[","-","self",".","block_ngram_repeat",":","]","# Skip the blocking if it is in the exclusion list","if","set","(","gram",")","&","self",".","exclusion_tokens",":","continue","if","tuple","(","gram",")","in","ngrams",":","fail","=","True","ngrams",".","add","(","tuple","(","gram",")",")","if","fail",":","beam_scores","[","j","]","=","-","10e20","else",":","beam_scores","=","word_probs","[","0","]","flat_beam_scores","=","beam_scores",".","view","(","-","1",")","best_scores",",","best_scores_id","=","flat_beam_scores",".","topk","(","self",".","size",",","0",",","True",",","True",")","self",".","all_scores",".","append","(","self",".","scores",")","self",".","scores","=","best_scores","# best_scores_id is flattened beam x word array, so calculate which","# word and beam each score came from","prev_k","=","best_scores_id","\/","num_words","self",".","prev_ks",".","append","(","prev_k",")","self",".","next_ys",".","append","(","(","best_scores_id","-","prev_k","*","num_words",")",")","self",".","attn",".","append","(","attn_out",".","index_select","(","0",",","prev_k",")",")","self",".","global_scorer",".","update_global_state","(","self",")","for","i","in","range","(","self",".","next_ys","[","-","1","]",".","size","(","0",")",")",":","if","self",".","next_ys","[","-","1","]","[","i","]","==","self",".","_eos",":","global_scores","=","self",".","global_scorer",".","score","(","self",",","self",".","scores",")","s","=","global_scores","[","i","]","self",".","finished",".","append","(","(","s",",","len","(","self",".","next_ys",")","-","1",",","i",")",")","# End condition is when top-of-beam is EOS and no global score.","if","self",".","next_ys","[","-","1","]","[","0","]","==","self",".","_eos",":","self",".","all_scores",".","append","(","self",".","scores",")","self",".","eos_top","=","True"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L74-L150"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.get_hyp","parameters":"(self, timestep, k)","argument_list":"","return_statement":"return hyp[::-1], torch.stack(attn[::-1])","docstring":"Walk back to construct the full hypothesis.","docstring_summary":"Walk back to construct the full hypothesis.","docstring_tokens":["Walk","back","to","construct","the","full","hypothesis","."],"function":"def get_hyp(self, timestep, k):\n \"\"\"\n Walk back to construct the full hypothesis.\n \"\"\"\n hyp, attn = [], []\n for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):\n hyp.append(self.next_ys[j + 1][k])\n attn.append(self.attn[j][k])\n k = self.prev_ks[j][k]\n return hyp[::-1], 
torch.stack(attn[::-1])","function_tokens":["def","get_hyp","(","self",",","timestep",",","k",")",":","hyp",",","attn","=","[","]",",","[","]","for","j","in","range","(","len","(","self",".","prev_ks","[",":","timestep","]",")","-","1",",","-","1",",","-","1",")",":","hyp",".","append","(","self",".","next_ys","[","j","+","1","]","[","k","]",")","attn",".","append","(","self",".","attn","[","j","]","[","k","]",")","k","=","self",".","prev_ks","[","j","]","[","k","]","return","hyp","[",":",":","-","1","]",",","torch",".","stack","(","attn","[",":",":","-","1","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L170-L179"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"GNMTGlobalScorer.score","parameters":"(self, beam, logprobs)","argument_list":"","return_statement":"return normalized_probs","docstring":"Rescores a prediction based on penalty functions","docstring_summary":"Rescores a prediction based on penalty functions","docstring_tokens":["Rescores","a","prediction","based","on","penalty","functions"],"function":"def score(self, beam, logprobs):\n \"\"\"\n Rescores a prediction based on penalty functions\n \"\"\"\n normalized_probs = self.length_penalty(beam,\n logprobs,\n self.alpha)\n if not beam.stepwise_penalty:\n penalty = self.cov_penalty(beam,\n beam.global_state[\"coverage\"],\n self.beta)\n normalized_probs -= penalty\n\n return normalized_probs","function_tokens":["def","score","(","self",",","beam",",","logprobs",")",":","normalized_probs","=","self",".","length_penalty","(","beam",",","logprobs",",","self",".","alpha",")","if","not","beam",".","stepwise_penalty",":","penalty","=","self",".","cov_penalty","(","beam",",","beam",".","global_state","[","\"coverage\"","]",",","self",".","beta",")","normalized_probs","-=","penalty","return","normalized_probs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L202-L215"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"GNMTGlobalScorer.update_score","parameters":"(self, beam, attn)","argument_list":"","return_statement":"","docstring":"Function to update scores of a Beam that is not finished","docstring_summary":"Function to update scores of a Beam that is not finished","docstring_tokens":["Function","to","update","scores","of","a","Beam","that","is","not","finished"],"function":"def update_score(self, beam, attn):\n \"\"\"\n Function to update scores of a Beam that is not finished\n \"\"\"\n if \"prev_penalty\" in beam.global_state.keys():\n beam.scores.add_(beam.global_state[\"prev_penalty\"])\n penalty = self.cov_penalty(beam,\n beam.global_state[\"coverage\"] + attn,\n self.beta)\n 
beam.scores.sub_(penalty)","function_tokens":["def","update_score","(","self",",","beam",",","attn",")",":","if","\"prev_penalty\"","in","beam",".","global_state",".","keys","(",")",":","beam",".","scores",".","add_","(","beam",".","global_state","[","\"prev_penalty\"","]",")","penalty","=","self",".","cov_penalty","(","beam",",","beam",".","global_state","[","\"coverage\"","]","+","attn",",","self",".","beta",")","beam",".","scores",".","sub_","(","penalty",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L217-L226"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py","language":"python","identifier":"GNMTGlobalScorer.update_global_state","parameters":"(self, beam)","argument_list":"","return_statement":"","docstring":"Keeps the coverage vector as sum of attentions","docstring_summary":"Keeps the coverage vector as sum of attentions","docstring_tokens":["Keeps","the","coverage","vector","as","sum","of","attentions"],"function":"def update_global_state(self, beam):\n \"Keeps the coverage vector as sum of attentions\"\n if len(beam.prev_ks) == 1:\n beam.global_state[\"prev_penalty\"] = beam.scores.clone().fill_(0.0)\n beam.global_state[\"coverage\"] = beam.attn[-1]\n self.cov_total = beam.attn[-1].sum(1)\n else:\n self.cov_total += torch.min(beam.attn[-1],\n beam.global_state['coverage']).sum(1)\n beam.global_state[\"coverage\"] = beam.global_state[\"coverage\"] \\\n .index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])\n\n prev_penalty = self.cov_penalty(beam,\n beam.global_state[\"coverage\"],\n self.beta)\n beam.global_state[\"prev_penalty\"] = prev_penalty","function_tokens":["def","update_global_state","(","self",",","beam",")",":","if","len","(","beam",".","prev_ks",")","==","1",":","beam",".","global_state","[","\"prev_penalty\"","]","=","beam",".","scores",".","clone","(",")",".","fill_","(","0.0",")","beam",".","global_state","[","\"coverage\"","]","=","beam",".","attn","[","-","1","]","self",".","cov_total","=","beam",".","attn","[","-","1","]",".","sum","(","1",")","else",":","self",".","cov_total","+=","torch",".","min","(","beam",".","attn","[","-","1","]",",","beam",".","global_state","[","'coverage'","]",")",".","sum","(","1",")","beam",".","global_state","[","\"coverage\"","]","=","beam",".","global_state","[","\"coverage\"","]",".","index_select","(","0",",","beam",".","prev_ks","[","-","1","]",")",".","add","(","beam",".","attn","[","-","1","]",")","prev_penalty","=","self",".","cov_penalty","(","beam",",","beam",".","global_state","[","\"coverage\"","]",",","self",".","beta",")","beam",".","global_state","[","\"prev_penalty\"","]","=","prev_penalty"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/beam.py#L228-L243"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/translate\/translation.py","language":"python","identifier":"Translation.log","parameters":"(self, sent_number)","argument_list":"","return_statement":"return output","docstring":"Log translation.","docstring_summary":"Log translation.","docstring_tokens":["Log","translation","."],"function":"def log(self, sent_number):\n \"\"\"\n Log translation.\n \"\"\"\n\n output = '\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)\n\n best_pred = 
self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n output += 'PRED {}: {}\\n'.format(sent_number, pred_sent)\n output += \"PRED SCORE: {:.4f}\\n\".format(best_score)\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n output += 'GOLD {}: {}\\n'.format(sent_number, tgt_sent)\n output += (\"GOLD SCORE: {:.4f}\\n\".format(self.gold_score))\n if len(self.pred_sents) > 1:\n output += '\\nBEST HYP:\\n'\n for score, sent in zip(self.pred_scores, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n\n return output","function_tokens":["def","log","(","self",",","sent_number",")",":","output","=","'\\nSENT {}: {}\\n'",".","format","(","sent_number",",","self",".","src_raw",")","best_pred","=","self",".","pred_sents","[","0","]","best_score","=","self",".","pred_scores","[","0","]","pred_sent","=","' '",".","join","(","best_pred",")","output","+=","'PRED {}: {}\\n'",".","format","(","sent_number",",","pred_sent",")","output","+=","\"PRED SCORE: {:.4f}\\n\"",".","format","(","best_score",")","if","self",".","gold_sent","is","not","None",":","tgt_sent","=","' '",".","join","(","self",".","gold_sent",")","output","+=","'GOLD {}: {}\\n'",".","format","(","sent_number",",","tgt_sent",")","output","+=","(","\"GOLD SCORE: {:.4f}\\n\"",".","format","(","self",".","gold_score",")",")","if","len","(","self",".","pred_sents",")",">","1",":","output","+=","'\\nBEST HYP:\\n'","for","score",",","sent","in","zip","(","self",".","pred_scores",",","self",".","pred_sents",")",":","output","+=","\"[{:.4f}] {}\\n\"",".","format","(","score",",","sent",")","return","output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/translate\/translation.py#L134-L156"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderBase.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None)","argument_list":"","return_statement":"return decoder_outputs, state, attns","docstring":"Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.","docstring_summary":"Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x 
src_len]`.","docstring_tokens":["Args",":","tgt","(","LongTensor",")",":","sequences","of","padded","tokens","[","tgt_len","x","batch","x","nfeats","]",".","memory_bank","(","FloatTensor",")",":","vectors","from","the","encoder","[","src_len","x","batch","x","hidden","]",".","state","(",":","obj",":","onmt",".","models",".","DecoderState",")",":","decoder","state","object","to","initialize","the","decoder","memory_lengths","(","LongTensor",")",":","the","padded","source","lengths","[","batch","]",".","Returns",":","(","FloatTensor",":","obj",":","onmt",".","Models",".","DecoderState","FloatTensor",")",":","*","decoder_outputs",":","output","from","the","decoder","(","after","attn",")","[","tgt_len","x","batch","x","hidden","]",".","*","decoder_state",":","final","hidden","state","from","the","decoder","*","attns",":","distribution","over","src","at","each","tgt","[","tgt_len","x","batch","x","src_len","]","."],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None):\n \"\"\"\n Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.\n \"\"\"\n # Check\n assert isinstance(state, RNNDecoderState)\n # tgt.size() returns tgt length and batch\n _, tgt_batch, _ = tgt.size()\n _, memory_batch, _ = memory_bank.size()\n aeq(tgt_batch, memory_batch)\n # END\n\n # Run the forward pass of the RNN.\n decoder_final, decoder_outputs, attns = self._run_forward_pass(\n tgt, memory_bank, state, memory_lengths=memory_lengths)\n\n # Update the state with the result.\n final_output = decoder_outputs[-1]\n coverage = None\n if \"coverage\" in attns:\n coverage = attns[\"coverage\"][-1].unsqueeze(0)\n state.update_state(decoder_final, final_output.unsqueeze(0), coverage)\n\n # Concatenates sequence of tensors along a new dimension.\n # NOTE: v0.3 to 0.4: decoder_outputs \/ attns[*] may not be list\n # (in particular in case of SRU) it was not raising error in 0.3\n # since stack(Variable) was allowed.\n # In 0.4, SRU returns a tensor that shouldn't be stacke\n if type(decoder_outputs) == list:\n decoder_outputs = torch.stack(decoder_outputs)\n\n for k in attns:\n if type(attns[k]) == list:\n attns[k] = torch.stack(attns[k])\n\n return decoder_outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# Check","assert","isinstance","(","state",",","RNNDecoderState",")","# tgt.size() returns tgt length and batch","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","_",",","memory_batch",",","_","=","memory_bank",".","size","(",")","aeq","(","tgt_batch",",","memory_batch",")","# END","# Run the forward pass of the RNN.","decoder_final",",","decoder_outputs",",","attns","=","self",".","_run_forward_pass","(","tgt",",","memory_bank",",","state",",","memory_lengths","=","memory_lengths",")","# Update the state with the 
result.","final_output","=","decoder_outputs","[","-","1","]","coverage","=","None","if","\"coverage\"","in","attns",":","coverage","=","attns","[","\"coverage\"","]","[","-","1","]",".","unsqueeze","(","0",")","state",".","update_state","(","decoder_final",",","final_output",".","unsqueeze","(","0",")",",","coverage",")","# Concatenates sequence of tensors along a new dimension.","# NOTE: v0.3 to 0.4: decoder_outputs \/ attns[*] may not be list","# (in particular in case of SRU) it was not raising error in 0.3","# since stack(Variable) was allowed.","# In 0.4, SRU returns a tensor that shouldn't be stacke","if","type","(","decoder_outputs",")","==","list",":","decoder_outputs","=","torch",".","stack","(","decoder_outputs",")","for","k","in","attns",":","if","type","(","attns","[","k","]",")","==","list",":","attns","[","k","]","=","torch",".","stack","(","attns","[","k","]",")","return","decoder_outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L107-L158"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderBase.init_decoder_state","parameters":"(self, src, memory_bank, encoder_final,\n with_cache=False)","argument_list":"","return_statement":"","docstring":"Init decoder state with last state of the encoder","docstring_summary":"Init decoder state with last state of the encoder","docstring_tokens":["Init","decoder","state","with","last","state","of","the","encoder"],"function":"def init_decoder_state(self, src, memory_bank, encoder_final,\n with_cache=False):\n \"\"\" Init decoder state with last state of the encoder \"\"\"\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n hidden = torch.cat([hidden[0:hidden.size(0):2],\n hidden[1:hidden.size(0):2]], 2)\n return hidden\n\n if isinstance(encoder_final, tuple): # LSTM\n return RNNDecoderState(self.hidden_size,\n tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final]))\n else: # GRU\n return RNNDecoderState(self.hidden_size,\n _fix_enc_hidden(encoder_final))","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","encoder_final",",","with_cache","=","False",")",":","def","_fix_enc_hidden","(","hidden",")",":","# The encoder hidden is (layers*directions) x batch x dim.","# We need to convert it to layers x batch x (directions*dim).","if","self",".","bidirectional_encoder",":","hidden","=","torch",".","cat","(","[","hidden","[","0",":","hidden",".","size","(","0",")",":","2","]",",","hidden","[","1",":","hidden",".","size","(","0",")",":","2","]","]",",","2",")","return","hidden","if","isinstance","(","encoder_final",",","tuple",")",":","# LSTM","return","RNNDecoderState","(","self",".","hidden_size",",","tuple","(","[","_fix_enc_hidden","(","enc_hid",")","for","enc_hid","in","encoder_final","]",")",")","else",":","# GRU","return","RNNDecoderState","(","self",".","hidden_size",",","_fix_enc_hidden","(","encoder_final",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L160-L177"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"StdRNNDecoder._run_forward_pass","parameters":"(self, tgt, memory_bank, state, memory_lengths=None)","argument_list":"","return_statement":"return decoder_final, decoder_outputs, attns","docstring":"Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.","docstring_summary":"Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.","docstring_tokens":["Private","helper","for","running","the","specific","RNN","forward","pass",".","Must","be","overriden","by","all","subclasses",".","Args",":","tgt","(","LongTensor",")",":","a","sequence","of","input","tokens","tensors","[","len","x","batch","x","nfeats","]",".","memory_bank","(","FloatTensor",")",":","output","(","tensor","sequence",")","from","the","encoder","RNN","of","size","(","src_len","x","batch","x","hidden_size",")",".","state","(","FloatTensor",")",":","hidden","state","from","the","encoder","RNN","for","initializing","the","decoder",".","memory_lengths","(","LongTensor",")",":","the","source","memory_bank","lengths",".","Returns",":","decoder_final","(","Tensor",")",":","final","hidden","state","from","the","decoder",".","decoder_outputs","(","[","FloatTensor","]",")",":","an","array","of","output","of","every","time","step","from","the","decoder",".","attns","(","dict","of","(","str","[","FloatTensor","]",")",":","a","dictionary","of","different","type","of","attention","Tensor","array","of","every","time","step","from","the","decoder","."],"function":"def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank 
lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.\n \"\"\"\n assert not self._copy # TODO, no support yet.\n assert not self._coverage # TODO, no support yet.\n\n # Initialize local and return variables.\n attns = {}\n emb = self.embeddings(tgt)\n\n # Run the forward pass of the RNN.\n if isinstance(self.rnn, nn.GRU):\n rnn_output, decoder_final = self.rnn(emb, state.hidden[0])\n else:\n rnn_output, decoder_final = self.rnn(emb, state.hidden)\n\n # Check\n tgt_len, tgt_batch, _ = tgt.size()\n output_len, output_batch, _ = rnn_output.size()\n aeq(tgt_len, output_len)\n aeq(tgt_batch, output_batch)\n # END\n\n # Calculate the attention.\n decoder_outputs, p_attn = self.attn(\n rnn_output.transpose(0, 1).contiguous(),\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths\n )\n attns[\"std\"] = p_attn\n\n # Calculate the context gate.\n if self.context_gate is not None:\n decoder_outputs = self.context_gate(\n emb.view(-1, emb.size(2)),\n rnn_output.view(-1, rnn_output.size(2)),\n decoder_outputs.view(-1, decoder_outputs.size(2))\n )\n decoder_outputs = \\\n decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)\n\n decoder_outputs = self.dropout(decoder_outputs)\n return decoder_final, decoder_outputs, attns","function_tokens":["def","_run_forward_pass","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",")",":","assert","not","self",".","_copy","# TODO, no support yet.","assert","not","self",".","_coverage","# TODO, no support yet.","# Initialize local and return variables.","attns","=","{","}","emb","=","self",".","embeddings","(","tgt",")","# Run the forward pass of the RNN.","if","isinstance","(","self",".","rnn",",","nn",".","GRU",")",":","rnn_output",",","decoder_final","=","self",".","rnn","(","emb",",","state",".","hidden","[","0","]",")","else",":","rnn_output",",","decoder_final","=","self",".","rnn","(","emb",",","state",".","hidden",")","# Check","tgt_len",",","tgt_batch",",","_","=","tgt",".","size","(",")","output_len",",","output_batch",",","_","=","rnn_output",".","size","(",")","aeq","(","tgt_len",",","output_len",")","aeq","(","tgt_batch",",","output_batch",")","# END","# Calculate the attention.","decoder_outputs",",","p_attn","=","self",".","attn","(","rnn_output",".","transpose","(","0",",","1",")",".","contiguous","(",")",",","memory_bank",".","transpose","(","0",",","1",")",",","memory_lengths","=","memory_lengths",")","attns","[","\"std\"","]","=","p_attn","# Calculate the context gate.","if","self",".","context_gate","is","not","None",":","decoder_outputs","=","self",".","context_gate","(","emb",".","view","(","-","1",",","emb",".","size","(","2",")",")",",","rnn_output",".","view","(","-","1",",","rnn_output",".","size","(","2",")",")",",","decoder_outputs",".","view","(","-","1",",","decoder_outputs",".","size","(","2",")",")",")","decoder_outputs","=","decoder_outputs",".","view","(","tgt_len",",","tgt_batch",",","self",".","hidden_size",")","decoder_outputs","=","self",".","dropout","(","decoder_outputs",")","return","decoder_final",",","decoder_outputs",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L196-L255"} 
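Editor's note — StdRNNDecoder._run_forward_pass above hands rnn_output and memory_bank (transposed to batch-first) to self.attn. The repository's GlobalAttention module is not included in this dump, so the following is a hedged stand-in using plain dot-product attention, purely to make the docstring's shapes concrete (tgt_len x batch x hidden queries against a src_len x batch x hidden memory bank); it is not the project's implementation:

# Toy dot-product attention matching the shapes described in the
# _run_forward_pass docstring; a stand-in for self.attn, not OpenNMT's code.
import torch
import torch.nn.functional as F

tgt_len, src_len, batch, hidden = 3, 5, 2, 16
rnn_output = torch.randn(tgt_len, batch, hidden)
memory_bank = torch.randn(src_len, batch, hidden)

q = rnn_output.transpose(0, 1)             # batch x tgt_len x hidden
k = memory_bank.transpose(0, 1)            # batch x src_len x hidden

scores = torch.bmm(q, k.transpose(1, 2))   # batch x tgt_len x src_len
p_attn = F.softmax(scores, dim=-1)         # distribution over src per tgt step
context = torch.bmm(p_attn, k)             # batch x tgt_len x hidden
assert context.shape == (batch, tgt_len, hidden)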
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"StdRNNDecoder._input_size","parameters":"(self)","argument_list":"","return_statement":"return self.embeddings.embedding_size","docstring":"Private helper returning the number of expected features.","docstring_summary":"Private helper returning the number of expected features.","docstring_tokens":["Private","helper","returning","the","number","of","expected","features","."],"function":"def _input_size(self):\n \"\"\"\n Private helper returning the number of expected features.\n \"\"\"\n return self.embeddings.embedding_size","function_tokens":["def","_input_size","(","self",")",":","return","self",".","embeddings",".","embedding_size"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L262-L266"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._run_forward_pass","parameters":"(self, tgt, memory_bank, state, memory_lengths=None)","argument_list":"","return_statement":"return hidden, decoder_outputs, attns","docstring":"See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.","docstring_summary":"See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.","docstring_tokens":["See","StdRNNDecoder",".","_run_forward_pass","()","for","description","of","arguments","and","return","values","."],"function":"def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n \"\"\"\n # Additional args check.\n input_feed = state.input_feed.squeeze(0)\n # print(\"input feed size: {}\\n\".format(input_feed.size()))\n input_feed_batch, _ = input_feed.size()\n _, tgt_batch, _ = tgt.size()\n aeq(tgt_batch, input_feed_batch)\n # END Additional args check.\n\n # Initialize local and return variables.\n decoder_outputs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n if self._coverage:\n attns[\"coverage\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n hidden = state.hidden\n coverage = state.coverage.squeeze(0) \\\n if state.coverage is not None else None\n\n # Input feed concatenates hidden state with\n # input at every time step.\n #pdb.set_trace()\n #print(\"emb size: {}\\n\".format(emb.size()));exit()\n for _, emb_t in enumerate(emb.split(1)):\n emb_t = emb_t.squeeze(0)\n decoder_input = torch.cat([emb_t, input_feed], 1)\n\n rnn_output, hidden = self.rnn(decoder_input, hidden)\n decoder_output, p_attn = self.attn(\n rnn_output,\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths)\n if self.context_gate is not None:\n # TODO: context gate should be employed\n # instead of second RNN transform.\n decoder_output = self.context_gate(\n decoder_input, rnn_output, decoder_output\n )\n decoder_output = self.dropout(decoder_output)\n input_feed = decoder_output\n\n decoder_outputs += [decoder_output]\n attns[\"std\"] += [p_attn]\n\n # Update the coverage attention.\n if self._coverage:\n coverage = coverage + p_attn \\\n if coverage is not None else p_attn\n attns[\"coverage\"] += [coverage]\n\n # Run the forward pass of the copy 
attention layer.\n if self._copy and not self._reuse_copy_attn:\n _, copy_attn = self.copy_attn(decoder_output,\n memory_bank.transpose(0, 1))\n attns[\"copy\"] += [copy_attn]\n elif self._copy:\n attns[\"copy\"] = attns[\"std\"]\n # Return result.\n return hidden, decoder_outputs, attns","function_tokens":["def","_run_forward_pass","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",")",":","# Additional args check.","input_feed","=","state",".","input_feed",".","squeeze","(","0",")","# print(\"input feed size: {}\\n\".format(input_feed.size()))","input_feed_batch",",","_","=","input_feed",".","size","(",")","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","aeq","(","tgt_batch",",","input_feed_batch",")","# END Additional args check.","# Initialize local and return variables.","decoder_outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","if","self",".","_coverage",":","attns","[","\"coverage\"","]","=","[","]","emb","=","self",".","embeddings","(","tgt",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","hidden","=","state",".","hidden","coverage","=","state",".","coverage",".","squeeze","(","0",")","if","state",".","coverage","is","not","None","else","None","# Input feed concatenates hidden state with","# input at every time step.","#pdb.set_trace()","#print(\"emb size: {}\\n\".format(emb.size()));exit()","for","_",",","emb_t","in","enumerate","(","emb",".","split","(","1",")",")",":","emb_t","=","emb_t",".","squeeze","(","0",")","decoder_input","=","torch",".","cat","(","[","emb_t",",","input_feed","]",",","1",")","rnn_output",",","hidden","=","self",".","rnn","(","decoder_input",",","hidden",")","decoder_output",",","p_attn","=","self",".","attn","(","rnn_output",",","memory_bank",".","transpose","(","0",",","1",")",",","memory_lengths","=","memory_lengths",")","if","self",".","context_gate","is","not","None",":","# TODO: context gate should be employed","# instead of second RNN transform.","decoder_output","=","self",".","context_gate","(","decoder_input",",","rnn_output",",","decoder_output",")","decoder_output","=","self",".","dropout","(","decoder_output",")","input_feed","=","decoder_output","decoder_outputs","+=","[","decoder_output","]","attns","[","\"std\"","]","+=","[","p_attn","]","# Update the coverage attention.","if","self",".","_coverage",":","coverage","=","coverage","+","p_attn","if","coverage","is","not","None","else","p_attn","attns","[","\"coverage\"","]","+=","[","coverage","]","# Run the forward pass of the copy attention layer.","if","self",".","_copy","and","not","self",".","_reuse_copy_attn",":","_",",","copy_attn","=","self",".","copy_attn","(","decoder_output",",","memory_bank",".","transpose","(","0",",","1",")",")","attns","[","\"copy\"","]","+=","[","copy_attn","]","elif","self",".","_copy",":","attns","[","\"copy\"","]","=","attns","[","\"std\"","]","# Return result.","return","hidden",",","decoder_outputs",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L296-L363"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._input_size","parameters":"(self)","argument_list":"","return_statement":"return self.embeddings.embedding_size + 
self.hidden_size","docstring":"Using input feed by concatenating input with attention vectors.","docstring_summary":"Using input feed by concatenating input with attention vectors.","docstring_tokens":["Using","input","feed","by","concatenating","input","with","attention","vectors","."],"function":"def _input_size(self):\n \"\"\"\n Using input feed by concatenating input with attention vectors.\n \"\"\"\n return self.embeddings.embedding_size + self.hidden_size","function_tokens":["def","_input_size","(","self",")",":","return","self",".","embeddings",".","embedding_size","+","self",".","hidden_size"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L377-L381"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"DecoderState.detach","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Need to document this","docstring_summary":"Need to document this","docstring_tokens":["Need","to","document","this"],"function":"def detach(self):\n \"\"\" Need to document this \"\"\"\n self.hidden = tuple([_.detach() for _ in self.hidden])\n self.input_feed = self.input_feed.detach()","function_tokens":["def","detach","(","self",")",":","self",".","hidden","=","tuple","(","[","_",".","detach","(",")","for","_","in","self",".","hidden","]",")","self",".","input_feed","=","self",".","input_feed",".","detach","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L392-L395"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"DecoderState.beam_update","parameters":"(self, idx, positions, beam_size)","argument_list":"","return_statement":"","docstring":"Need to document this","docstring_summary":"Need to document this","docstring_tokens":["Need","to","document","this"],"function":"def beam_update(self, idx, positions, beam_size):\n \"\"\" Need to document this \"\"\"\n for e in self._all:\n sizes = e.size()\n br = sizes[1]\n if len(sizes) == 3:\n sent_states = e.view(sizes[0], beam_size, br \/\/ beam_size,\n sizes[2])[:, :, idx]\n else:\n sent_states = e.view(sizes[0], beam_size,\n br \/\/ beam_size,\n sizes[2],\n sizes[3])[:, :, idx]\n\n sent_states.data.copy_(\n sent_states.data.index_select(1, positions))","function_tokens":["def","beam_update","(","self",",","idx",",","positions",",","beam_size",")",":","for","e","in","self",".","_all",":","sizes","=","e",".","size","(",")","br","=","sizes","[","1","]","if","len","(","sizes",")","==","3",":","sent_states","=","e",".","view","(","sizes","[","0","]",",","beam_size",",","br","\/\/","beam_size",",","sizes","[","2","]",")","[",":",",",":",",","idx","]","else",":","sent_states","=","e",".","view","(","sizes","[","0","]",",","beam_size",",","br","\/\/","beam_size",",","sizes","[","2","]",",","sizes","[","3","]",")","[",":",",",":",",","idx","]","sent_states",".","data",".","copy_","(","sent_states",".","data",".","index_select","(","1",",","positions",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L397-L412"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderState.__init__","parameters":"(self, hidden_size, rnnstate)","argument_list":"","return_statement":"","docstring":"Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).","docstring_summary":"Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).","docstring_tokens":["Args",":","hidden_size","(","int",")",":","the","size","of","hidden","layer","of","the","decoder",".","rnnstate",":","final","hidden","state","from","the","encoder",".","transformed","to","shape",":","layers","x","batch","x","(","directions","*","dim",")","."],"function":"def __init__(self, hidden_size, rnnstate):\n \"\"\"\n Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).\n \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.coverage = None\n\n # Init the input feed.\n batch_size = self.hidden[0].size(1)\n h_size = (batch_size, hidden_size)\n self.input_feed = self.hidden[0].data.new(*h_size).zero_() \\\n .unsqueeze(0)","function_tokens":["def","__init__","(","self",",","hidden_size",",","rnnstate",")",":","if","not","isinstance","(","rnnstate",",","tuple",")",":","self",".","hidden","=","(","rnnstate",",",")","else",":","self",".","hidden","=","rnnstate","self",".","coverage","=","None","# Init the input feed.","batch_size","=","self",".","hidden","[","0","]",".","size","(","1",")","h_size","=","(","batch_size",",","hidden_size",")","self",".","input_feed","=","self",".","hidden","[","0","]",".","data",".","new","(","*","h_size",")",".","zero_","(",")",".","unsqueeze","(","0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L421-L438"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderState.update_state","parameters":"(self, rnnstate, input_feed, coverage)","argument_list":"","return_statement":"","docstring":"Update decoder state","docstring_summary":"Update decoder state","docstring_tokens":["Update","decoder","state"],"function":"def update_state(self, rnnstate, input_feed, coverage):\n \"\"\" Update decoder state \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.input_feed = input_feed\n self.coverage = coverage","function_tokens":["def","update_state","(","self",",","rnnstate",",","input_feed",",","coverage",")",":","if","not","isinstance","(","rnnstate",",","tuple",")",":","self",".","hidden","=","(","rnnstate",",",")","else",":","self",".","hidden","=","rnnstate","self",".","input_feed","=","input_feed","self",".","coverage","=","coverage"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L444-L451"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. \"\"\"\n vars = [e.data.repeat(1, beam_size, 1)\n for e in self._all]\n self.hidden = tuple(vars[:-1])\n self.input_feed = vars[-1]","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","vars","=","[","e",".","data",".","repeat","(","1",",","beam_size",",","1",")","for","e","in","self",".","_all","]","self",".","hidden","=","tuple","(","vars","[",":","-","1","]",")","self",".","input_feed","=","vars","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/decoder.py#L453-L458"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py","language":"python","identifier":"load_test_model","parameters":"(opt, dummy_opt)","argument_list":"","return_statement":"return shared_fields, ensemble_model, shared_model_opt","docstring":"Read in multiple models for ensemble","docstring_summary":"Read in multiple models for ensemble","docstring_tokens":["Read","in","multiple","models","for","ensemble"],"function":"def load_test_model(opt, dummy_opt):\n \"\"\" Read in multiple models for ensemble \"\"\"\n shared_fields = None\n shared_model_opt = None\n models = []\n for model_path in opt.models:\n fields, model, model_opt = \\\n onmt.model_builder.load_test_model(opt,\n dummy_opt,\n model_path=model_path)\n if shared_fields is None:\n shared_fields = fields\n else:\n for key, field in fields.items():\n if field is not None and 'vocab' in field.__dict__:\n assert field.vocab.stoi == shared_fields[key].vocab.stoi, \\\n 'Ensemble models must use the same preprocessed data'\n models.append(model)\n if shared_model_opt is None:\n shared_model_opt = model_opt\n ensemble_model = EnsembleModel(models)\n return shared_fields, ensemble_model, shared_model_opt","function_tokens":["def","load_test_model","(","opt",",","dummy_opt",")",":","shared_fields","=","None","shared_model_opt","=","None","models","=","[","]","for","model_path","in","opt",".","models",":","fields",",","model",",","model_opt","=","onmt",".","model_builder",".","load_test_model","(","opt",",","dummy_opt",",","model_path","=","model_path",")","if","shared_fields","is","None",":","shared_fields","=","fields","else",":","for","key",",","field","in","fields",".","items","(",")",":","if","field","is","not","None","and","'vocab'","in","field",".","__dict__",":","assert","field",".","vocab",".","stoi","==","shared_fields","[","key","]",".","vocab",".","stoi",",","'Ensemble models must use the same preprocessed 
data'","models",".","append","(","model",")","if","shared_model_opt","is","None",":","shared_model_opt","=","model_opt","ensemble_model","=","EnsembleModel","(","models",")","return","shared_fields",",","ensemble_model",",","shared_model_opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py#L135-L156"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. \"\"\"\n for model_state in self.model_decoder_states:\n model_state.repeat_beam_size_times(beam_size)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","for","model_state","in","self",".","model_decoder_states",":","model_state",".","repeat_beam_size_times","(","beam_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py#L27-L30"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoderOutput.squeeze","parameters":"(self, dim=None)","argument_list":"","return_statement":"return EnsembleDecoderOutput([\n x.squeeze(dim) for x in self.model_outputs])","docstring":"Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`","docstring_summary":"Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`","docstring_tokens":["Delegate","squeeze","to","avoid","modifying",":","obj",":","Translator",".","translate_batch","()"],"function":"def squeeze(self, dim=None):\n \"\"\"\n Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`\n \"\"\"\n return EnsembleDecoderOutput([\n x.squeeze(dim) for x in self.model_outputs])","function_tokens":["def","squeeze","(","self",",","dim","=","None",")",":","return","EnsembleDecoderOutput","(","[","x",".","squeeze","(","dim",")","for","x","in","self",".","model_outputs","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py#L41-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None)","argument_list":"","return_statement":"return (EnsembleDecoderOutput(outputs),\n EnsembleDecoderState(states),\n mean_attns)","docstring":"See :obj:`RNNDecoderBase.forward()`","docstring_summary":"See :obj:`RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None):\n \"\"\" See :obj:`RNNDecoderBase.forward()` \"\"\"\n # Memory_lengths is a single tensor shared 
between all models.\n # This assumption will not hold if Translator is modified\n # to calculate memory_lengths as something other than the length\n # of the input.\n outputs, states, attns = zip(*[\n model_decoder.forward(\n tgt, memory_bank[i], state[i], memory_lengths, step=step)\n for (i, model_decoder)\n in enumerate(self.model_decoders)])\n mean_attns = self.combine_attns(attns)\n return (EnsembleDecoderOutput(outputs),\n EnsembleDecoderState(states),\n mean_attns)","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# Memory_lengths is a single tensor shared between all models.","# This assumption will not hold if Translator is modified","# to calculate memory_lengths as something other than the length","# of the input.","outputs",",","states",",","attns","=","zip","(","*","[","model_decoder",".","forward","(","tgt",",","memory_bank","[","i","]",",","state","[","i","]",",","memory_lengths",",","step","=","step",")","for","(","i",",","model_decoder",")","in","enumerate","(","self",".","model_decoders",")","]",")","mean_attns","=","self",".","combine_attns","(","attns",")","return","(","EnsembleDecoderOutput","(","outputs",")",",","EnsembleDecoderState","(","states",")",",","mean_attns",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py#L72-L87"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoder.init_decoder_state","parameters":"(self, src, memory_bank, enc_hidden)","argument_list":"","return_statement":"return EnsembleDecoderState(\n [model_decoder.init_decoder_state(src,\n memory_bank[i],\n enc_hidden[i])\n for (i, model_decoder) in enumerate(self.model_decoders)])","docstring":"See :obj:`RNNDecoderBase.init_decoder_state()`","docstring_summary":"See :obj:`RNNDecoderBase.init_decoder_state()`","docstring_tokens":["See",":","obj",":","RNNDecoderBase",".","init_decoder_state","()"],"function":"def init_decoder_state(self, src, memory_bank, enc_hidden):\n \"\"\" See :obj:`RNNDecoderBase.init_decoder_state()` \"\"\"\n return EnsembleDecoderState(\n [model_decoder.init_decoder_state(src,\n memory_bank[i],\n enc_hidden[i])\n for (i, model_decoder) in enumerate(self.model_decoders)])","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","enc_hidden",")",":","return","EnsembleDecoderState","(","[","model_decoder",".","init_decoder_state","(","src",",","memory_bank","[","i","]",",","enc_hidden","[","i","]",")","for","(","i",",","model_decoder",")","in","enumerate","(","self",".","model_decoders",")","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py#L95-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleGenerator.forward","parameters":"(self, hidden)","argument_list":"","return_statement":"return torch.stack(distributions).mean(0)","docstring":"Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.","docstring_summary":"Compute a 
distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.","docstring_tokens":["Compute","a","distribution","over","the","target","dictionary","by","averaging","distributions","from","models","in","the","ensemble",".","All","models","in","the","ensemble","must","share","a","target","vocabulary","."],"function":"def forward(self, hidden):\n \"\"\"\n Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.\n \"\"\"\n distributions = [model_generator.forward(hidden[i])\n for (i, model_generator)\n in enumerate(self.model_generators)]\n return torch.stack(distributions).mean(0)","function_tokens":["def","forward","(","self",",","hidden",")",":","distributions","=","[","model_generator",".","forward","(","hidden","[","i","]",")","for","(","i",",","model_generator",")","in","enumerate","(","self",".","model_generators",")","]","return","torch",".","stack","(","distributions",")",".","mean","(","0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/ensemble.py#L113-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderLayer.forward","parameters":"(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n previous_input=None, layer_cache=None, step=None)","argument_list":"","return_statement":"return output, attn, all_input","docstring":"Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`, `FloatTensor`):\n\n * output `[batch_size x 1 x model_dim]`\n * attn `[batch_size x 1 x src_len]`\n * all_input `[batch_size x current_step x model_dim]`","docstring_summary":"Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`","docstring_tokens":["Args",":","inputs","(","FloatTensor",")",":","[","batch_size","x","1","x","model_dim","]","memory_bank","(","FloatTensor",")",":","[","batch_size","x","src_len","x","model_dim","]","src_pad_mask","(","LongTensor",")",":","[","batch_size","x","1","x","src_len","]","tgt_pad_mask","(","LongTensor",")",":","[","batch_size","x","1","x","1","]"],"function":"def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n previous_input=None, layer_cache=None, step=None):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`, `FloatTensor`):\n\n * output `[batch_size x 1 x model_dim]`\n * attn `[batch_size x 1 x src_len]`\n * all_input `[batch_size x current_step x model_dim]`\n\n \"\"\"\n dec_mask = torch.gt(tgt_pad_mask +\n self.mask[:, :tgt_pad_mask.size(1),\n :tgt_pad_mask.size(1)], 0)\n input_norm = self.layer_norm_1(inputs)\n all_input = 
input_norm\n if previous_input is not None:\n all_input = torch.cat((previous_input, input_norm), dim=1)\n dec_mask = None\n\n if self.self_attn_type == \"scaled-dot\":\n query, attn = self.self_attn(all_input, all_input, input_norm,\n mask=dec_mask,\n layer_cache=layer_cache,\n type=\"self\")\n elif self.self_attn_type == \"average\":\n query, attn = self.self_attn(input_norm, mask=dec_mask,\n layer_cache=layer_cache, step=step)\n\n query = self.drop(query) + inputs\n\n query_norm = self.layer_norm_2(query)\n mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,\n mask=src_pad_mask,\n layer_cache=layer_cache,\n type=\"context\")\n output = self.feed_forward(self.drop(mid) + query)\n\n return output, attn, all_input","function_tokens":["def","forward","(","self",",","inputs",",","memory_bank",",","src_pad_mask",",","tgt_pad_mask",",","previous_input","=","None",",","layer_cache","=","None",",","step","=","None",")",":","dec_mask","=","torch",".","gt","(","tgt_pad_mask","+","self",".","mask","[",":",",",":","tgt_pad_mask",".","size","(","1",")",",",":","tgt_pad_mask",".","size","(","1",")","]",",","0",")","input_norm","=","self",".","layer_norm_1","(","inputs",")","all_input","=","input_norm","if","previous_input","is","not","None",":","all_input","=","torch",".","cat","(","(","previous_input",",","input_norm",")",",","dim","=","1",")","dec_mask","=","None","if","self",".","self_attn_type","==","\"scaled-dot\"",":","query",",","attn","=","self",".","self_attn","(","all_input",",","all_input",",","input_norm",",","mask","=","dec_mask",",","layer_cache","=","layer_cache",",","type","=","\"self\"",")","elif","self",".","self_attn_type","==","\"average\"",":","query",",","attn","=","self",".","self_attn","(","input_norm",",","mask","=","dec_mask",",","layer_cache","=","layer_cache",",","step","=","step",")","query","=","self",".","drop","(","query",")","+","inputs","query_norm","=","self",".","layer_norm_2","(","query",")","mid",",","attn","=","self",".","context_attn","(","memory_bank",",","memory_bank",",","query_norm",",","mask","=","src_pad_mask",",","layer_cache","=","layer_cache",",","type","=","\"context\"",")","output","=","self",".","feed_forward","(","self",".","drop","(","mid",")","+","query",")","return","output",",","attn",",","all_input"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L53-L97"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderLayer._get_attn_subsequent_mask","parameters":"(self, size)","argument_list":"","return_statement":"return subsequent_mask","docstring":"Get an attention mask to avoid using the subsequent info.\n\n Args:\n size: int\n\n Returns:\n (`LongTensor`):\n\n * subsequent_mask `[1 x size x size]`","docstring_summary":"Get an attention mask to avoid using the subsequent info.","docstring_tokens":["Get","an","attention","mask","to","avoid","using","the","subsequent","info","."],"function":"def _get_attn_subsequent_mask(self, size):\n \"\"\"\n Get an attention mask to avoid using the subsequent info.\n\n Args:\n size: int\n\n Returns:\n (`LongTensor`):\n\n * subsequent_mask `[1 x size x size]`\n \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n subsequent_mask = torch.from_numpy(subsequent_mask)\n return 
subsequent_mask","function_tokens":["def","_get_attn_subsequent_mask","(","self",",","size",")",":","attn_shape","=","(","1",",","size",",","size",")","subsequent_mask","=","np",".","triu","(","np",".","ones","(","attn_shape",")",",","k","=","1",")",".","astype","(","'uint8'",")","subsequent_mask","=","torch",".","from_numpy","(","subsequent_mask",")","return","subsequent_mask"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L99-L114"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None, cache=None)","argument_list":"","return_statement":"return outputs, state, attns","docstring":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None, cache=None):\n \"\"\"\n See :obj:`onmt.modules.RNNDecoderBase.forward()`\n \"\"\"\n src = state.src\n src_words = src[:, :, 0].transpose(0, 1)\n tgt_words = tgt[:, :, 0].transpose(0, 1)\n src_batch, src_len = src_words.size()\n tgt_batch, tgt_len = tgt_words.size()\n\n # Initialize return variables.\n outputs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n\n # Run the forward pass of the TransformerDecoder.\n emb = self.embeddings(tgt, step=step)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n output = emb.transpose(0, 1).contiguous()\n src_memory_bank = memory_bank.transpose(0, 1).contiguous()\n\n padding_idx = self.embeddings.word_padding_idx\n src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(src_batch, tgt_len, src_len)\n tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(tgt_batch, tgt_len, tgt_len)\n\n if state.cache is None:\n saved_inputs = []\n\n for i in range(self.num_layers):\n prev_layer_input = None\n if state.cache is None:\n if state.previous_input is not None:\n prev_layer_input = state.previous_layer_inputs[i]\n output, attn, all_input \\\n = self.transformer_layers[i](\n output, src_memory_bank,\n src_pad_mask, tgt_pad_mask,\n previous_input=prev_layer_input,\n layer_cache=state.cache[\"layer_{}\".format(i)]\n if state.cache is not None else None,\n step=step)\n if state.cache is None:\n saved_inputs.append(all_input)\n\n if state.cache is None:\n saved_inputs = torch.stack(saved_inputs)\n\n output = self.layer_norm(output)\n\n # Process the result and update the attentions.\n outputs = output.transpose(0, 1).contiguous()\n attn = attn.transpose(0, 1).contiguous()\n\n attns[\"std\"] = attn\n if self._copy:\n attns[\"copy\"] = attn\n\n if state.cache is None:\n state = state.update_state(tgt, saved_inputs)\n\n return outputs, state, 
attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",",","cache","=","None",")",":","src","=","state",".","src","src_words","=","src","[",":",",",":",",","0","]",".","transpose","(","0",",","1",")","tgt_words","=","tgt","[",":",",",":",",","0","]",".","transpose","(","0",",","1",")","src_batch",",","src_len","=","src_words",".","size","(",")","tgt_batch",",","tgt_len","=","tgt_words",".","size","(",")","# Initialize return variables.","outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","# Run the forward pass of the TransformerDecoder.","emb","=","self",".","embeddings","(","tgt",",","step","=","step",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","output","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","src_memory_bank","=","memory_bank",".","transpose","(","0",",","1",")",".","contiguous","(",")","padding_idx","=","self",".","embeddings",".","word_padding_idx","src_pad_mask","=","src_words",".","data",".","eq","(","padding_idx",")",".","unsqueeze","(","1",")",".","expand","(","src_batch",",","tgt_len",",","src_len",")","tgt_pad_mask","=","tgt_words",".","data",".","eq","(","padding_idx",")",".","unsqueeze","(","1",")",".","expand","(","tgt_batch",",","tgt_len",",","tgt_len",")","if","state",".","cache","is","None",":","saved_inputs","=","[","]","for","i","in","range","(","self",".","num_layers",")",":","prev_layer_input","=","None","if","state",".","cache","is","None",":","if","state",".","previous_input","is","not","None",":","prev_layer_input","=","state",".","previous_layer_inputs","[","i","]","output",",","attn",",","all_input","=","self",".","transformer_layers","[","i","]","(","output",",","src_memory_bank",",","src_pad_mask",",","tgt_pad_mask",",","previous_input","=","prev_layer_input",",","layer_cache","=","state",".","cache","[","\"layer_{}\"",".","format","(","i",")","]","if","state",".","cache","is","not","None","else","None",",","step","=","step",")","if","state",".","cache","is","None",":","saved_inputs",".","append","(","all_input",")","if","state",".","cache","is","None",":","saved_inputs","=","torch",".","stack","(","saved_inputs",")","output","=","self",".","layer_norm","(","output",")","# Process the result and update the attentions.","outputs","=","output",".","transpose","(","0",",","1",")",".","contiguous","(",")","attn","=","attn",".","transpose","(","0",",","1",")",".","contiguous","(",")","attns","[","\"std\"","]","=","attn","if","self",".","_copy",":","attns","[","\"copy\"","]","=","attn","if","state",".","cache","is","None",":","state","=","state",".","update_state","(","tgt",",","saved_inputs",")","return","outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L172-L237"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoder.init_decoder_state","parameters":"(self, src, memory_bank, enc_hidden,\n with_cache=False)","argument_list":"","return_statement":"return state","docstring":"Init decoder state","docstring_summary":"Init decoder state","docstring_tokens":["Init","decoder","state"],"function":"def init_decoder_state(self, src, memory_bank, 
enc_hidden,\n with_cache=False):\n \"\"\" Init decoder state \"\"\"\n state = TransformerDecoderState(src)\n if with_cache:\n state._init_cache(memory_bank, self.num_layers,\n self.self_attn_type)\n return state","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","enc_hidden",",","with_cache","=","False",")",":","state","=","TransformerDecoderState","(","src",")","if","with_cache",":","state",".","_init_cache","(","memory_bank",",","self",".","num_layers",",","self",".","self_attn_type",")","return","state"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L239-L246"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderState.__init__","parameters":"(self, src)","argument_list":"","return_statement":"","docstring":"Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).","docstring_summary":"Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).","docstring_tokens":["Args",":","src","(","FloatTensor",")",":","a","sequence","of","source","words","tensors","with","optional","feature","tensors","of","size","(","len","x","batch",")","."],"function":"def __init__(self, src):\n \"\"\"\n Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).\n \"\"\"\n self.src = src\n self.previous_input = None\n self.previous_layer_inputs = None\n self.cache = None","function_tokens":["def","__init__","(","self",",","src",")",":","self",".","src","=","src","self",".","previous_input","=","None","self",".","previous_layer_inputs","=","None","self",".","cache","=","None"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L252-L261"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderState._all","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Contains attributes that need to be updated in self.beam_update().","docstring_summary":"Contains attributes that need to be updated in self.beam_update().","docstring_tokens":["Contains","attributes","that","need","to","be","updated","in","self",".","beam_update","()","."],"function":"def _all(self):\n \"\"\"\n Contains attributes that need to be updated in self.beam_update().\n \"\"\"\n if (self.previous_input is not None\n and self.previous_layer_inputs is not None):\n return (self.previous_input,\n self.previous_layer_inputs,\n self.src)\n else:\n return (self.src,)","function_tokens":["def","_all","(","self",")",":","if","(","self",".","previous_input","is","not","None","and","self",".","previous_layer_inputs","is","not","None",")",":","return","(","self",".","previous_input",",","self",".","previous_layer_inputs",",","self",".","src",")","else",":","return","(","self",".","src",",",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L264-L274"} 
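TransformerDecoderLayer._get_attn_subsequent_mask, a few records up, builds the causal mask with np.triu, and the layer's forward() combines it with the target padding mask via torch.gt(a + b, 0). Both steps are reproduced below so the mask values can be verified by eye; the padding pattern is hypothetical.

```python
import numpy as np
import torch

def subsequent_mask(size):
    # Ones strictly above the diagonal: position i may not attend to
    # any position j > i. Shape [1 x size x size], dtype uint8.
    attn_shape = (1, size, size)
    return torch.from_numpy(np.triu(np.ones(attn_shape), k=1).astype('uint8'))

mask = subsequent_mask(4)
print(mask[0])
# tensor([[0, 1, 1, 1],
#         [0, 0, 1, 1],
#         [0, 0, 0, 1],
#         [0, 0, 0, 0]], dtype=torch.uint8)

# Combine with a padding mask the way the layer's forward() does:
# a position is masked if it is padded OR lies in the future.
tgt_pad_mask = torch.tensor([[[0, 0, 0, 1]]], dtype=torch.uint8).expand(1, 4, 4)
dec_mask = torch.gt(tgt_pad_mask + mask, 0)
print(dec_mask[0])
```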
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. \"\"\"\n self.src = self.src.data.repeat(1, beam_size, 1)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","self",".","src","=","self",".","src",".","data",".","repeat","(","1",",","beam_size",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/transformer.py#L309-L311"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None, step=None)","argument_list":"","return_statement":"return outputs, state, attns","docstring":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):\n \"\"\" See :obj:`onmt.modules.RNNDecoderBase.forward()`\"\"\"\n # NOTE: memory_lengths is only here for compatibility reasons\n # with onmt.modules.RNNDecoderBase.forward()\n # CHECKS\n assert isinstance(state, CNNDecoderState)\n _, tgt_batch, _ = tgt.size()\n _, contxt_batch, _ = memory_bank.size()\n aeq(tgt_batch, contxt_batch)\n # END CHECKS\n\n if state.previous_input is not None:\n tgt = torch.cat([state.previous_input, tgt], 0)\n\n # Initialize return variables.\n outputs = []\n attns = {\"std\": []}\n assert not self._copy, \"Copy mechanism not yet tested in conv2conv\"\n if self._copy:\n attns[\"copy\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n tgt_emb = emb.transpose(0, 1).contiguous()\n # The output of CNNEncoder.\n src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()\n # The combination of output of CNNEncoder and source embeddings.\n src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()\n\n # Run the forward pass of the CNNDecoder.\n emb_reshape = tgt_emb.contiguous().view(\n tgt_emb.size(0) * tgt_emb.size(1), -1)\n linear_out = self.linear(emb_reshape)\n x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)\n x = shape_transform(x)\n\n pad = torch.zeros(x.size(0), x.size(1),\n self.cnn_kernel_width - 1, 1)\n\n pad = pad.type_as(x)\n base_target_emb = x\n\n for conv, attention in zip(self.conv_layers, self.attn_layers):\n new_target_input = torch.cat([pad, x], 2)\n out = conv(new_target_input)\n c, attn = attention(base_target_emb, out,\n src_memory_bank_t, src_memory_bank_c)\n x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT\n output = x.squeeze(3).transpose(1, 2)\n\n # Process the result and update the attentions.\n outputs = output.transpose(0, 1).contiguous()\n if state.previous_input is not None:\n outputs = 
outputs[state.previous_input.size(0):]\n attn = attn[:, state.previous_input.size(0):].squeeze()\n attn = torch.stack([attn])\n attns[\"std\"] = attn\n if self._copy:\n attns[\"copy\"] = attn\n\n # Update the state.\n state.update_state(tgt)\n\n return outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# NOTE: memory_lengths is only here for compatibility reasons","# with onmt.modules.RNNDecoderBase.forward()","# CHECKS","assert","isinstance","(","state",",","CNNDecoderState",")","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","_",",","contxt_batch",",","_","=","memory_bank",".","size","(",")","aeq","(","tgt_batch",",","contxt_batch",")","# END CHECKS","if","state",".","previous_input","is","not","None",":","tgt","=","torch",".","cat","(","[","state",".","previous_input",",","tgt","]",",","0",")","# Initialize return variables.","outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","assert","not","self",".","_copy",",","\"Copy mechanism not yet tested in conv2conv\"","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","emb","=","self",".","embeddings","(","tgt",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","tgt_emb","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","# The output of CNNEncoder.","src_memory_bank_t","=","memory_bank",".","transpose","(","0",",","1",")",".","contiguous","(",")","# The combination of output of CNNEncoder and source embeddings.","src_memory_bank_c","=","state",".","init_src",".","transpose","(","0",",","1",")",".","contiguous","(",")","# Run the forward pass of the CNNDecoder.","emb_reshape","=","tgt_emb",".","contiguous","(",")",".","view","(","tgt_emb",".","size","(","0",")","*","tgt_emb",".","size","(","1",")",",","-","1",")","linear_out","=","self",".","linear","(","emb_reshape",")","x","=","linear_out",".","view","(","tgt_emb",".","size","(","0",")",",","tgt_emb",".","size","(","1",")",",","-","1",")","x","=","shape_transform","(","x",")","pad","=","torch",".","zeros","(","x",".","size","(","0",")",",","x",".","size","(","1",")",",","self",".","cnn_kernel_width","-","1",",","1",")","pad","=","pad",".","type_as","(","x",")","base_target_emb","=","x","for","conv",",","attention","in","zip","(","self",".","conv_layers",",","self",".","attn_layers",")",":","new_target_input","=","torch",".","cat","(","[","pad",",","x","]",",","2",")","out","=","conv","(","new_target_input",")","c",",","attn","=","attention","(","base_target_emb",",","out",",","src_memory_bank_t",",","src_memory_bank_c",")","x","=","(","x","+","(","c","+","out",")","*","SCALE_WEIGHT",")","*","SCALE_WEIGHT","output","=","x",".","squeeze","(","3",")",".","transpose","(","1",",","2",")","# Process the result and update the attentions.","outputs","=","output",".","transpose","(","0",",","1",")",".","contiguous","(",")","if","state",".","previous_input","is","not","None",":","outputs","=","outputs","[","state",".","previous_input",".","size","(","0",")",":","]","attn","=","attn","[",":",",","state",".","previous_input",".","size","(","0",")",":","]",".","squeeze","(",")","attn","=","torch",".","stack","(","[","attn","]",")","attns","[","\"std\"","]","=","attn","if","self",".","_copy",":","attns","[","\"copy\"","]","=","attn","# Update the 
state.","state",".","update_state","(","tgt",")","return","outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py#L58-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoder.init_decoder_state","parameters":"(self, _, memory_bank, enc_hidden, with_cache=False)","argument_list":"","return_statement":"return CNNDecoderState(memory_bank, enc_hidden)","docstring":"Init decoder state.","docstring_summary":"Init decoder state.","docstring_tokens":["Init","decoder","state","."],"function":"def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):\n \"\"\"\n Init decoder state.\n \"\"\"\n return CNNDecoderState(memory_bank, enc_hidden)","function_tokens":["def","init_decoder_state","(","self",",","_",",","memory_bank",",","enc_hidden",",","with_cache","=","False",")",":","return","CNNDecoderState","(","memory_bank",",","enc_hidden",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py#L124-L128"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState._all","parameters":"(self)","argument_list":"","return_statement":"return (self.previous_input,)","docstring":"Contains attributes that need to be updated in self.beam_update().","docstring_summary":"Contains attributes that need to be updated in self.beam_update().","docstring_tokens":["Contains","attributes","that","need","to","be","updated","in","self",".","beam_update","()","."],"function":"def _all(self):\n \"\"\"\n Contains attributes that need to be updated in self.beam_update().\n \"\"\"\n return (self.previous_input,)","function_tokens":["def","_all","(","self",")",":","return","(","self",".","previous_input",",",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py#L141-L145"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState.update_state","parameters":"(self, new_input)","argument_list":"","return_statement":"","docstring":"Called for every decoder forward pass.","docstring_summary":"Called for every decoder forward pass.","docstring_tokens":["Called","for","every","decoder","forward","pass","."],"function":"def update_state(self, new_input):\n \"\"\" Called for every decoder forward pass. 
\"\"\"\n self.previous_input = new_input","function_tokens":["def","update_state","(","self",",","new_input",")",":","self",".","previous_input","=","new_input"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py#L150-L152"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. \"\"\"\n self.init_src = self.init_src.data.repeat(1, beam_size, 1)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","self",".","init_src","=","self",".","init_src",".","data",".","repeat","(","1",",","beam_size",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/onmt\/decoders\/cnn_decoder.py#L154-L156"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/learn_bpe.py","language":"python","identifier":"get_vocabulary","parameters":"(fobj, is_dict=False)","argument_list":"","return_statement":"return vocab","docstring":"Read text and return dictionary that encodes vocabulary","docstring_summary":"Read text and return dictionary that encodes vocabulary","docstring_tokens":["Read","text","and","return","dictionary","that","encodes","vocabulary"],"function":"def get_vocabulary(fobj, is_dict=False):\n \"\"\"Read text and return dictionary that encodes vocabulary\n \"\"\"\n vocab = Counter()\n for line in fobj:\n if is_dict:\n word, count = line.strip().split()\n vocab[word] = int(count)\n else:\n for word in line.split():\n vocab[word] += 1\n return vocab","function_tokens":["def","get_vocabulary","(","fobj",",","is_dict","=","False",")",":","vocab","=","Counter","(",")","for","line","in","fobj",":","if","is_dict",":","word",",","count","=","line",".","strip","(",")",".","split","(",")","vocab","[","word","]","=","int","(","count",")","else",":","for","word","in","line",".","split","(",")",":","vocab","[","word","]","+=","1","return","vocab"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/learn_bpe.py#L59-L70"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/learn_bpe.py","language":"python","identifier":"update_pair_statistics","parameters":"(pair, changed, stats, indices)","argument_list":"","return_statement":"","docstring":"Minimally update the indices and frequency of symbol pairs\n\n if we merge a pair of symbols, only pairs that overlap with occurrences\n of this pair are affected, and need to be updated.","docstring_summary":"Minimally update the indices and frequency of symbol pairs","docstring_tokens":["Minimally","update","the","indices","and","frequency","of","symbol","pairs"],"function":"def update_pair_statistics(pair, changed, stats, indices):\n \"\"\"Minimally update the indices 
and frequency of symbol pairs\n\n if we merge a pair of symbols, only pairs that overlap with occurrences\n of this pair are affected, and need to be updated.\n \"\"\"\n stats[pair] = 0\n indices[pair] = defaultdict(int)\n first, second = pair\n new_pair = first + second\n for j, word, old_word, freq in changed:\n\n # find all instances of pair, and update frequency\/indices around it\n i = 0\n while True:\n # find first symbol\n try:\n i = old_word.index(first, i)\n except ValueError:\n break\n # if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])\n if i < len(old_word) - 1 and old_word[i + 1] == second:\n # assuming a symbol sequence \"A B C\", if \"B C\" is merged, reduce the frequency of \"A B\"\n if i:\n prev = old_word[i - 1:i + 1]\n stats[prev] -= freq\n indices[prev][j] -= 1\n if i < len(old_word) - 2:\n # assuming a symbol sequence \"A B C B\", if \"B C\" is merged, reduce the frequency of \"C B\".\n # however, skip this if the sequence is A B C B C, because the frequency of \"C B\" will be reduced by the previous code block\n if old_word[i + 2] != first or i >= len(old_word) - 3 or old_word[i + 3] != second:\n nex = old_word[i + 1:i + 3]\n stats[nex] -= freq\n indices[nex][j] -= 1\n i += 2\n else:\n i += 1\n\n i = 0\n while True:\n try:\n # find new pair\n i = word.index(new_pair, i)\n except ValueError:\n break\n # assuming a symbol sequence \"A BC D\", if \"B C\" is merged, increase the frequency of \"A BC\"\n if i:\n prev = word[i - 1:i + 1]\n stats[prev] += freq\n indices[prev][j] += 1\n # assuming a symbol sequence \"A BC B\", if \"B C\" is merged, increase the frequency of \"BC B\"\n # however, if the sequence is A BC BC, skip this step because the count of \"BC BC\" will be incremented by the previous code block\n if i < len(word) - 1 and word[i + 1] != new_pair:\n nex = word[i:i + 2]\n stats[nex] += freq\n indices[nex][j] += 1\n i += 1","function_tokens":["def","update_pair_statistics","(","pair",",","changed",",","stats",",","indices",")",":","stats","[","pair","]","=","0","indices","[","pair","]","=","defaultdict","(","int",")","first",",","second","=","pair","new_pair","=","first","+","second","for","j",",","word",",","old_word",",","freq","in","changed",":","# find all instances of pair, and update frequency\/indices around it","i","=","0","while","True",":","# find first symbol","try",":","i","=","old_word",".","index","(","first",",","i",")","except","ValueError",":","break","# if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])","if","i","<","len","(","old_word",")","-","1","and","old_word","[","i","+","1","]","==","second",":","# assuming a symbol sequence \"A B C\", if \"B C\" is merged, reduce the frequency of \"A B\"","if","i",":","prev","=","old_word","[","i","-","1",":","i","+","1","]","stats","[","prev","]","-=","freq","indices","[","prev","]","[","j","]","-=","1","if","i","<","len","(","old_word",")","-","2",":","# assuming a symbol sequence \"A B C B\", if \"B C\" is merged, reduce the frequency of \"C B\".","# however, skip this if the sequence is A B C B C, because the frequency of \"C B\" will be reduced by the previous code 
block","if","old_word","[","i","+","2","]","!=","first","or","i",">=","len","(","old_word",")","-","3","or","old_word","[","i","+","3","]","!=","second",":","nex","=","old_word","[","i","+","1",":","i","+","3","]","stats","[","nex","]","-=","freq","indices","[","nex","]","[","j","]","-=","1","i","+=","2","else",":","i","+=","1","i","=","0","while","True",":","try",":","# find new pair","i","=","word",".","index","(","new_pair",",","i",")","except","ValueError",":","break","# assuming a symbol sequence \"A BC D\", if \"B C\" is merged, increase the frequency of \"A BC\"","if","i",":","prev","=","word","[","i","-","1",":","i","+","1","]","stats","[","prev","]","+=","freq","indices","[","prev","]","[","j","]","+=","1","# assuming a symbol sequence \"A BC B\", if \"B C\" is merged, increase the frequency of \"BC B\"","# however, if the sequence is A BC BC, skip this step because the count of \"BC BC\" will be incremented by the previous code block","if","i","<","len","(","word",")","-","1","and","word","[","i","+","1","]","!=","new_pair",":","nex","=","word","[","i",":","i","+","2","]","stats","[","nex","]","+=","freq","indices","[","nex","]","[","j","]","+=","1","i","+=","1"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/learn_bpe.py#L73-L129"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/learn_bpe.py","language":"python","identifier":"get_pair_statistics","parameters":"(vocab)","argument_list":"","return_statement":"return stats, indices","docstring":"Count frequency of all symbol pairs, and create index","docstring_summary":"Count frequency of all symbol pairs, and create index","docstring_tokens":["Count","frequency","of","all","symbol","pairs","and","create","index"],"function":"def get_pair_statistics(vocab):\n \"\"\"Count frequency of all symbol pairs, and create index\"\"\"\n\n # data structure of pair frequencies\n stats = defaultdict(int)\n\n # index from pairs to words\n indices = defaultdict(lambda: defaultdict(int))\n\n for i, (word, freq) in enumerate(vocab):\n prev_char = word[0]\n for char in word[1:]:\n stats[prev_char, char] += freq\n indices[prev_char, char][i] += 1\n prev_char = char\n\n return stats, indices","function_tokens":["def","get_pair_statistics","(","vocab",")",":","# data structure of pair frequencies","stats","=","defaultdict","(","int",")","# index from pairs to words","indices","=","defaultdict","(","lambda",":","defaultdict","(","int",")",")","for","i",",","(","word",",","freq",")","in","enumerate","(","vocab",")",":","prev_char","=","word","[","0","]","for","char","in","word","[","1",":","]",":","stats","[","prev_char",",","char","]","+=","freq","indices","[","prev_char",",","char","]","[","i","]","+=","1","prev_char","=","char","return","stats",",","indices"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/learn_bpe.py#L132-L148"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/learn_bpe.py","language":"python","identifier":"replace_pair","parameters":"(pair, vocab, indices)","argument_list":"","return_statement":"return changes","docstring":"Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB","docstring_summary":"Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 
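get_pair_statistics, whose record ends above, builds the initial pair counts by sliding over each word once and weighting by word frequency. A standalone sketch with a toy vocabulary (all values invented for illustration):

from collections import defaultdict

vocab = {('l', 'o', 'w'): 5, ('l', 'o', 'w', 'e', 'r'): 2}  # toy word counts

stats = defaultdict(int)
for word, freq in vocab.items():
    for a, b in zip(word, word[1:]):  # every adjacent symbol pair
        stats[a, b] += freq

print(stats['l', 'o'])  # 7: the pair occurs in both words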
'AB","docstring_tokens":["Replace","all","occurrences","of","a","symbol","pair","(","A","B",")","with","a","new","symbol","AB"],"function":"def replace_pair(pair, vocab, indices):\n \"\"\"Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'\"\"\"\n first, second = pair\n pair_str = ''.join(pair)\n pair_str = pair_str.replace('\\\\', '\\\\\\\\')\n changes = []\n pattern = re.compile(\n r'(?');\n # version numbering allows bckward compatibility\n outfile.write('#version: 0.2\\n')\n\n vocab = get_vocabulary(infile, is_dict)\n vocab = dict([(tuple(x[:-1]) + (x[-1] + '<\/w>',), y)\n for (x, y) in vocab.items()])\n sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True)\n\n stats, indices = get_pair_statistics(sorted_vocab)\n big_stats = copy.deepcopy(stats)\n # threshold is inspired by Zipfian assumption, but should only affect speed\n threshold = max(stats.values()) \/ 10\n for i in range(num_symbols):\n if stats:\n most_frequent = max(stats, key=lambda x: (stats[x], x))\n\n # we probably missed the best pair because of pruning; go back to full statistics\n if not stats or (i and stats[most_frequent] < threshold):\n prune_stats(stats, big_stats, threshold)\n stats = copy.deepcopy(big_stats)\n most_frequent = max(stats, key=lambda x: (stats[x], x))\n # threshold is inspired by Zipfian assumption, but should only affect speed\n threshold = stats[most_frequent] * i \/ (i + 10000.0)\n prune_stats(stats, big_stats, threshold)\n\n if stats[most_frequent] < min_frequency:\n sys.stderr.write(\n 'no pair has frequency >= {0}. Stopping\\n'.format(min_frequency))\n break\n\n if verbose:\n sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\\n'.format(\n i, most_frequent[0], most_frequent[1], stats[most_frequent]))\n outfile.write('{0} {1}\\n'.format(*most_frequent))\n changes = replace_pair(most_frequent, sorted_vocab, indices)\n update_pair_statistics(most_frequent, changes, stats, indices)\n stats[most_frequent] = 0\n if not i % 100:\n prune_stats(stats, big_stats, threshold)","function_tokens":["def","main","(","infile",",","outfile",",","num_symbols",",","min_frequency","=","2",",","verbose","=","False",",","is_dict","=","False",")",":","# version 0.2 changes the handling of the end-of-word token ('<\/w>');","# version numbering allows bckward compatibility","outfile",".","write","(","'#version: 0.2\\n'",")","vocab","=","get_vocabulary","(","infile",",","is_dict",")","vocab","=","dict","(","[","(","tuple","(","x","[",":","-","1","]",")","+","(","x","[","-","1","]","+","'<\/w>'",",",")",",","y",")","for","(","x",",","y",")","in","vocab",".","items","(",")","]",")","sorted_vocab","=","sorted","(","vocab",".","items","(",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","stats",",","indices","=","get_pair_statistics","(","sorted_vocab",")","big_stats","=","copy",".","deepcopy","(","stats",")","# threshold is inspired by Zipfian assumption, but should only affect speed","threshold","=","max","(","stats",".","values","(",")",")","\/","10","for","i","in","range","(","num_symbols",")",":","if","stats",":","most_frequent","=","max","(","stats",",","key","=","lambda","x",":","(","stats","[","x","]",",","x",")",")","# we probably missed the best pair because of pruning; go back to full 
statistics","if","not","stats","or","(","i","and","stats","[","most_frequent","]","<","threshold",")",":","prune_stats","(","stats",",","big_stats",",","threshold",")","stats","=","copy",".","deepcopy","(","big_stats",")","most_frequent","=","max","(","stats",",","key","=","lambda","x",":","(","stats","[","x","]",",","x",")",")","# threshold is inspired by Zipfian assumption, but should only affect speed","threshold","=","stats","[","most_frequent","]","*","i","\/","(","i","+","10000.0",")","prune_stats","(","stats",",","big_stats",",","threshold",")","if","stats","[","most_frequent","]","<","min_frequency",":","sys",".","stderr",".","write","(","'no pair has frequency >= {0}. Stopping\\n'",".","format","(","min_frequency",")",")","break","if","verbose",":","sys",".","stderr",".","write","(","'pair {0}: {1} {2} -> {1}{2} (frequency {3})\\n'",".","format","(","i",",","most_frequent","[","0","]",",","most_frequent","[","1","]",",","stats","[","most_frequent","]",")",")","outfile",".","write","(","'{0} {1}\\n'",".","format","(","*","most_frequent",")",")","changes","=","replace_pair","(","most_frequent",",","sorted_vocab",",","indices",")","update_pair_statistics","(","most_frequent",",","changes",",","stats",",","indices",")","stats","[","most_frequent","]","=","0","if","not","i","%","100",":","prune_stats","(","stats",",","big_stats",",","threshold",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/learn_bpe.py#L193-L236"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"get_pairs","parameters":"(word)","argument_list":"","return_statement":"return pairs","docstring":"Return set of symbol pairs in a word.\n\n word is represented as tuple of symbols (symbols being variable-length strings)","docstring_summary":"Return set of symbol pairs in a word.","docstring_tokens":["Return","set","of","symbol","pairs","in","a","word","."],"function":"def get_pairs(word):\n \"\"\"Return set of symbol pairs in a word.\n\n word is represented as tuple of symbols (symbols being variable-length strings)\n \"\"\"\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs","function_tokens":["def","get_pairs","(","word",")",":","pairs","=","set","(",")","prev_char","=","word","[","0","]","for","char","in","word","[","1",":","]",":","pairs",".","add","(","(","prev_char",",","char",")",")","prev_char","=","char","return","pairs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L126-L136"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"encode","parameters":"(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries=None)","argument_list":"","return_statement":"return word","docstring":"Encode word based on list of BPE merge operations, which are applied consecutively","docstring_summary":"Encode word based on list of BPE merge operations, which are applied consecutively","docstring_tokens":["Encode","word","based","on","list","of","BPE","merge","operations","which","are","applied","consecutively"],"function":"def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, 
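The main() record above runs the greedy merge loop: repeatedly take the most frequent pair, write it to the codes file, and update the statistics (the Zipfian threshold pruning is purely a speed optimization). A self-contained sketch of that loop without the pruning and min_frequency machinery; the vocabulary and merge count are toy values:

from collections import defaultdict

def pair_stats(vocab):
    stats = defaultdict(int)
    for word, freq in vocab.items():
        for a, b in zip(word, word[1:]):
            stats[a, b] += freq
    return stats

def merge(pair, vocab):
    a, b = pair
    out = {}
    for word, freq in vocab.items():
        w, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
                w.append(a + b)
                i += 2
            else:
                w.append(word[i])
                i += 1
        out[tuple(w)] = freq
    return out

vocab = {('l', 'o', 'w', '</w>'): 5, ('l', 'o', 'w', 'e', 'r', '</w>'): 2}
for _ in range(3):  # num_symbols merge operations
    stats = pair_stats(vocab)
    best = max(stats, key=lambda p: (stats[p], p))  # same tie-break as learn_bpe
    print(' '.join(best))  # one merge per line, the codes-file format
    vocab = merge(best, vocab)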
glossaries=None):\n \"\"\"Encode word based on list of BPE merge operations, which are applied consecutively\n \"\"\"\n\n if orig in cache:\n return cache[orig]\n\n if orig in glossaries:\n cache[orig] = (orig,)\n return (orig,)\n\n if version == (0, 1):\n word = tuple(orig) + ('<\/w>',)\n elif version == (0, 2): # more consistent handling of word-final segments\n word = tuple(orig[:-1]) + (orig[-1] + '<\/w>',)\n else:\n raise NotImplementedError\n\n pairs = get_pairs(word)\n\n if not pairs:\n return orig\n\n while True:\n bigram = min(pairs, key=lambda pair: bpe_codes.get(pair, float('inf')))\n if bigram not in bpe_codes:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n\n # don't print end-of-word symbols\n if word[-1] == '<\/w>':\n word = word[:-1]\n elif word[-1].endswith('<\/w>'):\n word = word[:-1] + (word[-1].replace('<\/w>', ''),)\n\n if vocab:\n word = check_vocab_and_split(word, bpe_codes_reverse, vocab, separator)\n\n cache[orig] = word\n return word","function_tokens":["def","encode","(","orig",",","bpe_codes",",","bpe_codes_reverse",",","vocab",",","separator",",","version",",","cache",",","glossaries","=","None",")",":","if","orig","in","cache",":","return","cache","[","orig","]","if","orig","in","glossaries",":","cache","[","orig","]","=","(","orig",",",")","return","(","orig",",",")","if","version","==","(","0",",","1",")",":","word","=","tuple","(","orig",")","+","(","'<\/w>'",",",")","elif","version","==","(","0",",","2",")",":","# more consistent handling of word-final segments","word","=","tuple","(","orig","[",":","-","1","]",")","+","(","orig","[","-","1","]","+","'<\/w>'",",",")","else",":","raise","NotImplementedError","pairs","=","get_pairs","(","word",")","if","not","pairs",":","return","orig","while","True",":","bigram","=","min","(","pairs",",","key","=","lambda","pair",":","bpe_codes",".","get","(","pair",",","float","(","'inf'",")",")",")","if","bigram","not","in","bpe_codes",":","break","first",",","second","=","bigram","new_word","=","[","]","i","=","0","while","i","<","len","(","word",")",":","try",":","j","=","word",".","index","(","first",",","i",")","new_word",".","extend","(","word","[","i",":","j","]",")","i","=","j","except",":","new_word",".","extend","(","word","[","i",":","]",")","break","if","word","[","i","]","==","first","and","i","<","len","(","word",")","-","1","and","word","[","i","+","1","]","==","second",":","new_word",".","append","(","first","+","second",")","i","+=","2","else",":","new_word",".","append","(","word","[","i","]",")","i","+=","1","new_word","=","tuple","(","new_word",")","word","=","new_word","if","len","(","word",")","==","1",":","break","else",":","pairs","=","get_pairs","(","word",")","# don't print end-of-word 
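encode(), shown above, applies learned merges to one word by repeatedly picking the applicable pair with the lowest merge rank until nothing is mergeable. A standalone sketch with two toy merges (the ranks, i.e. line numbers in the codes file, are invented):

# Merge ranks as read from a codes file (rank = line number); toy values.
bpe_codes = {('l', 'o'): 0, ('lo', 'w'): 1}

word = ('l', 'o', 'w', 'e', 'r', '</w>')
while len(word) > 1:
    pairs = set(zip(word, word[1:]))
    # encode() always applies the applicable merge with the lowest rank.
    bigram = min(pairs, key=lambda p: bpe_codes.get(p, float('inf')))
    if bigram not in bpe_codes:
        break
    a, b = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
            merged.append(a + b)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)  # ('low', 'e', 'r', '</w>')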
symbols","if","word","[","-","1","]","==","'<\/w>'",":","word","=","word","[",":","-","1","]","elif","word","[","-","1","]",".","endswith","(","'<\/w>'",")",":","word","=","word","[",":","-","1","]","+","(","word","[","-","1","]",".","replace","(","'<\/w>'",",","''",")",",",")","if","vocab",":","word","=","check_vocab_and_split","(","word",",","bpe_codes_reverse",",","vocab",",","separator",")","cache","[","orig","]","=","word","return","word"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L139-L201"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"recursive_split","parameters":"(segment, bpe_codes, vocab, separator, final=False)","argument_list":"","return_statement":"","docstring":"Recursively split segment into smaller units (by reversing BPE merges)\n until all units are either in-vocabulary, or cannot be split futher.","docstring_summary":"Recursively split segment into smaller units (by reversing BPE merges)\n until all units are either in-vocabulary, or cannot be split futher.","docstring_tokens":["Recursively","split","segment","into","smaller","units","(","by","reversing","BPE","merges",")","until","all","units","are","either","in","-","vocabulary","or","cannot","be","split","futher","."],"function":"def recursive_split(segment, bpe_codes, vocab, separator, final=False):\n \"\"\"Recursively split segment into smaller units (by reversing BPE merges)\n until all units are either in-vocabulary, or cannot be split futher.\"\"\"\n\n try:\n if final:\n left, right = bpe_codes[segment + '<\/w>']\n right = right[:-4]\n else:\n left, right = bpe_codes[segment]\n except:\n #sys.stderr.write('cannot split {0} further.\\n'.format(segment))\n yield segment\n return\n\n if left + separator in vocab:\n yield left\n else:\n for item in recursive_split(left, bpe_codes, vocab, separator, False):\n yield item\n\n if (final and right in vocab) or (not final and right + separator in vocab):\n yield right\n else:\n for item in recursive_split(right, bpe_codes, vocab, separator, final):\n yield item","function_tokens":["def","recursive_split","(","segment",",","bpe_codes",",","vocab",",","separator",",","final","=","False",")",":","try",":","if","final",":","left",",","right","=","bpe_codes","[","segment","+","'<\/w>'","]","right","=","right","[",":","-","4","]","else",":","left",",","right","=","bpe_codes","[","segment","]","except",":","#sys.stderr.write('cannot split {0} further.\\n'.format(segment))","yield","segment","return","if","left","+","separator","in","vocab",":","yield","left","else",":","for","item","in","recursive_split","(","left",",","bpe_codes",",","vocab",",","separator",",","False",")",":","yield","item","if","(","final","and","right","in","vocab",")","or","(","not","final","and","right","+","separator","in","vocab",")",":","yield","right","else",":","for","item","in","recursive_split","(","right",",","bpe_codes",",","vocab",",","separator",",","final",")",":","yield","item"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L204-L229"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"check_vocab_and_split","parameters":"(orig, bpe_codes, 
vocab, separator)","argument_list":"","return_statement":"return out","docstring":"Check for each segment in word if it is in-vocabulary,\n and segment OOV segments into smaller units by reversing the BPE merge operations","docstring_summary":"Check for each segment in word if it is in-vocabulary,\n and segment OOV segments into smaller units by reversing the BPE merge operations","docstring_tokens":["Check","for","each","segment","in","word","if","it","is","in","-","vocabulary","and","segment","OOV","segments","into","smaller","units","by","reversing","the","BPE","merge","operations"],"function":"def check_vocab_and_split(orig, bpe_codes, vocab, separator):\n \"\"\"Check for each segment in word if it is in-vocabulary,\n and segment OOV segments into smaller units by reversing the BPE merge operations\"\"\"\n\n out = []\n\n for segment in orig[:-1]:\n if segment + separator in vocab:\n out.append(segment)\n else:\n #sys.stderr.write('OOV: {0}\\n'.format(segment))\n for item in recursive_split(segment, bpe_codes, vocab, separator, False):\n out.append(item)\n\n segment = orig[-1]\n if segment in vocab:\n out.append(segment)\n else:\n #sys.stderr.write('OOV: {0}\\n'.format(segment))\n for item in recursive_split(segment, bpe_codes, vocab, separator, True):\n out.append(item)\n\n return out","function_tokens":["def","check_vocab_and_split","(","orig",",","bpe_codes",",","vocab",",","separator",")",":","out","=","[","]","for","segment","in","orig","[",":","-","1","]",":","if","segment","+","separator","in","vocab",":","out",".","append","(","segment",")","else",":","#sys.stderr.write('OOV: {0}\\n'.format(segment))","for","item","in","recursive_split","(","segment",",","bpe_codes",",","vocab",",","separator",",","False",")",":","out",".","append","(","item",")","segment","=","orig","[","-","1","]","if","segment","in","vocab",":","out",".","append","(","segment",")","else",":","#sys.stderr.write('OOV: {0}\\n'.format(segment))","for","item","in","recursive_split","(","segment",",","bpe_codes",",","vocab",",","separator",",","True",")",":","out",".","append","(","item",")","return","out"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L232-L254"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"read_vocabulary","parameters":"(vocab_file, threshold)","argument_list":"","return_statement":"return vocabulary","docstring":"read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.","docstring_summary":"read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.","docstring_tokens":["read","vocabulary","file","produced","by","get_vocab",".","py","and","filter","according","to","frequency","threshold","."],"function":"def read_vocabulary(vocab_file, threshold):\n \"\"\"read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.\n \"\"\"\n\n vocabulary = set()\n\n for line in vocab_file:\n word, freq = line.split()\n freq = int(freq)\n if threshold == None or freq >= threshold:\n vocabulary.add(word)\n\n return 
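recursive_split and check_vocab_and_split, above, undo merges for subwords that fall outside a frequency-filtered vocabulary. A standalone sketch of the same idea, assuming a '@@' separator; the reverse-codes table and vocabulary are toy values, not the repo's data structures:

# Reverse merge table (merged symbol -> its parts) and a filtered vocabulary;
# '@@' plays the role of the separator argument. All values are toy data.
codes_reverse = {'low': ('lo', 'w'), 'lo': ('l', 'o')}
vocab = {'lo@@', 'w@@', 'e@@', 'r'}

def split_oov(segment, final=False):
    # A non-final segment counts as in-vocabulary only with its separator.
    marked = segment if final else segment + '@@'
    if marked in vocab or segment not in codes_reverse:
        yield segment
        return
    left, right = codes_reverse[segment]
    yield from split_oov(left)
    yield from split_oov(right, final=final)

print(list(split_oov('low')))  # ['lo', 'w']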
vocabulary","function_tokens":["def","read_vocabulary","(","vocab_file",",","threshold",")",":","vocabulary","=","set","(",")","for","line","in","vocab_file",":","word",",","freq","=","line",".","split","(",")","freq","=","int","(","freq",")","if","threshold","==","None","or","freq",">=","threshold",":","vocabulary",".","add","(","word",")","return","vocabulary"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L257-L269"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"isolate_glossary","parameters":"(word, glossary)","argument_list":"","return_statement":"","docstring":"Isolate a glossary present inside a word.\n\n Returns a list of subwords. In which all 'glossary' glossaries are isolated \n\n For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:\n ['1934', 'USA', 'B', 'USA']","docstring_summary":"Isolate a glossary present inside a word.","docstring_tokens":["Isolate","a","glossary","present","inside","a","word","."],"function":"def isolate_glossary(word, glossary):\n \"\"\"\n Isolate a glossary present inside a word.\n\n Returns a list of subwords. In which all 'glossary' glossaries are isolated \n\n For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:\n ['1934', 'USA', 'B', 'USA']\n \"\"\"\n if word == glossary or glossary not in word:\n return [word]\n else:\n splits = word.split(glossary)\n segments = [segment.strip() for split in splits[:-1]\n for segment in [split, glossary] if segment != '']\n return segments + [splits[-1].strip()] if splits[-1] != '' else segments","function_tokens":["def","isolate_glossary","(","word",",","glossary",")",":","if","word","==","glossary","or","glossary","not","in","word",":","return","[","word","]","else",":","splits","=","word",".","split","(","glossary",")","segments","=","[","segment",".","strip","(",")","for","split","in","splits","[",":","-","1","]","for","segment","in","[","split",",","glossary","]","if","segment","!=","''","]","return","segments","+","[","splits","[","-","1","]",".","strip","(",")","]","if","splits","[","-","1","]","!=","''","else","segments"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L272-L287"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/OpenNMT-py-baselines\/tools\/apply_bpe.py","language":"python","identifier":"BPE.segment","parameters":"(self, sentence)","argument_list":"","return_statement":"return ' '.join(output)","docstring":"segment single sentence (whitespace-tokenized string) with BPE encoding","docstring_summary":"segment single sentence (whitespace-tokenized string) with BPE encoding","docstring_tokens":["segment","single","sentence","(","whitespace","-","tokenized","string",")","with","BPE","encoding"],"function":"def segment(self, sentence):\n \"\"\"segment single sentence (whitespace-tokenized string) with BPE encoding\"\"\"\n output = []\n for word in sentence.split():\n new_word = [out for segment in self._isolate_glossaries(word)\n for out in encode(segment,\n self.bpe_codes,\n self.bpe_codes_reverse,\n self.vocab,\n self.separator,\n self.version,\n self.cache,\n self.glossaries)]\n\n for item in new_word[:-1]:\n output.append(item + self.separator)\n 
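isolate_glossary's docstring above already gives a worked example; here is a runnable standalone version of the same logic (whitespace stripping omitted for brevity):

def isolate_glossary(word, glossary):
    # Same logic as the record above, minus the .strip() calls.
    if word == glossary or glossary not in word:
        return [word]
    splits = word.split(glossary)
    segments = [s for split in splits[:-1] for s in (split, glossary) if s != '']
    return segments + [splits[-1]] if splits[-1] != '' else segments

print(isolate_glossary('1934USABUSA', 'USA'))  # ['1934', 'USA', 'B', 'USA']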
output.append(new_word[-1])\n\n return ' '.join(output)","function_tokens":["def","segment","(","self",",","sentence",")",":","output","=","[","]","for","word","in","sentence",".","split","(",")",":","new_word","=","[","out","for","segment","in","self",".","_isolate_glossaries","(","word",")","for","out","in","encode","(","segment",",","self",".","bpe_codes",",","self",".","bpe_codes_reverse",",","self",".","vocab",",","self",".","separator",",","self",".","version",",","self",".","cache",",","self",".","glossaries",")","]","for","item","in","new_word","[",":","-","1","]",":","output",".","append","(","item","+","self",".","separator",")","output",".","append","(","new_word","[","-","1","]",")","return","' '",".","join","(","output",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/OpenNMT-py-baselines\/tools\/apply_bpe.py#L61-L79"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/preprocess.py","language":"python","identifier":"check_existing_pt_files","parameters":"(opt)","argument_list":"","return_statement":"","docstring":"Checking if there are existing .pt files to avoid tampering","docstring_summary":"Checking if there are existing .pt files to avoid tampering","docstring_tokens":["Checking","if","there","are","existing",".","pt","files","to","avoid","tampering"],"function":"def check_existing_pt_files(opt):\n \"\"\" Checking if there are existing .pt files to avoid tampering \"\"\"\n # We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt\n # when training, so check to avoid tampering with existing pt files\n # or mixing them up.\n for t in ['train', 'valid', 'vocab']:\n pattern = opt.save_data + '.' + t + '*.pt'\n if glob.glob(pattern):\n sys.stderr.write(\"Please backup existing pt file: %s, \"\n \"to avoid tampering!\\n\" % pattern)\n sys.exit(1)","function_tokens":["def","check_existing_pt_files","(","opt",")",":","# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt","# when training, so check to avoid tampering with existing pt files","# or mixing them up.","for","t","in","[","'train'",",","'valid'",",","'vocab'","]",":","pattern","=","opt",".","save_data","+","'.'","+","t","+","'*.pt'","if","glob",".","glob","(","pattern",")",":","sys",".","stderr",".","write","(","\"Please backup existing pt file: %s, \"","\"to avoid tampering!\\n\"","%","pattern",")","sys",".","exit","(","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/preprocess.py#L20-L30"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/preprocess.py","language":"python","identifier":"parse_args","parameters":"()","argument_list":"","return_statement":"return opt","docstring":"Parsing arguments","docstring_summary":"Parsing arguments","docstring_tokens":["Parsing","arguments"],"function":"def parse_args():\n \"\"\" Parsing arguments \"\"\"\n parser = argparse.ArgumentParser(\n description='preprocess.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.preprocess_opts(parser)\n\n opt = parser.parse_args()\n torch.manual_seed(opt.seed)\n\n check_existing_pt_files(opt)\n\n return 
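BPE.segment, above, tags every non-final subword of each word with the separator. A sketch of that output convention and the usual way to undo it after decoding, assuming the conventional '@@' separator (a common default, not read from the record):

subwords = ['lo', 'w', 'e', 'r']   # hypothetical encode() output for 'lower'
separator = '@@'

tagged = [s + separator for s in subwords[:-1]] + [subwords[-1]]
print(' '.join(tagged))            # lo@@ w@@ e@@ r

# Undoing BPE on decoder output is the matching string replacement:
print(' '.join(tagged).replace(separator + ' ', ''))  # lower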
opt","function_tokens":["def","parse_args","(",")",":","parser","=","argparse",".","ArgumentParser","(","description","=","'preprocess.py'",",","formatter_class","=","argparse",".","ArgumentDefaultsHelpFormatter",")","opts",".","add_md_help_argument","(","parser",")","opts",".","preprocess_opts","(","parser",")","opt","=","parser",".","parse_args","(",")","torch",".","manual_seed","(","opt",".","seed",")","check_existing_pt_files","(","opt",")","return","opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/preprocess.py#L33-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/preprocess.py","language":"python","identifier":"build_save_in_shards","parameters":"(src_corpus, tgt_corpus, fields,\n corpus_type, opt)","argument_list":"","return_statement":"return ret_list","docstring":"Divide the big corpus into shards, and build dataset separately.\n This is currently only for data_type=='text'.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.\n\n To tackle this, we only read in part of the corpus file of size\n `max_shard_size`(actually it is multiples of 64 bytes that equals\n or is slightly larger than this size), and process it into dataset,\n then write it to disk along the way. By doing this, we only focus on\n part of the corpus at any moment, thus effectively reducing memory use.\n According to test, this method can reduce memory footprint by ~50%.\n\n Note! As we process along the shards, previous shards might still\n stay in memory, but since we are done with them, and no more\n reference to them, if there is memory tight situation, the OS could\n easily reclaim these memory.\n\n If `max_shard_size` is 0 or is larger than the corpus size, it is\n effectively preprocessed into one dataset, i.e. no sharding.\n\n NOTE! `max_shard_size` is measuring the input corpus size, not the\n output pt file size. So a shard pt file consists of examples of size\n 2 * `max_shard_size`(source + target).","docstring_summary":"Divide the big corpus into shards, and build dataset separately.\n This is currently only for data_type=='text'.","docstring_tokens":["Divide","the","big","corpus","into","shards","and","build","dataset","separately",".","This","is","currently","only","for","data_type","==","text","."],"function":"def build_save_in_shards(src_corpus, tgt_corpus, fields,\n corpus_type, opt):\n \"\"\"\n Divide the big corpus into shards, and build dataset separately.\n This is currently only for data_type=='text'.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.\n\n To tackle this, we only read in part of the corpus file of size\n `max_shard_size`(actually it is multiples of 64 bytes that equals\n or is slightly larger than this size), and process it into dataset,\n then write it to disk along the way. By doing this, we only focus on\n part of the corpus at any moment, thus effectively reducing memory use.\n According to test, this method can reduce memory footprint by ~50%.\n\n Note! As we process along the shards, previous shards might still\n stay in memory, but since we are done with them, and no more\n reference to them, if there is memory tight situation, the OS could\n easily reclaim these memory.\n\n If `max_shard_size` is 0 or is larger than the corpus size, it is\n effectively preprocessed into one dataset, i.e. no sharding.\n\n NOTE! 
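The Hi_MAP check_existing_pt_files/parse_args records above guard against clobbering existing preprocessed data. A sketch showing what the glob pattern actually matches; the save_data prefix here is hypothetical:

import glob
import sys

save_data = 'data/demo'  # hypothetical -save_data prefix
for t in ['train', 'valid', 'vocab']:
    pattern = save_data + '.' + t + '*.pt'
    # Matches both monolithic files (data/demo.train.pt) and shards
    # (data/demo.train.3.pt), which is why preprocessing refuses to run.
    if glob.glob(pattern):
        sys.stderr.write('Please backup existing pt file: %s\n' % pattern)
        sys.exit(1)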
`max_shard_size` is measuring the input corpus size, not the\n output pt file size. So a shard pt file consists of examples of size\n 2 * `max_shard_size`(source + target).\n \"\"\"\n\n corpus_size = os.path.getsize(src_corpus)\n if corpus_size > 10 * (1024 ** 2) and opt.max_shard_size == 0:\n logger.info(\"Warning. The corpus %s is larger than 10M bytes, \"\n \"you can set '-max_shard_size' to process it by \"\n \"small shards to use less memory.\" % src_corpus)\n\n if opt.max_shard_size != 0:\n logger.info(' * divide corpus into shards and build dataset '\n 'separately (shard_size = %d bytes).'\n % opt.max_shard_size)\n\n ret_list = []\n src_iter = inputters.ShardedTextCorpusIterator(\n src_corpus, opt.src_seq_length_trunc,\n \"src\", opt.max_shard_size)\n tgt_iter = inputters.ShardedTextCorpusIterator(\n tgt_corpus, opt.tgt_seq_length_trunc,\n \"tgt\", opt.max_shard_size,\n assoc_iter=src_iter)\n\n\n index = 0\n while not src_iter.hit_end():\n index += 1\n dataset = inputters.TextDataset(\n fields, src_iter, tgt_iter,\n src_iter.num_feats, tgt_iter.num_feats,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n dynamic_dict=opt.dynamic_dict)\n\n # We save fields in vocab.pt separately, so make it empty.\n dataset.fields = []\n\n pt_file = \"{:s}.{:s}.{:d}.pt\".format(\n opt.save_data, corpus_type, index)\n logger.info(\" * saving %s data shard to %s.\"\n % (corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n ret_list.append(pt_file)\n\n return ret_list","function_tokens":["def","build_save_in_shards","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")",":","corpus_size","=","os",".","path",".","getsize","(","src_corpus",")","if","corpus_size",">","10","*","(","1024","**","2",")","and","opt",".","max_shard_size","==","0",":","logger",".","info","(","\"Warning. 
The corpus %s is larger than 10M bytes, \"","\"you can set '-max_shard_size' to process it by \"","\"small shards to use less memory.\"","%","src_corpus",")","if","opt",".","max_shard_size","!=","0",":","logger",".","info","(","' * divide corpus into shards and build dataset '","'separately (shard_size = %d bytes).'","%","opt",".","max_shard_size",")","ret_list","=","[","]","src_iter","=","inputters",".","ShardedTextCorpusIterator","(","src_corpus",",","opt",".","src_seq_length_trunc",",","\"src\"",",","opt",".","max_shard_size",")","tgt_iter","=","inputters",".","ShardedTextCorpusIterator","(","tgt_corpus",",","opt",".","tgt_seq_length_trunc",",","\"tgt\"",",","opt",".","max_shard_size",",","assoc_iter","=","src_iter",")","index","=","0","while","not","src_iter",".","hit_end","(",")",":","index","+=","1","dataset","=","inputters",".","TextDataset","(","fields",",","src_iter",",","tgt_iter",",","src_iter",".","num_feats",",","tgt_iter",".","num_feats",",","src_seq_length","=","opt",".","src_seq_length",",","tgt_seq_length","=","opt",".","tgt_seq_length",",","dynamic_dict","=","opt",".","dynamic_dict",")","# We save fields in vocab.pt separately, so make it empty.","dataset",".","fields","=","[","]","pt_file","=","\"{:s}.{:s}.{:d}.pt\"",".","format","(","opt",".","save_data",",","corpus_type",",","index",")","logger",".","info","(","\" * saving %s data shard to %s.\"","%","(","corpus_type",",","pt_file",")",")","torch",".","save","(","dataset",",","pt_file",")","ret_list",".","append","(","pt_file",")","return","ret_list"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/preprocess.py#L50-L121"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/preprocess.py","language":"python","identifier":"build_save_in_shards_using_shards_size","parameters":"(src_corpus, tgt_corpus, fields,\n corpus_type, opt)","argument_list":"","return_statement":"return ret_list","docstring":"Divide src_corpus and tgt_corpus into smaller multiples\n src_copus and tgt corpus files, then build shards, each\n shard will have opt.shard_size samples except last shard.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.","docstring_summary":"Divide src_corpus and tgt_corpus into smaller multiples\n src_copus and tgt corpus files, then build shards, each\n shard will have opt.shard_size samples except last shard.","docstring_tokens":["Divide","src_corpus","and","tgt_corpus","into","smaller","multiples","src_copus","and","tgt","corpus","files","then","build","shards","each","shard","will","have","opt",".","shard_size","samples","except","last","shard","."],"function":"def build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,\n corpus_type, opt):\n \"\"\"\n Divide src_corpus and tgt_corpus into smaller multiples\n src_copus and tgt corpus files, then build shards, each\n shard will have opt.shard_size samples except last shard.\n\n The reason we do this is to avoid taking up too much memory due\n to sucking in a huge corpus file.\n \"\"\"\n\n src_data = open(src_corpus, \"r\", encoding=\"utf-8\").readlines()\n tgt_data = open(tgt_corpus, \"r\", encoding=\"utf-8\").readlines()\n\n src_corpus = \"\".join(src_corpus.split(\".\")[:-1])\n tgt_corpus = \"\".join(tgt_corpus.split(\".\")[:-1])\n\n for x in range(int(len(src_data) \/ opt.shard_size)):\n open(src_corpus + \".{0}.txt\".format(x), \"w\",\n encoding=\"utf-8\").writelines(\n src_data[x 
* opt.shard_size: (x + 1) * opt.shard_size])\n open(tgt_corpus + \".{0}.txt\".format(x), \"w\",\n encoding=\"utf-8\").writelines(\n tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])\n\n src_list = sorted(glob.glob(src_corpus + '.*.txt'))\n tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))\n\n ret_list = []\n\n for index, src in enumerate(src_list):\n dataset = inputters.build_dataset(\n fields, opt.data_type,\n src_path=src,\n tgt_path=tgt_list[index],\n src_dir=opt.src_dir,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n src_seq_length_trunc=opt.src_seq_length_trunc,\n tgt_seq_length_trunc=opt.tgt_seq_length_trunc,\n dynamic_dict=opt.dynamic_dict,\n sample_rate=opt.sample_rate,\n window_size=opt.window_size,\n window_stride=opt.window_stride,\n window=opt.window,\n image_channel_size=opt.image_channel_size\n )\n\n pt_file = \"{:s}.{:s}.{:d}.pt\".format(\n opt.save_data, corpus_type, index)\n\n # We save fields in vocab.pt seperately, so make it empty.\n dataset.fields = []\n\n logger.info(\" * saving %sth %s data image shard to %s.\"\n % (index, corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n ret_list.append(pt_file)\n\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n return ret_list","function_tokens":["def","build_save_in_shards_using_shards_size","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")",":","src_data","=","open","(","src_corpus",",","\"r\"",",","encoding","=","\"utf-8\"",")",".","readlines","(",")","tgt_data","=","open","(","tgt_corpus",",","\"r\"",",","encoding","=","\"utf-8\"",")",".","readlines","(",")","src_corpus","=","\"\"",".","join","(","src_corpus",".","split","(","\".\"",")","[",":","-","1","]",")","tgt_corpus","=","\"\"",".","join","(","tgt_corpus",".","split","(","\".\"",")","[",":","-","1","]",")","for","x","in","range","(","int","(","len","(","src_data",")","\/","opt",".","shard_size",")",")",":","open","(","src_corpus","+","\".{0}.txt\"",".","format","(","x",")",",","\"w\"",",","encoding","=","\"utf-8\"",")",".","writelines","(","src_data","[","x","*","opt",".","shard_size",":","(","x","+","1",")","*","opt",".","shard_size","]",")","open","(","tgt_corpus","+","\".{0}.txt\"",".","format","(","x",")",",","\"w\"",",","encoding","=","\"utf-8\"",")",".","writelines","(","tgt_data","[","x","*","opt",".","shard_size",":","(","x","+","1",")","*","opt",".","shard_size","]",")","src_list","=","sorted","(","glob",".","glob","(","src_corpus","+","'.*.txt'",")",")","tgt_list","=","sorted","(","glob",".","glob","(","tgt_corpus","+","'.*.txt'",")",")","ret_list","=","[","]","for","index",",","src","in","enumerate","(","src_list",")",":","dataset","=","inputters",".","build_dataset","(","fields",",","opt",".","data_type",",","src_path","=","src",",","tgt_path","=","tgt_list","[","index","]",",","src_dir","=","opt",".","src_dir",",","src_seq_length","=","opt",".","src_seq_length",",","tgt_seq_length","=","opt",".","tgt_seq_length",",","src_seq_length_trunc","=","opt",".","src_seq_length_trunc",",","tgt_seq_length_trunc","=","opt",".","tgt_seq_length_trunc",",","dynamic_dict","=","opt",".","dynamic_dict",",","sample_rate","=","opt",".","sample_rate",",","window_size","=","opt",".","window_size",",","window_stride","=","opt",".","window_stride",",","window","=","opt",".","window",",","image_channel_size","=","opt",".","image_channel_size",")","pt_file","=","\"{:s}.{:s}.{:d}.pt\"",".","format","(","opt",".","save_data",",","corpus_type",",","index",")","# We save fields in vocab.pt 
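build_save_in_shards_using_shards_size, above, first splits the raw corpora into shard_size-line text files. A toy sketch of the chunking; note that, as written, int(len(src_data) / shard_size) iterations appear to drop a trailing partial shard, despite the docstring's "except last shard" wording, so this is worth verifying before relying on it:

shard_size = 2
src_data = ['s1\n', 's2\n', 's3\n', 's4\n', 's5\n']  # toy 5-example corpus

for x in range(len(src_data) // shard_size):
    shard = src_data[x * shard_size:(x + 1) * shard_size]
    print('corpus.%d.txt' % x, shard)
# Only corpus.0.txt and corpus.1.txt are written; 's5\n' is never emitted.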
seperately, so make it empty.","dataset",".","fields","=","[","]","logger",".","info","(","\" * saving %sth %s data image shard to %s.\"","%","(","index",",","corpus_type",",","pt_file",")",")","torch",".","save","(","dataset",",","pt_file",")","ret_list",".","append","(","pt_file",")","del","dataset",".","examples","gc",".","collect","(",")","del","dataset","gc",".","collect","(",")","return","ret_list"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/preprocess.py#L124-L189"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/preprocess.py","language":"python","identifier":"build_save_dataset","parameters":"(corpus_type, fields, opt)","argument_list":"","return_statement":"return [pt_file]","docstring":"Building and saving the dataset","docstring_summary":"Building and saving the dataset","docstring_tokens":["Building","and","saving","the","dataset"],"function":"def build_save_dataset(corpus_type, fields, opt):\n \"\"\" Building and saving the dataset \"\"\"\n assert corpus_type in ['train', 'valid']\n\n if corpus_type == 'train':\n src_corpus = opt.train_src\n tgt_corpus = opt.train_tgt\n else:\n src_corpus = opt.valid_src\n tgt_corpus = opt.valid_tgt\n\n # Currently we only do preprocess sharding for corpus: data_type=='text'.\n if opt.data_type == 'text':\n return build_save_in_shards(\n src_corpus, tgt_corpus, fields,\n corpus_type, opt)\n\n if (opt.shard_size > 0):\n return build_save_in_shards_using_shards_size(src_corpus,\n tgt_corpus,\n fields,\n corpus_type,\n opt)\n\n # For data_type == 'img' or 'audio', currently we don't do\n # preprocess sharding. We only build a monolithic dataset.\n # But since the interfaces are uniform, it would be not hard\n # to do this should users need this feature.\n dataset = inputters.build_dataset(\n fields, opt.data_type,\n src_path=src_corpus,\n tgt_path=tgt_corpus,\n src_dir=opt.src_dir,\n src_seq_length=opt.src_seq_length,\n tgt_seq_length=opt.tgt_seq_length,\n src_seq_length_trunc=opt.src_seq_length_trunc,\n tgt_seq_length_trunc=opt.tgt_seq_length_trunc,\n dynamic_dict=opt.dynamic_dict,\n sample_rate=opt.sample_rate,\n window_size=opt.window_size,\n window_stride=opt.window_stride,\n window=opt.window,\n image_channel_size=opt.image_channel_size)\n\n # We save fields in vocab.pt seperately, so make it empty.\n dataset.fields = []\n\n pt_file = \"{:s}.{:s}.pt\".format(opt.save_data, corpus_type)\n logger.info(\" * saving %s dataset to %s.\" % (corpus_type, pt_file))\n torch.save(dataset, pt_file)\n\n return [pt_file]","function_tokens":["def","build_save_dataset","(","corpus_type",",","fields",",","opt",")",":","assert","corpus_type","in","[","'train'",",","'valid'","]","if","corpus_type","==","'train'",":","src_corpus","=","opt",".","train_src","tgt_corpus","=","opt",".","train_tgt","else",":","src_corpus","=","opt",".","valid_src","tgt_corpus","=","opt",".","valid_tgt","# Currently we only do preprocess sharding for corpus: data_type=='text'.","if","opt",".","data_type","==","'text'",":","return","build_save_in_shards","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")","if","(","opt",".","shard_size",">","0",")",":","return","build_save_in_shards_using_shards_size","(","src_corpus",",","tgt_corpus",",","fields",",","corpus_type",",","opt",")","# For data_type == 'img' or 'audio', currently we don't do","# preprocess sharding. 
We only build a monolithic dataset.","# But since the interfaces are uniform, it would be not hard","# to do this should users need this feature.","dataset","=","inputters",".","build_dataset","(","fields",",","opt",".","data_type",",","src_path","=","src_corpus",",","tgt_path","=","tgt_corpus",",","src_dir","=","opt",".","src_dir",",","src_seq_length","=","opt",".","src_seq_length",",","tgt_seq_length","=","opt",".","tgt_seq_length",",","src_seq_length_trunc","=","opt",".","src_seq_length_trunc",",","tgt_seq_length_trunc","=","opt",".","tgt_seq_length_trunc",",","dynamic_dict","=","opt",".","dynamic_dict",",","sample_rate","=","opt",".","sample_rate",",","window_size","=","opt",".","window_size",",","window_stride","=","opt",".","window_stride",",","window","=","opt",".","window",",","image_channel_size","=","opt",".","image_channel_size",")","# We save fields in vocab.pt seperately, so make it empty.","dataset",".","fields","=","[","]","pt_file","=","\"{:s}.{:s}.pt\"",".","format","(","opt",".","save_data",",","corpus_type",")","logger",".","info","(","\" * saving %s dataset to %s.\"","%","(","corpus_type",",","pt_file",")",")","torch",".","save","(","dataset",",","pt_file",")","return","[","pt_file","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/preprocess.py#L192-L243"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/preprocess.py","language":"python","identifier":"build_save_vocab","parameters":"(train_dataset, fields, opt)","argument_list":"","return_statement":"","docstring":"Building and saving the vocab","docstring_summary":"Building and saving the vocab","docstring_tokens":["Building","and","saving","the","vocab"],"function":"def build_save_vocab(train_dataset, fields, opt):\n \"\"\" Building and saving the vocab \"\"\"\n fields = inputters.build_vocab(train_dataset, fields, opt.data_type,\n opt.share_vocab,\n opt.src_vocab,\n opt.src_vocab_size,\n opt.src_words_min_frequency,\n opt.tgt_vocab,\n opt.tgt_vocab_size,\n opt.tgt_words_min_frequency)\n\n # Can't save fields, so remove\/reconstruct at training time.\n vocab_file = opt.save_data + '.vocab.pt'\n torch.save(inputters.save_fields_to_vocab(fields), vocab_file)","function_tokens":["def","build_save_vocab","(","train_dataset",",","fields",",","opt",")",":","fields","=","inputters",".","build_vocab","(","train_dataset",",","fields",",","opt",".","data_type",",","opt",".","share_vocab",",","opt",".","src_vocab",",","opt",".","src_vocab_size",",","opt",".","src_words_min_frequency",",","opt",".","tgt_vocab",",","opt",".","tgt_vocab_size",",","opt",".","tgt_words_min_frequency",")","# Can't save fields, so remove\/reconstruct at training time.","vocab_file","=","opt",".","save_data","+","'.vocab.pt'","torch",".","save","(","inputters",".","save_fields_to_vocab","(","fields",")",",","vocab_file",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/preprocess.py#L246-L259"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/train.py","language":"python","identifier":"run","parameters":"(opt, device_id, error_queue)","argument_list":"","return_statement":"","docstring":"run process","docstring_summary":"run process","docstring_tokens":["run","process"],"function":"def run(opt, device_id, error_queue):\n \"\"\" run process \"\"\"\n try:\n gpu_rank = onmt.utils.distributed.multi_init(opt, 
device_id)\n if gpu_rank != opt.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n single_main(opt, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))","function_tokens":["def","run","(","opt",",","device_id",",","error_queue",")",":","try",":","gpu_rank","=","onmt",".","utils",".","distributed",".","multi_init","(","opt",",","device_id",")","if","gpu_rank","!=","opt",".","gpu_ranks","[","device_id","]",":","raise","AssertionError","(","\"An error occurred in \\\n Distributed initialization\"",")","single_main","(","opt",",","device_id",")","except","KeyboardInterrupt",":","pass","# killed by parent, do nothing","except","Exception",":","# propagate exception to parent process, keeping original traceback","import","traceback","error_queue",".","put","(","(","opt",".","gpu_ranks","[","device_id","]",",","traceback",".","format_exc","(",")",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/train.py#L62-L75"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/train.py","language":"python","identifier":"ErrorHandler.__init__","parameters":"(self, error_queue)","argument_list":"","return_statement":"","docstring":"init error handler","docstring_summary":"init error handler","docstring_tokens":["init","error","handler"],"function":"def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)","function_tokens":["def","__init__","(","self",",","error_queue",")",":","import","signal","import","threading","self",".","error_queue","=","error_queue","self",".","children_pids","=","[","]","self",".","error_thread","=","threading",".","Thread","(","target","=","self",".","error_listener",",","daemon","=","True",")","self",".","error_thread",".","start","(",")","signal",".","signal","(","signal",".","SIGUSR1",",","self",".","signal_handler",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/train.py#L82-L91"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/train.py","language":"python","identifier":"ErrorHandler.add_child","parameters":"(self, pid)","argument_list":"","return_statement":"","docstring":"error handler","docstring_summary":"error handler","docstring_tokens":["error","handler"],"function":"def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)","function_tokens":["def","add_child","(","self",",","pid",")",":","self",".","children_pids",".","append","(","pid",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/train.py#L93-L95"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/train.py","language":"python","identifier":"ErrorHandler.error_listener","parameters":"(self)","argument_list":"","return_statement":"","docstring":"error listener","docstring_summary":"error 
listener","docstring_tokens":["error","listener"],"function":"def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)","function_tokens":["def","error_listener","(","self",")",":","(","rank",",","original_trace",")","=","self",".","error_queue",".","get","(",")","self",".","error_queue",".","put","(","(","rank",",","original_trace",")",")","os",".","kill","(","os",".","getpid","(",")",",","signal",".","SIGUSR1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/train.py#L97-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/train.py","language":"python","identifier":"ErrorHandler.signal_handler","parameters":"(self, signalnum, stackframe)","argument_list":"","return_statement":"","docstring":"signal handler","docstring_summary":"signal handler","docstring_tokens":["signal","handler"],"function":"def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)","function_tokens":["def","signal_handler","(","self",",","signalnum",",","stackframe",")",":","for","pid","in","self",".","children_pids",":","os",".","kill","(","pid",",","signal",".","SIGINT",")","# kill children processes","(","rank",",","original_trace",")","=","self",".","error_queue",".","get","(",")","msg","=","\"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"","msg","+=","original_trace","raise","Exception","(","msg",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/train.py#L103-L111"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/model_builder.py","language":"python","identifier":"build_embeddings","parameters":"(opt, word_dict, feature_dicts, for_encoder=True)","argument_list":"","return_statement":"return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n feat_merge=opt.feat_merge,\n feat_vec_exponent=opt.feat_vec_exponent,\n feat_vec_size=opt.feat_vec_size,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n feat_padding_idx=feats_padding_idx,\n word_vocab_size=num_word_embeddings,\n feat_vocab_sizes=num_feat_embeddings,\n sparse=opt.optim == \"sparseadam\")","docstring":"Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?","docstring_summary":"Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or 
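The train.py run/ErrorHandler records above propagate a child process's traceback to the parent through a shared queue (the repo additionally wakes the parent with SIGUSR1, which is POSIX-only and omitted here). A minimal standalone sketch of the queue half of the pattern:

import multiprocessing as mp
import traceback

def worker(rank, error_queue):
    try:
        raise RuntimeError('boom in rank %d' % rank)
    except Exception:
        # Keep the original traceback, as train.py's run() does.
        error_queue.put((rank, traceback.format_exc()))

if __name__ == '__main__':
    error_queue = mp.SimpleQueue()
    p = mp.Process(target=worker, args=(0, error_queue))
    p.start()
    p.join()
    rank, trace = error_queue.get()
    print('child %d failed:\n%s' % (rank, trace))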
decoder?","docstring_tokens":["Build","an","Embeddings","instance",".","Args",":","opt",":","the","option","in","current","environment",".","word_dict","(","Vocab",")",":","words","dictionary",".","feature_dicts","(","[","Vocab","]","optional",")",":","a","list","of","feature","dictionary",".","for_encoder","(","bool",")",":","build","Embeddings","for","encoder","or","decoder?"],"function":"def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):\n \"\"\"\n Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?\n \"\"\"\n if for_encoder:\n embedding_dim = opt.src_word_vec_size\n else:\n embedding_dim = opt.tgt_word_vec_size\n\n word_padding_idx = word_dict.stoi[inputters.PAD_WORD]\n num_word_embeddings = len(word_dict)\n\n feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]\n for feat_dict in feature_dicts]\n num_feat_embeddings = [len(feat_dict) for feat_dict in\n feature_dicts]\n\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n feat_merge=opt.feat_merge,\n feat_vec_exponent=opt.feat_vec_exponent,\n feat_vec_size=opt.feat_vec_size,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n feat_padding_idx=feats_padding_idx,\n word_vocab_size=num_word_embeddings,\n feat_vocab_sizes=num_feat_embeddings,\n sparse=opt.optim == \"sparseadam\")","function_tokens":["def","build_embeddings","(","opt",",","word_dict",",","feature_dicts",",","for_encoder","=","True",")",":","if","for_encoder",":","embedding_dim","=","opt",".","src_word_vec_size","else",":","embedding_dim","=","opt",".","tgt_word_vec_size","word_padding_idx","=","word_dict",".","stoi","[","inputters",".","PAD_WORD","]","num_word_embeddings","=","len","(","word_dict",")","feats_padding_idx","=","[","feat_dict",".","stoi","[","inputters",".","PAD_WORD","]","for","feat_dict","in","feature_dicts","]","num_feat_embeddings","=","[","len","(","feat_dict",")","for","feat_dict","in","feature_dicts","]","return","Embeddings","(","word_vec_size","=","embedding_dim",",","position_encoding","=","opt",".","position_encoding",",","feat_merge","=","opt",".","feat_merge",",","feat_vec_exponent","=","opt",".","feat_vec_exponent",",","feat_vec_size","=","opt",".","feat_vec_size",",","dropout","=","opt",".","dropout",",","word_padding_idx","=","word_padding_idx",",","feat_padding_idx","=","feats_padding_idx",",","word_vocab_size","=","num_word_embeddings",",","feat_vocab_sizes","=","num_feat_embeddings",",","sparse","=","opt",".","optim","==","\"sparseadam\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/model_builder.py#L29-L62"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/model_builder.py","language":"python","identifier":"build_encoder","parameters":"(opt, embeddings)","argument_list":"","return_statement":"","docstring":"Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.","docstring_summary":"Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this 
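build_embeddings, above, derives its sizes and padding indices from torchtext-style vocab objects. A sketch of just that derivation with an invented stoi table; OpenNMT-py conventionally uses '<blank>' as PAD_WORD, but treat the constant here as illustrative:

PAD_WORD = '<blank>'  # conventional OpenNMT-py padding token; illustrative
stoi = {'<unk>': 0, PAD_WORD: 1, 'the': 2, 'cat': 3}  # toy torchtext stoi

word_padding_idx = stoi[PAD_WORD]  # embedding row ignored during lookup/loss
num_word_embeddings = len(stoi)    # number of embedding rows
print(word_padding_idx, num_word_embeddings)  # 1 4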
encoder.","docstring_tokens":["Various","encoder","dispatcher","function",".","Args",":","opt",":","the","option","in","current","environment",".","embeddings","(","Embeddings",")",":","vocab","embeddings","for","this","encoder","."],"function":"def build_encoder(opt, embeddings):\n \"\"\"\n Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.\n \"\"\"\n if opt.encoder_type == \"transformer\":\n # return TransformerEncoder(opt.enc_layers, opt.rnn_size,\n # opt.heads, opt.transformer_ff,\n # opt.dropout, embeddings)\n return None\n elif opt.encoder_type == \"cnn\":\n return CNNEncoder(opt.enc_layers, opt.rnn_size,\n opt.cnn_kernel_width,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"mean\":\n return MeanEncoder(opt.enc_layers, embeddings)\n else:\n # \"rnn\" or \"brnn\"\n return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,\n opt.rnn_size, opt.dropout, embeddings,\n opt.bridge)","function_tokens":["def","build_encoder","(","opt",",","embeddings",")",":","if","opt",".","encoder_type","==","\"transformer\"",":","# return TransformerEncoder(opt.enc_layers, opt.rnn_size,","# opt.heads, opt.transformer_ff,","# opt.dropout, embeddings)","return","None","elif","opt",".","encoder_type","==","\"cnn\"",":","return","CNNEncoder","(","opt",".","enc_layers",",","opt",".","rnn_size",",","opt",".","cnn_kernel_width",",","opt",".","dropout",",","embeddings",")","elif","opt",".","encoder_type","==","\"mean\"",":","return","MeanEncoder","(","opt",".","enc_layers",",","embeddings",")","else",":","# \"rnn\" or \"brnn\"","return","RNNEncoder","(","opt",".","rnn_type",",","opt",".","brnn",",","opt",".","enc_layers",",","opt",".","rnn_size",",","opt",".","dropout",",","embeddings",",","opt",".","bridge",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/model_builder.py#L65-L87"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/model_builder.py","language":"python","identifier":"build_decoder","parameters":"(opt, embeddings)","argument_list":"","return_statement":"","docstring":"Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.","docstring_summary":"Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.","docstring_tokens":["Various","decoder","dispatcher","function",".","Args",":","opt",":","the","option","in","current","environment",".","embeddings","(","Embeddings",")",":","vocab","embeddings","for","this","decoder","."],"function":"def build_decoder(opt, embeddings):\n \"\"\"\n Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.\n \"\"\"\n if opt.decoder_type == \"transformer\":\n return TransformerDecoder(opt.dec_layers, opt.rnn_size,\n opt.heads, opt.transformer_ff,\n opt.global_attention, opt.copy_attn,\n opt.self_attn_type,\n opt.dropout, embeddings)\n elif opt.decoder_type == \"cnn\":\n return CNNDecoder(opt.dec_layers, opt.rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.cnn_kernel_width, opt.dropout,\n embeddings)\n elif opt.input_feed:\n # others\n return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.rnn_size,\n opt.global_attention,\n 
opt.global_attention_function,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings,\n opt.reuse_copy_attn)\n else:\n return StdRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.rnn_size,\n opt.global_attention,\n opt.global_attention_function,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings,\n opt.reuse_copy_attn)","function_tokens":["def","build_decoder","(","opt",",","embeddings",")",":","if","opt",".","decoder_type","==","\"transformer\"",":","return","TransformerDecoder","(","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","heads",",","opt",".","transformer_ff",",","opt",".","global_attention",",","opt",".","copy_attn",",","opt",".","self_attn_type",",","opt",".","dropout",",","embeddings",")","elif","opt",".","decoder_type","==","\"cnn\"",":","return","CNNDecoder","(","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","global_attention",",","opt",".","copy_attn",",","opt",".","cnn_kernel_width",",","opt",".","dropout",",","embeddings",")","elif","opt",".","input_feed",":","# others","return","InputFeedRNNDecoder","(","opt",".","rnn_type",",","opt",".","brnn",",","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","global_attention",",","opt",".","global_attention_function",",","opt",".","coverage_attn",",","opt",".","context_gate",",","opt",".","copy_attn",",","opt",".","dropout",",","embeddings",",","opt",".","reuse_copy_attn",")","else",":","return","StdRNNDecoder","(","opt",".","rnn_type",",","opt",".","brnn",",","opt",".","dec_layers",",","opt",".","rnn_size",",","opt",".","global_attention",",","opt",".","global_attention_function",",","opt",".","coverage_attn",",","opt",".","context_gate",",","opt",".","copy_attn",",","opt",".","dropout",",","embeddings",",","opt",".","reuse_copy_attn",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/model_builder.py#L90-L130"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/model_builder.py","language":"python","identifier":"build_base_model","parameters":"(model_opt, fields, gpu, checkpoint=None)","argument_list":"","return_statement":"return model","docstring":"Args:\n model_opt: the option loaded from checkpoint.\n fields: `Field` objects for the model.\n gpu(bool): whether to use gpu.\n checkpoint: the model gnerated by train phase, or a resumed snapshot\n model from a stopped training.\n Returns:\n the NMTModel.","docstring_summary":"Args:\n model_opt: the option loaded from checkpoint.\n fields: `Field` objects for the model.\n gpu(bool): whether to use gpu.\n checkpoint: the model gnerated by train phase, or a resumed snapshot\n model from a stopped training.\n Returns:\n the NMTModel.","docstring_tokens":["Args",":","model_opt",":","the","option","loaded","from","checkpoint",".","fields",":","Field","objects","for","the","model",".","gpu","(","bool",")",":","whether","to","use","gpu",".","checkpoint",":","the","model","gnerated","by","train","phase","or","a","resumed","snapshot","model","from","a","stopped","training",".","Returns",":","the","NMTModel","."],"function":"def build_base_model(model_opt, fields, gpu, checkpoint=None):\n \"\"\"\n Args:\n model_opt: the option loaded from checkpoint.\n fields: `Field` objects for the model.\n gpu(bool): whether to use gpu.\n checkpoint: the model gnerated by train phase, or a resumed snapshot\n model from a stopped training.\n Returns:\n the 
NMTModel.\n \"\"\"\n assert model_opt.model_type in [\"text\", \"img\", \"audio\"], \\\n (\"Unsupported model type %s\" % (model_opt.model_type))\n\n # Build encoder.\n if model_opt.model_type == \"text\":\n src_dict = fields[\"src\"].vocab\n\n feature_dicts = inputters.collect_feature_vocabs(fields, 'src')\n src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)\n encoder = build_encoder(model_opt, src_embeddings)\n\n\n elif model_opt.model_type == \"img\":\n if (\"image_channel_size\" not in model_opt.__dict__):\n image_channel_size = 3\n else:\n image_channel_size = model_opt.image_channel_size\n\n encoder = ImageEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.rnn_size,\n model_opt.dropout,\n image_channel_size)\n elif model_opt.model_type == \"audio\":\n encoder = AudioEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.rnn_size,\n model_opt.dropout,\n model_opt.sample_rate,\n model_opt.window_size)\n\n model_parameters = filter(lambda p: p.requires_grad, encoder.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n # Build decoder.\n tgt_dict = fields[\"tgt\"].vocab\n feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')\n tgt_embeddings = build_embeddings(model_opt, tgt_dict,\n feature_dicts, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src\/tgt vocab should be the same if `-share_vocab` is specified.\n if src_dict != tgt_dict:\n raise AssertionError('The `-share_vocab` should be set during '\n 'preprocess if you use share_embeddings!')\n\n tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight\n\n decoder = build_decoder(model_opt, tgt_embeddings)\n\n # Build NMTModel(= encoder + decoder).\n device = torch.device(\"cuda\" if gpu else \"cpu\")\n\n model = onmt.models.NMTModel(encoder, decoder)\n model.model_type = model_opt.model_type\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n generator = nn.Sequential(\n nn.Linear(model_opt.rnn_size, len(fields[\"tgt\"].vocab)), gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n else:\n generator = CopyGenerator(model_opt.rnn_size,\n fields[\"tgt\"].vocab)\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'])\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if hasattr(model.encoder, 'embeddings'):\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)\n\n # Add generator to model (this registers it as parameter of model).\n model.generator = generator\n model.to(device)\n\n return 
model","function_tokens":["def","build_base_model","(","model_opt",",","fields",",","gpu",",","checkpoint","=","None",")",":","assert","model_opt",".","model_type","in","[","\"text\"",",","\"img\"",",","\"audio\"","]",",","(","\"Unsupported model type %s\"","%","(","model_opt",".","model_type",")",")","# Build encoder.","if","model_opt",".","model_type","==","\"text\"",":","src_dict","=","fields","[","\"src\"","]",".","vocab","feature_dicts","=","inputters",".","collect_feature_vocabs","(","fields",",","'src'",")","src_embeddings","=","build_embeddings","(","model_opt",",","src_dict",",","feature_dicts",")","encoder","=","build_encoder","(","model_opt",",","src_embeddings",")","elif","model_opt",".","model_type","==","\"img\"",":","if","(","\"image_channel_size\"","not","in","model_opt",".","__dict__",")",":","image_channel_size","=","3","else",":","image_channel_size","=","model_opt",".","image_channel_size","encoder","=","ImageEncoder","(","model_opt",".","enc_layers",",","model_opt",".","brnn",",","model_opt",".","rnn_size",",","model_opt",".","dropout",",","image_channel_size",")","elif","model_opt",".","model_type","==","\"audio\"",":","encoder","=","AudioEncoder","(","model_opt",".","enc_layers",",","model_opt",".","brnn",",","model_opt",".","rnn_size",",","model_opt",".","dropout",",","model_opt",".","sample_rate",",","model_opt",".","window_size",")","model_parameters","=","filter","(","lambda","p",":","p",".","requires_grad",",","encoder",".","parameters","(",")",")","params","=","sum","(","[","np",".","prod","(","p",".","size","(",")",")","for","p","in","model_parameters","]",")","# Build decoder.","tgt_dict","=","fields","[","\"tgt\"","]",".","vocab","feature_dicts","=","inputters",".","collect_feature_vocabs","(","fields",",","'tgt'",")","tgt_embeddings","=","build_embeddings","(","model_opt",",","tgt_dict",",","feature_dicts",",","for_encoder","=","False",")","# Share the embedding matrix - preprocess with share_vocab required.","if","model_opt",".","share_embeddings",":","# src\/tgt vocab should be the same if `-share_vocab` is specified.","if","src_dict","!=","tgt_dict",":","raise","AssertionError","(","'The `-share_vocab` should be set during '","'preprocess if you use share_embeddings!'",")","tgt_embeddings",".","word_lut",".","weight","=","src_embeddings",".","word_lut",".","weight","decoder","=","build_decoder","(","model_opt",",","tgt_embeddings",")","# Build NMTModel(= encoder + decoder).","device","=","torch",".","device","(","\"cuda\"","if","gpu","else","\"cpu\"",")","model","=","onmt",".","models",".","NMTModel","(","encoder",",","decoder",")","model",".","model_type","=","model_opt",".","model_type","# Build Generator.","if","not","model_opt",".","copy_attn",":","if","model_opt",".","generator_function","==","\"sparsemax\"",":","gen_func","=","onmt",".","modules",".","sparse_activations",".","LogSparsemax","(","dim","=","-","1",")","else",":","gen_func","=","nn",".","LogSoftmax","(","dim","=","-","1",")","generator","=","nn",".","Sequential","(","nn",".","Linear","(","model_opt",".","rnn_size",",","len","(","fields","[","\"tgt\"","]",".","vocab",")",")",",","gen_func",")","if","model_opt",".","share_decoder_embeddings",":","generator","[","0","]",".","weight","=","decoder",".","embeddings",".","word_lut",".","weight","else",":","generator","=","CopyGenerator","(","model_opt",".","rnn_size",",","fields","[","\"tgt\"","]",".","vocab",")","# Load the model states from checkpoint or initialize 
them.","if","checkpoint","is","not","None",":","model",".","load_state_dict","(","checkpoint","[","'model'","]",",","strict","=","False",")","generator",".","load_state_dict","(","checkpoint","[","'generator'","]",")","else",":","if","model_opt",".","param_init","!=","0.0",":","for","p","in","model",".","parameters","(",")",":","p",".","data",".","uniform_","(","-","model_opt",".","param_init",",","model_opt",".","param_init",")","for","p","in","generator",".","parameters","(",")",":","p",".","data",".","uniform_","(","-","model_opt",".","param_init",",","model_opt",".","param_init",")","if","model_opt",".","param_init_glorot",":","for","p","in","model",".","parameters","(",")",":","if","p",".","dim","(",")",">","1",":","xavier_uniform_","(","p",")","for","p","in","generator",".","parameters","(",")",":","if","p",".","dim","(",")",">","1",":","xavier_uniform_","(","p",")","if","hasattr","(","model",".","encoder",",","'embeddings'",")",":","model",".","encoder",".","embeddings",".","load_pretrained_vectors","(","model_opt",".","pre_word_vecs_enc",",","model_opt",".","fix_word_vecs_enc",")","if","hasattr","(","model",".","decoder",",","'embeddings'",")",":","model",".","decoder",".","embeddings",".","load_pretrained_vectors","(","model_opt",".","pre_word_vecs_dec",",","model_opt",".","fix_word_vecs_dec",")","# Add generator to model (this registers it as parameter of model).","model",".","generator","=","generator","model",".","to","(","device",")","return","model"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/model_builder.py#L155-L266"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/model_builder.py","language":"python","identifier":"build_model","parameters":"(model_opt, opt, fields, checkpoint)","argument_list":"","return_statement":"return model","docstring":"Build the Model","docstring_summary":"Build the Model","docstring_tokens":["Build","the","Model"],"function":"def build_model(model_opt, opt, fields, checkpoint):\n \"\"\" Build the Model \"\"\"\n logger.info('Building model...')\n model = build_base_model(model_opt, fields,\n use_gpu(opt), checkpoint)\n logger.info(model)\n return model","function_tokens":["def","build_model","(","model_opt",",","opt",",","fields",",","checkpoint",")",":","logger",".","info","(","'Building model...'",")","model","=","build_base_model","(","model_opt",",","fields",",","use_gpu","(","opt",")",",","checkpoint",")","logger",".","info","(","model",")","return","model"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/model_builder.py#L269-L275"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"build_trainer","parameters":"(opt, device_id, model, fields,\n optim, data_type, model_saver=None)","argument_list":"","return_statement":"return trainer","docstring":"Simplify `Trainer` creation based on user `opt`s*\n\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. 
\"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model","docstring_summary":"Simplify `Trainer` creation based on user `opt`s*","docstring_tokens":["Simplify","Trainer","creation","based","on","user","opt","s","*"],"function":"def build_trainer(opt, device_id, model, fields,\n optim, data_type, model_saver=None):\n \"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. \"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n # fine\n\n train_loss = onmt.utils.loss.build_loss_compute(\n model, fields[\"tgt\"].vocab, opt)\n valid_loss = onmt.utils.loss.build_loss_compute(\n model, fields[\"tgt\"].vocab, opt, train=False)\n\n trunc_size = opt.truncated_decoder # Badly named...\n shard_size = opt.max_generator_batches\n norm_method = opt.normalization\n grad_accum_count = opt.accum_count\n n_gpu = opt.world_size\n if device_id >= 0:\n gpu_rank = opt.gpu_ranks[device_id]\n else:\n gpu_rank = 0\n n_gpu = 0\n gpu_verbose_level = opt.gpu_verbose_level\n\n report_manager = onmt.utils.build_report_manager(opt)\n trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,\n shard_size, data_type, norm_method,\n grad_accum_count, n_gpu, gpu_rank,\n gpu_verbose_level, report_manager,\n model_saver=model_saver)\n return trainer","function_tokens":["def","build_trainer","(","opt",",","device_id",",","model",",","fields",",","optim",",","data_type",",","model_saver","=","None",")",":","# fine","train_loss","=","onmt",".","utils",".","loss",".","build_loss_compute","(","model",",","fields","[","\"tgt\"","]",".","vocab",",","opt",")","valid_loss","=","onmt",".","utils",".","loss",".","build_loss_compute","(","model",",","fields","[","\"tgt\"","]",".","vocab",",","opt",",","train","=","False",")","trunc_size","=","opt",".","truncated_decoder","# Badly named...","shard_size","=","opt",".","max_generator_batches","norm_method","=","opt",".","normalization","grad_accum_count","=","opt",".","accum_count","n_gpu","=","opt",".","world_size","if","device_id",">=","0",":","gpu_rank","=","opt",".","gpu_ranks","[","device_id","]","else",":","gpu_rank","=","0","n_gpu","=","0","gpu_verbose_level","=","opt",".","gpu_verbose_level","report_manager","=","onmt",".","utils",".","build_report_manager","(","opt",")","trainer","=","onmt",".","Trainer","(","model",",","train_loss",",","valid_loss",",","optim",",","trunc_size",",","shard_size",",","data_type",",","norm_method",",","grad_accum_count",",","n_gpu",",","gpu_rank",",","gpu_verbose_level",",","report_manager",",","model_saver","=","model_saver",")","return","trainer"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L23-L63"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer.train","parameters":"(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps)","argument_list":"","return_statement":"return total_stats","docstring":"The main training loops.\n by iterating over training data (i.e. 
`train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n\n Return:\n None","docstring_summary":"The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`","docstring_tokens":["The","main","training","loops",".","by","iterating","over","training","data","(","i",".","e",".","train_iter_fct",")","and","running","validation","(","i",".","e",".","iterating","over","valid_iter_fct"],"function":"def train(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps):\n \"\"\"\n The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n\n Return:\n None\n \"\"\"\n logger.info('Start training...')\n\n step = self.optim._step + 1\n true_batchs = []\n accum = 0\n normalization = 0\n train_iter = train_iter_fct()\n\n total_stats = onmt.utils.Statistics()\n report_stats = onmt.utils.Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n\n while step <= train_steps:\n reduce_counter = 0\n for i, batch in enumerate(train_iter):\n\n # import pdb; pdb.set_trace()\n\n if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):\n if self.gpu_verbose_level > 1:\n logger.info(\"GpuRank %d: index: %d accum: %d\"\n % (self.gpu_rank, i, accum))\n\n true_batchs.append(batch)\n\n if self.norm_method == \"tokens\":\n num_tokens = batch.tgt[1:].ne(\n self.train_loss.padding_idx).sum()\n normalization += num_tokens.item()\n else:\n normalization += batch.batch_size\n accum += 1\n if accum == self.grad_accum_count:\n reduce_counter += 1\n if self.gpu_verbose_level > 0:\n logger.info(\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"\n % (self.gpu_rank, reduce_counter,\n len(true_batchs)))\n if self.n_gpu > 1:\n normalization = sum(onmt.utils.distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n true_batchs, normalization, total_stats,\n report_stats)\n\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate,\n report_stats)\n\n true_batchs = []\n accum = 0\n normalization = 0\n if (step % valid_steps == 0):\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: validate step %d'\n % (self.gpu_rank, step))\n valid_iter = valid_iter_fct()\n with torch.no_grad():\n valid_stats = self.validate(valid_iter)\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: gather valid stat \\\n step %d' % (self.gpu_rank, step))\n valid_stats = self._maybe_gather_stats(valid_stats)\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: report stat step %d'\n % (self.gpu_rank, step))\n self._report_step(self.optim.learning_rate,\n step, valid_stats=valid_stats)\n\n if self.gpu_rank == 0:\n self._maybe_save(step)\n step += 1\n if step > train_steps:\n break\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: we completed an epoch \\\n at step 
%d' % (self.gpu_rank, step))\n train_iter = train_iter_fct()\n\n return total_stats","function_tokens":["def","train","(","self",",","train_iter_fct",",","valid_iter_fct",",","train_steps",",","valid_steps",")",":","logger",".","info","(","'Start training...'",")","step","=","self",".","optim",".","_step","+","1","true_batchs","=","[","]","accum","=","0","normalization","=","0","train_iter","=","train_iter_fct","(",")","total_stats","=","onmt",".","utils",".","Statistics","(",")","report_stats","=","onmt",".","utils",".","Statistics","(",")","self",".","_start_report_manager","(","start_time","=","total_stats",".","start_time",")","while","step","<=","train_steps",":","reduce_counter","=","0","for","i",",","batch","in","enumerate","(","train_iter",")",":","# import pdb; pdb.set_trace()","if","self",".","n_gpu","==","0","or","(","i","%","self",".","n_gpu","==","self",".","gpu_rank",")",":","if","self",".","gpu_verbose_level",">","1",":","logger",".","info","(","\"GpuRank %d: index: %d accum: %d\"","%","(","self",".","gpu_rank",",","i",",","accum",")",")","true_batchs",".","append","(","batch",")","if","self",".","norm_method","==","\"tokens\"",":","num_tokens","=","batch",".","tgt","[","1",":","]",".","ne","(","self",".","train_loss",".","padding_idx",")",".","sum","(",")","normalization","+=","num_tokens",".","item","(",")","else",":","normalization","+=","batch",".","batch_size","accum","+=","1","if","accum","==","self",".","grad_accum_count",":","reduce_counter","+=","1","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"","%","(","self",".","gpu_rank",",","reduce_counter",",","len","(","true_batchs",")",")",")","if","self",".","n_gpu",">","1",":","normalization","=","sum","(","onmt",".","utils",".","distributed",".","all_gather_list","(","normalization",")",")","self",".","_gradient_accumulation","(","true_batchs",",","normalization",",","total_stats",",","report_stats",")","report_stats","=","self",".","_maybe_report_training","(","step",",","train_steps",",","self",".","optim",".","learning_rate",",","report_stats",")","true_batchs","=","[","]","accum","=","0","normalization","=","0","if","(","step","%","valid_steps","==","0",")",":","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: validate step %d'","%","(","self",".","gpu_rank",",","step",")",")","valid_iter","=","valid_iter_fct","(",")","with","torch",".","no_grad","(",")",":","valid_stats","=","self",".","validate","(","valid_iter",")","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: gather valid stat \\\n step %d'","%","(","self",".","gpu_rank",",","step",")",")","valid_stats","=","self",".","_maybe_gather_stats","(","valid_stats",")","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: report stat step %d'","%","(","self",".","gpu_rank",",","step",")",")","self",".","_report_step","(","self",".","optim",".","learning_rate",",","step",",","valid_stats","=","valid_stats",")","if","self",".","gpu_rank","==","0",":","self",".","_maybe_save","(","step",")","step","+=","1","if","step",">","train_steps",":","break","if","self",".","gpu_verbose_level",">","0",":","logger",".","info","(","'GpuRank %d: we completed an epoch \\\n at step 
%d'","%","(","self",".","gpu_rank",",","step",")",")","train_iter","=","train_iter_fct","(",")","return","total_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L120-L222"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer.validate","parameters":"(self, valid_iter)","argument_list":"","return_statement":"return stats","docstring":"Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics","docstring_summary":"Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics","docstring_tokens":["Validate","model",".","valid_iter",":","validate","data","iterator","Returns",":",":","obj",":","nmt",".","Statistics",":","validation","loss","statistics"],"function":"def validate(self, valid_iter):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n # Set model in validating mode.\n self.model.eval()\n\n stats = onmt.utils.Statistics()\n\n\n for batch in valid_iter:\n src = inputters.make_features(batch, 'src', self.data_type)\n if self.data_type == 'text':\n _, src_lengths = batch.src\n else:\n src_lengths = None\n\n tgt = inputters.make_features(batch, 'tgt')\n\n # F-prop through the model.\n src_sents = batch.src_sents\n outputs, attns, _ = self.model(src, tgt, src_sents,src_lengths)\n\n # Compute loss.\n batch_stats = self.valid_loss.monolithic_compute_loss(\n batch, outputs, attns)\n\n # Update statistics.\n stats.update(batch_stats)\n\n # Set model back to training mode.\n self.model.train()\n\n return stats","function_tokens":["def","validate","(","self",",","valid_iter",")",":","# Set model in validating mode.","self",".","model",".","eval","(",")","stats","=","onmt",".","utils",".","Statistics","(",")","for","batch","in","valid_iter",":","src","=","inputters",".","make_features","(","batch",",","'src'",",","self",".","data_type",")","if","self",".","data_type","==","'text'",":","_",",","src_lengths","=","batch",".","src","else",":","src_lengths","=","None","tgt","=","inputters",".","make_features","(","batch",",","'tgt'",")","# F-prop through the model.","src_sents","=","batch",".","src_sents","outputs",",","attns",",","_","=","self",".","model","(","src",",","tgt",",","src_sents",",","src_lengths",")","# Compute loss.","batch_stats","=","self",".","valid_loss",".","monolithic_compute_loss","(","batch",",","outputs",",","attns",")","# Update statistics.","stats",".","update","(","batch_stats",")","# Set model back to training mode.","self",".","model",".","train","(",")","return","stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L224-L259"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer._start_report_manager","parameters":"(self, start_time=None)","argument_list":"","return_statement":"","docstring":"Simple function to start report manager (if any)","docstring_summary":"Simple function to start report manager (if any)","docstring_tokens":["Simple","function","to","start","report","manager","(","if","any",")"],"function":"def _start_report_manager(self, start_time=None):\n \"\"\"\n Simple 
function to start report manager (if any)\n \"\"\"\n if self.report_manager is not None:\n if start_time is None:\n self.report_manager.start()\n else:\n self.report_manager.start_time = start_time","function_tokens":["def","_start_report_manager","(","self",",","start_time","=","None",")",":","if","self",".","report_manager","is","not","None",":","if","start_time","is","None",":","self",".","report_manager",".","start","(",")","else",":","self",".","report_manager",".","start_time","=","start_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L338-L346"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer._maybe_gather_stats","parameters":"(self, stat)","argument_list":"","return_statement":"return stat","docstring":"Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object","docstring_summary":"Gather statistics in multi-processes cases","docstring_tokens":["Gather","statistics","in","multi","-","processes","cases"],"function":"def _maybe_gather_stats(self, stat):\n \"\"\"\n Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n if stat is not None and self.n_gpu > 1:\n return onmt.utils.Statistics.all_gather_stats(stat)\n return stat","function_tokens":["def","_maybe_gather_stats","(","self",",","stat",")",":","if","stat","is","not","None","and","self",".","n_gpu",">","1",":","return","onmt",".","utils",".","Statistics",".","all_gather_stats","(","stat",")","return","stat"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L348-L361"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer._maybe_report_training","parameters":"(self, step, num_steps, learning_rate,\n report_stats)","argument_list":"","return_statement":"","docstring":"Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc","docstring_summary":"Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc","docstring_tokens":["Simple","function","to","report","training","stats","(","if","report_manager","is","set",")","see","onmt",".","utils",".","ReportManagerBase",".","report_training","for","doc"],"function":"def _maybe_report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, learning_rate, report_stats,\n multigpu=self.n_gpu > 
1)","function_tokens":["def","_maybe_report_training","(","self",",","step",",","num_steps",",","learning_rate",",","report_stats",")",":","if","self",".","report_manager","is","not","None",":","return","self",".","report_manager",".","report_training","(","step",",","num_steps",",","learning_rate",",","report_stats",",","multigpu","=","self",".","n_gpu",">","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L363-L372"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer._report_step","parameters":"(self, learning_rate, step, train_stats=None,\n valid_stats=None)","argument_list":"","return_statement":"","docstring":"Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc","docstring_summary":"Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc","docstring_tokens":["Simple","function","to","report","stats","(","if","report_manager","is","set",")","see","onmt",".","utils",".","ReportManagerBase",".","report_step","for","doc"],"function":"def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n \"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)","function_tokens":["def","_report_step","(","self",",","learning_rate",",","step",",","train_stats","=","None",",","valid_stats","=","None",")",":","if","self",".","report_manager","is","not","None",":","return","self",".","report_manager",".","report_step","(","learning_rate",",","step",",","train_stats","=","train_stats",",","valid_stats","=","valid_stats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L374-L383"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/trainer.py","language":"python","identifier":"Trainer._maybe_save","parameters":"(self, step)","argument_list":"","return_statement":"","docstring":"Save the model if a model saver is set","docstring_summary":"Save the model if a model saver is set","docstring_tokens":["Save","the","model","if","a","model","saver","is","set"],"function":"def _maybe_save(self, step):\n \"\"\"\n Save the model if a model saver is set\n \"\"\"\n if self.model_saver is not None:\n self.model_saver.maybe_save(step)","function_tokens":["def","_maybe_save","(","self",",","step",")",":","if","self",".","model_saver","is","not","None",":","self",".","model_saver",".","maybe_save","(","step",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/trainer.py#L385-L390"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/opts.py","language":"python","identifier":"model_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"These options are passed to the construction of the model.\n Be careful with these as they will be used during translation.","docstring_summary":"These options are passed to the construction of the model.\n Be 
careful with these as they will be used during translation.","docstring_tokens":["These","options","are","passed","to","the","construction","of","the","model",".","Be","careful","with","these","as","they","will","be","used","during","translation","."],"function":"def model_opts(parser):\n \"\"\"\n These options are passed to the construction of the model.\n Be careful with these as they will be used during translation.\n \"\"\"\n\n # Embedding Options\n group = parser.add_argument_group('Model-Embeddings')\n group.add_argument('-src_word_vec_size', type=int, default=500,\n help='Word embedding size for src.')\n group.add_argument('-tgt_word_vec_size', type=int, default=500,\n help='Word embedding size for tgt.')\n group.add_argument('-word_vec_size', type=int, default=-1,\n help='Word embedding size for src and tgt.')\n\n group.add_argument('-share_decoder_embeddings', action='store_true',\n help=\"\"\"Use a shared weight matrix for the input and\n output word embeddings in the decoder.\"\"\")\n group.add_argument('-share_embeddings', action='store_true',\n help=\"\"\"Share the word embeddings between encoder\n and decoder. Need to use shared dictionary for this\n option.\"\"\")\n group.add_argument('-position_encoding', action='store_true',\n help=\"\"\"Use a sin to mark relative words positions.\n Necessary for non-RNN style models.\n \"\"\")\n\n group = parser.add_argument_group('Model-Embedding Features')\n group.add_argument('-feat_merge', type=str, default='concat',\n choices=['concat', 'sum', 'mlp'],\n help=\"\"\"Merge action for incorporating features embeddings.\n Options [concat|sum|mlp].\"\"\")\n group.add_argument('-feat_vec_size', type=int, default=-1,\n help=\"\"\"If specified, feature embedding sizes\n will be set to this. Otherwise, feat_vec_exponent\n will be used.\"\"\")\n group.add_argument('-feat_vec_exponent', type=float, default=0.7,\n help=\"\"\"If -feat_merge_size is not set, feature\n embedding sizes will be set to N^feat_vec_exponent\n where N is the number of values the feature takes.\"\"\")\n\n # Encoder-Decoder Options\n group = parser.add_argument_group('Model- Encoder-Decoder')\n group.add_argument('-model_type', default='text',\n help=\"\"\"Type of source model to use. Allows\n the system to incorporate non-text inputs.\n Options are [text|img|audio].\"\"\")\n\n group.add_argument('-encoder_type', type=str, default='rnn',\n choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],\n help=\"\"\"Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].\"\"\")\n group.add_argument('-decoder_type', type=str, default='rnn',\n choices=['rnn', 'transformer', 'cnn'],\n help=\"\"\"Type of decoder layer to use. Non-RNN layers\n are experimental. 
Options are\n [rnn|transformer|cnn].\"\"\")\n\n group.add_argument('-layers', type=int, default=-1,\n help='Number of layers in enc\/dec.')\n group.add_argument('-enc_layers', type=int, default=2,\n help='Number of layers in the encoder')\n group.add_argument('-dec_layers', type=int, default=2,\n help='Number of layers in the decoder')\n group.add_argument('-rnn_size', type=int, default=500,\n help='Size of rnn hidden states')\n group.add_argument('-cnn_kernel_width', type=int, default=3,\n help=\"\"\"Size of windows in the cnn, the kernel_size is\n (cnn_kernel_width, 1) in conv layer\"\"\")\n\n group.add_argument('-input_feed', type=int, default=1,\n help=\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\")\n group.add_argument('-bridge', action=\"store_true\",\n help=\"\"\"Have an additional layer between the last encoder\n state and the first decoder state\"\"\")\n group.add_argument('-rnn_type', type=str, default='LSTM',\n choices=['LSTM', 'GRU', 'SRU'],\n action=CheckSRU,\n help=\"\"\"The gate type to use in the RNNs\"\"\")\n # group.add_argument('-residual', action=\"store_true\",\n # help=\"Add residual connections between RNN layers.\")\n\n group.add_argument('-brnn', action=DeprecateAction,\n help=\"Deprecated, use `encoder_type`.\")\n\n group.add_argument('-context_gate', type=str, default=None,\n choices=['source', 'target', 'both'],\n help=\"\"\"Type of context gate to use.\n Do not select for no context gate.\"\"\")\n\n # Attention options\n group = parser.add_argument_group('Model- Attention')\n group.add_argument('-global_attention', type=str, default='general',\n choices=['dot', 'general', 'mlp'],\n help=\"\"\"The attention type to use:\n dotprod or general (Luong) or MLP (Bahdanau)\"\"\")\n group.add_argument('-global_attention_function', type=str,\n default=\"softmax\", choices=[\"softmax\", \"sparsemax\"])\n group.add_argument('-self_attn_type', type=str, default=\"scaled-dot\",\n help=\"\"\"Self attention type in Transformer decoder\n layer -- currently \"scaled-dot\" or \"average\" \"\"\")\n group.add_argument('-heads', type=int, default=8,\n help='Number of heads for transformer self-attention')\n group.add_argument('-transformer_ff', type=int, default=2048,\n help='Size of hidden transformer feed-forward')\n\n # Generator and loss options.\n group.add_argument('-copy_attn', action=\"store_true\",\n help='Train copy attention layer.')\n group.add_argument('-generator_function', default=\"log_softmax\",\n choices=[\"log_softmax\", \"sparsemax\"],\n help=\"\"\"Which function to use for generating\n probabilities over the target vocabulary (choices:\n log_softmax, sparsemax)\"\"\")\n group.add_argument('-copy_attn_force', action=\"store_true\",\n help='When available, train to copy.')\n group.add_argument('-reuse_copy_attn', action=\"store_true\",\n help=\"Reuse standard attention for copy\")\n group.add_argument('-copy_loss_by_seqlength', action=\"store_true\",\n help=\"Divide copy loss by length of sequence\")\n group.add_argument('-coverage_attn', action=\"store_true\",\n help='Train a coverage attention layer.')\n group.add_argument('-lambda_coverage', type=float, default=1,\n help='Lambda value for coverage.')","function_tokens":["def","model_opts","(","parser",")",":","# Embedding 
Options","group","=","parser",".","add_argument_group","(","'Model-Embeddings'",")","group",".","add_argument","(","'-src_word_vec_size'",",","type","=","int",",","default","=","500",",","help","=","'Word embedding size for src.'",")","group",".","add_argument","(","'-tgt_word_vec_size'",",","type","=","int",",","default","=","500",",","help","=","'Word embedding size for tgt.'",")","group",".","add_argument","(","'-word_vec_size'",",","type","=","int",",","default","=","-","1",",","help","=","'Word embedding size for src and tgt.'",")","group",".","add_argument","(","'-share_decoder_embeddings'",",","action","=","'store_true'",",","help","=","\"\"\"Use a shared weight matrix for the input and\n output word embeddings in the decoder.\"\"\"",")","group",".","add_argument","(","'-share_embeddings'",",","action","=","'store_true'",",","help","=","\"\"\"Share the word embeddings between encoder\n and decoder. Need to use shared dictionary for this\n option.\"\"\"",")","group",".","add_argument","(","'-position_encoding'",",","action","=","'store_true'",",","help","=","\"\"\"Use a sin to mark relative words positions.\n Necessary for non-RNN style models.\n \"\"\"",")","group","=","parser",".","add_argument_group","(","'Model-Embedding Features'",")","group",".","add_argument","(","'-feat_merge'",",","type","=","str",",","default","=","'concat'",",","choices","=","[","'concat'",",","'sum'",",","'mlp'","]",",","help","=","\"\"\"Merge action for incorporating features embeddings.\n Options [concat|sum|mlp].\"\"\"",")","group",".","add_argument","(","'-feat_vec_size'",",","type","=","int",",","default","=","-","1",",","help","=","\"\"\"If specified, feature embedding sizes\n will be set to this. Otherwise, feat_vec_exponent\n will be used.\"\"\"",")","group",".","add_argument","(","'-feat_vec_exponent'",",","type","=","float",",","default","=","0.7",",","help","=","\"\"\"If -feat_merge_size is not set, feature\n embedding sizes will be set to N^feat_vec_exponent\n where N is the number of values the feature takes.\"\"\"",")","# Encoder-Decoder Options","group","=","parser",".","add_argument_group","(","'Model- Encoder-Decoder'",")","group",".","add_argument","(","'-model_type'",",","default","=","'text'",",","help","=","\"\"\"Type of source model to use. Allows\n the system to incorporate non-text inputs.\n Options are [text|img|audio].\"\"\"",")","group",".","add_argument","(","'-encoder_type'",",","type","=","str",",","default","=","'rnn'",",","choices","=","[","'rnn'",",","'brnn'",",","'mean'",",","'transformer'",",","'cnn'","]",",","help","=","\"\"\"Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].\"\"\"",")","group",".","add_argument","(","'-decoder_type'",",","type","=","str",",","default","=","'rnn'",",","choices","=","[","'rnn'",",","'transformer'",",","'cnn'","]",",","help","=","\"\"\"Type of decoder layer to use. Non-RNN layers\n are experimental. 
Options are\n [rnn|transformer|cnn].\"\"\"",")","group",".","add_argument","(","'-layers'",",","type","=","int",",","default","=","-","1",",","help","=","'Number of layers in enc\/dec.'",")","group",".","add_argument","(","'-enc_layers'",",","type","=","int",",","default","=","2",",","help","=","'Number of layers in the encoder'",")","group",".","add_argument","(","'-dec_layers'",",","type","=","int",",","default","=","2",",","help","=","'Number of layers in the decoder'",")","group",".","add_argument","(","'-rnn_size'",",","type","=","int",",","default","=","500",",","help","=","'Size of rnn hidden states'",")","group",".","add_argument","(","'-cnn_kernel_width'",",","type","=","int",",","default","=","3",",","help","=","\"\"\"Size of windows in the cnn, the kernel_size is\n (cnn_kernel_width, 1) in conv layer\"\"\"",")","group",".","add_argument","(","'-input_feed'",",","type","=","int",",","default","=","1",",","help","=","\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\"",")","group",".","add_argument","(","'-bridge'",",","action","=","\"store_true\"",",","help","=","\"\"\"Have an additional layer between the last encoder\n state and the first decoder state\"\"\"",")","group",".","add_argument","(","'-rnn_type'",",","type","=","str",",","default","=","'LSTM'",",","choices","=","[","'LSTM'",",","'GRU'",",","'SRU'","]",",","action","=","CheckSRU",",","help","=","\"\"\"The gate type to use in the RNNs\"\"\"",")","# group.add_argument('-residual', action=\"store_true\",","# help=\"Add residual connections between RNN layers.\")","group",".","add_argument","(","'-brnn'",",","action","=","DeprecateAction",",","help","=","\"Deprecated, use `encoder_type`.\"",")","group",".","add_argument","(","'-context_gate'",",","type","=","str",",","default","=","None",",","choices","=","[","'source'",",","'target'",",","'both'","]",",","help","=","\"\"\"Type of context gate to use.\n Do not select for no context gate.\"\"\"",")","# Attention options","group","=","parser",".","add_argument_group","(","'Model- Attention'",")","group",".","add_argument","(","'-global_attention'",",","type","=","str",",","default","=","'general'",",","choices","=","[","'dot'",",","'general'",",","'mlp'","]",",","help","=","\"\"\"The attention type to use:\n dotprod or general (Luong) or MLP (Bahdanau)\"\"\"",")","group",".","add_argument","(","'-global_attention_function'",",","type","=","str",",","default","=","\"softmax\"",",","choices","=","[","\"softmax\"",",","\"sparsemax\"","]",")","group",".","add_argument","(","'-self_attn_type'",",","type","=","str",",","default","=","\"scaled-dot\"",",","help","=","\"\"\"Self attention type in Transformer decoder\n layer -- currently \"scaled-dot\" or \"average\" \"\"\"",")","group",".","add_argument","(","'-heads'",",","type","=","int",",","default","=","8",",","help","=","'Number of heads for transformer self-attention'",")","group",".","add_argument","(","'-transformer_ff'",",","type","=","int",",","default","=","2048",",","help","=","'Size of hidden transformer feed-forward'",")","# Generator and loss options.","group",".","add_argument","(","'-copy_attn'",",","action","=","\"store_true\"",",","help","=","'Train copy attention layer.'",")","group",".","add_argument","(","'-generator_function'",",","default","=","\"log_softmax\"",",","choices","=","[","\"log_softmax\"",",","\"sparsemax\"","]",",","help","=","\"\"\"Which function to use for generating\n probabilities over the target vocabulary 
(choices:\n log_softmax, sparsemax)\"\"\"",")","group",".","add_argument","(","'-copy_attn_force'",",","action","=","\"store_true\"",",","help","=","'When available, train to copy.'",")","group",".","add_argument","(","'-reuse_copy_attn'",",","action","=","\"store_true\"",",","help","=","\"Reuse standard attention for copy\"",")","group",".","add_argument","(","'-copy_loss_by_seqlength'",",","action","=","\"store_true\"",",","help","=","\"Divide copy loss by length of sequence\"",")","group",".","add_argument","(","'-coverage_attn'",",","action","=","\"store_true\"",",","help","=","'Train a coverage attention layer.'",")","group",".","add_argument","(","'-lambda_coverage'",",","type","=","float",",","default","=","1",",","help","=","'Lambda value for coverage.'",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/opts.py#L8-L134"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/opts.py","language":"python","identifier":"preprocess_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"Pre-procesing options","docstring_summary":"Pre-procesing options","docstring_tokens":["Pre","-","procesing","options"],"function":"def preprocess_opts(parser):\n \"\"\" Pre-procesing options \"\"\"\n # Data options\n group = parser.add_argument_group('Data')\n group.add_argument('-data_type', default=\"text\",\n help=\"\"\"Type of the source input.\n Options are [text|img].\"\"\")\n\n group.add_argument('-train_src', required=True,\n help=\"Path to the training source data\")\n group.add_argument('-train_tgt', required=True,\n help=\"Path to the training target data\")\n group.add_argument('-valid_src', required=True,\n help=\"Path to the validation source data\")\n group.add_argument('-valid_tgt', required=True,\n help=\"Path to the validation target data\")\n\n group.add_argument('-src_dir', default=\"\",\n help=\"Source directory for image or audio files.\")\n\n group.add_argument('-save_data', required=True,\n help=\"Output file for the prepared data\")\n\n group.add_argument('-max_shard_size', type=int, default=0,\n help=\"\"\"For text corpus of large volume, it will\n be divided into shards of this size to preprocess.\n If 0, the data will be handled as a whole. The unit\n is in bytes. Optimal value should be multiples of\n 64 bytes. A commonly used sharding value is 131072000.\n It is recommended to ensure the corpus is shuffled\n before sharding.\"\"\")\n\n group.add_argument('-shard_size', type=int, default=0,\n help=\"\"\"Divide src_corpus and tgt_corpus into\n smaller multiple src_copus and tgt corpus files, then\n build shards, each shard will have\n opt.shard_size samples except last shard.\n shard_size=0 means no segmentation\n shard_size>0 means segment dataset into multiple shards,\n each shard has shard_size samples\"\"\")\n\n # Dictionary options, for text corpus\n\n group = parser.add_argument_group('Vocab')\n group.add_argument('-src_vocab', default=\"\",\n help=\"\"\"Path to an existing source vocabulary. Format:\n one word per line.\"\"\")\n group.add_argument('-tgt_vocab', default=\"\",\n help=\"\"\"Path to an existing target vocabulary. 
Format:\n one word per line.\"\"\")\n group.add_argument('-features_vocabs_prefix', type=str, default='',\n help=\"Path prefix to existing features vocabularies\")\n group.add_argument('-src_vocab_size', type=int, default=50000,\n help=\"Size of the source vocabulary\")\n group.add_argument('-tgt_vocab_size', type=int, default=50000,\n help=\"Size of the target vocabulary\")\n\n group.add_argument('-src_words_min_frequency', type=int, default=0)\n group.add_argument('-tgt_words_min_frequency', type=int, default=0)\n\n group.add_argument('-dynamic_dict', action='store_true',\n help=\"Create dynamic dictionaries\")\n group.add_argument('-share_vocab', action='store_true',\n help=\"Share source and target vocabulary\")\n\n # Truncation options, for text corpus\n group = parser.add_argument_group('Pruning')\n group.add_argument('-src_seq_length', type=int, default=50,\n help=\"Maximum source sequence length\")\n group.add_argument('-src_seq_length_trunc', type=int, default=0,\n help=\"Truncate source sequence length.\")\n group.add_argument('-tgt_seq_length', type=int, default=50,\n help=\"Maximum target sequence length to keep.\")\n group.add_argument('-tgt_seq_length_trunc', type=int, default=0,\n help=\"Truncate target sequence length.\")\n group.add_argument('-lower', action='store_true', help='lowercase data')\n\n # Data processing options\n group = parser.add_argument_group('Random')\n group.add_argument('-shuffle', type=int, default=1,\n help=\"Shuffle data\")\n group.add_argument('-seed', type=int, default=3435,\n help=\"Random seed\")\n\n group = parser.add_argument_group('Logging')\n group.add_argument('-report_every', type=int, default=100000,\n help=\"Report status every this many sentences\")\n group.add_argument('-log_file', type=str, default=\"\",\n help=\"Output logs to a file under this path.\")\n\n # Options most relevant to speech\n group = parser.add_argument_group('Speech')\n group.add_argument('-sample_rate', type=int, default=16000,\n help=\"Sample rate.\")\n group.add_argument('-window_size', type=float, default=.02,\n help=\"Window size for spectrogram in seconds.\")\n group.add_argument('-window_stride', type=float, default=.01,\n help=\"Window stride for spectrogram in seconds.\")\n group.add_argument('-window', default='hamming',\n help=\"Window type for spectrogram generation.\")\n\n # Option most relevant to image input\n group.add_argument('-image_channel_size', type=int, default=3,\n choices=[3, 1],\n help=\"\"\"Using grayscale image can training\n model faster and smaller\"\"\")","function_tokens":["def","preprocess_opts","(","parser",")",":","# Data options","group","=","parser",".","add_argument_group","(","'Data'",")","group",".","add_argument","(","'-data_type'",",","default","=","\"text\"",",","help","=","\"\"\"Type of the source input.\n Options are [text|img].\"\"\"",")","group",".","add_argument","(","'-train_src'",",","required","=","True",",","help","=","\"Path to the training source data\"",")","group",".","add_argument","(","'-train_tgt'",",","required","=","True",",","help","=","\"Path to the training target data\"",")","group",".","add_argument","(","'-valid_src'",",","required","=","True",",","help","=","\"Path to the validation source data\"",")","group",".","add_argument","(","'-valid_tgt'",",","required","=","True",",","help","=","\"Path to the validation target data\"",")","group",".","add_argument","(","'-src_dir'",",","default","=","\"\"",",","help","=","\"Source directory for image or audio 
files.\"",")","group",".","add_argument","(","'-save_data'",",","required","=","True",",","help","=","\"Output file for the prepared data\"",")","group",".","add_argument","(","'-max_shard_size'",",","type","=","int",",","default","=","0",",","help","=","\"\"\"For text corpus of large volume, it will\n be divided into shards of this size to preprocess.\n If 0, the data will be handled as a whole. The unit\n is in bytes. Optimal value should be multiples of\n 64 bytes. A commonly used sharding value is 131072000.\n It is recommended to ensure the corpus is shuffled\n before sharding.\"\"\"",")","group",".","add_argument","(","'-shard_size'",",","type","=","int",",","default","=","0",",","help","=","\"\"\"Divide src_corpus and tgt_corpus into\n smaller multiple src_copus and tgt corpus files, then\n build shards, each shard will have\n opt.shard_size samples except last shard.\n shard_size=0 means no segmentation\n shard_size>0 means segment dataset into multiple shards,\n each shard has shard_size samples\"\"\"",")","# Dictionary options, for text corpus","group","=","parser",".","add_argument_group","(","'Vocab'",")","group",".","add_argument","(","'-src_vocab'",",","default","=","\"\"",",","help","=","\"\"\"Path to an existing source vocabulary. Format:\n one word per line.\"\"\"",")","group",".","add_argument","(","'-tgt_vocab'",",","default","=","\"\"",",","help","=","\"\"\"Path to an existing target vocabulary. Format:\n one word per line.\"\"\"",")","group",".","add_argument","(","'-features_vocabs_prefix'",",","type","=","str",",","default","=","''",",","help","=","\"Path prefix to existing features vocabularies\"",")","group",".","add_argument","(","'-src_vocab_size'",",","type","=","int",",","default","=","50000",",","help","=","\"Size of the source vocabulary\"",")","group",".","add_argument","(","'-tgt_vocab_size'",",","type","=","int",",","default","=","50000",",","help","=","\"Size of the target vocabulary\"",")","group",".","add_argument","(","'-src_words_min_frequency'",",","type","=","int",",","default","=","0",")","group",".","add_argument","(","'-tgt_words_min_frequency'",",","type","=","int",",","default","=","0",")","group",".","add_argument","(","'-dynamic_dict'",",","action","=","'store_true'",",","help","=","\"Create dynamic dictionaries\"",")","group",".","add_argument","(","'-share_vocab'",",","action","=","'store_true'",",","help","=","\"Share source and target vocabulary\"",")","# Truncation options, for text corpus","group","=","parser",".","add_argument_group","(","'Pruning'",")","group",".","add_argument","(","'-src_seq_length'",",","type","=","int",",","default","=","50",",","help","=","\"Maximum source sequence length\"",")","group",".","add_argument","(","'-src_seq_length_trunc'",",","type","=","int",",","default","=","0",",","help","=","\"Truncate source sequence length.\"",")","group",".","add_argument","(","'-tgt_seq_length'",",","type","=","int",",","default","=","50",",","help","=","\"Maximum target sequence length to keep.\"",")","group",".","add_argument","(","'-tgt_seq_length_trunc'",",","type","=","int",",","default","=","0",",","help","=","\"Truncate target sequence length.\"",")","group",".","add_argument","(","'-lower'",",","action","=","'store_true'",",","help","=","'lowercase data'",")","# Data processing options","group","=","parser",".","add_argument_group","(","'Random'",")","group",".","add_argument","(","'-shuffle'",",","type","=","int",",","default","=","1",",","help","=","\"Shuffle 
data\"",")","group",".","add_argument","(","'-seed'",",","type","=","int",",","default","=","3435",",","help","=","\"Random seed\"",")","group","=","parser",".","add_argument_group","(","'Logging'",")","group",".","add_argument","(","'-report_every'",",","type","=","int",",","default","=","100000",",","help","=","\"Report status every this many sentences\"",")","group",".","add_argument","(","'-log_file'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Output logs to a file under this path.\"",")","# Options most relevant to speech","group","=","parser",".","add_argument_group","(","'Speech'",")","group",".","add_argument","(","'-sample_rate'",",","type","=","int",",","default","=","16000",",","help","=","\"Sample rate.\"",")","group",".","add_argument","(","'-window_size'",",","type","=","float",",","default","=",".02",",","help","=","\"Window size for spectrogram in seconds.\"",")","group",".","add_argument","(","'-window_stride'",",","type","=","float",",","default","=",".01",",","help","=","\"Window stride for spectrogram in seconds.\"",")","group",".","add_argument","(","'-window'",",","default","=","'hamming'",",","help","=","\"Window type for spectrogram generation.\"",")","# Option most relevant to image input","group",".","add_argument","(","'-image_channel_size'",",","type","=","int",",","default","=","3",",","choices","=","[","3",",","1","]",",","help","=","\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/opts.py#L137-L242"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/opts.py","language":"python","identifier":"train_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"Training and saving options","docstring_summary":"Training and saving options","docstring_tokens":["Training","and","saving","options"],"function":"def train_opts(parser):\n \"\"\" Training and saving options \"\"\"\n\n group = parser.add_argument_group('General')\n group.add_argument('-data', required=True,\n help=\"\"\"Path prefix to the \".train.pt\" and\n \".valid.pt\" file path from preprocess.py\"\"\")\n\n group.add_argument('-save_model', default='model',\n help=\"\"\"Model filename (the model will be saved as\n _N.pt where N is the number\n of steps\"\"\")\n\n group.add_argument('-save_checkpoint_steps', type=int, default=5000,\n help=\"\"\"Save a checkpoint every X steps\"\"\")\n group.add_argument('-keep_checkpoint', type=int, default=-1,\n help=\"\"\"Keep X checkpoints (negative: keep all)\"\"\")\n\n # GPU\n group.add_argument('-gpuid', default=[], nargs='+', type=int,\n help=\"Deprecated see world_size and gpu_ranks.\")\n group.add_argument('-gpu_ranks', default=[], nargs='+', type=int,\n help=\"list of ranks of each process.\")\n group.add_argument('-world_size', default=1, type=int,\n help=\"total number of distributed processes.\")\n group.add_argument('-gpu_backend', default='nccl', nargs='+', type=str,\n help=\"Type of torch distributed backend\")\n group.add_argument('-gpu_verbose_level', default=0, type=int,\n help=\"Gives more info on each process per GPU.\")\n group.add_argument('-master_ip', default=\"localhost\", type=str,\n help=\"IP of master for torch.distributed training.\")\n group.add_argument('-master_port', default=10000, type=int,\n help=\"Port of master for torch.distributed training.\")\n\n group.add_argument('-seed', 
type=int, default=-1,\n help=\"\"\"Random seed used for the experiments\n reproducibility.\"\"\")\n\n # Init options\n group = parser.add_argument_group('Initialization')\n group.add_argument('-param_init', type=float, default=0.1,\n help=\"\"\"Parameters are initialized over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization\"\"\")\n group.add_argument('-param_init_glorot', action='store_true',\n help=\"\"\"Init parameters with xavier_uniform.\n Required for transfomer.\"\"\")\n\n group.add_argument('-train_from', default='', type=str,\n help=\"\"\"If training from a checkpoint then this is the\n path to the pretrained model's state_dict.\"\"\")\n\n # Pretrained word vectors\n group.add_argument('-pre_word_vecs_enc',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\")\n group.add_argument('-pre_word_vecs_dec',\n help=\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\")\n # Fixed word vectors\n group.add_argument('-fix_word_vecs_enc',\n action='store_true',\n help=\"Fix word embeddings on the encoder side.\")\n group.add_argument('-fix_word_vecs_dec',\n action='store_true',\n help=\"Fix word embeddings on the decoder side.\")\n\n # Optimization options\n group = parser.add_argument_group('Optimization- Type')\n group.add_argument('-batch_size', type=int, default=64,\n help='Maximum batch size for training')\n group.add_argument('-batch_type', default='sents',\n choices=[\"sents\", \"tokens\"],\n help=\"\"\"Batch grouping for batch_size. Standard\n is sents. Tokens will do dynamic batching\"\"\")\n group.add_argument('-normalization', default='sents',\n choices=[\"sents\", \"tokens\"],\n help='Normalization method of the gradient.')\n group.add_argument('-accum_count', type=int, default=1,\n help=\"\"\"Accumulate gradient this many times.\n Approximately equivalent to updating\n batch_size * accum_count batches at once.\n Recommended for Transformer.\"\"\")\n group.add_argument('-valid_steps', type=int, default=10000,\n help='Perfom validation every X steps')\n group.add_argument('-valid_batch_size', type=int, default=32,\n help='Maximum batch size for validation')\n group.add_argument('-max_generator_batches', type=int, default=32,\n help=\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. 
Higher is faster, but\n uses more memory.\"\"\")\n group.add_argument('-train_steps', type=int, default=100000,\n help='Number of training steps')\n group.add_argument('-epochs', type=int, default=0,\n help='Deprecated epochs see train_steps')\n group.add_argument('-optim', default='sgd',\n choices=['sgd', 'adagrad', 'adadelta', 'adam',\n 'sparseadam'],\n help=\"\"\"Optimization method.\"\"\")\n group.add_argument('-adagrad_accumulator_init', type=float, default=0,\n help=\"\"\"Initializes the accumulator values in adagrad.\n Mirrors the initial_accumulator_value option\n in the tensorflow adagrad (use 0.1 for their default).\n \"\"\")\n group.add_argument('-max_grad_norm', type=float, default=5,\n help=\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm\"\"\")\n group.add_argument('-dropout', type=float, default=0.3,\n help=\"Dropout probability; applied in LSTM stacks.\")\n group.add_argument('-truncated_decoder', type=int, default=0,\n help=\"\"\"Truncated bptt.\"\"\")\n group.add_argument('-adam_beta1', type=float, default=0.9,\n help=\"\"\"The beta1 parameter used by Adam.\n Almost without exception a value of 0.9 is used in\n the literature, seemingly giving good results,\n so we would discourage changing this value from\n the default without due consideration.\"\"\")\n group.add_argument('-adam_beta2', type=float, default=0.999,\n help=\"\"\"The beta2 parameter used by Adam.\n Typically a value of 0.999 is recommended, as this is\n the value suggested by the original paper describing\n Adam, and is also the value adopted in other frameworks\n such as Tensorflow and Kerras, i.e. see:\n https:\/\/www.tensorflow.org\/api_docs\/python\/tf\/train\/AdamOptimizer\n https:\/\/keras.io\/optimizers\/ .\n Whereas recently the paper \"Attention is All You Need\"\n suggested a value of 0.98 for beta2, this parameter may\n not work well for normal models \/ default\n baselines.\"\"\")\n group.add_argument('-label_smoothing', type=float, default=0.0,\n help=\"\"\"Label smoothing value epsilon.\n Probabilities of all non-true labels\n will be smoothed by epsilon \/ (vocab_size - 1).\n Set to zero to turn off label smoothing.\n For more detailed information, see:\n https:\/\/arxiv.org\/abs\/1512.00567\"\"\")\n # learning rate\n group = parser.add_argument_group('Optimization- Rate')\n group.add_argument('-learning_rate', type=float, default=1.0,\n help=\"\"\"Starting learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\")\n group.add_argument('-learning_rate_decay', type=float, default=0.5,\n help=\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) steps have gone past\n start_decay_steps\"\"\")\n group.add_argument('-start_decay_steps', type=int, default=50000,\n help=\"\"\"Start decaying every decay_steps after\n start_decay_steps\"\"\")\n group.add_argument('-decay_steps', type=int, default=10000,\n help=\"\"\"Decay every decay_steps\"\"\")\n\n group.add_argument('-decay_method', type=str, default=\"\",\n choices=['noam'], help=\"Use a custom decay rate.\")\n group.add_argument('-warmup_steps', type=int, default=4000,\n help=\"\"\"Number of warmup steps for custom decay.\"\"\")\n\n group = parser.add_argument_group('Logging')\n group.add_argument('-report_every', type=int, default=50,\n help=\"Print stats at this interval.\")\n group.add_argument('-log_file', type=str, default=\"\",\n help=\"Output logs to a file 
under this path.\")\n group.add_argument('-exp_host', type=str, default=\"\",\n help=\"Send logs to this crayon server.\")\n group.add_argument('-exp', type=str, default=\"\",\n help=\"Name of the experiment for logging.\")\n # Use TensorboardX for visualization during training\n group.add_argument('-tensorboard', action=\"store_true\",\n help=\"\"\"Use tensorboardX for visualization during training.\n Must have the library tensorboardX.\"\"\")\n group.add_argument(\"-tensorboard_log_dir\", type=str,\n default=\"runs\/onmt\",\n help=\"\"\"Log directory for Tensorboard.\n This is also the name of the run.\n \"\"\")\n\n group = parser.add_argument_group('Speech')\n # Options most relevant to speech\n group.add_argument('-sample_rate', type=int, default=16000,\n help=\"Sample rate.\")\n group.add_argument('-window_size', type=float, default=.02,\n help=\"Window size for spectrogram in seconds.\")\n\n # Option most relevant to image input\n group.add_argument('-image_channel_size', type=int, default=3,\n choices=[3, 1],\n help=\"\"\"Using grayscale image can training\n model faster and smaller\"\"\")","function_tokens":["def","train_opts","(","parser",")",":","group","=","parser",".","add_argument_group","(","'General'",")","group",".","add_argument","(","'-data'",",","required","=","True",",","help","=","\"\"\"Path prefix to the \".train.pt\" and\n \".valid.pt\" file path from preprocess.py\"\"\"",")","group",".","add_argument","(","'-save_model'",",","default","=","'model'",",","help","=","\"\"\"Model filename (the model will be saved as\n _N.pt where N is the number\n of steps\"\"\"",")","group",".","add_argument","(","'-save_checkpoint_steps'",",","type","=","int",",","default","=","5000",",","help","=","\"\"\"Save a checkpoint every X steps\"\"\"",")","group",".","add_argument","(","'-keep_checkpoint'",",","type","=","int",",","default","=","-","1",",","help","=","\"\"\"Keep X checkpoints (negative: keep all)\"\"\"",")","# GPU","group",".","add_argument","(","'-gpuid'",",","default","=","[","]",",","nargs","=","'+'",",","type","=","int",",","help","=","\"Deprecated see world_size and gpu_ranks.\"",")","group",".","add_argument","(","'-gpu_ranks'",",","default","=","[","]",",","nargs","=","'+'",",","type","=","int",",","help","=","\"list of ranks of each process.\"",")","group",".","add_argument","(","'-world_size'",",","default","=","1",",","type","=","int",",","help","=","\"total number of distributed processes.\"",")","group",".","add_argument","(","'-gpu_backend'",",","default","=","'nccl'",",","nargs","=","'+'",",","type","=","str",",","help","=","\"Type of torch distributed backend\"",")","group",".","add_argument","(","'-gpu_verbose_level'",",","default","=","0",",","type","=","int",",","help","=","\"Gives more info on each process per GPU.\"",")","group",".","add_argument","(","'-master_ip'",",","default","=","\"localhost\"",",","type","=","str",",","help","=","\"IP of master for torch.distributed training.\"",")","group",".","add_argument","(","'-master_port'",",","default","=","10000",",","type","=","int",",","help","=","\"Port of master for torch.distributed training.\"",")","group",".","add_argument","(","'-seed'",",","type","=","int",",","default","=","-","1",",","help","=","\"\"\"Random seed used for the experiments\n reproducibility.\"\"\"",")","# Init options","group","=","parser",".","add_argument_group","(","'Initialization'",")","group",".","add_argument","(","'-param_init'",",","type","=","float",",","default","=","0.1",",","help","=","\"\"\"Parameters are initialized 
over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization\"\"\"",")","group",".","add_argument","(","'-param_init_glorot'",",","action","=","'store_true'",",","help","=","\"\"\"Init parameters with xavier_uniform.\n Required for transfomer.\"\"\"",")","group",".","add_argument","(","'-train_from'",",","default","=","''",",","type","=","str",",","help","=","\"\"\"If training from a checkpoint then this is the\n path to the pretrained model's state_dict.\"\"\"",")","# Pretrained word vectors","group",".","add_argument","(","'-pre_word_vecs_enc'",",","help","=","\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\"",")","group",".","add_argument","(","'-pre_word_vecs_dec'",",","help","=","\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\"",")","# Fixed word vectors","group",".","add_argument","(","'-fix_word_vecs_enc'",",","action","=","'store_true'",",","help","=","\"Fix word embeddings on the encoder side.\"",")","group",".","add_argument","(","'-fix_word_vecs_dec'",",","action","=","'store_true'",",","help","=","\"Fix word embeddings on the decoder side.\"",")","# Optimization options","group","=","parser",".","add_argument_group","(","'Optimization- Type'",")","group",".","add_argument","(","'-batch_size'",",","type","=","int",",","default","=","64",",","help","=","'Maximum batch size for training'",")","group",".","add_argument","(","'-batch_type'",",","default","=","'sents'",",","choices","=","[","\"sents\"",",","\"tokens\"","]",",","help","=","\"\"\"Batch grouping for batch_size. Standard\n is sents. Tokens will do dynamic batching\"\"\"",")","group",".","add_argument","(","'-normalization'",",","default","=","'sents'",",","choices","=","[","\"sents\"",",","\"tokens\"","]",",","help","=","'Normalization method of the gradient.'",")","group",".","add_argument","(","'-accum_count'",",","type","=","int",",","default","=","1",",","help","=","\"\"\"Accumulate gradient this many times.\n Approximately equivalent to updating\n batch_size * accum_count batches at once.\n Recommended for Transformer.\"\"\"",")","group",".","add_argument","(","'-valid_steps'",",","type","=","int",",","default","=","10000",",","help","=","'Perfom validation every X steps'",")","group",".","add_argument","(","'-valid_batch_size'",",","type","=","int",",","default","=","32",",","help","=","'Maximum batch size for validation'",")","group",".","add_argument","(","'-max_generator_batches'",",","type","=","int",",","default","=","32",",","help","=","\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. 
Higher is faster, but\n uses more memory.\"\"\"",")","group",".","add_argument","(","'-train_steps'",",","type","=","int",",","default","=","100000",",","help","=","'Number of training steps'",")","group",".","add_argument","(","'-epochs'",",","type","=","int",",","default","=","0",",","help","=","'Deprecated epochs see train_steps'",")","group",".","add_argument","(","'-optim'",",","default","=","'sgd'",",","choices","=","[","'sgd'",",","'adagrad'",",","'adadelta'",",","'adam'",",","'sparseadam'","]",",","help","=","\"\"\"Optimization method.\"\"\"",")","group",".","add_argument","(","'-adagrad_accumulator_init'",",","type","=","float",",","default","=","0",",","help","=","\"\"\"Initializes the accumulator values in adagrad.\n Mirrors the initial_accumulator_value option\n in the tensorflow adagrad (use 0.1 for their default).\n \"\"\"",")","group",".","add_argument","(","'-max_grad_norm'",",","type","=","float",",","default","=","5",",","help","=","\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm\"\"\"",")","group",".","add_argument","(","'-dropout'",",","type","=","float",",","default","=","0.3",",","help","=","\"Dropout probability; applied in LSTM stacks.\"",")","group",".","add_argument","(","'-truncated_decoder'",",","type","=","int",",","default","=","0",",","help","=","\"\"\"Truncated bptt.\"\"\"",")","group",".","add_argument","(","'-adam_beta1'",",","type","=","float",",","default","=","0.9",",","help","=","\"\"\"The beta1 parameter used by Adam.\n Almost without exception a value of 0.9 is used in\n the literature, seemingly giving good results,\n so we would discourage changing this value from\n the default without due consideration.\"\"\"",")","group",".","add_argument","(","'-adam_beta2'",",","type","=","float",",","default","=","0.999",",","help","=","\"\"\"The beta2 parameter used by Adam.\n Typically a value of 0.999 is recommended, as this is\n the value suggested by the original paper describing\n Adam, and is also the value adopted in other frameworks\n such as Tensorflow and Kerras, i.e. 
see:\n https:\/\/www.tensorflow.org\/api_docs\/python\/tf\/train\/AdamOptimizer\n https:\/\/keras.io\/optimizers\/ .\n Whereas recently the paper \"Attention is All You Need\"\n suggested a value of 0.98 for beta2, this parameter may\n not work well for normal models \/ default\n baselines.\"\"\"",")","group",".","add_argument","(","'-label_smoothing'",",","type","=","float",",","default","=","0.0",",","help","=","\"\"\"Label smoothing value epsilon.\n Probabilities of all non-true labels\n will be smoothed by epsilon \/ (vocab_size - 1).\n Set to zero to turn off label smoothing.\n For more detailed information, see:\n https:\/\/arxiv.org\/abs\/1512.00567\"\"\"",")","# learning rate","group","=","parser",".","add_argument_group","(","'Optimization- Rate'",")","group",".","add_argument","(","'-learning_rate'",",","type","=","float",",","default","=","1.0",",","help","=","\"\"\"Starting learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\"",")","group",".","add_argument","(","'-learning_rate_decay'",",","type","=","float",",","default","=","0.5",",","help","=","\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) steps have gone past\n start_decay_steps\"\"\"",")","group",".","add_argument","(","'-start_decay_steps'",",","type","=","int",",","default","=","50000",",","help","=","\"\"\"Start decaying every decay_steps after\n start_decay_steps\"\"\"",")","group",".","add_argument","(","'-decay_steps'",",","type","=","int",",","default","=","10000",",","help","=","\"\"\"Decay every decay_steps\"\"\"",")","group",".","add_argument","(","'-decay_method'",",","type","=","str",",","default","=","\"\"",",","choices","=","[","'noam'","]",",","help","=","\"Use a custom decay rate.\"",")","group",".","add_argument","(","'-warmup_steps'",",","type","=","int",",","default","=","4000",",","help","=","\"\"\"Number of warmup steps for custom decay.\"\"\"",")","group","=","parser",".","add_argument_group","(","'Logging'",")","group",".","add_argument","(","'-report_every'",",","type","=","int",",","default","=","50",",","help","=","\"Print stats at this interval.\"",")","group",".","add_argument","(","'-log_file'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Output logs to a file under this path.\"",")","group",".","add_argument","(","'-exp_host'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Send logs to this crayon server.\"",")","group",".","add_argument","(","'-exp'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Name of the experiment for logging.\"",")","# Use TensorboardX for visualization during training","group",".","add_argument","(","'-tensorboard'",",","action","=","\"store_true\"",",","help","=","\"\"\"Use tensorboardX for visualization during training.\n Must have the library tensorboardX.\"\"\"",")","group",".","add_argument","(","\"-tensorboard_log_dir\"",",","type","=","str",",","default","=","\"runs\/onmt\"",",","help","=","\"\"\"Log directory for Tensorboard.\n This is also the name of the run.\n \"\"\"",")","group","=","parser",".","add_argument_group","(","'Speech'",")","# Options most relevant to speech","group",".","add_argument","(","'-sample_rate'",",","type","=","int",",","default","=","16000",",","help","=","\"Sample rate.\"",")","group",".","add_argument","(","'-window_size'",",","type","=","float",",","default","=",".02",",","help","=","\"Window size for spectrogram in seconds.\"",")","# Option most 
relevant to image input","group",".","add_argument","(","'-image_channel_size'",",","type","=","int",",","default","=","3",",","choices","=","[","3",",","1","]",",","help","=","\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/opts.py#L245-L436"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/opts.py","language":"python","identifier":"translate_opts","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"Translation \/ inference options","docstring_summary":"Translation \/ inference options","docstring_tokens":["Translation","\/","inference","options"],"function":"def translate_opts(parser):\n \"\"\" Translation \/ inference options \"\"\"\n group = parser.add_argument_group('Model')\n group.add_argument('-model', dest='models', metavar='MODEL',\n nargs='+', type=str, default=[], required=True,\n help='Path to model .pt file(s). '\n 'Multiple models can be specified, '\n 'for ensemble decoding.')\n\n group = parser.add_argument_group('Data')\n group.add_argument('-data_type', default=\"text\",\n help=\"Type of the source input. Options: [text|img].\")\n\n group.add_argument('-src', required=True,\n help=\"\"\"Source sequence to decode (one line per\n sequence)\"\"\")\n group.add_argument('-src_dir', default=\"\",\n help='Source directory for image or audio files')\n group.add_argument('-tgt',\n help='True target sequence (optional)')\n group.add_argument('-output', default='pred.txt',\n help=\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\")\n group.add_argument('-report_bleu', action='store_true',\n help=\"\"\"Report bleu score after translation,\n call tools\/multi-bleu.perl on command line\"\"\")\n group.add_argument('-report_rouge', action='store_true',\n help=\"\"\"Report rouge 1\/2\/3\/L\/SU4 score after translation\n call tools\/test_rouge.py on command line\"\"\")\n\n # Options most relevant to summarization.\n group.add_argument('-dynamic_dict', action='store_true',\n help=\"Create dynamic dictionaries\")\n group.add_argument('-share_vocab', action='store_true',\n help=\"Share source and target vocabulary\")\n\n group = parser.add_argument_group('Beam')\n group.add_argument('-fast', action=\"store_true\",\n help=\"\"\"Use fast beam search (some features may not be\n supported!)\"\"\")\n group.add_argument('-beam_size', type=int, default=5,\n help='Beam size')\n group.add_argument('-min_length', type=int, default=0,\n help='Minimum prediction length')\n group.add_argument('-max_length', type=int, default=100,\n help='Maximum prediction length.')\n group.add_argument('-max_sent_length', action=DeprecateAction,\n help=\"Deprecated, use `-max_length` instead\")\n\n # Alpha and Beta values for Google Length + Coverage penalty\n # Described here: https:\/\/arxiv.org\/pdf\/1609.08144.pdf, Section 7\n group.add_argument('-stepwise_penalty', action='store_true',\n help=\"\"\"Apply penalty at every decoding step.\n Helpful for summary penalty.\"\"\")\n group.add_argument('-length_penalty', default='none',\n choices=['none', 'wu', 'avg'],\n help=\"\"\"Length Penalty to use.\"\"\")\n group.add_argument('-coverage_penalty', default='none',\n choices=['none', 'wu', 'summary'],\n help=\"\"\"Coverage Penalty to use.\"\"\")\n group.add_argument('-alpha', type=float, default=0.,\n help=\"\"\"Google NMT length penalty parameter\n 
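train_opts above exposes '-decay_method noam' together with '-warmup_steps' (default 4000), but the schedule itself lives elsewhere in the trainer. The sketch below is the usual Noam formula from "Attention is All You Need", shown for orientation only; model_dim and factor are assumptions, not options registered above:

def noam_learning_rate(step, model_dim=512, warmup_steps=4000, factor=1.0):
    # linear warm-up for warmup_steps, then decay proportional to step ** -0.5
    return factor * model_dim ** -0.5 * min(step ** -0.5,
                                            step * warmup_steps ** -1.5)

for step in (100, 4000, 100000):
    print(step, noam_learning_rate(step))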
(higher = longer generation)\"\"\")\n group.add_argument('-beta', type=float, default=-0.,\n help=\"\"\"Coverage penalty parameter\"\"\")\n group.add_argument('-block_ngram_repeat', type=int, default=0,\n help='Block repetition of ngrams during decoding.')\n group.add_argument('-ignore_when_blocking', nargs='+', type=str,\n default=[],\n help=\"\"\"Ignore these strings when blocking repeats.\n You want to block sentence delimiters.\"\"\")\n group.add_argument('-replace_unk', action=\"store_true\",\n help=\"\"\"Replace the generated UNK tokens with the\n source token that had highest attention weight. If\n phrase_table is provided, it will lookup the\n identified source token and give the corresponding\n target token. If it is not provided(or the identified\n source token does not exist in the table) then it\n will copy the source token\"\"\")\n\n group = parser.add_argument_group('Logging')\n group.add_argument('-verbose', action=\"store_true\",\n help='Print scores and predictions for each sentence')\n group.add_argument('-log_file', type=str, default=\"\",\n help=\"Output logs to a file under this path.\")\n group.add_argument('-attn_debug', action=\"store_true\",\n help='Print best attn for each word')\n group.add_argument('-dump_beam', type=str, default=\"\",\n help='File to dump beam information to.')\n group.add_argument('-n_best', type=int, default=1,\n help=\"\"\"If verbose is set, will output the n_best\n decoded sentences\"\"\")\n\n group = parser.add_argument_group('Efficiency')\n group.add_argument('-batch_size', type=int, default=30,\n help='Batch size')\n group.add_argument('-gpu', type=int, default=-1,\n help=\"Device to run on\")\n\n # Options most relevant to speech.\n group = parser.add_argument_group('Speech')\n group.add_argument('-sample_rate', type=int, default=16000,\n help=\"Sample rate.\")\n group.add_argument('-window_size', type=float, default=.02,\n help='Window size for spectrogram in seconds')\n group.add_argument('-window_stride', type=float, default=.01,\n help='Window stride for spectrogram in seconds')\n group.add_argument('-window', default='hamming',\n help='Window type for spectrogram generation')\n\n # Option most relevant to image input\n group.add_argument('-image_channel_size', type=int, default=3,\n choices=[3, 1],\n help=\"\"\"Using grayscale image can training\n model faster and smaller\"\"\")","function_tokens":["def","translate_opts","(","parser",")",":","group","=","parser",".","add_argument_group","(","'Model'",")","group",".","add_argument","(","'-model'",",","dest","=","'models'",",","metavar","=","'MODEL'",",","nargs","=","'+'",",","type","=","str",",","default","=","[","]",",","required","=","True",",","help","=","'Path to model .pt file(s). '","'Multiple models can be specified, '","'for ensemble decoding.'",")","group","=","parser",".","add_argument_group","(","'Data'",")","group",".","add_argument","(","'-data_type'",",","default","=","\"text\"",",","help","=","\"Type of the source input. 
Options: [text|img].\"",")","group",".","add_argument","(","'-src'",",","required","=","True",",","help","=","\"\"\"Source sequence to decode (one line per\n sequence)\"\"\"",")","group",".","add_argument","(","'-src_dir'",",","default","=","\"\"",",","help","=","'Source directory for image or audio files'",")","group",".","add_argument","(","'-tgt'",",","help","=","'True target sequence (optional)'",")","group",".","add_argument","(","'-output'",",","default","=","'pred.txt'",",","help","=","\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\"",")","group",".","add_argument","(","'-report_bleu'",",","action","=","'store_true'",",","help","=","\"\"\"Report bleu score after translation,\n call tools\/multi-bleu.perl on command line\"\"\"",")","group",".","add_argument","(","'-report_rouge'",",","action","=","'store_true'",",","help","=","\"\"\"Report rouge 1\/2\/3\/L\/SU4 score after translation\n call tools\/test_rouge.py on command line\"\"\"",")","# Options most relevant to summarization.","group",".","add_argument","(","'-dynamic_dict'",",","action","=","'store_true'",",","help","=","\"Create dynamic dictionaries\"",")","group",".","add_argument","(","'-share_vocab'",",","action","=","'store_true'",",","help","=","\"Share source and target vocabulary\"",")","group","=","parser",".","add_argument_group","(","'Beam'",")","group",".","add_argument","(","'-fast'",",","action","=","\"store_true\"",",","help","=","\"\"\"Use fast beam search (some features may not be\n supported!)\"\"\"",")","group",".","add_argument","(","'-beam_size'",",","type","=","int",",","default","=","5",",","help","=","'Beam size'",")","group",".","add_argument","(","'-min_length'",",","type","=","int",",","default","=","0",",","help","=","'Minimum prediction length'",")","group",".","add_argument","(","'-max_length'",",","type","=","int",",","default","=","100",",","help","=","'Maximum prediction length.'",")","group",".","add_argument","(","'-max_sent_length'",",","action","=","DeprecateAction",",","help","=","\"Deprecated, use `-max_length` instead\"",")","# Alpha and Beta values for Google Length + Coverage penalty","# Described here: https:\/\/arxiv.org\/pdf\/1609.08144.pdf, Section 7","group",".","add_argument","(","'-stepwise_penalty'",",","action","=","'store_true'",",","help","=","\"\"\"Apply penalty at every decoding step.\n Helpful for summary penalty.\"\"\"",")","group",".","add_argument","(","'-length_penalty'",",","default","=","'none'",",","choices","=","[","'none'",",","'wu'",",","'avg'","]",",","help","=","\"\"\"Length Penalty to use.\"\"\"",")","group",".","add_argument","(","'-coverage_penalty'",",","default","=","'none'",",","choices","=","[","'none'",",","'wu'",",","'summary'","]",",","help","=","\"\"\"Coverage Penalty to use.\"\"\"",")","group",".","add_argument","(","'-alpha'",",","type","=","float",",","default","=","0.",",","help","=","\"\"\"Google NMT length penalty parameter\n (higher = longer generation)\"\"\"",")","group",".","add_argument","(","'-beta'",",","type","=","float",",","default","=","-","0.",",","help","=","\"\"\"Coverage penalty parameter\"\"\"",")","group",".","add_argument","(","'-block_ngram_repeat'",",","type","=","int",",","default","=","0",",","help","=","'Block repetition of ngrams during decoding.'",")","group",".","add_argument","(","'-ignore_when_blocking'",",","nargs","=","'+'",",","type","=","str",",","default","=","[","]",",","help","=","\"\"\"Ignore these strings when blocking repeats.\n You want to block sentence 
delimiters.\"\"\"",")","group",".","add_argument","(","'-replace_unk'",",","action","=","\"store_true\"",",","help","=","\"\"\"Replace the generated UNK tokens with the\n source token that had highest attention weight. If\n phrase_table is provided, it will lookup the\n identified source token and give the corresponding\n target token. If it is not provided(or the identified\n source token does not exist in the table) then it\n will copy the source token\"\"\"",")","group","=","parser",".","add_argument_group","(","'Logging'",")","group",".","add_argument","(","'-verbose'",",","action","=","\"store_true\"",",","help","=","'Print scores and predictions for each sentence'",")","group",".","add_argument","(","'-log_file'",",","type","=","str",",","default","=","\"\"",",","help","=","\"Output logs to a file under this path.\"",")","group",".","add_argument","(","'-attn_debug'",",","action","=","\"store_true\"",",","help","=","'Print best attn for each word'",")","group",".","add_argument","(","'-dump_beam'",",","type","=","str",",","default","=","\"\"",",","help","=","'File to dump beam information to.'",")","group",".","add_argument","(","'-n_best'",",","type","=","int",",","default","=","1",",","help","=","\"\"\"If verbose is set, will output the n_best\n decoded sentences\"\"\"",")","group","=","parser",".","add_argument_group","(","'Efficiency'",")","group",".","add_argument","(","'-batch_size'",",","type","=","int",",","default","=","30",",","help","=","'Batch size'",")","group",".","add_argument","(","'-gpu'",",","type","=","int",",","default","=","-","1",",","help","=","\"Device to run on\"",")","# Options most relevant to speech.","group","=","parser",".","add_argument_group","(","'Speech'",")","group",".","add_argument","(","'-sample_rate'",",","type","=","int",",","default","=","16000",",","help","=","\"Sample rate.\"",")","group",".","add_argument","(","'-window_size'",",","type","=","float",",","default","=",".02",",","help","=","'Window size for spectrogram in seconds'",")","group",".","add_argument","(","'-window_stride'",",","type","=","float",",","default","=",".01",",","help","=","'Window stride for spectrogram in seconds'",")","group",".","add_argument","(","'-window'",",","default","=","'hamming'",",","help","=","'Window type for spectrogram generation'",")","# Option most relevant to image input","group",".","add_argument","(","'-image_channel_size'",",","type","=","int",",","default","=","3",",","choices","=","[","3",",","1","]",",","help","=","\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/opts.py#L439-L553"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/opts.py","language":"python","identifier":"add_md_help_argument","parameters":"(parser)","argument_list":"","return_statement":"","docstring":"md help parser","docstring_summary":"md help parser","docstring_tokens":["md","help","parser"],"function":"def add_md_help_argument(parser):\n \"\"\" md help parser \"\"\"\n parser.add_argument('-md', action=MarkdownHelpAction,\n help='print Markdown-formatted help text and exit.')","function_tokens":["def","add_md_help_argument","(","parser",")",":","parser",".","add_argument","(","'-md'",",","action","=","MarkdownHelpAction",",","help","=","'print Markdown-formatted help text and 
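For '-length_penalty wu' and '-alpha', translate_opts above points at Wu et al., Section 7 (https://arxiv.org/pdf/1609.08144.pdf). A sketch of that penalty, illustrative rather than the repository's own penalty module:

def wu_length_penalty(length, alpha=0.0):
    # GNMT length penalty: beam scores are divided by this value, so
    # alpha > 0 favors longer hypotheses and alpha = 0 disables the penalty
    return ((5.0 + length) / 6.0) ** alpha

# score(Y) = log_prob(Y) / wu_length_penalty(len(Y), alpha)
print(wu_length_penalty(10, alpha=0.9))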
exit.'",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/opts.py#L556-L559"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"RNNDecoderBase.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None,sent_encoder=None,src_sents=None)","argument_list":"","return_statement":"return decoder_outputs, state, attns","docstring":"Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.","docstring_summary":"Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.","docstring_tokens":["Args",":","tgt","(","LongTensor",")",":","sequences","of","padded","tokens","[","tgt_len","x","batch","x","nfeats","]",".","memory_bank","(","FloatTensor",")",":","vectors","from","the","encoder","[","src_len","x","batch","x","hidden","]",".","state","(",":","obj",":","onmt",".","models",".","DecoderState",")",":","decoder","state","object","to","initialize","the","decoder","memory_lengths","(","LongTensor",")",":","the","padded","source","lengths","[","batch","]",".","Returns",":","(","FloatTensor",":","obj",":","onmt",".","Models",".","DecoderState","FloatTensor",")",":","*","decoder_outputs",":","output","from","the","decoder","(","after","attn",")","[","tgt_len","x","batch","x","hidden","]",".","*","decoder_state",":","final","hidden","state","from","the","decoder","*","attns",":","distribution","over","src","at","each","tgt","[","tgt_len","x","batch","x","src_len","]","."],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None,sent_encoder=None,src_sents=None):\n \"\"\"\n Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.\n \"\"\"\n # Check\n 
assert isinstance(state, RNNDecoderState)\n # tgt.size() returns tgt length and batch\n _, tgt_batch, _ = tgt.size()\n _, memory_batch, _ = memory_bank.size()\n aeq(tgt_batch, memory_batch)\n # END\n\n\n # 23333: TODO I changed this return value 'sent_decoder'\n\n\n # Run the forward pass of the RNN.\n decoder_final, decoder_outputs, attns = self._run_forward_pass(\n tgt, memory_bank, state, memory_lengths=memory_lengths,sent_encoder=sent_encoder,src_sents=src_sents)\n\n # Update the state with the result.\n final_output = decoder_outputs[-1]\n coverage = None\n if \"coverage\" in attns:\n coverage = attns[\"coverage\"][-1].unsqueeze(0)\n state.update_state(decoder_final, final_output.unsqueeze(0), coverage)\n\n # Concatenates sequence of tensors along a new dimension.\n # NOTE: v0.3 to 0.4: decoder_outputs \/ attns[*] may not be list\n # (in particular in case of SRU) it was not raising error in 0.3\n # since stack(Variable) was allowed.\n # In 0.4, SRU returns a tensor that shouldn't be stacke\n if type(decoder_outputs) == list:\n decoder_outputs = torch.stack(decoder_outputs)\n\n for k in attns:\n if type(attns[k]) == list:\n attns[k] = torch.stack(attns[k])\n\n return decoder_outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",",","sent_encoder","=","None",",","src_sents","=","None",")",":","# Check","assert","isinstance","(","state",",","RNNDecoderState",")","# tgt.size() returns tgt length and batch","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","_",",","memory_batch",",","_","=","memory_bank",".","size","(",")","aeq","(","tgt_batch",",","memory_batch",")","# END","# 23333: TODO I changed this return value 'sent_decoder'","# Run the forward pass of the RNN.","decoder_final",",","decoder_outputs",",","attns","=","self",".","_run_forward_pass","(","tgt",",","memory_bank",",","state",",","memory_lengths","=","memory_lengths",",","sent_encoder","=","sent_encoder",",","src_sents","=","src_sents",")","# Update the state with the result.","final_output","=","decoder_outputs","[","-","1","]","coverage","=","None","if","\"coverage\"","in","attns",":","coverage","=","attns","[","\"coverage\"","]","[","-","1","]",".","unsqueeze","(","0",")","state",".","update_state","(","decoder_final",",","final_output",".","unsqueeze","(","0",")",",","coverage",")","# Concatenates sequence of tensors along a new dimension.","# NOTE: v0.3 to 0.4: decoder_outputs \/ attns[*] may not be list","# (in particular in case of SRU) it was not raising error in 0.3","# since stack(Variable) was allowed.","# In 0.4, SRU returns a tensor that shouldn't be stacke","if","type","(","decoder_outputs",")","==","list",":","decoder_outputs","=","torch",".","stack","(","decoder_outputs",")","for","k","in","attns",":","if","type","(","attns","[","k","]",")","==","list",":","attns","[","k","]","=","torch",".","stack","(","attns","[","k","]",")","return","decoder_outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L115-L170"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"RNNDecoderBase.init_decoder_state","parameters":"(self, src, memory_bank, encoder_final,\n with_cache=False)","argument_list":"","return_statement":"","docstring":"Init decoder state with last state of 
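The version note inside RNNDecoderBase.forward (since torch 0.4, per-step outputs collected in Python lists must be stacked explicitly) can be reproduced standalone; the sizes below are illustrative:

import torch

tgt_len, batch, hidden = 5, 2, 8
decoder_outputs = [torch.randn(batch, hidden) for _ in range(tgt_len)]
if type(decoder_outputs) == list:            # same check forward() performs
    decoder_outputs = torch.stack(decoder_outputs)
print(decoder_outputs.shape)                 # [tgt_len x batch x hidden] = (5, 2, 8)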
the encoder","docstring_summary":"Init decoder state with last state of the encoder","docstring_tokens":["Init","decoder","state","with","last","state","of","the","encoder"],"function":"def init_decoder_state(self, src, memory_bank, encoder_final,\n with_cache=False):\n \"\"\" Init decoder state with last state of the encoder \"\"\"\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n hidden = torch.cat([hidden[0:hidden.size(0):2],\n hidden[1:hidden.size(0):2]], 2)\n return hidden\n\n if isinstance(encoder_final, tuple): # LSTM\n return RNNDecoderState(self.hidden_size,\n tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final]))\n else: # GRU\n return RNNDecoderState(self.hidden_size,\n _fix_enc_hidden(encoder_final))","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","encoder_final",",","with_cache","=","False",")",":","def","_fix_enc_hidden","(","hidden",")",":","# The encoder hidden is (layers*directions) x batch x dim.","# We need to convert it to layers x batch x (directions*dim).","if","self",".","bidirectional_encoder",":","hidden","=","torch",".","cat","(","[","hidden","[","0",":","hidden",".","size","(","0",")",":","2","]",",","hidden","[","1",":","hidden",".","size","(","0",")",":","2","]","]",",","2",")","return","hidden","if","isinstance","(","encoder_final",",","tuple",")",":","# LSTM","return","RNNDecoderState","(","self",".","hidden_size",",","tuple","(","[","_fix_enc_hidden","(","enc_hid",")","for","enc_hid","in","encoder_final","]",")",")","else",":","# GRU","return","RNNDecoderState","(","self",".","hidden_size",",","_fix_enc_hidden","(","encoder_final",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L172-L189"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"StdRNNDecoder._run_forward_pass","parameters":"(self, tgt, memory_bank, state, memory_lengths=None)","argument_list":"","return_statement":"return decoder_final, decoder_outputs, attns","docstring":"Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.","docstring_summary":"Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n 
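The _fix_enc_hidden helper inside init_decoder_state can be exercised on a dummy tensor to see the reshape it performs for a bidirectional encoder, from (layers*directions) x batch x dim to layers x batch x (directions*dim):

import torch

layers, batch, dim = 2, 3, 4
hidden = torch.randn(layers * 2, batch, dim)    # fwd/bwd states interleaved
fixed = torch.cat([hidden[0:hidden.size(0):2],  # even rows: forward states
                   hidden[1:hidden.size(0):2]], # odd rows: backward states
                  2)
print(fixed.shape)                              # torch.Size([2, 3, 8])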
Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.","docstring_tokens":["Private","helper","for","running","the","specific","RNN","forward","pass",".","Must","be","overriden","by","all","subclasses",".","Args",":","tgt","(","LongTensor",")",":","a","sequence","of","input","tokens","tensors","[","len","x","batch","x","nfeats","]",".","memory_bank","(","FloatTensor",")",":","output","(","tensor","sequence",")","from","the","encoder","RNN","of","size","(","src_len","x","batch","x","hidden_size",")",".","state","(","FloatTensor",")",":","hidden","state","from","the","encoder","RNN","for","initializing","the","decoder",".","memory_lengths","(","LongTensor",")",":","the","source","memory_bank","lengths",".","Returns",":","decoder_final","(","Tensor",")",":","final","hidden","state","from","the","decoder",".","decoder_outputs","(","[","FloatTensor","]",")",":","an","array","of","output","of","every","time","step","from","the","decoder",".","attns","(","dict","of","(","str","[","FloatTensor","]",")",":","a","dictionary","of","different","type","of","attention","Tensor","array","of","every","time","step","from","the","decoder","."],"function":"def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.\n \"\"\"\n assert not self._copy # TODO, no support yet.\n assert not self._coverage # TODO, no support yet.\n\n # Initialize local and return variables.\n attns = {}\n emb = self.embeddings(tgt)\n\n # Run the forward pass of the RNN.\n if isinstance(self.rnn, nn.GRU):\n rnn_output, decoder_final = self.rnn(emb, state.hidden[0])\n else:\n rnn_output, decoder_final = self.rnn(emb, state.hidden)\n\n # Check\n tgt_len, tgt_batch, _ = tgt.size()\n output_len, output_batch, _ = rnn_output.size()\n aeq(tgt_len, output_len)\n aeq(tgt_batch, output_batch)\n # END\n\n # Calculate the attention.\n decoder_outputs, p_attn = self.attn(\n rnn_output.transpose(0, 1).contiguous(),\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths\n )\n attns[\"std\"] = p_attn\n\n # Calculate the context gate.\n if self.context_gate is not None:\n decoder_outputs = self.context_gate(\n emb.view(-1, emb.size(2)),\n rnn_output.view(-1, rnn_output.size(2)),\n decoder_outputs.view(-1, decoder_outputs.size(2))\n )\n decoder_outputs = \\\n decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)\n\n decoder_outputs = self.dropout(decoder_outputs)\n\n\n\n return decoder_final, decoder_outputs, 
attns","function_tokens":["def","_run_forward_pass","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",")",":","assert","not","self",".","_copy","# TODO, no support yet.","assert","not","self",".","_coverage","# TODO, no support yet.","# Initialize local and return variables.","attns","=","{","}","emb","=","self",".","embeddings","(","tgt",")","# Run the forward pass of the RNN.","if","isinstance","(","self",".","rnn",",","nn",".","GRU",")",":","rnn_output",",","decoder_final","=","self",".","rnn","(","emb",",","state",".","hidden","[","0","]",")","else",":","rnn_output",",","decoder_final","=","self",".","rnn","(","emb",",","state",".","hidden",")","# Check","tgt_len",",","tgt_batch",",","_","=","tgt",".","size","(",")","output_len",",","output_batch",",","_","=","rnn_output",".","size","(",")","aeq","(","tgt_len",",","output_len",")","aeq","(","tgt_batch",",","output_batch",")","# END","# Calculate the attention.","decoder_outputs",",","p_attn","=","self",".","attn","(","rnn_output",".","transpose","(","0",",","1",")",".","contiguous","(",")",",","memory_bank",".","transpose","(","0",",","1",")",",","memory_lengths","=","memory_lengths",")","attns","[","\"std\"","]","=","p_attn","# Calculate the context gate.","if","self",".","context_gate","is","not","None",":","decoder_outputs","=","self",".","context_gate","(","emb",".","view","(","-","1",",","emb",".","size","(","2",")",")",",","rnn_output",".","view","(","-","1",",","rnn_output",".","size","(","2",")",")",",","decoder_outputs",".","view","(","-","1",",","decoder_outputs",".","size","(","2",")",")",")","decoder_outputs","=","decoder_outputs",".","view","(","tgt_len",",","tgt_batch",",","self",".","hidden_size",")","decoder_outputs","=","self",".","dropout","(","decoder_outputs",")","return","decoder_final",",","decoder_outputs",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L208-L270"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"StdRNNDecoder._input_size","parameters":"(self)","argument_list":"","return_statement":"return self.embeddings.embedding_size","docstring":"Private helper returning the number of expected features.","docstring_summary":"Private helper returning the number of expected features.","docstring_tokens":["Private","helper","returning","the","number","of","expected","features","."],"function":"def _input_size(self):\n \"\"\"\n Private helper returning the number of expected features.\n \"\"\"\n return self.embeddings.embedding_size","function_tokens":["def","_input_size","(","self",")",":","return","self",".","embeddings",".","embedding_size"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L277-L281"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._run_mmr_attention","parameters":"(self,sent_encoder,sent_decoder,src_sents,input_step)","argument_list":"","return_statement":"return mmr_among_words","docstring":"This is the attention version, where in the encoding part we use self-attention,\n the score is the max value of the attention weight\n # sent_encoder: size (sent_len=9,batch=2,dim=512)\n # sent_decoder: size 
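StdRNNDecoder._run_forward_pass transposes its tensors to batch-first before calling self.attn. The toy stand-in below uses plain dot-product scores instead of the repository's attention module, purely to show the shapes involved:

import torch

src_len, tgt_len, batch, hidden = 7, 5, 2, 16
rnn_output = torch.randn(tgt_len, batch, hidden)    # decoder states, time-first
memory_bank = torch.randn(src_len, batch, hidden)   # encoder states, time-first
query = rnn_output.transpose(0, 1).contiguous()     # [batch x tgt_len x hidden]
keys = memory_bank.transpose(0, 1)                  # [batch x src_len x hidden]
p_attn = torch.softmax(torch.bmm(query, keys.transpose(1, 2)), dim=-1)
print(p_attn.shape)                                 # [batch x tgt_len x src_len]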
(sent_len=1,batch=2,dim=512)\n # src_sents: size (batch=2,sent_len=9)\n function to calculate mmr\n :param sent_encoder:\n :param sent_decoder:\n :param src_sents:\n :return:","docstring_summary":"This is the attention version, where in the encoding part we use self-attention,\n the score is the max value of the attention weight\n # sent_encoder: size (sent_len=9,batch=2,dim=512)\n # sent_decoder: size (sent_len=1,batch=2,dim=512)\n # src_sents: size (batch=2,sent_len=9)\n function to calculate mmr\n :param sent_encoder:\n :param sent_decoder:\n :param src_sents:\n :return:","docstring_tokens":["This","is","the","attention","version","where","in","the","encoding","part","we","use","self","-","attention","the","score","is","the","max","value","of","the","attention","weight","#","sent_encoder",":","size","(","sent_len","=","9","batch","=","2","dim","=","512",")","#","sent_decoder",":","size","(","sent_len","=","1","batch","=","2","dim","=","512",")","#","src_sents",":","size","(","batch","=","2","sent_len","=","9",")","function","to","calculate","mmr",":","param","sent_encoder",":",":","param","sent_decoder",":",":","param","src_sents",":",":","return",":"],"function":"def _run_mmr_attention(self,sent_encoder,sent_decoder,src_sents,input_step):\n '''\n This is the attention version, where in the encoding part we use self-attention,\n the score is the max value of the attention weight\n # sent_encoder: size (sent_len=9,batch=2,dim=512)\n # sent_decoder: size (sent_len=1,batch=2,dim=512)\n # src_sents: size (batch=2,sent_len=9)\n function to calculate mmr\n :param sent_encoder:\n :param sent_decoder:\n :param src_sents:\n :return:\n '''\n\n pdist = nn.PairwiseDistance(p=2)\n sent_decoder=sent_decoder.permute(1,0,2) # (2,1,512)\n\n scores =[]\n # define sent matrix and current vector distance as the Euclidean distance\n for sent in sent_encoder:\n # distance: https:\/\/pytorch.org\/docs\/stable\/_modules\/torch\/nn\/modules\/distance.html\n sim2 = 0.5 * torch.sum(pdist(sent_encoder.permute(1,0,2),sent.unsqueeze(1)),1).unsqueeze(1) # this is also similarity func, can be another for-loop\n\n sim1 = torch.bmm(self.mmr_W(sent_decoder), sent.unsqueeze(2)).squeeze(2) # (2,1)\n\n scores.append(0.5*(sim1 - sim2))\n\n sent_ranking_att = torch.t(torch.cat(scores,1)) #(sent_len=9,batch_size)\n sent_ranking_att = torch.softmax(sent_ranking_att, dim=0).permute(1,0) #(sent_len=9,batch_size)\n # scores is a list of score (sent_len=9, tensor shape (batch_size, 1))\n mmr_among_words = [] # should be (batch=2,input_step=200)\n for batch_id in range(sent_ranking_att.size()[0]):\n # iterate each batch, create zero weight on the input steps\n # mmr= torch.zeros([input_step], dtype=torch.float32).cuda()\n\n tmp = []\n for id,position in enumerate(src_sents[batch_id]):\n\n for x in range(position):\n tmp.append(sent_ranking_att[batch_id][id])\n\n\n mmr = torch.stack(tmp) # make to 1-d\n\n\n if len(mmr) < input_step:\n tmp = torch.zeros(input_step - len(mmr)).float().cuda()\n # for x in range(input_step-len(mmr)):\n mmr = torch.cat((mmr, tmp), 0)\n else:\n mmr = mmr[:input_step]\n\n mmr_among_words.append(mmr.unsqueeze(0))\n\n mmr_among_words = torch.cat(mmr_among_words,0)\n\n # shape: (batch=2, input_step=200)\n\n return mmr_among_words","function_tokens":["def","_run_mmr_attention","(","self",",","sent_encoder",",","sent_decoder",",","src_sents",",","input_step",")",":","pdist","=","nn",".","PairwiseDistance","(","p","=","2",")","sent_decoder","=","sent_decoder",".","permute","(","1",",","0",",","2",")","# 
(2,1,512)","scores","=","[","]","# define sent matrix and current vector distance as the Euclidean distance","for","sent","in","sent_encoder",":","# distance: https:\/\/pytorch.org\/docs\/stable\/_modules\/torch\/nn\/modules\/distance.html","sim2","=","0.5","*","torch",".","sum","(","pdist","(","sent_encoder",".","permute","(","1",",","0",",","2",")",",","sent",".","unsqueeze","(","1",")",")",",","1",")",".","unsqueeze","(","1",")","# this is also similarity func, can be another for-loop","sim1","=","torch",".","bmm","(","self",".","mmr_W","(","sent_decoder",")",",","sent",".","unsqueeze","(","2",")",")",".","squeeze","(","2",")","# (2,1)","scores",".","append","(","0.5","*","(","sim1","-","sim2",")",")","sent_ranking_att","=","torch",".","t","(","torch",".","cat","(","scores",",","1",")",")","#(sent_len=9,batch_size)","sent_ranking_att","=","torch",".","softmax","(","sent_ranking_att",",","dim","=","0",")",".","permute","(","1",",","0",")","#(sent_len=9,batch_size)","# scores is a list of score (sent_len=9, tensor shape (batch_size, 1))","mmr_among_words","=","[","]","# should be (batch=2,input_step=200)","for","batch_id","in","range","(","sent_ranking_att",".","size","(",")","[","0","]",")",":","# iterate each batch, create zero weight on the input steps","# mmr= torch.zeros([input_step], dtype=torch.float32).cuda()","tmp","=","[","]","for","id",",","position","in","enumerate","(","src_sents","[","batch_id","]",")",":","for","x","in","range","(","position",")",":","tmp",".","append","(","sent_ranking_att","[","batch_id","]","[","id","]",")","mmr","=","torch",".","stack","(","tmp",")","# make to 1-d","if","len","(","mmr",")","<","input_step",":","tmp","=","torch",".","zeros","(","input_step","-","len","(","mmr",")",")",".","float","(",")",".","cuda","(",")","# for x in range(input_step-len(mmr)):","mmr","=","torch",".","cat","(","(","mmr",",","tmp",")",",","0",")","else",":","mmr","=","mmr","[",":","input_step","]","mmr_among_words",".","append","(","mmr",".","unsqueeze","(","0",")",")","mmr_among_words","=","torch",".","cat","(","mmr_among_words",",","0",")","# shape: (batch=2, input_step=200)","return","mmr_among_words"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L324-L382"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._run_forward_pass","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,sent_encoder=None,src_sents=None)","argument_list":"","return_statement":"return hidden, decoder_outputs, attns","docstring":"See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n TODO: added a new param: sent_encoder, from model.py, this is the sentence matrix; add attns[\"mmr\"] = [].","docstring_summary":"See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n TODO: added a new param: sent_encoder, from model.py, this is the sentence matrix; add attns[\"mmr\"] = [].","docstring_tokens":["See","StdRNNDecoder",".","_run_forward_pass","()","for","description","of","arguments","and","return","values",".","TODO",":","added","a","new","param",":","sent_encoder","from","model",".","py","this","is","the","sentence","matrix",";","add","attns","[","mmr","]","=","[]","."],"function":"def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None,sent_encoder=None,src_sents=None):\n 
\"\"\"\n See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n TODO: added a new param: sent_encoder, from model.py, this is the sentence matrix; add attns[\"mmr\"] = [].\n\n \"\"\"\n\n\n # Additional args check.\n input_feed = state.input_feed.squeeze(0)\n #print(\"input feed size: {}\\n\".format(input_feed.size()))\n input_feed_batch, _ = input_feed.size()\n _, tgt_batch, _ = tgt.size()\n aeq(tgt_batch, input_feed_batch)\n # END Additional args check.\n\n # Initialize local and return variables.\n decoder_outputs = []\n attns = {\"std\": []}\n attns[\"mmr\"] = []\n if self._copy:\n attns[\"copy\"] = []\n if self._coverage:\n attns[\"coverage\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n hidden = state.hidden\n coverage = state.coverage.squeeze(0) \\\n if state.coverage is not None else None\n\n # Input feed concatenates hidden state with\n # input at every time step.\n\n #print(\"emb size: {}\\n\".format(emb.size()));exit()\n for _, emb_t in enumerate(emb.split(1)):\n # for each output time step in the loop\n\n emb_t = emb_t.squeeze(0)\n decoder_input = torch.cat([emb_t, input_feed], 1)\n\n # TODO: the following is where we get attention!\n rnn_output, hidden = self.rnn(decoder_input, hidden)\n decoder_output, p_attn = self.attn(\n rnn_output,\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths)\n # p_attn: size (batch=2,input_step=200)\n\n if self.context_gate is not None:\n # TODO: context gate should be employed\n # instead of second RNN transform.\n decoder_output = self.context_gate(\n decoder_input, rnn_output, decoder_output\n )\n decoder_output = self.dropout(decoder_output)\n input_feed = decoder_output\n\n decoder_outputs += [decoder_output]\n attns[\"std\"] += [p_attn]\n\n\n\n # Update the coverage attention.\n if self._coverage:\n coverage = coverage + p_attn \\\n if coverage is not None else p_attn\n attns[\"coverage\"] += [coverage]\n\n # Run the forward pass of the copy attention layer.\n #\n\n if self._copy and not self._reuse_copy_attn:\n\n _, copy_attn = self.copy_attn(decoder_output, memory_bank.transpose(0, 1))\n attns[\"copy\"] += [copy_attn]\n elif self._copy:\n attns[\"copy\"] = attns[\"std\"] # attns[\"copy\"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]\n\n\n\n # 2333: TODO : the sentence representation for decoder\n sent_decoder = decoder_outputs[-1].unsqueeze(0) # shape: (1, batch_size=2,dim=512)\n\n # Return result.\n # 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)\n\n # 2333: TODO: compute mmr attention here:\n print ('Now..')\n mmr_among_words = self._run_mmr_attention(sent_encoder, sent_decoder, src_sents,attns[\"std\"][0].size()[-1])\n\n # 2333: TODO: bring mmr to attention...\n\n for output_step in attns[\"std\"]:\n attention_weight = output_step\n # pairwise multiplication\n attention_weight = torch.mul(mmr_among_words,attention_weight)\n attns[\"mmr\"].append(attention_weight.cuda())\n # pdb.set_trace()\n\n attns[\"std\"] = attns[\"mmr\"]\n\n # decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)\n return hidden, decoder_outputs, attns","function_tokens":["def","_run_forward_pass","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","sent_encoder","=","None",",","src_sents","=","None",")",":","# Additional args 
check.","input_feed","=","state",".","input_feed",".","squeeze","(","0",")","#print(\"input feed size: {}\\n\".format(input_feed.size()))","input_feed_batch",",","_","=","input_feed",".","size","(",")","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","aeq","(","tgt_batch",",","input_feed_batch",")","# END Additional args check.","# Initialize local and return variables.","decoder_outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","attns","[","\"mmr\"","]","=","[","]","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","if","self",".","_coverage",":","attns","[","\"coverage\"","]","=","[","]","emb","=","self",".","embeddings","(","tgt",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","hidden","=","state",".","hidden","coverage","=","state",".","coverage",".","squeeze","(","0",")","if","state",".","coverage","is","not","None","else","None","# Input feed concatenates hidden state with","# input at every time step.","#print(\"emb size: {}\\n\".format(emb.size()));exit()","for","_",",","emb_t","in","enumerate","(","emb",".","split","(","1",")",")",":","# for each output time step in the loop","emb_t","=","emb_t",".","squeeze","(","0",")","decoder_input","=","torch",".","cat","(","[","emb_t",",","input_feed","]",",","1",")","# TODO: the following is where we get attention!","rnn_output",",","hidden","=","self",".","rnn","(","decoder_input",",","hidden",")","decoder_output",",","p_attn","=","self",".","attn","(","rnn_output",",","memory_bank",".","transpose","(","0",",","1",")",",","memory_lengths","=","memory_lengths",")","# p_attn: size (batch=2,input_step=200)","if","self",".","context_gate","is","not","None",":","# TODO: context gate should be employed","# instead of second RNN transform.","decoder_output","=","self",".","context_gate","(","decoder_input",",","rnn_output",",","decoder_output",")","decoder_output","=","self",".","dropout","(","decoder_output",")","input_feed","=","decoder_output","decoder_outputs","+=","[","decoder_output","]","attns","[","\"std\"","]","+=","[","p_attn","]","# Update the coverage attention.","if","self",".","_coverage",":","coverage","=","coverage","+","p_attn","if","coverage","is","not","None","else","p_attn","attns","[","\"coverage\"","]","+=","[","coverage","]","# Run the forward pass of the copy attention layer.","#","if","self",".","_copy","and","not","self",".","_reuse_copy_attn",":","_",",","copy_attn","=","self",".","copy_attn","(","decoder_output",",","memory_bank",".","transpose","(","0",",","1",")",")","attns","[","\"copy\"","]","+=","[","copy_attn","]","elif","self",".","_copy",":","attns","[","\"copy\"","]","=","attns","[","\"std\"","]","# attns[\"copy\"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]","# 2333: TODO : the sentence representation for decoder","sent_decoder","=","decoder_outputs","[","-","1","]",".","unsqueeze","(","0",")","# shape: (1, batch_size=2,dim=512)","# Return result.","# 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)","# 2333: TODO: compute mmr attention here:","print","(","'Now..'",")","mmr_among_words","=","self",".","_run_mmr_attention","(","sent_encoder",",","sent_decoder",",","src_sents",",","attns","[","\"std\"","]","[","0","]",".","size","(",")","[","-","1","]",")","# 2333: TODO: bring mmr to attention...","for","output_step","in","attns","[","\"std\"","]",":","attention_weight","=","output_step","# pairwise 
multiplication","attention_weight","=","torch",".","mul","(","mmr_among_words",",","attention_weight",")","attns","[","\"mmr\"","]",".","append","(","attention_weight",".","cuda","(",")",")","# pdb.set_trace()","attns","[","\"std\"","]","=","attns","[","\"mmr\"","]","# decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)","return","hidden",",","decoder_outputs",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L385-L490"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._input_size","parameters":"(self)","argument_list":"","return_statement":"return self.embeddings.embedding_size + self.hidden_size","docstring":"Using input feed by concatenating input with attention vectors.","docstring_summary":"Using input feed by concatenating input with attention vectors.","docstring_tokens":["Using","input","feed","by","concatenating","input","with","attention","vectors","."],"function":"def _input_size(self):\n \"\"\"\n Using input feed by concatenating input with attention vectors.\n \"\"\"\n return self.embeddings.embedding_size + self.hidden_size","function_tokens":["def","_input_size","(","self",")",":","return","self",".","embeddings",".","embedding_size","+","self",".","hidden_size"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L504-L508"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"DecoderState.detach","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Need to document this","docstring_summary":"Need to document this","docstring_tokens":["Need","to","document","this"],"function":"def detach(self):\n \"\"\" Need to document this \"\"\"\n self.hidden = tuple([_.detach() for _ in self.hidden])\n self.input_feed = self.input_feed.detach()","function_tokens":["def","detach","(","self",")",":","self",".","hidden","=","tuple","(","[","_",".","detach","(",")","for","_","in","self",".","hidden","]",")","self",".","input_feed","=","self",".","input_feed",".","detach","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L519-L522"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"DecoderState.beam_update","parameters":"(self, idx, positions, beam_size)","argument_list":"","return_statement":"","docstring":"Need to document this","docstring_summary":"Need to document this","docstring_tokens":["Need","to","document","this"],"function":"def beam_update(self, idx, positions, beam_size):\n \"\"\" Need to document this \"\"\"\n for e in self._all:\n sizes = e.size()\n br = sizes[1]\n if len(sizes) == 3:\n sent_states = e.view(sizes[0], beam_size, br \/\/ beam_size,\n sizes[2])[:, :, idx]\n else:\n sent_states = e.view(sizes[0], beam_size,\n br \/\/ beam_size,\n sizes[2],\n sizes[3])[:, :, idx]\n\n sent_states.data.copy_(\n sent_states.data.index_select(1, 
positions))","function_tokens":["def","beam_update","(","self",",","idx",",","positions",",","beam_size",")",":","for","e","in","self",".","_all",":","sizes","=","e",".","size","(",")","br","=","sizes","[","1","]","if","len","(","sizes",")","==","3",":","sent_states","=","e",".","view","(","sizes","[","0","]",",","beam_size",",","br","\/\/","beam_size",",","sizes","[","2","]",")","[",":",",",":",",","idx","]","else",":","sent_states","=","e",".","view","(","sizes","[","0","]",",","beam_size",",","br","\/\/","beam_size",",","sizes","[","2","]",",","sizes","[","3","]",")","[",":",",",":",",","idx","]","sent_states",".","data",".","copy_","(","sent_states",".","data",".","index_select","(","1",",","positions",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L524-L539"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"RNNDecoderState.__init__","parameters":"(self, hidden_size, rnnstate)","argument_list":"","return_statement":"","docstring":"Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).","docstring_summary":"Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).","docstring_tokens":["Args",":","hidden_size","(","int",")",":","the","size","of","hidden","layer","of","the","decoder",".","rnnstate",":","final","hidden","state","from","the","encoder",".","transformed","to","shape",":","layers","x","batch","x","(","directions","*","dim",")","."],"function":"def __init__(self, hidden_size, rnnstate):\n \"\"\"\n Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).\n \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.coverage = None\n\n # Init the input feed.\n batch_size = self.hidden[0].size(1)\n h_size = (batch_size, hidden_size)\n self.input_feed = self.hidden[0].data.new(*h_size).zero_() \\\n .unsqueeze(0)","function_tokens":["def","__init__","(","self",",","hidden_size",",","rnnstate",")",":","if","not","isinstance","(","rnnstate",",","tuple",")",":","self",".","hidden","=","(","rnnstate",",",")","else",":","self",".","hidden","=","rnnstate","self",".","coverage","=","None","# Init the input feed.","batch_size","=","self",".","hidden","[","0","]",".","size","(","1",")","h_size","=","(","batch_size",",","hidden_size",")","self",".","input_feed","=","self",".","hidden","[","0","]",".","data",".","new","(","*","h_size",")",".","zero_","(",")",".","unsqueeze","(","0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L548-L565"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"RNNDecoderState.update_state","parameters":"(self, rnnstate, input_feed, coverage)","argument_list":"","return_statement":"","docstring":"Update decoder state","docstring_summary":"Update decoder state","docstring_tokens":["Update","decoder","state"],"function":"def 
update_state(self, rnnstate, input_feed, coverage):\n \"\"\" Update decoder state \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.input_feed = input_feed\n self.coverage = coverage","function_tokens":["def","update_state","(","self",",","rnnstate",",","input_feed",",","coverage",")",":","if","not","isinstance","(","rnnstate",",","tuple",")",":","self",".","hidden","=","(","rnnstate",",",")","else",":","self",".","hidden","=","rnnstate","self",".","input_feed","=","input_feed","self",".","coverage","=","coverage"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L571-L578"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/decoder.py","language":"python","identifier":"RNNDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. \"\"\"\n vars = [e.data.repeat(1, beam_size, 1)\n for e in self._all]\n self.hidden = tuple(vars[:-1])\n self.input_feed = vars[-1]","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","vars","=","[","e",".","data",".","repeat","(","1",",","beam_size",",","1",")","for","e","in","self",".","_all","]","self",".","hidden","=","tuple","(","vars","[",":","-","1","]",")","self",".","input_feed","=","vars","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/decoder.py#L580-L585"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/ensemble.py","language":"python","identifier":"load_test_model","parameters":"(opt, dummy_opt)","argument_list":"","return_statement":"return shared_fields, ensemble_model, shared_model_opt","docstring":"Read in multiple models for ensemble","docstring_summary":"Read in multiple models for ensemble","docstring_tokens":["Read","in","multiple","models","for","ensemble"],"function":"def load_test_model(opt, dummy_opt):\n \"\"\" Read in multiple models for ensemble \"\"\"\n shared_fields = None\n shared_model_opt = None\n models = []\n for model_path in opt.models:\n fields, model, model_opt = \\\n onmt.model_builder.load_test_model(opt,\n dummy_opt,\n model_path=model_path)\n import pdb;pdb.set_trace()\n if shared_fields is None:\n shared_fields = fields\n else:\n for key, field in fields.items():\n if field is not None and 'vocab' in field.__dict__:\n assert field.vocab.stoi == shared_fields[key].vocab.stoi, \\\n 'Ensemble models must use the same preprocessed data'\n models.append(model)\n if shared_model_opt is None:\n shared_model_opt = model_opt\n ensemble_model = EnsembleModel(models)\n return shared_fields, ensemble_model, 
shared_model_opt","function_tokens":["def","load_test_model","(","opt",",","dummy_opt",")",":","shared_fields","=","None","shared_model_opt","=","None","models","=","[","]","for","model_path","in","opt",".","models",":","fields",",","model",",","model_opt","=","onmt",".","model_builder",".","load_test_model","(","opt",",","dummy_opt",",","model_path","=","model_path",")","import","pdb","pdb",".","set_trace","(",")","if","shared_fields","is","None",":","shared_fields","=","fields","else",":","for","key",",","field","in","fields",".","items","(",")",":","if","field","is","not","None","and","'vocab'","in","field",".","__dict__",":","assert","field",".","vocab",".","stoi","==","shared_fields","[","key","]",".","vocab",".","stoi",",","'Ensemble models must use the same preprocessed data'","models",".","append","(","model",")","if","shared_model_opt","is","None",":","shared_model_opt","=","model_opt","ensemble_model","=","EnsembleModel","(","models",")","return","shared_fields",",","ensemble_model",",","shared_model_opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/ensemble.py#L135-L157"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/ensemble.py","language":"python","identifier":"EnsembleDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n for model_state in self.model_decoder_states:\n model_state.repeat_beam_size_times(beam_size)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","for","model_state","in","self",".","model_decoder_states",":","model_state",".","repeat_beam_size_times","(","beam_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/ensemble.py#L27-L30"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/ensemble.py","language":"python","identifier":"EnsembleDecoderOutput.squeeze","parameters":"(self, dim=None)","argument_list":"","return_statement":"return EnsembleDecoderOutput([\n x.squeeze(dim) for x in self.model_outputs])","docstring":"Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`","docstring_summary":"Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`","docstring_tokens":["Delegate","squeeze","to","avoid","modifying",":","obj",":","Translator",".","translate_batch","()"],"function":"def squeeze(self, dim=None):\n \"\"\"\n Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`\n \"\"\"\n return EnsembleDecoderOutput([\n x.squeeze(dim) for x in self.model_outputs])","function_tokens":["def","squeeze","(","self",",","dim","=","None",")",":","return","EnsembleDecoderOutput","(","[","x",".","squeeze","(","dim",")","for","x","in","self",".","model_outputs","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/ensemble.py#L41-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/ensemble.py","language":"python","identifier":"EnsembleDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None)","argument_list":"","return_statement":"return (EnsembleDecoderOutput(outputs),\n EnsembleDecoderState(states),\n mean_attns)","docstring":"See :obj:`RNNDecoderBase.forward()`","docstring_summary":"See :obj:`RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None):\n \"\"\" See :obj:`RNNDecoderBase.forward()` \"\"\"\n # Memory_lengths is a single tensor shared between all models.\n # This assumption will not hold if Translator is modified\n # to calculate memory_lengths as something other than the length\n # of the input.\n outputs, states, attns = zip(*[\n model_decoder.forward(\n tgt, memory_bank[i], state[i], memory_lengths, step=step)\n for (i, model_decoder)\n in enumerate(self.model_decoders)])\n mean_attns = self.combine_attns(attns)\n return (EnsembleDecoderOutput(outputs),\n EnsembleDecoderState(states),\n mean_attns)","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# Memory_lengths is a single tensor shared between all models.","# This assumption will not hold if Translator is modified","# to calculate memory_lengths as something other than the length","# of the 
input.","outputs",",","states",",","attns","=","zip","(","*","[","model_decoder",".","forward","(","tgt",",","memory_bank","[","i","]",",","state","[","i","]",",","memory_lengths",",","step","=","step",")","for","(","i",",","model_decoder",")","in","enumerate","(","self",".","model_decoders",")","]",")","mean_attns","=","self",".","combine_attns","(","attns",")","return","(","EnsembleDecoderOutput","(","outputs",")",",","EnsembleDecoderState","(","states",")",",","mean_attns",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/ensemble.py#L72-L87"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/ensemble.py","language":"python","identifier":"EnsembleDecoder.init_decoder_state","parameters":"(self, src, memory_bank, enc_hidden)","argument_list":"","return_statement":"return EnsembleDecoderState(\n [model_decoder.init_decoder_state(src,\n memory_bank[i],\n enc_hidden[i])\n for (i, model_decoder) in enumerate(self.model_decoders)])","docstring":"See :obj:`RNNDecoderBase.init_decoder_state()`","docstring_summary":"See :obj:`RNNDecoderBase.init_decoder_state()`","docstring_tokens":["See",":","obj",":","RNNDecoderBase",".","init_decoder_state","()"],"function":"def init_decoder_state(self, src, memory_bank, enc_hidden):\n \"\"\" See :obj:`RNNDecoderBase.init_decoder_state()` \"\"\"\n return EnsembleDecoderState(\n [model_decoder.init_decoder_state(src,\n memory_bank[i],\n enc_hidden[i])\n for (i, model_decoder) in enumerate(self.model_decoders)])","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","enc_hidden",")",":","return","EnsembleDecoderState","(","[","model_decoder",".","init_decoder_state","(","src",",","memory_bank","[","i","]",",","enc_hidden","[","i","]",")","for","(","i",",","model_decoder",")","in","enumerate","(","self",".","model_decoders",")","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/ensemble.py#L95-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/ensemble.py","language":"python","identifier":"EnsembleGenerator.forward","parameters":"(self, hidden)","argument_list":"","return_statement":"return torch.stack(distributions).mean(0)","docstring":"Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.","docstring_summary":"Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.","docstring_tokens":["Compute","a","distribution","over","the","target","dictionary","by","averaging","distributions","from","models","in","the","ensemble",".","All","models","in","the","ensemble","must","share","a","target","vocabulary","."],"function":"def forward(self, hidden):\n \"\"\"\n Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.\n \"\"\"\n distributions = [model_generator.forward(hidden[i])\n for (i, model_generator)\n in enumerate(self.model_generators)]\n return 
torch.stack(distributions).mean(0)","function_tokens":["def","forward","(","self",",","hidden",")",":","distributions","=","[","model_generator",".","forward","(","hidden","[","i","]",")","for","(","i",",","model_generator",")","in","enumerate","(","self",".","model_generators",")","]","return","torch",".","stack","(","distributions",")",".","mean","(","0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/ensemble.py#L113-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/audio_encoder.py","language":"python","identifier":"AudioEncoder.load_pretrained_vectors","parameters":"(self, opt)","argument_list":"","return_statement":"","docstring":"Pass in needed options only when modify function definition.","docstring_summary":"Pass in needed options only when modify function definition.","docstring_tokens":["Pass","in","needed","options","only","when","modify","function","definition","."],"function":"def load_pretrained_vectors(self, opt):\n \"\"\" Pass in needed options only when modify function definition.\"\"\"\n pass","function_tokens":["def","load_pretrained_vectors","(","self",",","opt",")",":","pass"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/audio_encoder.py#L45-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/audio_encoder.py","language":"python","identifier":"AudioEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return hidden, output","docstring":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_summary":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","encoders",".","encoder",".","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`\"\n # (batch_size, 1, nfft, t)\n # layer 1\n src = self.batch_norm1(self.layer1(src[:, :, :, :]))\n\n # (batch_size, 32, nfft\/2, t\/2)\n src = F.hardtanh(src, 0, 20, inplace=True)\n\n # (batch_size, 32, nfft\/2\/2, t\/2)\n # layer 2\n src = self.batch_norm2(self.layer2(src))\n\n # (batch_size, 32, nfft\/2\/2, t\/2)\n src = F.hardtanh(src, 0, 20, inplace=True)\n\n batch_size = src.size(0)\n length = src.size(3)\n src = src.view(batch_size, -1, length)\n src = src.transpose(0, 2).transpose(1, 2)\n\n output, hidden = self.rnn(src)\n\n return hidden, output","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","# (batch_size, 1, nfft, t)","# layer 1","src","=","self",".","batch_norm1","(","self",".","layer1","(","src","[",":",",",":",",",":",",",":","]",")",")","# (batch_size, 32, nfft\/2, t\/2)","src","=","F",".","hardtanh","(","src",",","0",",","20",",","inplace","=","True",")","# (batch_size, 32, nfft\/2\/2, t\/2)","# layer 2","src","=","self",".","batch_norm2","(","self",".","layer2","(","src",")",")","# (batch_size, 32, nfft\/2\/2, 
t\/2)","src","=","F",".","hardtanh","(","src",",","0",",","20",",","inplace","=","True",")","batch_size","=","src",".","size","(","0",")","length","=","src",".","size","(","3",")","src","=","src",".","view","(","batch_size",",","-","1",",","length",")","src","=","src",".","transpose","(","0",",","2",")",".","transpose","(","1",",","2",")","output",",","hidden","=","self",".","rnn","(","src",")","return","hidden",",","output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/audio_encoder.py#L49-L72"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/mean_encoder.py","language":"python","identifier":"MeanEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return encoder_final, memory_bank","docstring":"See :obj:`EncoderBase.forward()`","docstring_summary":"See :obj:`EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n _, batch, emb_dim = emb.size()\n mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)\n memory_bank = emb\n encoder_final = (mean, mean)\n return encoder_final, memory_bank","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","_",",","batch",",","emb_dim","=","emb",".","size","(",")","mean","=","emb",".","mean","(","0",")",".","expand","(","self",".","num_layers",",","batch",",","emb_dim",")","memory_bank","=","emb","encoder_final","=","(","mean",",","mean",")","return","encoder_final",",","memory_bank"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/mean_encoder.py#L20-L29"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder_hi.py","language":"python","identifier":"RNNEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return encoder_final, memory_bank","docstring":"See :obj:`EncoderBase.forward()`","docstring_summary":"See :obj:`EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n\n s_len, batch, emb_dim = emb.size() # (185 16 128), s_len is changeable.\n\n packed_emb = emb\n if lengths is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Tensor.\n lengths = lengths.view(-1).tolist()\n packed_emb = pack(emb, lengths)\n\n memory_bank, encoder_final = self.rnn(packed_emb) # output, (hidden, cell), unpack using pad_packed_sequence()\n # memory_bank is the output\n # self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;\n\n # print('Hidden..', encoder_final[0].size(), encoder_final[1].size()) # both torch.Size([2, 16, 256]), 2 directions.\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank = unpack(memory_bank)[0]\n\n if self.use_bridge:\n encoder_final = self._bridge(encoder_final)\n\n print('Out..', memory_bank.size())\n #Out.. 
torch.Size([16, 512]) torch.Size([16, 512]) : two dir?\n\n return encoder_final, memory_bank","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","s_len",",","batch",",","emb_dim","=","emb",".","size","(",")","# (185 16 128), s_len is changeable.","packed_emb","=","emb","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","# Lengths data is wrapped inside a Tensor.","lengths","=","lengths",".","view","(","-","1",")",".","tolist","(",")","packed_emb","=","pack","(","emb",",","lengths",")","memory_bank",",","encoder_final","=","self",".","rnn","(","packed_emb",")","# output, (hidden, cell), unpack using pad_packed_sequence()","# memory_bank is the output","# self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;","# print('Hidden..', encoder_final[0].size(), encoder_final[1].size()) # both torch.Size([2, 16, 256]), 2 directions.","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","memory_bank","=","unpack","(","memory_bank",")","[","0","]","if","self",".","use_bridge",":","encoder_final","=","self",".","_bridge","(","encoder_final",")","print","(","'Out..'",",","memory_bank",".","size","(",")",")","#Out.. torch.Size([16, 512]) torch.Size([16, 512]) : two dir?","return","encoder_final",",","memory_bank"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder_hi.py#L53-L82"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder_hi.py","language":"python","identifier":"RNNEncoder._bridge","parameters":"(self, hidden)","argument_list":"","return_statement":"return outs","docstring":"Forward hidden state through bridge","docstring_summary":"Forward hidden state through bridge","docstring_tokens":["Forward","hidden","state","through","bridge"],"function":"def _bridge(self, hidden):\n \"\"\"\n Forward hidden state through bridge\n \"\"\"\n def bottle_hidden(linear, states):\n \"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"\n size = states.size()\n result = linear(states.view(-1, self.total_hidden_dim))\n return F.relu(result).view(size)\n\n if isinstance(hidden, tuple): # LSTM\n outs = tuple([bottle_hidden(layer, hidden[ix])\n for ix, layer in enumerate(self.bridge)])\n else:\n outs = bottle_hidden(self.bridge[0], hidden)\n return outs","function_tokens":["def","_bridge","(","self",",","hidden",")",":","def","bottle_hidden","(","linear",",","states",")",":","\"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"","size","=","states",".","size","(",")","result","=","linear","(","states",".","view","(","-","1",",","self",".","total_hidden_dim",")",")","return","F",".","relu","(","result",")",".","view","(","size",")","if","isinstance","(","hidden",",","tuple",")",":","# LSTM","outs","=","tuple","(","[","bottle_hidden","(","layer",",","hidden","[","ix","]",")","for","ix",",","layer","in","enumerate","(","self",".","bridge",")","]",")","else",":","outs","=","bottle_hidden","(","self",".","bridge","[","0","]",",","hidden",")","return","outs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder_hi.py#L99-L116"} 
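The RNNEncoder.forward record above wraps its LSTM in torch.nn.utils.rnn pack/unpack so padded positions are excluded from the recurrence. A minimal standalone sketch of that pattern, assuming toy shapes (true lengths 5 and 3, embedding dim 128, hidden 256, matching the LSTM(128, 256, bidirectional=True) noted in the record's comments); this is an illustration, not code from the repository:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack

emb_dim, hidden = 128, 256
rnn = nn.LSTM(emb_dim, hidden, bidirectional=True)  # mirrors the encoder's LSTM(128, 256, bidirectional=True)

# Two already-embedded sequences of true lengths 5 and 3, padded to a common
# length: shape (s_len, batch, emb_dim), the layout used throughout the records.
lengths = [5, 3]                  # sorted descending, as pack() expects by default
emb = torch.randn(5, 2, emb_dim)  # hypothetical toy input, not repo data

packed_emb = pack(emb, lengths)               # padding is dropped from the computation
memory_bank, encoder_final = rnn(packed_emb)  # packed output, (h_n, c_n)
memory_bank, _ = unpack(memory_bank)          # back to a padded tensor (s_len, batch, 2*hidden)

assert memory_bank.size() == (5, 2, 2 * hidden)  # bidirectional doubles the feature dim

The unpacked memory_bank plays the role of the attention memory bank consumed by the decoder records in this file, and encoder_final is the state used to initialize the decoder.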
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/transformer.py","language":"python","identifier":"TransformerEncoderLayer.forward","parameters":"(self, inputs, mask)","argument_list":"","return_statement":"return self.feed_forward(out)","docstring":"Transformer Encoder Layer definition.\n\n Args:\n inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`\n mask (`LongTensor`): `[batch_size x src_len x src_len]`\n\n Returns:\n (`FloatTensor`):\n\n * outputs `[batch_size x src_len x model_dim]`","docstring_summary":"Transformer Encoder Layer definition.","docstring_tokens":["Transformer","Encoder","Layer","definition","."],"function":"def forward(self, inputs, mask):\n \"\"\"\n Transformer Encoder Layer definition.\n\n Args:\n inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`\n mask (`LongTensor`): `[batch_size x src_len x src_len]`\n\n Returns:\n (`FloatTensor`):\n\n * outputs `[batch_size x src_len x model_dim]`\n \"\"\"\n input_norm = self.layer_norm(inputs)\n context, _ = self.self_attn(input_norm, input_norm, input_norm,\n mask=mask)\n out = self.dropout(context) + inputs\n return self.feed_forward(out)","function_tokens":["def","forward","(","self",",","inputs",",","mask",")",":","input_norm","=","self",".","layer_norm","(","inputs",")","context",",","_","=","self",".","self_attn","(","input_norm",",","input_norm",",","input_norm",",","mask","=","mask",")","out","=","self",".","dropout","(","context",")","+","inputs","return","self",".","feed_forward","(","out",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/transformer.py#L35-L52"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/transformer.py","language":"python","identifier":"TransformerEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return emb, out.transpose(0, 1).contiguous()","docstring":"See :obj:`EncoderBase.forward()`","docstring_summary":"See :obj:`EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"\"\" See :obj:`EncoderBase.forward()`\"\"\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n\n out = emb.transpose(0, 1).contiguous()\n words = src[:, :, 0].transpose(0, 1)\n w_batch, w_len = words.size()\n padding_idx = self.embeddings.word_padding_idx\n mask = words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(w_batch, w_len, w_len)\n # Run the forward pass of every layer of the tranformer.\n for i in range(self.num_layers):\n out = self.transformer[i](out, mask)\n out = self.layer_norm(out)\n\n return emb, out.transpose(0, 1).contiguous()","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","out","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","words","=","src","[",":",",",":",",","0","]",".","transpose","(","0",",","1",")","w_batch",",","w_len","=","words",".","size","(",")","padding_idx","=","self",".","embeddings",".","word_padding_idx","mask","=","words",".","data",".","eq","(","padding_idx",")",".","unsqueeze","(","1",")",".","expand","(","w_batch",",","w_len",",","w_len",")","# Run the forward pass of every layer of the 
tranformer.","for","i","in","range","(","self",".","num_layers",")",":","out","=","self",".","transformer","[","i","]","(","out",",","mask",")","out","=","self",".","layer_norm","(","out",")","return","emb",",","out",".","transpose","(","0",",","1",")",".","contiguous","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/transformer.py#L98-L115"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None, step=None)","argument_list":"","return_statement":"return outputs, state, attns","docstring":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):\n \"\"\" See :obj:`onmt.modules.RNNDecoderBase.forward()`\"\"\"\n # NOTE: memory_lengths is only here for compatibility reasons\n # with onmt.modules.RNNDecoderBase.forward()\n # CHECKS\n assert isinstance(state, CNNDecoderState)\n _, tgt_batch, _ = tgt.size()\n _, contxt_batch, _ = memory_bank.size()\n aeq(tgt_batch, contxt_batch)\n # END CHECKS\n\n if state.previous_input is not None:\n tgt = torch.cat([state.previous_input, tgt], 0)\n\n # Initialize return variables.\n outputs = []\n attns = {\"std\": []}\n assert not self._copy, \"Copy mechanism not yet tested in conv2conv\"\n if self._copy:\n attns[\"copy\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n tgt_emb = emb.transpose(0, 1).contiguous()\n # The output of CNNEncoder.\n src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()\n # The combination of output of CNNEncoder and source embeddings.\n src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()\n\n # Run the forward pass of the CNNDecoder.\n emb_reshape = tgt_emb.contiguous().view(\n tgt_emb.size(0) * tgt_emb.size(1), -1)\n linear_out = self.linear(emb_reshape)\n x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)\n x = shape_transform(x)\n\n pad = torch.zeros(x.size(0), x.size(1),\n self.cnn_kernel_width - 1, 1)\n\n pad = pad.type_as(x)\n base_target_emb = x\n\n for conv, attention in zip(self.conv_layers, self.attn_layers):\n new_target_input = torch.cat([pad, x], 2)\n out = conv(new_target_input)\n c, attn = attention(base_target_emb, out,\n src_memory_bank_t, src_memory_bank_c)\n x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT\n output = x.squeeze(3).transpose(1, 2)\n\n # Process the result and update the attentions.\n outputs = output.transpose(0, 1).contiguous()\n if state.previous_input is not None:\n outputs = outputs[state.previous_input.size(0):]\n attn = attn[:, state.previous_input.size(0):].squeeze()\n attn = torch.stack([attn])\n attns[\"std\"] = attn\n if self._copy:\n attns[\"copy\"] = attn\n\n # Update the state.\n state.update_state(tgt)\n\n return outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# NOTE: memory_lengths is only here for compatibility reasons","# with onmt.modules.RNNDecoderBase.forward()","# 
CHECKS","assert","isinstance","(","state",",","CNNDecoderState",")","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","_",",","contxt_batch",",","_","=","memory_bank",".","size","(",")","aeq","(","tgt_batch",",","contxt_batch",")","# END CHECKS","if","state",".","previous_input","is","not","None",":","tgt","=","torch",".","cat","(","[","state",".","previous_input",",","tgt","]",",","0",")","# Initialize return variables.","outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","assert","not","self",".","_copy",",","\"Copy mechanism not yet tested in conv2conv\"","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","emb","=","self",".","embeddings","(","tgt",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","tgt_emb","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","# The output of CNNEncoder.","src_memory_bank_t","=","memory_bank",".","transpose","(","0",",","1",")",".","contiguous","(",")","# The combination of output of CNNEncoder and source embeddings.","src_memory_bank_c","=","state",".","init_src",".","transpose","(","0",",","1",")",".","contiguous","(",")","# Run the forward pass of the CNNDecoder.","emb_reshape","=","tgt_emb",".","contiguous","(",")",".","view","(","tgt_emb",".","size","(","0",")","*","tgt_emb",".","size","(","1",")",",","-","1",")","linear_out","=","self",".","linear","(","emb_reshape",")","x","=","linear_out",".","view","(","tgt_emb",".","size","(","0",")",",","tgt_emb",".","size","(","1",")",",","-","1",")","x","=","shape_transform","(","x",")","pad","=","torch",".","zeros","(","x",".","size","(","0",")",",","x",".","size","(","1",")",",","self",".","cnn_kernel_width","-","1",",","1",")","pad","=","pad",".","type_as","(","x",")","base_target_emb","=","x","for","conv",",","attention","in","zip","(","self",".","conv_layers",",","self",".","attn_layers",")",":","new_target_input","=","torch",".","cat","(","[","pad",",","x","]",",","2",")","out","=","conv","(","new_target_input",")","c",",","attn","=","attention","(","base_target_emb",",","out",",","src_memory_bank_t",",","src_memory_bank_c",")","x","=","(","x","+","(","c","+","out",")","*","SCALE_WEIGHT",")","*","SCALE_WEIGHT","output","=","x",".","squeeze","(","3",")",".","transpose","(","1",",","2",")","# Process the result and update the attentions.","outputs","=","output",".","transpose","(","0",",","1",")",".","contiguous","(",")","if","state",".","previous_input","is","not","None",":","outputs","=","outputs","[","state",".","previous_input",".","size","(","0",")",":","]","attn","=","attn","[",":",",","state",".","previous_input",".","size","(","0",")",":","]",".","squeeze","(",")","attn","=","torch",".","stack","(","[","attn","]",")","attns","[","\"std\"","]","=","attn","if","self",".","_copy",":","attns","[","\"copy\"","]","=","attn","# Update the state.","state",".","update_state","(","tgt",")","return","outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py#L58-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoder.init_decoder_state","parameters":"(self, _, memory_bank, enc_hidden, with_cache=False)","argument_list":"","return_statement":"return CNNDecoderState(memory_bank, enc_hidden)","docstring":"Init decoder state.","docstring_summary":"Init decoder 
state.","docstring_tokens":["Init","decoder","state","."],"function":"def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):\n \"\"\"\n Init decoder state.\n \"\"\"\n return CNNDecoderState(memory_bank, enc_hidden)","function_tokens":["def","init_decoder_state","(","self",",","_",",","memory_bank",",","enc_hidden",",","with_cache","=","False",")",":","return","CNNDecoderState","(","memory_bank",",","enc_hidden",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py#L124-L128"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState._all","parameters":"(self)","argument_list":"","return_statement":"return (self.previous_input,)","docstring":"Contains attributes that need to be updated in self.beam_update().","docstring_summary":"Contains attributes that need to be updated in self.beam_update().","docstring_tokens":["Contains","attributes","that","need","to","be","updated","in","self",".","beam_update","()","."],"function":"def _all(self):\n \"\"\"\n Contains attributes that need to be updated in self.beam_update().\n \"\"\"\n return (self.previous_input,)","function_tokens":["def","_all","(","self",")",":","return","(","self",".","previous_input",",",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py#L141-L145"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState.update_state","parameters":"(self, new_input)","argument_list":"","return_statement":"","docstring":"Called for every decoder forward pass.","docstring_summary":"Called for every decoder forward pass.","docstring_tokens":["Called","for","every","decoder","forward","pass","."],"function":"def update_state(self, new_input):\n \"\"\" Called for every decoder forward pass. \"\"\"\n self.previous_input = new_input","function_tokens":["def","update_state","(","self",",","new_input",")",":","self",".","previous_input","=","new_input"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py#L150-L152"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n self.init_src = self.init_src.data.repeat(1, beam_size, 1)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","self",".","init_src","=","self",".","init_src",".","data",".","repeat","(","1",",","beam_size",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/cnn_decoder.py#L154-L156"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/encoder.py","language":"python","identifier":"EncoderBase.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"","docstring":"Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`\n\n\n Returns:\n (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):\n * final encoder state, used to initialize decoder\n * memory bank for attention, `[src_len x batch x hidden]`","docstring_summary":"Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`","docstring_tokens":["Args",":","src","(",":","obj",":","LongTensor",")",":","padded","sequences","of","sparse","indices","[","src_len","x","batch","x","nfeat","]","lengths","(",":","obj",":","LongTensor",")",":","length","of","each","sequence","[","batch","]"],"function":"def forward(self, src, lengths=None):\n \"\"\"\n Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`\n\n\n Returns:\n (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):\n * final encoder state, used to initialize decoder\n * memory bank for attention, `[src_len x batch x hidden]`\n \"\"\"\n raise NotImplementedError","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/encoder.py#L41-L54"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder.build_sentence_layer","parameters":"(self,memory_bank,src_sents)","argument_list":"","return_statement":"return sent_output","docstring":"In this method we define sentence level representation. (This is the old version)\n :param memory_bank:\n :param encoder_final:\n :param src_sents:\n :return: sentence embeddings","docstring_summary":"In this method we define sentence level representation. (This is the old version)\n :param memory_bank:\n :param encoder_final:\n :param src_sents:\n :return: sentence embeddings","docstring_tokens":["In","this","method","we","define","sentence","level","representation",".","(","This","is","the","old","version",")",":","param","memory_bank",":",":","param","encoder_final",":",":","param","src_sents",":",":","return",":","sentence","embeddings"],"function":"def build_sentence_layer(self,memory_bank,src_sents):\n '''\n In this method we define sentence level representation. 
(This is the old version)\n :param memory_bank:\n :param encoder_final:\n :param src_sents:\n :return: sentence embeddings\n '''\n # print('Memory..', memory_bank.size()) # torch.Size([200, 2, 512]) TODO: this is the output\n # #\n # print ('encoder_final..',encoder_final) #\n\n\n if isinstance(memory_bank, torch.nn.utils.rnn.PackedSequence):\n\n memory_bank = nn.utils.rnn.pad_packed_sequence(memory_bank)[0] # as after unpack it is a tuple\n\n hidden = memory_bank.permute(1,0,2) # size: (2,200,512)\n\n # print ('in func...', src_sents)\n\n # in each case for the current batch, send the last hidden output as the input to the sent_lstm layer\n batch_input_list = []\n for output,sent_id in zip(hidden,src_sents): # so we have batch_size to be 1\n\n common_len = len(sent_id)\n\n output = output.unsqueeze(1)\n sent_input_list = []\n\n # firs id\n start_ind_sent_id = 0\n start_ind = sent_id[start_ind_sent_id]\n\n\n while (start_ind < output.size()[0]) and (start_ind_sent_id < sent_id.size()[0]):\n\n\n # add\n sent_input_list.append(output[start_ind])\n\n # both ids move to the next\n start_ind_sent_id += 1\n if start_ind_sent_id < sent_id.size()[0]:\n start_ind += sent_id[start_ind_sent_id]\n else:\n break\n\n\n # FEB 10, len check\n if len(sent_input_list) < common_len:\n # pad with zero\n pad_size = output[0].size()\n zeros = torch.zeros(pad_size, dtype=torch.float32).cuda()\n pad_list = [zeros]* (common_len-len(sent_input_list))\n\n sent_input_list = sent_input_list + pad_list\n\n\n sent_input = torch.cat(sent_input_list,0).unsqueeze(1) # (n_sent, batch_size=1,dim=512)\n\n\n batch_input_list.append(sent_input)\n\n\n\n # print ([x.size() for x in batch_input_list])\n # [torch.Size([18, 1, 512]), torch.Size([15, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512])]\n\n\n batch_input_list_concat = torch.cat(batch_input_list,1)\n\n # get the id of sent length:\n sent_output, (h_, c_) = self.sent_rnn(batch_input_list_concat)\n # LSTM(512, 256, bidirectional=True), sent_output has the same shape with batch_input_list_concat\n\n\n #sent_output: shape(number of sents or step, batch_size, dim) (9, 2, 512), number of sents or step can be different\n # print ('Encoder Sentence_output...',sent_output.size())\n return sent_output","function_tokens":["def","build_sentence_layer","(","self",",","memory_bank",",","src_sents",")",":","# print('Memory..', memory_bank.size()) # torch.Size([200, 2, 512]) TODO: this is the output","# #","# print ('encoder_final..',encoder_final) #","if","isinstance","(","memory_bank",",","torch",".","nn",".","utils",".","rnn",".","PackedSequence",")",":","memory_bank","=","nn",".","utils",".","rnn",".","pad_packed_sequence","(","memory_bank",")","[","0","]","# as after unpack it is a tuple","hidden","=","memory_bank",".","permute","(","1",",","0",",","2",")","# size: (2,200,512)","# print ('in func...', src_sents)","# in each case for the current batch, send the last hidden output as the input to the sent_lstm layer","batch_input_list","=","[","]","for","output",",","sent_id","in","zip","(","hidden",",","src_sents",")",":","# so we have batch_size to be 1","common_len","=","len","(","sent_id",")","output","=","output",".","unsqueeze","(","1",")","sent_input_list","=","[","]","# firs id","start_ind_sent_id","=","0","start_ind","=","sent_id","[","start_ind_sent_id","]","while","(","start_ind","<","output",".","size","(",")","[","0","]",")","and","(","start_ind_sent_id","<","sent_id",".","size","(",")","[","0","]",")",":","# 
add","sent_input_list",".","append","(","output","[","start_ind","]",")","# both ids move to the next","start_ind_sent_id","+=","1","if","start_ind_sent_id","<","sent_id",".","size","(",")","[","0","]",":","start_ind","+=","sent_id","[","start_ind_sent_id","]","else",":","break","# FEB 10, len check","if","len","(","sent_input_list",")","<","common_len",":","# pad with zero","pad_size","=","output","[","0","]",".","size","(",")","zeros","=","torch",".","zeros","(","pad_size",",","dtype","=","torch",".","float32",")",".","cuda","(",")","pad_list","=","[","zeros","]","*","(","common_len","-","len","(","sent_input_list",")",")","sent_input_list","=","sent_input_list","+","pad_list","sent_input","=","torch",".","cat","(","sent_input_list",",","0",")",".","unsqueeze","(","1",")","# (n_sent, batch_size=1,dim=512)","batch_input_list",".","append","(","sent_input",")","# print ([x.size() for x in batch_input_list])","# [torch.Size([18, 1, 512]), torch.Size([15, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512])]","batch_input_list_concat","=","torch",".","cat","(","batch_input_list",",","1",")","# get the id of sent length:","sent_output",",","(","h_",",","c_",")","=","self",".","sent_rnn","(","batch_input_list_concat",")","# LSTM(512, 256, bidirectional=True), sent_output has the same shape with batch_input_list_concat","#sent_output: shape(number of sents or step, batch_size, dim) (9, 2, 512), number of sents or step can be different","# print ('Encoder Sentence_output...',sent_output.size())","return","sent_output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py#L71-L150"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder.sent_level_encoder","parameters":"(self, memory_bank)","argument_list":"","return_statement":"return sent_output","docstring":"This is the sentence level encoder, it takes a bunch of sentence encoding,\n then feed into another sentence level rnn\n :param memory_bank: sentence encoding ( a list of packed)\n :return: output of the rnn layer","docstring_summary":"This is the sentence level encoder, it takes a bunch of sentence encoding,\n then feed into another sentence level rnn\n :param memory_bank: sentence encoding ( a list of packed)\n :return: output of the rnn layer","docstring_tokens":["This","is","the","sentence","level","encoder","it","takes","a","bunch","of","sentence","encoding","then","feed","into","another","sentence","level","rnn",":","param","memory_bank",":","sentence","encoding","(","a","list","of","packed",")",":","return",":","output","of","the","rnn","layer"],"function":"def sent_level_encoder(self, memory_bank):\n '''\n This is the sentence level encoder, it takes a bunch of sentence encoding,\n then feed into another sentence level rnn\n :param memory_bank: sentence encoding ( a list of packed)\n :return: output of the rnn layer\n '''\n\n if isinstance(memory_bank, torch.nn.utils.rnn.PackedSequence):\n memory_bank_unpacked = nn.utils.rnn.pad_packed_sequence(memory_bank)[0].permute(1,0,2)# as after unpack it is a tuple\n # memory_bank_unpacked size: torch.Size([42, 9, 512]) # [seq_len,batch_size,512]\n\n\n # take the last hiddent state of each\n last_hidden = [x[-1].unsqueeze(0) for x in memory_bank_unpacked]\n last_hidden = torch.cat(last_hidden, 0).unsqueeze(0) # size is [1,9,512]\n\n 
sent_output, (h_, c_) = self.sent_rnn(last_hidden)\n\n\n return sent_output","function_tokens":["def","sent_level_encoder","(","self",",","memory_bank",")",":","if","isinstance","(","memory_bank",",","torch",".","nn",".","utils",".","rnn",".","PackedSequence",")",":","memory_bank_unpacked","=","nn",".","utils",".","rnn",".","pad_packed_sequence","(","memory_bank",")","[","0","]",".","permute","(","1",",","0",",","2",")","# as after unpack it is a tuple","# memory_bank_unpacked size: torch.Size([42, 9, 512]) # [seq_len,batch_size,512]","# take the last hidden state of each","last_hidden","=","[","x","[","-","1","]",".","unsqueeze","(","0",")","for","x","in","memory_bank_unpacked","]","last_hidden","=","torch",".","cat","(","last_hidden",",","0",")",".","unsqueeze","(","0",")","# size is [1,9,512]","sent_output",",","(","h_",",","c_",")","=","self",".","sent_rnn","(","last_hidden",")","return","sent_output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py#L153-L173"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder.forward_new","parameters":"(self, src, src_sents=None, lengths=None)","argument_list":"","return_statement":"return final_encoder_final, final_memory_bank, final_sent_output","docstring":"New Forward","docstring_summary":"New Forward","docstring_tokens":["New","Forward"],"function":"def forward_new(self, src, src_sents=None, lengths=None):\n \"New Forward\"\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n\n s_len, batch, emb_dim = emb.size() # (185 16 128), s_len is sequence_len.\n\n # 2333 TODO: change starts here\n\n\n # Feb15: we break this into sentences\n\n # iterate each batch..\n input_embeddings=emb.permute(1,0,2)\n\n final_memory_bank = []\n final_encoder_final = []\n\n final_sent_output = []\n\n for batch_id in range(batch):\n\n # this is the input to word-level lstm\n current_sequence = input_embeddings[batch_id] # size is (sequence_len, emb_dim)\n # break this into multiple sentences according to the sentence lengths, and input to the rnn\n # sent len check, define len_sequence to be: tensor([26, 17, 21, 23, 19, 26, 10, 42], device='cuda:0')\n if torch.sum(src_sents[batch_id]) >= s_len:\n # if exceeds the total length, then there is a bug\n len_sequence = src_sents[batch_id][:-1]\n else:\n len_sequence = src_sents[batch_id]\n\n counter = 0\n\n\n feeding_as_a_batch = []\n lengths_as_a_batch = []\n lengths = []\n actual_len = 0\n for idx in len_sequence:\n if (counter < s_len ) and (idx != 0):\n actual_len += 1\n # from the current_sequence, add to the rnn\n feeding_sequence = current_sequence[counter:counter+idx].unsqueeze(0)\n feeding_as_a_batch.append(feeding_sequence.permute(1,0,2)) #feeding_sequence size = [1,26,128]\n counter += idx\n # feed into rnn\n lengths_as_a_batch.append(idx)\n\n feeding_as_a_batch_padded = torch.cat([x for x in pad_sequence(feeding_as_a_batch,batch_first=True)],1)\n # feed into rnn size: torch.Size([42, 9, 128]) -> [max, batch_size, dim]\n max_dim = feeding_as_a_batch_padded.size()[0]\n lengths_as_a_batch = [max_dim for x in range(actual_len)]\n # lengths_as_a_batch = [item for sublist in lengths_as_a_batch for item in sublist]\n\n if lengths_as_a_batch is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Tensor.\n packed_emb_rnn_input = 
pack(feeding_as_a_batch_padded, lengths_as_a_batch)\n\n\n # feed into!\n memory_bank, encoder_final = self.rnn(packed_emb_rnn_input)\n\n # feed into sentence_level\n sent_output = self.sent_level_encoder(memory_bank)\n final_sent_output.append(sent_output.view(-1,4*emb_dim))\n\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank = unpack(memory_bank)[0]\n\n\n # we need to get the original output, before padded\n revised_memory_bank = memory_bank.permute(1, 0, 2)\n memory_bank_unpadded_list = []\n\n\n\n for idx in range(actual_len):\n memory_bank_unpadded_list.append(revised_memory_bank[idx][:len_sequence[idx]])\n unpadded_memory_bank = torch.cat(memory_bank_unpadded_list,0) # size is [sequence_len,512] # need to pad or truncate\n actual_size = unpadded_memory_bank.size()[0]\n if actual_size >= s_len:\n padded_memory_bank = unpadded_memory_bank[:s_len]\n # print ('Size is okk..', padded_memory_bank.size())\n else:\n # pad with zero\n pad_size = s_len - actual_size\n padded_memory_bank = F.pad(unpadded_memory_bank, (0,0,0,pad_size), 'constant',0.0)\n # print ('Padded...',unpadded_memory_bank.size(),pad_size,padded_memory_bank.size())\n # print (actual_size,s_len,padded_memory_bank.size())\n final_memory_bank.append(padded_memory_bank.unsqueeze(1))\n # finish processing on memory bank\n\n\n if self.use_bridge:\n encoder_final = self._bridge(encoder_final)\n\n final_encoder_final.append(tuple([x[:,-1,:].unsqueeze(1) for x in encoder_final]))\n\n # add unpacked from final_memory_bank\n final_memory_bank = torch.cat(final_memory_bank,1) # [200, 2, 512], ready to return\n\n\n # join the encoder_final\n hs = []\n cs = []\n for (h,c) in final_encoder_final:\n hs.append(h)\n cs.append(c)\n hs = torch.cat(hs,1)\n cs = torch.cat(cs,1)\n # encoder_final\n final_encoder_final = tuple([hs,cs]) # ready to return\n\n # sent output\n final_sent_output = pad_sequence(final_sent_output) # size [9,2,512], ready to return\n\n # 2333 TODO: change finish here\n\n # import pdb;pdb.set_trace()\n\n return final_encoder_final, final_memory_bank, final_sent_output","function_tokens":["def","forward_new","(","self",",","src",",","src_sents","=","None",",","lengths","=","None",")",":","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","s_len",",","batch",",","emb_dim","=","emb",".","size","(",")","# (185 16 128), s_len is sequence_len.","# 2333 TODO: change starts here","# Feb15: we break this into sentences","# iterate each batch..","input_embeddings","=","emb",".","permute","(","1",",","0",",","2",")","final_memory_bank","=","[","]","final_encoder_final","=","[","]","final_sent_output","=","[","]","for","batch_id","in","range","(","batch",")",":","# this is the input to word-level lstm","current_sequence","=","input_embeddings","[","batch_id","]","# size is (sequence_len, emb_dim)","# break this into multiple sentences according to the sentence lengths, and input to the rnn","# sent len check, define len_sequence to be: tensor([26, 17, 21, 23, 19, 26, 10, 42], device='cuda:0')","if","torch",".","sum","(","src_sents","[","batch_id","]",")",">=","s_len",":","# if exceeds the total length, then there is a 
bug","len_sequence","=","src_sents","[","batch_id","]","[",":","-","1","]","else",":","len_sequence","=","src_sents","[","batch_id","]","counter","=","0","feeding_as_a_batch","=","[","]","lengths_as_a_batch","=","[","]","lengths","=","[","]","actual_len","=","0","for","idx","in","len_sequence",":","if","(","counter","<","s_len",")","and","(","idx","!=","0",")",":","actual_len","+=","1","# from the current_sequence, add to the rnn","feeding_sequence","=","current_sequence","[","counter",":","counter","+","idx","]",".","unsqueeze","(","0",")","feeding_as_a_batch",".","append","(","feeding_sequence",".","permute","(","1",",","0",",","2",")",")","#feeding_sequence size = [1,26,128]","counter","+=","idx","# feed into rnn","lengths_as_a_batch",".","append","(","idx",")","feeding_as_a_batch_padded","=","torch",".","cat","(","[","x","for","x","in","pad_sequence","(","feeding_as_a_batch",",","batch_first","=","True",")","]",",","1",")","# feed into rnn size: torch.Size([42, 9, 128]) -> [max, batch_size, dim]","max_dim","=","feeding_as_a_batch_padded",".","size","(",")","[","0","]","lengths_as_a_batch","=","[","max_dim","for","x","in","range","(","actual_len",")","]","# lengths_as_a_batch = [item for sublist in lengths_as_a_batch for item in sublist]","if","lengths_as_a_batch","is","not","None","and","not","self",".","no_pack_padded_seq",":","# Lengths data is wrapped inside a Tensor.","packed_emb_rnn_input","=","pack","(","feeding_as_a_batch_padded",",","lengths_as_a_batch",")","# feed into!","memory_bank",",","encoder_final","=","self",".","rnn","(","packed_emb_rnn_input",")","# feed into sentence_level","sent_output","=","self",".","sent_level_encoder","(","memory_bank",")","final_sent_output",".","append","(","sent_output",".","view","(","-","1",",","4","*","emb_dim",")",")","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","memory_bank","=","unpack","(","memory_bank",")","[","0","]","# we need to get the original output, before padded","revised_memory_bank","=","memory_bank",".","permute","(","1",",","0",",","2",")","memory_bank_unpadded_list","=","[","]","for","idx","in","range","(","actual_len",")",":","memory_bank_unpadded_list",".","append","(","revised_memory_bank","[","idx","]","[",":","len_sequence","[","idx","]","]",")","unpadded_memory_bank","=","torch",".","cat","(","memory_bank_unpadded_list",",","0",")","# size is [sequence_len,512] # need to pad or truncate","actual_size","=","unpadded_memory_bank",".","size","(",")","[","0","]","if","actual_size",">=","s_len",":","padded_memory_bank","=","unpadded_memory_bank","[",":","s_len","]","# print ('Size is okk..', padded_memory_bank.size())","else",":","# pad with zero","pad_size","=","s_len","-","actual_size","padded_memory_bank","=","F",".","pad","(","unpadded_memory_bank",",","(","0",",","0",",","0",",","pad_size",")",",","'constant'",",","0.0",")","# print ('Padded...',unpadded_memory_bank.size(),pad_size,padded_memory_bank.size())","# print (actual_size,s_len,padded_memory_bank.size())","final_memory_bank",".","append","(","padded_memory_bank",".","unsqueeze","(","1",")",")","# finish processing on memory bank","if","self",".","use_bridge",":","encoder_final","=","self",".","_bridge","(","encoder_final",")","final_encoder_final",".","append","(","tuple","(","[","x","[",":",",","-","1",",",":","]",".","unsqueeze","(","1",")","for","x","in","encoder_final","]",")",")","# add unpacked from final_memory_bank","final_memory_bank","=","torch",".","cat","(","final_memory_bank",",","1",")","# [200, 2, 512], 
ready to return","# join the encoder_final","hs","=","[","]","cs","=","[","]","for","(","h",",","c",")","in","final_encoder_final",":","hs",".","append","(","h",")","cs",".","append","(","c",")","hs","=","torch",".","cat","(","hs",",","1",")","cs","=","torch",".","cat","(","cs",",","1",")","# encoder_final","final_encoder_final","=","tuple","(","[","hs",",","cs","]",")","# ready to return","# sent output","final_sent_output","=","pad_sequence","(","final_sent_output",")","# size [9,2,512], ready to return","# 2333 TODO: change finish here","# import pdb;pdb.set_trace()","return","final_encoder_final",",","final_memory_bank",",","final_sent_output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py#L175-L298"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder.forward","parameters":"(self, src, src_sents=None, lengths=None)","argument_list":"","return_statement":"return encoder_final, memory_bank, sent_output","docstring":"forward_original","docstring_summary":"forward_original","docstring_tokens":["forward_original"],"function":"def forward(self, src, src_sents=None, lengths=None):\n \"forward_original\"\n #print ('Original!')\n self._check_args(src, lengths)\n\n emb = self.embeddings(src)\n\n s_len, batch, emb_dim = emb.size() # (185 16 128), s_len is changeable.\n\n\n packed_emb = emb\n\n\n if lengths is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Tensor.\n lengths = lengths.view(-1).tolist()\n packed_emb = pack(emb, lengths)\n\n\n\n memory_bank, encoder_final = self.rnn(packed_emb) # output, (hidden, cell), unpack using pad_packed_sequence(), encoder_final is the last state, a list (contains the batch)\n # encoder_final size: a list, len is the batch size; for each item, size [2, 2, 256]\n\n\n # memory_bank is the output\n # output, (hidden, cell), unpack using pad_packed_sequence()\n # self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;\n\n # print ('forwarding... 
src_sents',src_sents)\n\n # get sentence embedding\n sent_output = self.build_sentence_layer(memory_bank,src_sents)\n # sent_output size: torch.Size([9, 2, 512])\n # print ('We need...!!!',src_sents.size(),src_sents)\n\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank = unpack(memory_bank)[0]\n # memory_bank size torch.Size([200, 2, 512])\n\n\n # encoder_final: a tuple of 2 (batch size)\n # each of it has the size of torch.Size([2, 2, 256])\n\n if self.use_bridge:\n encoder_final = self._bridge(encoder_final)\n # encoder_final same shape as before\n\n return encoder_final, memory_bank, sent_output","function_tokens":["def","forward","(","self",",","src",",","src_sents","=","None",",","lengths","=","None",")",":","#print ('Original!')","self",".","_check_args","(","src",",","lengths",")","emb","=","self",".","embeddings","(","src",")","s_len",",","batch",",","emb_dim","=","emb",".","size","(",")","# (185 16 128), s_len is changeable.","packed_emb","=","emb","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","# Lengths data is wrapped inside a Tensor.","lengths","=","lengths",".","view","(","-","1",")",".","tolist","(",")","packed_emb","=","pack","(","emb",",","lengths",")","memory_bank",",","encoder_final","=","self",".","rnn","(","packed_emb",")","# output, (hidden, cell), unpack using pad_packed_sequence(), encoder_final is the last state, a list (contains the batch)","# encoder_final size: a list, len is the batch size; for each item, size [2, 2, 256]","# memory_bank is the output","# output, (hidden, cell), unpack using pad_packed_sequence()","# self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;","# print ('forwarding... src_sents',src_sents)","# get sentence embedding","sent_output","=","self",".","build_sentence_layer","(","memory_bank",",","src_sents",")","# sent_output size: torch.Size([9, 2, 512])","# print ('We need...!!!',src_sents.size(),src_sents)","if","lengths","is","not","None","and","not","self",".","no_pack_padded_seq",":","memory_bank","=","unpack","(","memory_bank",")","[","0","]","# memory_bank size torch.Size([200, 2, 512])","# encoder_final: a tuple of 2 (batch size)","# each of it has the size of torch.Size([2, 2, 256])","if","self",".","use_bridge",":","encoder_final","=","self",".","_bridge","(","encoder_final",")","# encoder_final same shape as before","return","encoder_final",",","memory_bank",",","sent_output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py#L300-L348"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py","language":"python","identifier":"RNNEncoder._bridge","parameters":"(self, hidden)","argument_list":"","return_statement":"return outs","docstring":"Forward hidden state through bridge","docstring_summary":"Forward hidden state through bridge","docstring_tokens":["Forward","hidden","state","through","bridge"],"function":"def _bridge(self, hidden):\n \"\"\"\n Forward hidden state through bridge\n \"\"\"\n def bottle_hidden(linear, states):\n \"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"\n size = states.size()\n result = linear(states.view(-1, self.total_hidden_dim))\n return F.relu(result).view(size)\n\n if isinstance(hidden, tuple): # LSTM\n outs = tuple([bottle_hidden(layer, hidden[ix])\n for ix, layer in enumerate(self.bridge)])\n else:\n outs = 
bottle_hidden(self.bridge[0], hidden)\n return outs","function_tokens":["def","_bridge","(","self",",","hidden",")",":","def","bottle_hidden","(","linear",",","states",")",":","\"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"","size","=","states",".","size","(",")","result","=","linear","(","states",".","view","(","-","1",",","self",".","total_hidden_dim",")",")","return","F",".","relu","(","result",")",".","view","(","size",")","if","isinstance","(","hidden",",","tuple",")",":","# LSTM","outs","=","tuple","(","[","bottle_hidden","(","layer",",","hidden","[","ix","]",")","for","ix",",","layer","in","enumerate","(","self",".","bridge",")","]",")","else",":","outs","=","bottle_hidden","(","self",".","bridge","[","0","]",",","hidden",")","return","outs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/rnn_encoder.py#L365-L382"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/image_encoder.py","language":"python","identifier":"ImageEncoder.load_pretrained_vectors","parameters":"(self, opt)","argument_list":"","return_statement":"","docstring":"Pass in needed options only when modifying the function definition.","docstring_summary":"Pass in needed options only when modifying the function definition.","docstring_tokens":["Pass","in","needed","options","only","when","modifying","the","function","definition","."],"function":"def load_pretrained_vectors(self, opt):\n \"\"\" Pass in needed options only when modifying the function definition.\"\"\"\n pass","function_tokens":["def","load_pretrained_vectors","(","self",",","opt",")",":","pass"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/image_encoder.py#L50-L52"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/image_encoder.py","language":"python","identifier":"ImageEncoder.forward","parameters":"(self, src, lengths=None)","argument_list":"","return_statement":"return hidden_t, out","docstring":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_summary":"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","encoders",".","encoder",".","EncoderBase",".","forward","()"],"function":"def forward(self, src, lengths=None):\n \"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`\"\n\n batch_size = src.size(0)\n # (batch_size, 64, imgH, imgW)\n # layer 1\n src = F.relu(self.layer1(src[:, :, :, :] - 0.5), True)\n\n # (batch_size, 64, imgH\/2, imgW\/2)\n src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))\n\n # (batch_size, 128, imgH\/2, imgW\/2)\n # layer 2\n src = F.relu(self.layer2(src), True)\n\n # (batch_size, 128, imgH\/2\/2, imgW\/2\/2)\n src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))\n\n # (batch_size, 256, imgH\/2\/2, imgW\/2\/2)\n # layer 3\n # batch norm 1\n src = F.relu(self.batch_norm1(self.layer3(src)), True)\n\n # (batch_size, 256, imgH\/2\/2, imgW\/2\/2)\n # layer4\n src = F.relu(self.layer4(src), True)\n\n # (batch_size, 256, imgH\/2\/2\/2, imgW\/2\/2)\n src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))\n\n # (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2)\n # layer 5\n # batch norm 2\n src = F.relu(self.batch_norm2(self.layer5(src)), True)\n\n # (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)\n src = F.max_pool2d(src, kernel_size=(2, 
1), stride=(2, 1))\n\n # (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)\n src = F.relu(self.batch_norm3(self.layer6(src)), True)\n\n # # (batch_size, 512, H, W)\n all_outputs = []\n for row in range(src.size(2)):\n inp = src[:, :, row, :].transpose(0, 2) \\\n .transpose(1, 2)\n row_vec = torch.Tensor(batch_size).type_as(inp.data) \\\n .long().fill_(row)\n pos_emb = self.pos_lut(row_vec)\n with_pos = torch.cat(\n (pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)\n outputs, hidden_t = self.rnn(with_pos)\n all_outputs.append(outputs)\n out = torch.cat(all_outputs, 0)\n\n return hidden_t, out","function_tokens":["def","forward","(","self",",","src",",","lengths","=","None",")",":","batch_size","=","src",".","size","(","0",")","# (batch_size, 64, imgH, imgW)","# layer 1","src","=","F",".","relu","(","self",".","layer1","(","src","[",":",",",":",",",":",",",":","]","-","0.5",")",",","True",")","# (batch_size, 64, imgH\/2, imgW\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","2",",","2",")",",","stride","=","(","2",",","2",")",")","# (batch_size, 128, imgH\/2, imgW\/2)","# layer 2","src","=","F",".","relu","(","self",".","layer2","(","src",")",",","True",")","# (batch_size, 128, imgH\/2\/2, imgW\/2\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","2",",","2",")",",","stride","=","(","2",",","2",")",")","# (batch_size, 256, imgH\/2\/2, imgW\/2\/2)","# layer 3","# batch norm 1","src","=","F",".","relu","(","self",".","batch_norm1","(","self",".","layer3","(","src",")",")",",","True",")","# (batch_size, 256, imgH\/2\/2, imgW\/2\/2)","# layer4","src","=","F",".","relu","(","self",".","layer4","(","src",")",",","True",")","# (batch_size, 256, imgH\/2\/2\/2, imgW\/2\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","1",",","2",")",",","stride","=","(","1",",","2",")",")","# (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2)","# layer 5","# batch norm 2","src","=","F",".","relu","(","self",".","batch_norm2","(","self",".","layer5","(","src",")",")",",","True",")","# (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)","src","=","F",".","max_pool2d","(","src",",","kernel_size","=","(","2",",","1",")",",","stride","=","(","2",",","1",")",")","# (batch_size, 512, imgH\/2\/2\/2, imgW\/2\/2\/2)","src","=","F",".","relu","(","self",".","batch_norm3","(","self",".","layer6","(","src",")",")",",","True",")","# # (batch_size, 512, H, W)","all_outputs","=","[","]","for","row","in","range","(","src",".","size","(","2",")",")",":","inp","=","src","[",":",",",":",",","row",",",":","]",".","transpose","(","0",",","2",")",".","transpose","(","1",",","2",")","row_vec","=","torch",".","Tensor","(","batch_size",")",".","type_as","(","inp",".","data",")",".","long","(",")",".","fill_","(","row",")","pos_emb","=","self",".","pos_lut","(","row_vec",")","with_pos","=","torch",".","cat","(","(","pos_emb",".","view","(","1",",","pos_emb",".","size","(","0",")",",","pos_emb",".","size","(","1",")",")",",","inp",")",",","0",")","outputs",",","hidden_t","=","self",".","rnn","(","with_pos",")","all_outputs",".","append","(","outputs",")","out","=","torch",".","cat","(","all_outputs",",","0",")","return","hidden_t",",","out"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/image_encoder.py#L54-L109"} 
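The rnn_encoder.py records above all hinge on the same hierarchical pattern: a word-level LSTM encodes each sentence, the last hidden state of every sentence is stacked, and a second sentence-level LSTM (sent_rnn) runs over that stack. Below is a minimal, self-contained sketch of that pattern, not the repository's exact module; the dimensions (128-dim embeddings, 256-dim hidden states, 9 sentences padded to 42 tokens) are assumptions chosen to mirror the sizes quoted in the comments above.

import torch
import torch.nn as nn

emb_dim, hidden = 128, 256
word_rnn = nn.LSTM(emb_dim, hidden, bidirectional=True)     # word-level encoder
sent_rnn = nn.LSTM(2 * hidden, hidden, bidirectional=True)  # sentence-level encoder

# 9 sentences of one document, each padded to 42 tokens: [seq_len, n_sents, emb_dim]
sents = torch.randn(42, 9, emb_dim)
word_out, _ = word_rnn(sents)            # [42, 9, 512]
last_hidden = word_out[-1].unsqueeze(0)  # [1, 9, 512]: one vector per sentence
sent_out, _ = sent_rnn(last_hidden)      # [1, 9, 512], as noted in sent_level_encoder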
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/encoders\/cnn_encoder.py","language":"python","identifier":"CNNEncoder.forward","parameters":"(self, input, lengths=None, hidden=None)","argument_list":"","return_statement":"return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \\\n out.squeeze(3).transpose(0, 1).contiguous()","docstring":"See :obj:`onmt.modules.EncoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.EncoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","EncoderBase",".","forward","()"],"function":"def forward(self, input, lengths=None, hidden=None):\n \"\"\" See :obj:`onmt.modules.EncoderBase.forward()`\"\"\"\n self._check_args(input, lengths, hidden)\n\n emb = self.embeddings(input)\n # s_len, batch, emb_dim = emb.size()\n\n emb = emb.transpose(0, 1).contiguous()\n emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)\n emb_remap = self.linear(emb_reshape)\n emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)\n emb_remap = shape_transform(emb_remap)\n out = self.cnn(emb_remap)\n\n return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \\\n out.squeeze(3).transpose(0, 1).contiguous()","function_tokens":["def","forward","(","self",",","input",",","lengths","=","None",",","hidden","=","None",")",":","self",".","_check_args","(","input",",","lengths",",","hidden",")","emb","=","self",".","embeddings","(","input",")","# s_len, batch, emb_dim = emb.size()","emb","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","emb_reshape","=","emb",".","view","(","emb",".","size","(","0",")","*","emb",".","size","(","1",")",",","-","1",")","emb_remap","=","self",".","linear","(","emb_reshape",")","emb_remap","=","emb_remap",".","view","(","emb",".","size","(","0",")",",","emb",".","size","(","1",")",",","-","1",")","emb_remap","=","shape_transform","(","emb_remap",")","out","=","self",".","cnn","(","emb_remap",")","return","emb_remap",".","squeeze","(","3",")",".","transpose","(","0",",","1",")",".","contiguous","(",")",",","out",".","squeeze","(","3",")",".","transpose","(","0",",","1",")",".","contiguous","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/encoders\/cnn_encoder.py#L28-L43"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/models\/sru.py","language":"python","identifier":"check_sru_requirement","parameters":"(abort=False)","argument_list":"","return_statement":"return True","docstring":"Return True if check pass; if check fails and abort is True,\n raise an Exception, othereise return False.","docstring_summary":"Return True if check pass; if check fails and abort is True,\n raise an Exception, othereise return False.","docstring_tokens":["Return","True","if","check","pass",";","if","check","fails","and","abort","is","True","raise","an","Exception","othereise","return","False","."],"function":"def check_sru_requirement(abort=False):\n \"\"\"\n Return True if check pass; if check fails and abort is True,\n raise an Exception, othereise return False.\n \"\"\"\n\n # Check 1.\n try:\n if platform.system() == 'Windows':\n subprocess.check_output('pip freeze | findstr cupy', shell=True)\n subprocess.check_output('pip freeze | findstr pynvrtc',\n shell=True)\n else: # Unix-like systems\n subprocess.check_output('pip freeze | grep -w cupy', shell=True)\n subprocess.check_output('pip freeze | grep -w 
pynvrtc',\n shell=True)\n except subprocess.CalledProcessError:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires 'cupy' and 'pynvrtc' \"\n \"python packages installed.\")\n\n # Check 2.\n if torch.cuda.is_available() is False:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires pytorch built with cuda.\")\n\n # Check 3.\n pattern = re.compile(\".*cuda\/lib.*\")\n ld_path = os.getenv('LD_LIBRARY_PATH', \"\")\n if re.match(pattern, ld_path) is None:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires setting cuda lib path, e.g. \"\n \"export LD_LIBRARY_PATH=\/usr\/local\/cuda\/lib64.\")\n\n return True","function_tokens":["def","check_sru_requirement","(","abort","=","False",")",":","# Check 1.","try",":","if","platform",".","system","(",")","==","'Windows'",":","subprocess",".","check_output","(","'pip freeze | findstr cupy'",",","shell","=","True",")","subprocess",".","check_output","(","'pip freeze | findstr pynvrtc'",",","shell","=","True",")","else",":","# Unix-like systems","subprocess",".","check_output","(","'pip freeze | grep -w cupy'",",","shell","=","True",")","subprocess",".","check_output","(","'pip freeze | grep -w pynvrtc'",",","shell","=","True",")","except","subprocess",".","CalledProcessError",":","if","not","abort",":","return","False","raise","AssertionError","(","\"Using SRU requires 'cupy' and 'pynvrtc' \"","\"python packages installed.\"",")","# Check 2.","if","torch",".","cuda",".","is_available","(",")","is","False",":","if","not","abort",":","return","False","raise","AssertionError","(","\"Using SRU requires pytorch built with cuda.\"",")","# Check 3.","pattern","=","re",".","compile","(","\".*cuda\/lib.*\"",")","ld_path","=","os",".","getenv","(","'LD_LIBRARY_PATH'",",","\"\"",")","if","re",".","match","(","pattern",",","ld_path",")","is","None",":","if","not","abort",":","return","False","raise","AssertionError","(","\"Using SRU requires setting cuda lib path, e.g. \"","\"export LD_LIBRARY_PATH=\/usr\/local\/cuda\/lib64.\"",")","return","True"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/models\/sru.py#L32-L69"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/models\/model.py","language":"python","identifier":"NMTModel.forward","parameters":"(self, src, tgt, src_sents, lengths, dec_state=None)","argument_list":"","return_statement":"return decoder_outputs, attns, dec_state","docstring":"Forward propagate a `src` and `tgt` pair for training.\n Possibly initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. 
however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state","docstring_summary":"Forward propagate a `src` and `tgt` pair for training.\n Possibly initialized with a beginning decoder state.","docstring_tokens":["Forward","propagate","a","src","and","tgt","pair","for","training",".","Possibly","initialized","with","a","beginning","decoder","state","."],"function":"def forward(self, src, tgt, src_sents, lengths, dec_state=None):\n \"\"\"Forward propagate a `src` and `tgt` pair for training.\n Possibly initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state\n \"\"\"\n tgt = tgt[:-1] # exclude last target from inputs ?? why\n\n # import pdb;pdb.set_trace()\n old_src_sents = src_sents.clone()\n\n\n enc_final, memory_bank, sent_encoder = self.encoder(src,src_sents,lengths)\n\n\n enc_state =self.decoder.init_decoder_state(src, memory_bank, enc_final)\n\n\n decoder_outputs, dec_state, attns = \\\n self.decoder(tgt, memory_bank,\n enc_state if dec_state is None\n else dec_state,sent_encoder=sent_encoder,src_sents=old_src_sents,\n memory_lengths=lengths)\n\n if self.multigpu:\n # Not yet supported on multi-gpu\n dec_state = None\n attns = None\n return decoder_outputs, attns, dec_state","function_tokens":["def","forward","(","self",",","src",",","tgt",",","src_sents",",","lengths",",","dec_state","=","None",")",":","tgt","=","tgt","[",":","-","1","]","# exclude last target from inputs ?? 
why","# import pdb;pdb.set_trace()","old_src_sents","=","src_sents",".","clone","(",")","enc_final",",","memory_bank",",","sent_encoder","=","self",".","encoder","(","src",",","src_sents",",","lengths",")","enc_state","=","self",".","decoder",".","init_decoder_state","(","src",",","memory_bank",",","enc_final",")","decoder_outputs",",","dec_state",",","attns","=","self",".","decoder","(","tgt",",","memory_bank",",","enc_state","if","dec_state","is","None","else","dec_state",",","sent_encoder","=","sent_encoder",",","src_sents","=","old_src_sents",",","memory_lengths","=","lengths",")","if","self",".","multigpu",":","# Not yet supported on multi-gpu","dec_state","=","None","attns","=","None","return","decoder_outputs",",","attns",",","dec_state"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/models\/model.py#L22-L65"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/models\/model_saver.py","language":"python","identifier":"ModelSaverBase.maybe_save","parameters":"(self, step)","argument_list":"","return_statement":"","docstring":"Main entry point for model saver\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic","docstring_summary":"Main entry point for model saver\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic","docstring_tokens":["Main","entry","point","for","model","saver","It","wraps","the","_save","method","with","checks","and","apply","keep_checkpoint","related","logic"],"function":"def maybe_save(self, step):\n \"\"\"\n Main entry point for model saver\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic\n \"\"\"\n if self.keep_checkpoint == 0:\n return\n\n if step % self.save_checkpoint_steps != 0:\n return\n\n chkpt, chkpt_name = self._save(step)\n\n if self.keep_checkpoint > 0:\n if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:\n todel = self.checkpoint_queue.popleft()\n self._rm_checkpoint(todel)\n self.checkpoint_queue.append(chkpt_name)","function_tokens":["def","maybe_save","(","self",",","step",")",":","if","self",".","keep_checkpoint","==","0",":","return","if","step","%","self",".","save_checkpoint_steps","!=","0",":","return","chkpt",",","chkpt_name","=","self",".","_save","(","step",")","if","self",".","keep_checkpoint",">","0",":","if","len","(","self",".","checkpoint_queue",")","==","self",".","checkpoint_queue",".","maxlen",":","todel","=","self",".","checkpoint_queue",".","popleft","(",")","self",".","_rm_checkpoint","(","todel",")","self",".","checkpoint_queue",".","append","(","chkpt_name",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/models\/model_saver.py#L43-L61"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/models\/model_saver.py","language":"python","identifier":"ModelSaverBase._save","parameters":"(self, step)","argument_list":"","return_statement":"","docstring":"Save a resumable checkpoint.\n\n Args:\n step (int): step number\n\n Returns:\n checkpoint: the saved object\n checkpoint_name: name (or path) of the saved checkpoint","docstring_summary":"Save a resumable checkpoint.","docstring_tokens":["Save","a","resumable","checkpoint","."],"function":"def _save(self, step):\n \"\"\" Save a resumable checkpoint.\n\n Args:\n step (int): step number\n\n Returns:\n 
checkpoint: the saved object\n checkpoint_name: name (or path) of the saved checkpoint\n \"\"\"\n raise NotImplementedError()","function_tokens":["def","_save","(","self",",","step",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/models\/model_saver.py#L63-L73"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/models\/model_saver.py","language":"python","identifier":"ModelSaverBase._rm_checkpoint","parameters":"(self, name)","argument_list":"","return_statement":"","docstring":"Remove a checkpoint\n\n Args:\n name(str): name that identifies the checkpoint\n (it may be a filepath)","docstring_summary":"Remove a checkpoint","docstring_tokens":["Remove","a","checkpoint"],"function":"def _rm_checkpoint(self, name):\n \"\"\"\n Remove a checkpoint\n\n Args:\n name(str): name that identifies the checkpoint\n (it may be a filepath)\n \"\"\"\n raise NotImplementedError()","function_tokens":["def","_rm_checkpoint","(","self",",","name",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/models\/model_saver.py#L75-L83"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/cnn_factory.py","language":"python","identifier":"shape_transform","parameters":"(x)","argument_list":"","return_statement":"return torch.unsqueeze(torch.transpose(x, 1, 2), 3)","docstring":"Transform the size of the tensors to fit for conv input.","docstring_summary":"Transform the size of the tensors to fit for conv input.","docstring_tokens":["Transform","the","size","of","the","tensors","to","fit","for","conv","input","."],"function":"def shape_transform(x):\n \"\"\" Transform the size of the tensors to fit for conv input. 
\"\"\"\n return torch.unsqueeze(torch.transpose(x, 1, 2), 3)","function_tokens":["def","shape_transform","(","x",")",":","return","torch",".","unsqueeze","(","torch",".","transpose","(","x",",","1",",","2",")",",","3",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/cnn_factory.py#L14-L16"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"build_optim","parameters":"(model, opt, checkpoint)","argument_list":"","return_statement":"return optim","docstring":"Build optimizer","docstring_summary":"Build optimizer","docstring_tokens":["Build","optimizer"],"function":"def build_optim(model, opt, checkpoint):\n \"\"\" Build optimizer \"\"\"\n saved_optimizer_state_dict = None\n\n if opt.train_from:\n optim = checkpoint['optim']\n # We need to save a copy of optim.optimizer.state_dict() for setting\n # the, optimizer state later on in Stage 2 in this method, since\n # the method optim.set_parameters(model.parameters()) will overwrite\n # optim.optimizer, and with ith the values stored in\n # optim.optimizer.state_dict()\n saved_optimizer_state_dict = optim.optimizer.state_dict()\n else:\n optim = Optimizer(\n opt.optim, opt.learning_rate, opt.max_grad_norm,\n lr_decay=opt.learning_rate_decay,\n start_decay_steps=opt.start_decay_steps,\n decay_steps=opt.decay_steps,\n beta1=opt.adam_beta1,\n beta2=opt.adam_beta2,\n adagrad_accum=opt.adagrad_accumulator_init,\n decay_method=opt.decay_method,\n warmup_steps=opt.warmup_steps,\n model_size=opt.rnn_size)\n\n # Stage 1:\n # Essentially optim.set_parameters (re-)creates and optimizer using\n # model.paramters() as parameters that will be stored in the\n # optim.optimizer.param_groups field of the torch optimizer class.\n # Importantly, this method does not yet load the optimizer state, as\n # essentially it builds a new optimizer with empty optimizer state and\n # parameters from the model.\n optim.set_parameters(model.named_parameters())\n\n if opt.train_from:\n # Stage 2: In this stage, which is only performed when loading an\n # optimizer from a checkpoint, we load the saved_optimizer_state_dict\n # into the re-created optimizer, to set the optim.optimizer.state\n # field, which was previously empty. For this, we use the optimizer\n # state saved in the \"saved_optimizer_state_dict\" variable for\n # this purpose.\n # See also: https:\/\/github.com\/pytorch\/pytorch\/issues\/2830\n optim.optimizer.load_state_dict(saved_optimizer_state_dict)\n # Convert back the state values to cuda type if applicable\n if use_gpu(opt):\n for state in optim.optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()\n\n # We want to make sure that indeed we have a non-empty optimizer state\n # when we loaded an existing model. 
This should be at least the case\n # for Adam, which saves \"exp_avg\" and \"exp_avg_sq\" state\n # (Exponential moving average of gradient and squared gradient values)\n if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):\n raise RuntimeError(\n \"Error: loaded Adam optimizer from existing model\" +\n \" but optimizer state is empty\")\n\n return optim","function_tokens":["def","build_optim","(","model",",","opt",",","checkpoint",")",":","saved_optimizer_state_dict","=","None","if","opt",".","train_from",":","optim","=","checkpoint","[","'optim'","]","# We need to save a copy of optim.optimizer.state_dict() for setting","# the optimizer state later on in Stage 2 in this method, since","# the method optim.set_parameters(model.parameters()) will overwrite","# optim.optimizer, with the values stored in","# optim.optimizer.state_dict()","saved_optimizer_state_dict","=","optim",".","optimizer",".","state_dict","(",")","else",":","optim","=","Optimizer","(","opt",".","optim",",","opt",".","learning_rate",",","opt",".","max_grad_norm",",","lr_decay","=","opt",".","learning_rate_decay",",","start_decay_steps","=","opt",".","start_decay_steps",",","decay_steps","=","opt",".","decay_steps",",","beta1","=","opt",".","adam_beta1",",","beta2","=","opt",".","adam_beta2",",","adagrad_accum","=","opt",".","adagrad_accumulator_init",",","decay_method","=","opt",".","decay_method",",","warmup_steps","=","opt",".","warmup_steps",",","model_size","=","opt",".","rnn_size",")","# Stage 1:","# Essentially optim.set_parameters (re-)creates an optimizer using","# model.parameters() as parameters that will be stored in the","# optim.optimizer.param_groups field of the torch optimizer class.","# Importantly, this method does not yet load the optimizer state, as","# essentially it builds a new optimizer with empty optimizer state and","# parameters from the model.","optim",".","set_parameters","(","model",".","named_parameters","(",")",")","if","opt",".","train_from",":","# Stage 2: In this stage, which is only performed when loading an","# optimizer from a checkpoint, we load the saved_optimizer_state_dict","# into the re-created optimizer, to set the optim.optimizer.state","# field, which was previously empty. For this, we use the optimizer","# state saved in the \"saved_optimizer_state_dict\" variable for","# this purpose.","# See also: https:\/\/github.com\/pytorch\/pytorch\/issues\/2830","optim",".","optimizer",".","load_state_dict","(","saved_optimizer_state_dict",")","# Convert back the state values to cuda type if applicable","if","use_gpu","(","opt",")",":","for","state","in","optim",".","optimizer",".","state",".","values","(",")",":","for","k",",","v","in","state",".","items","(",")",":","if","torch",".","is_tensor","(","v",")",":","state","[","k","]","=","v",".","cuda","(",")","# We want to make sure that indeed we have a non-empty optimizer state","# when we loaded an existing model. 
This should be at least the case","# for Adam, which saves \"exp_avg\" and \"exp_avg_sq\" state","# (Exponential moving average of gradient and squared gradient values)","if","(","optim",".","method","==","'adam'",")","and","(","len","(","optim",".","optimizer",".","state",")","<","1",")",":","raise","RuntimeError","(","\"Error: loaded Adam optimizer from existing model\"","+","\" but optimizer state is empty\"",")","return","optim"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L9-L68"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.__init__","parameters":"(self, op)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def __init__(self, op):\n \"\"\" ? \"\"\"\n self.optimizers = op","function_tokens":["def","__init__","(","self",",","op",")",":","self",".","optimizers","=","op"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L74-L76"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.zero_grad","parameters":"(self)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def zero_grad(self):\n \"\"\" ? \"\"\"\n for op in self.optimizers:\n op.zero_grad()","function_tokens":["def","zero_grad","(","self",")",":","for","op","in","self",".","optimizers",":","op",".","zero_grad","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L78-L81"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.step","parameters":"(self)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def step(self):\n \"\"\" ? \"\"\"\n for op in self.optimizers:\n op.step()","function_tokens":["def","step","(","self",")",":","for","op","in","self",".","optimizers",":","op",".","step","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L83-L86"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.state","parameters":"(self)","argument_list":"","return_statement":"return {k: v for op in self.optimizers for k, v in op.state.items()}","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def state(self):\n \"\"\" ? 
\"\"\"\n return {k: v for op in self.optimizers for k, v in op.state.items()}","function_tokens":["def","state","(","self",")",":","return","{","k",":","v","for","op","in","self",".","optimizers","for","k",",","v","in","op",".","state",".","items","(",")","}"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L89-L91"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.state_dict","parameters":"(self)","argument_list":"","return_statement":"return [op.state_dict() for op in self.optimizers]","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def state_dict(self):\n \"\"\" ? \"\"\"\n return [op.state_dict() for op in self.optimizers]","function_tokens":["def","state_dict","(","self",")",":","return","[","op",".","state_dict","(",")","for","op","in","self",".","optimizers","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L93-L95"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"MultipleOptimizer.load_state_dict","parameters":"(self, state_dicts)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def load_state_dict(self, state_dicts):\n \"\"\" ? \"\"\"\n assert len(state_dicts) == len(self.optimizers)\n for i in range(len(state_dicts)):\n self.optimizers[i].load_state_dict(state_dicts[i])","function_tokens":["def","load_state_dict","(","self",",","state_dicts",")",":","assert","len","(","state_dicts",")","==","len","(","self",".","optimizers",")","for","i","in","range","(","len","(","state_dicts",")",")",":","self",".","optimizers","[","i","]",".","load_state_dict","(","state_dicts","[","i","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L97-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"Optimizer.set_parameters","parameters":"(self, params)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def set_parameters(self, params):\n \"\"\" ? 
\"\"\"\n self.params = []\n self.sparse_params = []\n for k, p in params:\n if p.requires_grad:\n if self.method != 'sparseadam' or \"embed\" not in k:\n self.params.append(p)\n else:\n self.sparse_params.append(p)\n if self.method == 'sgd':\n self.optimizer = optim.SGD(self.params, lr=self.learning_rate)\n elif self.method == 'adagrad':\n self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)\n for group in self.optimizer.param_groups:\n for p in group['params']:\n self.optimizer.state[p]['sum'] = self.optimizer\\\n .state[p]['sum'].fill_(self.adagrad_accum)\n elif self.method == 'adadelta':\n self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)\n elif self.method == 'adam':\n self.optimizer = optim.Adam(self.params, lr=self.learning_rate,\n betas=self.betas, eps=1e-9)\n elif self.method == 'sparseadam':\n self.optimizer = MultipleOptimizer(\n [optim.Adam(self.params, lr=self.learning_rate,\n betas=self.betas, eps=1e-8),\n optim.SparseAdam(self.sparse_params, lr=self.learning_rate,\n betas=self.betas, eps=1e-8)])\n else:\n raise RuntimeError(\"Invalid optim method: \" + self.method)","function_tokens":["def","set_parameters","(","self",",","params",")",":","self",".","params","=","[","]","self",".","sparse_params","=","[","]","for","k",",","p","in","params",":","if","p",".","requires_grad",":","if","self",".","method","!=","'sparseadam'","or","\"embed\"","not","in","k",":","self",".","params",".","append","(","p",")","else",":","self",".","sparse_params",".","append","(","p",")","if","self",".","method","==","'sgd'",":","self",".","optimizer","=","optim",".","SGD","(","self",".","params",",","lr","=","self",".","learning_rate",")","elif","self",".","method","==","'adagrad'",":","self",".","optimizer","=","optim",".","Adagrad","(","self",".","params",",","lr","=","self",".","learning_rate",")","for","group","in","self",".","optimizer",".","param_groups",":","for","p","in","group","[","'params'","]",":","self",".","optimizer",".","state","[","p","]","[","'sum'","]","=","self",".","optimizer",".","state","[","p","]","[","'sum'","]",".","fill_","(","self",".","adagrad_accum",")","elif","self",".","method","==","'adadelta'",":","self",".","optimizer","=","optim",".","Adadelta","(","self",".","params",",","lr","=","self",".","learning_rate",")","elif","self",".","method","==","'adam'",":","self",".","optimizer","=","optim",".","Adam","(","self",".","params",",","lr","=","self",".","learning_rate",",","betas","=","self",".","betas",",","eps","=","1e-9",")","elif","self",".","method","==","'sparseadam'",":","self",".","optimizer","=","MultipleOptimizer","(","[","optim",".","Adam","(","self",".","params",",","lr","=","self",".","learning_rate",",","betas","=","self",".","betas",",","eps","=","1e-8",")",",","optim",".","SparseAdam","(","self",".","sparse_params",",","lr","=","self",".","learning_rate",",","betas","=","self",".","betas",",","eps","=","1e-8",")","]",")","else",":","raise","RuntimeError","(","\"Invalid optim method: \"","+","self",".","method",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L158-L188"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/optimizers.py","language":"python","identifier":"Optimizer.step","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Update the model parameters based on current gradients.\n\n Optionally, will employ gradient modification 
or update learning\n rate.","docstring_summary":"Update the model parameters based on current gradients.","docstring_tokens":["Update","the","model","parameters","based","on","current","gradients","."],"function":"def step(self):\n \"\"\"Update the model parameters based on current gradients.\n\n Optionally, will employ gradient modification or update learning\n rate.\n \"\"\"\n self._step += 1\n\n # Decay method used in tensor2tensor.\n if self.decay_method == \"noam\":\n self._set_rate(\n self.original_lr *\n (self.model_size ** (-0.5) *\n min(self._step ** (-0.5),\n self._step * self.warmup_steps**(-1.5))))\n # Decay based on start_decay_steps every decay_steps\n else:\n if ((self.start_decay_steps is not None) and (\n self._step >= self.start_decay_steps)):\n self.start_decay = True\n if self.start_decay:\n if ((self._step - self.start_decay_steps)\n % self.decay_steps == 0):\n self.learning_rate = self.learning_rate * self.lr_decay\n\n if self.method != 'sparseadam':\n self.optimizer.param_groups[0]['lr'] = self.learning_rate\n\n if self.max_grad_norm:\n clip_grad_norm_(self.params, self.max_grad_norm)\n self.optimizer.step()","function_tokens":["def","step","(","self",")",":","self",".","_step","+=","1","# Decay method used in tensor2tensor.","if","self",".","decay_method","==","\"noam\"",":","self",".","_set_rate","(","self",".","original_lr","*","(","self",".","model_size","**","(","-","0.5",")","*","min","(","self",".","_step","**","(","-","0.5",")",",","self",".","_step","*","self",".","warmup_steps","**","(","-","1.5",")",")",")",")","# Decay based on start_decay_steps every decay_steps","else",":","if","(","(","self",".","start_decay_steps","is","not","None",")","and","(","self",".","_step",">=","self",".","start_decay_steps",")",")",":","self",".","start_decay","=","True","if","self",".","start_decay",":","if","(","(","self",".","_step","-","self",".","start_decay_steps",")","%","self",".","decay_steps","==","0",")",":","self",".","learning_rate","=","self",".","learning_rate","*","self",".","lr_decay","if","self",".","method","!=","'sparseadam'",":","self",".","optimizer",".","param_groups","[","0","]","[","'lr'","]","=","self",".","learning_rate","if","self",".","max_grad_norm",":","clip_grad_norm_","(","self",".","params",",","self",".","max_grad_norm",")","self",".","optimizer",".","step","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/optimizers.py#L198-L228"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.all_gather_stats","parameters":"(stat, max_size=4096)","argument_list":"","return_statement":"return stats[0]","docstring":"Gather a `Statistics` object across multiple process\/nodes\n\n Args:\n stat(:obj:Statistics): the statistics object to gather\n across all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n `Statistics`, the updated stats object","docstring_summary":"Gather a `Statistics` object across multiple process\/nodes","docstring_tokens":["Gather","a","Statistics","object","across","multiple","process","\/","nodes"],"function":"def all_gather_stats(stat, max_size=4096):\n \"\"\"\n Gather a `Statistics` object across multiple process\/nodes\n\n Args:\n stat(:obj:Statistics): the statistics object to gather\n across all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n `Statistics`, the updated 
stats object\n \"\"\"\n stats = Statistics.all_gather_stats_list([stat], max_size=max_size)\n return stats[0]","function_tokens":["def","all_gather_stats","(","stat",",","max_size","=","4096",")",":","stats","=","Statistics",".","all_gather_stats_list","(","[","stat","]",",","max_size","=","max_size",")","return","stats","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L30-L43"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.all_gather_stats_list","parameters":"(stat_list, max_size=4096)","argument_list":"","return_statement":"return our_stats","docstring":"Gather a `Statistics` list accross all processes\/nodes\n\n Args:\n stat_list(list([`Statistics`])): list of statistics objects to\n gather accross all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n our_stats(list([`Statistics`])): list of updated stats","docstring_summary":"Gather a `Statistics` list accross all processes\/nodes","docstring_tokens":["Gather","a","Statistics","list","accross","all","processes","\/","nodes"],"function":"def all_gather_stats_list(stat_list, max_size=4096):\n \"\"\"\n Gather a `Statistics` list accross all processes\/nodes\n\n Args:\n stat_list(list([`Statistics`])): list of statistics objects to\n gather accross all processes\/nodes\n max_size(int): max buffer size to use\n\n Returns:\n our_stats(list([`Statistics`])): list of updated stats\n \"\"\"\n # Get a list of world_size lists with len(stat_list) Statistics objects\n all_stats = all_gather_list(stat_list, max_size=max_size)\n\n our_rank = get_rank()\n our_stats = all_stats[our_rank]\n for other_rank, stats in enumerate(all_stats):\n if other_rank == our_rank:\n continue\n for i, stat in enumerate(stats):\n our_stats[i].update(stat, update_n_src_words=True)\n return our_stats","function_tokens":["def","all_gather_stats_list","(","stat_list",",","max_size","=","4096",")",":","# Get a list of world_size lists with len(stat_list) Statistics objects","all_stats","=","all_gather_list","(","stat_list",",","max_size","=","max_size",")","our_rank","=","get_rank","(",")","our_stats","=","all_stats","[","our_rank","]","for","other_rank",",","stats","in","enumerate","(","all_stats",")",":","if","other_rank","==","our_rank",":","continue","for","i",",","stat","in","enumerate","(","stats",")",":","our_stats","[","i","]",".","update","(","stat",",","update_n_src_words","=","True",")","return","our_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L46-L68"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.update","parameters":"(self, stat, update_n_src_words=False)","argument_list":"","return_statement":"","docstring":"Update statistics by suming values with another `Statistics` object\n\n Args:\n stat: another statistic object\n update_n_src_words(bool): whether to update (sum) `n_src_words`\n or not","docstring_summary":"Update statistics by suming values with another `Statistics` object","docstring_tokens":["Update","statistics","by","suming","values","with","another","Statistics","object"],"function":"def update(self, stat, update_n_src_words=False):\n \"\"\"\n Update statistics 
by suming values with another `Statistics` object\n\n Args:\n stat: another statistic object\n update_n_src_words(bool): whether to update (sum) `n_src_words`\n or not\n\n \"\"\"\n self.loss += stat.loss\n self.n_words += stat.n_words\n self.n_correct += stat.n_correct\n\n if update_n_src_words:\n self.n_src_words += stat.n_src_words","function_tokens":["def","update","(","self",",","stat",",","update_n_src_words","=","False",")",":","self",".","loss","+=","stat",".","loss","self",".","n_words","+=","stat",".","n_words","self",".","n_correct","+=","stat",".","n_correct","if","update_n_src_words",":","self",".","n_src_words","+=","stat",".","n_src_words"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L70-L85"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.accuracy","parameters":"(self)","argument_list":"","return_statement":"return 100 * (self.n_correct \/ self.n_words)","docstring":"compute accuracy","docstring_summary":"compute accuracy","docstring_tokens":["compute","accuracy"],"function":"def accuracy(self):\n \"\"\" compute accuracy \"\"\"\n return 100 * (self.n_correct \/ self.n_words)","function_tokens":["def","accuracy","(","self",")",":","return","100","*","(","self",".","n_correct","\/","self",".","n_words",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L87-L89"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.xent","parameters":"(self)","argument_list":"","return_statement":"return self.loss \/ self.n_words","docstring":"compute cross entropy","docstring_summary":"compute cross entropy","docstring_tokens":["compute","cross","entropy"],"function":"def xent(self):\n \"\"\" compute cross entropy \"\"\"\n return self.loss \/ self.n_words","function_tokens":["def","xent","(","self",")",":","return","self",".","loss","\/","self",".","n_words"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L91-L93"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.ppl","parameters":"(self)","argument_list":"","return_statement":"return math.exp(min(self.loss \/ self.n_words, 100))","docstring":"compute perplexity","docstring_summary":"compute perplexity","docstring_tokens":["compute","perplexity"],"function":"def ppl(self):\n \"\"\" compute perplexity \"\"\"\n return math.exp(min(self.loss \/ self.n_words, 100))","function_tokens":["def","ppl","(","self",")",":","return","math",".","exp","(","min","(","self",".","loss","\/","self",".","n_words",",","100",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L95-L97"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.elapsed_time","parameters":"(self)","argument_list":"","return_statement":"return time.time() - self.start_time","docstring":"compute elapsed 
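[Editor's example] The Statistics accessors in the records above are simple ratios over accumulated counts. A worked example with made-up numbers, where loss is a summed negative log-likelihood:

import math

loss, n_words, n_correct = 6931.0, 1000, 620   # hypothetical accumulated counts
accuracy = 100 * (n_correct / n_words)         # 62.0
xent = loss / n_words                          # 6.931 nats per target word
ppl = math.exp(min(xent, 100))                 # ~1023; the min() caps overflow
print(accuracy, xent, ppl)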
time","docstring_summary":"compute elapsed time","docstring_tokens":["compute","elapsed","time"],"function":"def elapsed_time(self):\n \"\"\" compute elapsed time \"\"\"\n return time.time() - self.start_time","function_tokens":["def","elapsed_time","(","self",")",":","return","time",".","time","(",")","-","self",".","start_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L99-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.output","parameters":"(self, step, num_steps, learning_rate, start)","argument_list":"","return_statement":"","docstring":"Write out statistics to stdout.\n\n Args:\n step (int): current step\n n_batch (int): total batches\n start (int): start time of step.","docstring_summary":"Write out statistics to stdout.","docstring_tokens":["Write","out","statistics","to","stdout","."],"function":"def output(self, step, num_steps, learning_rate, start):\n \"\"\"Write out statistics to stdout.\n\n Args:\n step (int): current step\n n_batch (int): total batches\n start (int): start time of step.\n \"\"\"\n t = self.elapsed_time()\n logger.info(\n (\"Step %2d\/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \" +\n \"lr: %7.5f; %3.0f\/%3.0f tok\/s; %6.0f sec\")\n % (step, num_steps,\n self.accuracy(),\n self.ppl(),\n self.xent(),\n learning_rate,\n self.n_src_words \/ (t + 1e-5),\n self.n_words \/ (t + 1e-5),\n time.time() - start))\n sys.stdout.flush()","function_tokens":["def","output","(","self",",","step",",","num_steps",",","learning_rate",",","start",")",":","t","=","self",".","elapsed_time","(",")","logger",".","info","(","(","\"Step %2d\/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \"","+","\"lr: %7.5f; %3.0f\/%3.0f tok\/s; %6.0f sec\"",")","%","(","step",",","num_steps",",","self",".","accuracy","(",")",",","self",".","ppl","(",")",",","self",".","xent","(",")",",","learning_rate",",","self",".","n_src_words","\/","(","t","+","1e-5",")",",","self",".","n_words","\/","(","t","+","1e-5",")",",","time",".","time","(",")","-","start",")",")","sys",".","stdout",".","flush","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L103-L123"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/statistics.py","language":"python","identifier":"Statistics.log_tensorboard","parameters":"(self, prefix, writer, learning_rate, step)","argument_list":"","return_statement":"","docstring":"display statistics to tensorboard","docstring_summary":"display statistics to tensorboard","docstring_tokens":["display","statistics","to","tensorboard"],"function":"def log_tensorboard(self, prefix, writer, learning_rate, step):\n \"\"\" display statistics to tensorboard \"\"\"\n t = self.elapsed_time()\n writer.add_scalar(prefix + \"\/xent\", self.xent(), step)\n writer.add_scalar(prefix + \"\/ppl\", self.ppl(), step)\n writer.add_scalar(prefix + \"\/accuracy\", self.accuracy(), step)\n writer.add_scalar(prefix + \"\/tgtper\", self.n_words \/ t, step)\n writer.add_scalar(prefix + \"\/lr\", learning_rate, 
step)","function_tokens":["def","log_tensorboard","(","self",",","prefix",",","writer",",","learning_rate",",","step",")",":","t","=","self",".","elapsed_time","(",")","writer",".","add_scalar","(","prefix","+","\"\/xent\"",",","self",".","xent","(",")",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/ppl\"",",","self",".","ppl","(",")",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/accuracy\"",",","self",".","accuracy","(",")",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/tgtper\"",",","self",".","n_words","\/","t",",","step",")","writer",".","add_scalar","(","prefix","+","\"\/lr\"",",","learning_rate",",","step",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/statistics.py#L125-L132"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/rnn_factory.py","language":"python","identifier":"rnn_factory","parameters":"(rnn_type, **kwargs)","argument_list":"","return_statement":"return rnn, no_pack_padded_seq","docstring":"rnn factory, Use pytorch version when available.","docstring_summary":"rnn factory, Use pytorch version when available.","docstring_tokens":["rnn","factory","Use","pytorch","version","when","available","."],"function":"def rnn_factory(rnn_type, **kwargs):\n \"\"\" rnn factory, Use pytorch version when available. \"\"\"\n no_pack_padded_seq = False\n if rnn_type == \"SRU\":\n # SRU doesn't support PackedSequence.\n no_pack_padded_seq = True\n rnn = onmt.models.sru.SRU(**kwargs)\n else:\n rnn = getattr(nn, rnn_type)(**kwargs)\n return rnn, no_pack_padded_seq","function_tokens":["def","rnn_factory","(","rnn_type",",","*","*","kwargs",")",":","no_pack_padded_seq","=","False","if","rnn_type","==","\"SRU\"",":","# SRU doesn't support PackedSequence.","no_pack_padded_seq","=","True","rnn","=","onmt",".","models",".","sru",".","SRU","(","*","*","kwargs",")","else",":","rnn","=","getattr","(","nn",",","rnn_type",")","(","*","*","kwargs",")","return","rnn",",","no_pack_padded_seq"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/rnn_factory.py#L10-L19"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/distributed.py","language":"python","identifier":"all_reduce_and_rescale_tensors","parameters":"(tensors, rescale_denom,\n buffer_size=10485760)","argument_list":"","return_statement":"","docstring":"All-reduce and rescale tensors in chunks of the specified size.\n\n Args:\n tensors: list of Tensors to all-reduce\n rescale_denom: denominator for rescaling summed Tensors\n buffer_size: all-reduce chunk size in bytes","docstring_summary":"All-reduce and rescale tensors in chunks of the specified size.","docstring_tokens":["All","-","reduce","and","rescale","tensors","in","chunks","of","the","specified","size","."],"function":"def all_reduce_and_rescale_tensors(tensors, rescale_denom,\n buffer_size=10485760):\n \"\"\"All-reduce and rescale tensors in chunks of the specified size.\n\n Args:\n tensors: list of Tensors to all-reduce\n rescale_denom: denominator for rescaling summed Tensors\n buffer_size: all-reduce chunk size in bytes\n \"\"\"\n # buffer size in bytes, determine equiv. 
# of elements based on data type\n buffer_t = tensors[0].new(\n math.ceil(buffer_size \/ tensors[0].element_size())).zero_()\n buffer = []\n\n def all_reduce_buffer():\n # copy tensors into buffer_t\n offset = 0\n for t in buffer:\n numel = t.numel()\n buffer_t[offset:offset+numel].copy_(t.view(-1))\n offset += numel\n\n # all-reduce and rescale\n torch.distributed.all_reduce(buffer_t[:offset])\n buffer_t.div_(rescale_denom)\n\n # copy all-reduced buffer back into tensors\n offset = 0\n for t in buffer:\n numel = t.numel()\n t.view(-1).copy_(buffer_t[offset:offset+numel])\n offset += numel\n\n filled = 0\n for t in tensors:\n sz = t.numel() * t.element_size()\n if sz > buffer_size:\n # tensor is bigger than buffer, all-reduce and rescale directly\n torch.distributed.all_reduce(t)\n t.div_(rescale_denom)\n elif filled + sz > buffer_size:\n # buffer is full, all-reduce and replace buffer with grad\n all_reduce_buffer()\n buffer = [t]\n filled = sz\n else:\n # add tensor to buffer\n buffer.append(t)\n filled += sz\n\n if len(buffer) > 0:\n all_reduce_buffer()","function_tokens":["def","all_reduce_and_rescale_tensors","(","tensors",",","rescale_denom",",","buffer_size","=","10485760",")",":","# buffer size in bytes, determine equiv. # of elements based on data type","buffer_t","=","tensors","[","0","]",".","new","(","math",".","ceil","(","buffer_size","\/","tensors","[","0","]",".","element_size","(",")",")",")",".","zero_","(",")","buffer","=","[","]","def","all_reduce_buffer","(",")",":","# copy tensors into buffer_t","offset","=","0","for","t","in","buffer",":","numel","=","t",".","numel","(",")","buffer_t","[","offset",":","offset","+","numel","]",".","copy_","(","t",".","view","(","-","1",")",")","offset","+=","numel","# all-reduce and rescale","torch",".","distributed",".","all_reduce","(","buffer_t","[",":","offset","]",")","buffer_t",".","div_","(","rescale_denom",")","# copy all-reduced buffer back into tensors","offset","=","0","for","t","in","buffer",":","numel","=","t",".","numel","(",")","t",".","view","(","-","1",")",".","copy_","(","buffer_t","[","offset",":","offset","+","numel","]",")","offset","+=","numel","filled","=","0","for","t","in","tensors",":","sz","=","t",".","numel","(",")","*","t",".","element_size","(",")","if","sz",">","buffer_size",":","# tensor is bigger than buffer, all-reduce and rescale directly","torch",".","distributed",".","all_reduce","(","t",")","t",".","div_","(","rescale_denom",")","elif","filled","+","sz",">","buffer_size",":","# buffer is full, all-reduce and replace buffer with grad","all_reduce_buffer","(",")","buffer","=","[","t","]","filled","=","sz","else",":","# add tensor to buffer","buffer",".","append","(","t",")","filled","+=","sz","if","len","(","buffer",")",">","0",":","all_reduce_buffer","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/distributed.py#L35-L86"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/distributed.py","language":"python","identifier":"all_gather_list","parameters":"(data, max_size=4096)","argument_list":"","return_statement":"return results","docstring":"Gathers arbitrary data from all nodes into a list.","docstring_summary":"Gathers arbitrary data from all nodes into a list.","docstring_tokens":["Gathers","arbitrary","data","from","all","nodes","into","a","list","."],"function":"def all_gather_list(data, max_size=4096):\n \"\"\"Gathers arbitrary data from all 
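[Editor's example] The buffering inside all_reduce_and_rescale_tensors amortizes one collective call over many small tensors by packing them into a flat staging buffer. A single-process sketch of the pack / rescale / unpack steps, with the torch.distributed.all_reduce call elided so it runs anywhere:

import torch

buffer = [torch.randn(3), torch.randn(2, 2)]   # hypothetical small gradients
buffer_t = torch.zeros(16)                      # flat staging buffer

offset = 0
for t in buffer:                                # copy tensors into buffer_t
    n = t.numel()
    buffer_t[offset:offset + n].copy_(t.view(-1))
    offset += n
# torch.distributed.all_reduce(buffer_t[:offset]) would run here
buffer_t.div_(4.0)                              # rescale by a hypothetical denominator

offset = 0
for t in buffer:                                # copy reduced values back in place
    n = t.numel()
    t.view(-1).copy_(buffer_t[offset:offset + n])
    offset += n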
nodes into a list.\"\"\"\n world_size = torch.distributed.get_world_size()\n if not hasattr(all_gather_list, '_in_buffer') or \\\n max_size != all_gather_list._in_buffer.size():\n all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)\n all_gather_list._out_buffers = [\n torch.cuda.ByteTensor(max_size)\n for i in range(world_size)\n ]\n in_buffer = all_gather_list._in_buffer\n out_buffers = all_gather_list._out_buffers\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n if enc_size + 2 > max_size:\n raise ValueError(\n 'encoded data exceeds max_size: {}'.format(enc_size + 2))\n assert max_size < 255*256\n in_buffer[0] = enc_size \/\/ 255 # this encoding works for max_size < 65k\n in_buffer[1] = enc_size % 255\n in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))\n\n torch.distributed.all_gather(out_buffers, in_buffer.cuda())\n\n results = []\n for i in range(world_size):\n out_buffer = out_buffers[i]\n size = (255 * out_buffer[0].item()) + out_buffer[1].item()\n\n bytes_list = bytes(out_buffer[2:size+2].tolist())\n result = pickle.loads(bytes_list)\n results.append(result)\n return results","function_tokens":["def","all_gather_list","(","data",",","max_size","=","4096",")",":","world_size","=","torch",".","distributed",".","get_world_size","(",")","if","not","hasattr","(","all_gather_list",",","'_in_buffer'",")","or","max_size","!=","all_gather_list",".","_in_buffer",".","size","(",")",":","all_gather_list",".","_in_buffer","=","torch",".","cuda",".","ByteTensor","(","max_size",")","all_gather_list",".","_out_buffers","=","[","torch",".","cuda",".","ByteTensor","(","max_size",")","for","i","in","range","(","world_size",")","]","in_buffer","=","all_gather_list",".","_in_buffer","out_buffers","=","all_gather_list",".","_out_buffers","enc","=","pickle",".","dumps","(","data",")","enc_size","=","len","(","enc",")","if","enc_size","+","2",">","max_size",":","raise","ValueError","(","'encoded data exceeds max_size: {}'",".","format","(","enc_size","+","2",")",")","assert","max_size","<","255","*","256","in_buffer","[","0","]","=","enc_size","\/\/","255","# this encoding works for max_size < 65k","in_buffer","[","1","]","=","enc_size","%","255","in_buffer","[","2",":","enc_size","+","2","]","=","torch",".","ByteTensor","(","list","(","enc",")",")","torch",".","distributed",".","all_gather","(","out_buffers",",","in_buffer",".","cuda","(",")",")","results","=","[","]","for","i","in","range","(","world_size",")",":","out_buffer","=","out_buffers","[","i","]","size","=","(","255","*","out_buffer","[","0","]",".","item","(",")",")","+","out_buffer","[","1","]",".","item","(",")","bytes_list","=","bytes","(","out_buffer","[","2",":","size","+","2","]",".","tolist","(",")",")","result","=","pickle",".","loads","(","bytes_list",")","results",".","append","(","result",")","return","results"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/distributed.py#L89-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"build_loss_compute","parameters":"(model, tgt_vocab, opt, train=True)","argument_list":"","return_statement":"return compute","docstring":"This returns user-defined LossCompute object, which is used to\n compute loss in train\/validate process. 
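[Editor's example] all_gather_list above prefixes each pickled payload with a two-byte size header. The decode on the receiving side inverts the encode exactly, because any enc_size below the asserted bound satisfies enc_size == 255 * (enc_size // 255) + enc_size % 255:

enc_size = 5000                            # hypothetical pickled payload size
b0, b1 = enc_size // 255, enc_size % 255   # both fit in a byte while enc_size < 65280
assert 255 * b0 + b1 == enc_size
print(b0, b1)                              # 19 155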
You can implement your\n own *LossCompute class, by subclassing LossComputeBase.","docstring_summary":"This returns user-defined LossCompute object, which is used to\n compute loss in train\/validate process. You can implement your\n own *LossCompute class, by subclassing LossComputeBase.","docstring_tokens":["This","returns","user","-","defined","LossCompute","object","which","is","used","to","compute","loss","in","train","\/","validate","process",".","You","can","implement","your","own","*","LossCompute","class","by","subclassing","LossComputeBase","."],"function":"def build_loss_compute(model, tgt_vocab, opt, train=True):\n \"\"\"\n This returns user-defined LossCompute object, which is used to\n compute loss in train\/validate process. You can implement your\n own *LossCompute class, by subclassing LossComputeBase.\n \"\"\"\n device = torch.device(\"cuda\" if onmt.utils.misc.use_gpu(opt) else \"cpu\")\n\n if opt.copy_attn:\n compute = onmt.modules.CopyGeneratorLossCompute(\n model.generator, tgt_vocab, opt.copy_attn_force,\n opt.copy_loss_by_seqlength)\n else:\n compute = NMTLossCompute(\n model.generator, tgt_vocab,\n label_smoothing=opt.label_smoothing if train else 0.0)\n compute.to(device)\n\n return compute","function_tokens":["def","build_loss_compute","(","model",",","tgt_vocab",",","opt",",","train","=","True",")",":","device","=","torch",".","device","(","\"cuda\"","if","onmt",".","utils",".","misc",".","use_gpu","(","opt",")","else","\"cpu\"",")","if","opt",".","copy_attn",":","compute","=","onmt",".","modules",".","CopyGeneratorLossCompute","(","model",".","generator",",","tgt_vocab",",","opt",".","copy_attn_force",",","opt",".","copy_loss_by_seqlength",")","else",":","compute","=","NMTLossCompute","(","model",".","generator",",","tgt_vocab",",","label_smoothing","=","opt",".","label_smoothing","if","train","else","0.0",")","compute",".","to","(","device",")","return","compute"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L17-L35"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"filter_shard_state","parameters":"(state, shard_size=None)","argument_list":"","return_statement":"","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def filter_shard_state(state, shard_size=None):\n \"\"\" ? 
\"\"\"\n for k, v in state.items():\n if shard_size is None:\n yield k, v\n\n if v is not None:\n v_split = []\n if isinstance(v, torch.Tensor):\n for v_chunk in torch.split(v, shard_size):\n v_chunk = v_chunk.data.clone()\n v_chunk.requires_grad = v.requires_grad\n v_split.append(v_chunk)\n yield k, (v, v_split)","function_tokens":["def","filter_shard_state","(","state",",","shard_size","=","None",")",":","for","k",",","v","in","state",".","items","(",")",":","if","shard_size","is","None",":","yield","k",",","v","if","v","is","not","None",":","v_split","=","[","]","if","isinstance","(","v",",","torch",".","Tensor",")",":","for","v_chunk","in","torch",".","split","(","v",",","shard_size",")",":","v_chunk","=","v_chunk",".","data",".","clone","(",")","v_chunk",".","requires_grad","=","v",".","requires_grad","v_split",".","append","(","v_chunk",")","yield","k",",","(","v",",","v_split",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L252-L265"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"shards","parameters":"(state, shard_size, eval_only=False)","argument_list":"","return_statement":"","docstring":"Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.","docstring_summary":"Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.","docstring_tokens":["Args",":","state",":","A","dictionary","which","corresponds","to","the","output","of","*","LossCompute",".","_make_shard_state","()",".","The","values","for","those","keys","are","Tensor","-","like","or","None",".","shard_size",":","The","maximum","size","of","the","shards","yielded","by","the","model",".","eval_only",":","If","True","only","yield","the","state","nothing","else",".","Otherwise","yield","shards","."],"function":"def shards(state, shard_size, eval_only=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). 
The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval_only:\n yield filter_shard_state(state)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state, shard_size))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, [v_chunk for v_chunk in v_split])\n for k, (_, v_split) in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = []\n for k, (v, v_split) in non_none.items():\n if isinstance(v, torch.Tensor) and state[k].requires_grad:\n variables.extend(zip(torch.split(state[k], shard_size),\n [v_chunk.grad for v_chunk in v_split]))\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads)","function_tokens":["def","shards","(","state",",","shard_size",",","eval_only","=","False",")",":","if","eval_only",":","yield","filter_shard_state","(","state",")","else",":","# non_none: the subdict of the state dictionary where the values","# are not None.","non_none","=","dict","(","filter_shard_state","(","state",",","shard_size",")",")","# Now, the iteration:","# state is a dictionary of sequences of tensor-like but we","# want a sequence of dictionaries of tensors.","# First, unzip the dictionary into a sequence of keys and a","# sequence of tensor-like sequences.","keys",",","values","=","zip","(","*","(","(","k",",","[","v_chunk","for","v_chunk","in","v_split","]",")","for","k",",","(","_",",","v_split",")","in","non_none",".","items","(",")",")",")","# Now, yield a dictionary for each shard. The keys are always","# the same. values is a sequence of length #keys where each","# element is a sequence of length #shards. 
We want to iterate","# over the shards, not over the keys: therefore, the values need","# to be re-zipped by shard and then each shard can be paired","# with the keys.","for","shard_tensors","in","zip","(","*","values",")",":","yield","dict","(","zip","(","keys",",","shard_tensors",")",")","# Assumed backprop'd","variables","=","[","]","for","k",",","(","v",",","v_split",")","in","non_none",".","items","(",")",":","if","isinstance","(","v",",","torch",".","Tensor",")","and","state","[","k","]",".","requires_grad",":","variables",".","extend","(","zip","(","torch",".","split","(","state","[","k","]",",","shard_size",")",",","[","v_chunk",".","grad","for","v_chunk","in","v_split","]",")",")","inputs",",","grads","=","zip","(","*","variables",")","torch",".","autograd",".","backward","(","inputs",",","grads",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L268-L315"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase._make_shard_state","parameters":"(self, batch, output, range_, attns=None)","argument_list":"","return_statement":"return NotImplementedError","docstring":"Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.","docstring_summary":"Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.","docstring_tokens":["Make","shard","state","dictionary","for","shards","()","to","return","iterable","shards","for","efficient","loss","computation",".","Subclass","must","define","this","method","to","match","its","own","_compute_loss","()","interface",".","Args",":","batch",":","the","current","batch",".","output",":","the","predict","output","from","the","model",".","range_",":","the","range","of","examples","for","computing","the","whole","batch","or","a","trunc","of","it?","attns",":","the","attns","dictionary","returned","from","the","model","."],"function":"def _make_shard_state(self, batch, output, range_, attns=None):\n \"\"\"\n Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. 
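[Editor's example] shards() above does two things worth seeing in miniature: it re-zips a dict of per-key chunk lists into one dict per shard, and, after the caller has back-propagated each shard's loss, it routes the chunk gradients back to the original tensors with a single torch.autograd.backward call. Both steps, on tiny placeholder data:

import torch

# Re-zipping: dict of shard lists -> one dict per shard.
keys = ("output", "target")
values = (["o0", "o1"], ["t0", "t1"])
print([dict(zip(keys, s)) for s in zip(*values)])
# [{'output': 'o0', 'target': 't0'}, {'output': 'o1', 'target': 't1'}]

# Deferred backward: grads collected on detached chunks flow back to x.
x = torch.randn(6, requires_grad=True)
chunks = [c.data.clone().requires_grad_() for c in torch.split(x, 3)]
for c in chunks:
    (c * c).sum().backward()          # per-shard backward fills c.grad
torch.autograd.backward(list(torch.split(x, 3)), [c.grad for c in chunks])
print(x.grad)                          # equals 2 * x, as if backprop were monolithic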
Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.\n \"\"\"\n return NotImplementedError","function_tokens":["def","_make_shard_state","(","self",",","batch",",","output",",","range_",",","attns","=","None",")",":","return","NotImplementedError"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L64-L76"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase._compute_loss","parameters":"(self, batch, output, target, **kwargs)","argument_list":"","return_statement":"return NotImplementedError","docstring":"Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.","docstring_summary":"Compute the loss. Subclass must define this method.","docstring_tokens":["Compute","the","loss",".","Subclass","must","define","this","method","."],"function":"def _compute_loss(self, batch, output, target, **kwargs):\n \"\"\"\n Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.\n \"\"\"\n return NotImplementedError","function_tokens":["def","_compute_loss","(","self",",","batch",",","output",",","target",",","*","*","kwargs",")",":","return","NotImplementedError"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L78-L89"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase.monolithic_compute_loss","parameters":"(self, batch, output, attns)","argument_list":"","return_statement":"return batch_stats","docstring":"Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n Returns:\n :obj:`onmt.utils.Statistics`: loss statistics","docstring_summary":"Compute the forward loss for the batch.","docstring_tokens":["Compute","the","forward","loss","for","the","batch","."],"function":"def monolithic_compute_loss(self, batch, output, attns):\n \"\"\"\n Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n Returns:\n :obj:`onmt.utils.Statistics`: loss statistics\n \"\"\"\n range_ = (0, batch.tgt.size(0))\n shard_state = self._make_shard_state(batch, output, range_, attns)\n _, batch_stats = self._compute_loss(batch, **shard_state)\n\n return 
batch_stats","function_tokens":["def","monolithic_compute_loss","(","self",",","batch",",","output",",","attns",")",":","range_","=","(","0",",","batch",".","tgt",".","size","(","0",")",")","shard_state","=","self",".","_make_shard_state","(","batch",",","output",",","range_",",","attns",")","_",",","batch_stats","=","self",".","_compute_loss","(","batch",",","*","*","shard_state",")","return","batch_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L91-L109"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase.sharded_compute_loss","parameters":"(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization)","argument_list":"","return_statement":"return batch_stats","docstring":"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.utils.Statistics`: validation loss statistics","docstring_summary":"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.","docstring_tokens":["Compute","the","forward","loss","and","backpropagate",".","Computation","is","done","with","shards","and","optionally","truncation","for","memory","efficiency","."],"function":"def sharded_compute_loss(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization):\n \"\"\"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. 
Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.utils.Statistics`: validation loss statistics\n\n \"\"\"\n batch_stats = onmt.utils.Statistics()\n range_ = (cur_trunc, cur_trunc + trunc_size)\n shard_state = self._make_shard_state(batch, output, range_, attns)\n for shard in shards(shard_state, shard_size):\n loss, stats = self._compute_loss(batch, **shard)\n loss.div(float(normalization)).backward()\n batch_stats.update(stats)\n\n return batch_stats","function_tokens":["def","sharded_compute_loss","(","self",",","batch",",","output",",","attns",",","cur_trunc",",","trunc_size",",","shard_size",",","normalization",")",":","batch_stats","=","onmt",".","utils",".","Statistics","(",")","range_","=","(","cur_trunc",",","cur_trunc","+","trunc_size",")","shard_state","=","self",".","_make_shard_state","(","batch",",","output",",","range_",",","attns",")","for","shard","in","shards","(","shard_state",",","shard_size",")",":","loss",",","stats","=","self",".","_compute_loss","(","batch",",","*","*","shard",")","loss",".","div","(","float","(","normalization",")",")",".","backward","(",")","batch_stats",".","update","(","stats",")","return","batch_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L111-L149"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"LossComputeBase._stats","parameters":"(self, loss, scores, target)","argument_list":"","return_statement":"return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)","docstring":"Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.","docstring_summary":"Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets","docstring_tokens":["Args",":","loss","(",":","obj",":","FloatTensor",")",":","the","loss","computed","by","the","loss","criterion",".","scores","(",":","obj",":","FloatTensor",")",":","a","score","for","each","possible","output","target","(",":","obj",":","FloatTensor",")",":","true","targets"],"function":"def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target) \\\n .masked_select(non_padding) \\\n .sum() \\\n .item()\n num_non_padding = non_padding.sum().item()\n return onmt.utils.Statistics(loss.item(), num_non_padding, 
num_correct)","function_tokens":["def","_stats","(","self",",","loss",",","scores",",","target",")",":","pred","=","scores",".","max","(","1",")","[","1","]","non_padding","=","target",".","ne","(","self",".","padding_idx",")","num_correct","=","pred",".","eq","(","target",")",".","masked_select","(","non_padding",")",".","sum","(",")",".","item","(",")","num_non_padding","=","non_padding",".","sum","(",")",".","item","(",")","return","onmt",".","utils",".","Statistics","(","loss",".","item","(",")",",","num_non_padding",",","num_correct",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L151-L168"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/loss.py","language":"python","identifier":"LabelSmoothingLoss.forward","parameters":"(self, output, target)","argument_list":"","return_statement":"return F.kl_div(output, model_prob, reduction='sum')","docstring":"output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size","docstring_summary":"output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size","docstring_tokens":["output","(","FloatTensor",")",":","batch_size","x","n_classes","target","(","LongTensor",")",":","batch_size"],"function":"def forward(self, output, target):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n model_prob = self.one_hot.repeat(target.size(0), 1)\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)\n\n return F.kl_div(output, model_prob, reduction='sum')","function_tokens":["def","forward","(","self",",","output",",","target",")",":","model_prob","=","self",".","one_hot",".","repeat","(","target",".","size","(","0",")",",","1",")","model_prob",".","scatter_","(","1",",","target",".","unsqueeze","(","1",")",",","self",".","confidence",")","model_prob",".","masked_fill_","(","(","target","==","self",".","padding_idx",")",".","unsqueeze","(","1",")",",","0",")","return","F",".","kl_div","(","output",",","model_prob",",","reduction","=","'sum'",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/loss.py#L195-L204"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/misc.py","language":"python","identifier":"aeq","parameters":"(*args)","argument_list":"","return_statement":"","docstring":"Assert all arguments have the same value","docstring_summary":"Assert all arguments have the same value","docstring_tokens":["Assert","all","arguments","have","the","same","value"],"function":"def aeq(*args):\n \"\"\"\n Assert all arguments have the same value\n \"\"\"\n arguments = (arg for arg in args)\n first = next(arguments)\n assert all(arg == first for arg in arguments), \\\n \"Not all arguments have the same value: \" + str(args)","function_tokens":["def","aeq","(","*","args",")",":","arguments","=","(","arg","for","arg","in","args",")","first","=","next","(","arguments",")","assert","all","(","arg","==","first","for","arg","in","arguments",")",",","\"Not all arguments have the same value: \"","+","str","(","args",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/misc.py#L6-L13"} 
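[Editor's example] LabelSmoothingLoss.forward above builds a smoothed target distribution per token. A small sketch, assuming the usual OpenNMT-style construction of self.one_hot (uniform smoothing mass over every class except the true target and padding); the sizes here are hypothetical:

import torch

n_classes, padding_idx, smoothing = 5, 0, 0.1
confidence = 1.0 - smoothing
one_hot = torch.full((n_classes,), smoothing / (n_classes - 2))
one_hot[padding_idx] = 0
target = torch.tensor([2, 0])                     # second position is padding
model_prob = one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), confidence)
model_prob.masked_fill_((target == padding_idx).unsqueeze(1), 0)
print(model_prob)   # row 0: 0.9 on class 2, smoothed elsewhere; row 1: all zeros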
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/misc.py","language":"python","identifier":"sequence_mask","parameters":"(lengths, max_len=None)","argument_list":"","return_statement":"return (torch.arange(0, max_len)\n .type_as(lengths)\n .repeat(batch_size, 1)\n .lt(lengths.unsqueeze(1)))","docstring":"Creates a boolean mask from sequence lengths.","docstring_summary":"Creates a boolean mask from sequence lengths.","docstring_tokens":["Creates","a","boolean","mask","from","sequence","lengths","."],"function":"def sequence_mask(lengths, max_len=None):\n \"\"\"\n Creates a boolean mask from sequence lengths.\n \"\"\"\n batch_size = lengths.numel()\n max_len = max_len or lengths.max()\n return (torch.arange(0, max_len)\n .type_as(lengths)\n .repeat(batch_size, 1)\n .lt(lengths.unsqueeze(1)))","function_tokens":["def","sequence_mask","(","lengths",",","max_len","=","None",")",":","batch_size","=","lengths",".","numel","(",")","max_len","=","max_len","or","lengths",".","max","(",")","return","(","torch",".","arange","(","0",",","max_len",")",".","type_as","(","lengths",")",".","repeat","(","batch_size",",","1",")",".","lt","(","lengths",".","unsqueeze","(","1",")",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/misc.py#L16-L25"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/misc.py","language":"python","identifier":"tile","parameters":"(x, count, dim=0)","argument_list":"","return_statement":"return x","docstring":"Tiles x on dimension dim count times.","docstring_summary":"Tiles x on dimension dim count times.","docstring_tokens":["Tiles","x","on","dimension","dim","count","times","."],"function":"def tile(x, count, dim=0):\n \"\"\"\n Tiles x on dimension dim count times.\n \"\"\"\n perm = list(range(len(x.size())))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.size())\n out_size[0] *= count\n batch = x.size(0)\n x = x.view(batch, -1) \\\n .transpose(0, 1) \\\n .repeat(count, 1) \\\n .transpose(0, 1) \\\n .contiguous() \\\n .view(*out_size)\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x","function_tokens":["def","tile","(","x",",","count",",","dim","=","0",")",":","perm","=","list","(","range","(","len","(","x",".","size","(",")",")",")",")","if","dim","!=","0",":","perm","[","0","]",",","perm","[","dim","]","=","perm","[","dim","]",",","perm","[","0","]","x","=","x",".","permute","(","perm",")",".","contiguous","(",")","out_size","=","list","(","x",".","size","(",")",")","out_size","[","0","]","*=","count","batch","=","x",".","size","(","0",")","x","=","x",".","view","(","batch",",","-","1",")",".","transpose","(","0",",","1",")",".","repeat","(","count",",","1",")",".","transpose","(","0",",","1",")",".","contiguous","(",")",".","view","(","*","out_size",")","if","dim","!=","0",":","x","=","x",".","permute","(","perm",")",".","contiguous","(",")","return","x"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/misc.py#L28-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/misc.py","language":"python","identifier":"use_gpu","parameters":"(opt)","argument_list":"","return_statement":"return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 
0) or \\\n (hasattr(opt, 'gpu') and opt.gpu > -1)","docstring":"Creates a boolean if gpu used","docstring_summary":"Creates a boolean if gpu used","docstring_tokens":["Creates","a","boolean","if","gpu","used"],"function":"def use_gpu(opt):\n \"\"\"\n Creates a boolean if gpu used\n \"\"\"\n return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \\\n (hasattr(opt, 'gpu') and opt.gpu > -1)","function_tokens":["def","use_gpu","(","opt",")",":","return","(","hasattr","(","opt",",","'gpu_ranks'",")","and","len","(","opt",".","gpu_ranks",")",">","0",")","or","(","hasattr","(","opt",",","'gpu'",")","and","opt",".","gpu",">","-","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/misc.py#L50-L55"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase.__init__","parameters":"(self, report_every, start_time=-1.)","argument_list":"","return_statement":"","docstring":"Args:\n report_every(int): Report status every this many sentences\n start_time(float): manually set report start time. Negative values\n means that you will need to set it later or use `start()`","docstring_summary":"Args:\n report_every(int): Report status every this many sentences\n start_time(float): manually set report start time. Negative values\n means that you will need to set it later or use `start()`","docstring_tokens":["Args",":","report_every","(","int",")",":","Report","status","every","this","many","sentences","start_time","(","float",")",":","manually","set","report","start","time",".","Negative","values","means","that","you","will","need","to","set","it","later","or","use","start","()"],"function":"def __init__(self, report_every, start_time=-1.):\n \"\"\"\n Args:\n report_every(int): Report status every this many sentences\n start_time(float): manually set report start time. 
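[Editor's example] sequence_mask from the record above turns a vector of lengths into a per-position boolean mask. Checked on a small batch:

import torch

lengths = torch.tensor([1, 3, 2])
max_len = int(lengths.max())
mask = (torch.arange(0, max_len)
        .type_as(lengths)
        .repeat(lengths.numel(), 1)
        .lt(lengths.unsqueeze(1)))
print(mask)
# tensor([[ True, False, False],
#         [ True,  True,  True],
#         [ True,  True, False]])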
Negative values\n means that you will need to set it later or use `start()`\n \"\"\"\n self.report_every = report_every\n self.progress_step = 0\n self.start_time = start_time","function_tokens":["def","__init__","(","self",",","report_every",",","start_time","=","-","1.",")",":","self",".","report_every","=","report_every","self",".","progress_step","=","0","self",".","start_time","=","start_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L33-L42"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase.report_training","parameters":"(self, step, num_steps, learning_rate,\n report_stats, multigpu=False)","argument_list":"","return_statement":"return onmt.utils.Statistics()","docstring":"This is the user-defined batch-level traing progress\n report function.\n\n Args:\n step(int): current step count.\n num_steps(int): total number of batches.\n learning_rate(float): current learning rate.\n report_stats(Statistics): old Statistics instance.\n Returns:\n report_stats(Statistics): updated Statistics instance.","docstring_summary":"This is the user-defined batch-level traing progress\n report function.","docstring_tokens":["This","is","the","user","-","defined","batch","-","level","traing","progress","report","function","."],"function":"def report_training(self, step, num_steps, learning_rate,\n report_stats, multigpu=False):\n \"\"\"\n This is the user-defined batch-level traing progress\n report function.\n\n Args:\n step(int): current step count.\n num_steps(int): total number of batches.\n learning_rate(float): current learning rate.\n report_stats(Statistics): old Statistics instance.\n Returns:\n report_stats(Statistics): updated Statistics instance.\n \"\"\"\n if self.start_time < 0:\n raise ValueError(\"\"\"ReportMgr needs to be started\n (set 'start_time' or use 'start()'\"\"\")\n\n if multigpu:\n report_stats = onmt.utils.Statistics.all_gather_stats(report_stats)\n\n if step % self.report_every == 0:\n self._report_training(\n step, num_steps, learning_rate, report_stats)\n self.progress_step += 1\n return onmt.utils.Statistics()","function_tokens":["def","report_training","(","self",",","step",",","num_steps",",","learning_rate",",","report_stats",",","multigpu","=","False",")",":","if","self",".","start_time","<","0",":","raise","ValueError","(","\"\"\"ReportMgr needs to be started\n (set 'start_time' or use 'start()'\"\"\"",")","if","multigpu",":","report_stats","=","onmt",".","utils",".","Statistics",".","all_gather_stats","(","report_stats",")","if","step","%","self",".","report_every","==","0",":","self",".","_report_training","(","step",",","num_steps",",","learning_rate",",","report_stats",")","self",".","progress_step","+=","1","return","onmt",".","utils",".","Statistics","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L50-L75"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase._report_training","parameters":"(self, *args, **kwargs)","argument_list":"","return_statement":"","docstring":"To be overridden","docstring_summary":"To be overridden","docstring_tokens":["To","be","overridden"],"function":"def 
_report_training(self, *args, **kwargs):\n \"\"\" To be overridden \"\"\"\n raise NotImplementedError()","function_tokens":["def","_report_training","(","self",",","*","args",",","*","*","kwargs",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L77-L79"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgrBase.report_step","parameters":"(self, lr, step, train_stats=None, valid_stats=None)","argument_list":"","return_statement":"","docstring":"Report stats of a step\n\n Args:\n train_stats(Statistics): training stats\n valid_stats(Statistics): validation stats\n lr(float): current learning rate","docstring_summary":"Report stats of a step","docstring_tokens":["Report","stats","of","a","step"],"function":"def report_step(self, lr, step, train_stats=None, valid_stats=None):\n \"\"\"\n Report stats of a step\n\n Args:\n train_stats(Statistics): training stats\n valid_stats(Statistics): validation stats\n lr(float): current learning rate\n \"\"\"\n self._report_step(\n lr, step, train_stats=train_stats, valid_stats=valid_stats)","function_tokens":["def","report_step","(","self",",","lr",",","step",",","train_stats","=","None",",","valid_stats","=","None",")",":","self",".","_report_step","(","lr",",","step",",","train_stats","=","train_stats",",","valid_stats","=","valid_stats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L81-L91"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgr.__init__","parameters":"(self, report_every, start_time=-1., tensorboard_writer=None)","argument_list":"","return_statement":"","docstring":"A report manager that writes statistics on standard output as well as\n (optionally) TensorBoard\n\n Args:\n report_every(int): Report status every this many sentences\n tensorboard_writer(:obj:`tensorboard.SummaryWriter`):\n The TensorBoard Summary writer to use or None","docstring_summary":"A report manager that writes statistics on standard output as well as\n (optionally) TensorBoard","docstring_tokens":["A","report","manager","that","writes","statistics","on","standard","output","as","well","as","(","optionally",")","TensorBoard"],"function":"def __init__(self, report_every, start_time=-1., tensorboard_writer=None):\n \"\"\"\n A report manager that writes statistics on standard output as well as\n (optionally) TensorBoard\n\n Args:\n report_every(int): Report status every this many sentences\n tensorboard_writer(:obj:`tensorboard.SummaryWriter`):\n The TensorBoard Summary writer to use or None\n \"\"\"\n super(ReportMgr, self).__init__(report_every, start_time)\n self.tensorboard_writer = tensorboard_writer","function_tokens":["def","__init__","(","self",",","report_every",",","start_time","=","-","1.",",","tensorboard_writer","=","None",")",":","super","(","ReportMgr",",","self",")",".","__init__","(","report_every",",","start_time",")","self",".","tensorboard_writer","=","tensorboard_writer"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L98-L109"} 
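[Editor's example] A hypothetical wiring of ReportMgr from the records above. It assumes onmt is importable and that a SummaryWriter-compatible object is available (torch's own is used here; the project may rely on tensorboardX instead); pass None to skip TensorBoard, and note that start() is only referenced, not shown, in these records:

from onmt.utils.report_manager import ReportMgr
from torch.utils.tensorboard import SummaryWriter

report_mgr = ReportMgr(report_every=50, start_time=-1.,
                       tensorboard_writer=SummaryWriter("runs/demo"))
report_mgr.start()   # per the __init__ docstring, sets start_time before training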
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgr._report_training","parameters":"(self, step, num_steps, learning_rate,\n report_stats)","argument_list":"","return_statement":"return report_stats","docstring":"See base class method `ReportMgrBase.report_training`.","docstring_summary":"See base class method `ReportMgrBase.report_training`.","docstring_tokens":["See","base","class","method","ReportMgrBase",".","report_training","."],"function":"def _report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n See base class method `ReportMgrBase.report_training`.\n \"\"\"\n report_stats.output(step, num_steps,\n learning_rate, self.start_time)\n\n # Log the progress using the number of batches on the x-axis.\n self.maybe_log_tensorboard(report_stats,\n \"progress\",\n learning_rate,\n self.progress_step)\n report_stats = onmt.utils.Statistics()\n\n return report_stats","function_tokens":["def","_report_training","(","self",",","step",",","num_steps",",","learning_rate",",","report_stats",")",":","report_stats",".","output","(","step",",","num_steps",",","learning_rate",",","self",".","start_time",")","# Log the progress using the number of batches on the x-axis.","self",".","maybe_log_tensorboard","(","report_stats",",","\"progress\"",",","learning_rate",",","self",".","progress_step",")","report_stats","=","onmt",".","utils",".","Statistics","(",")","return","report_stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L116-L131"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/utils\/report_manager.py","language":"python","identifier":"ReportMgr._report_step","parameters":"(self, lr, step, train_stats=None, valid_stats=None)","argument_list":"","return_statement":"","docstring":"See base class method `ReportMgrBase.report_step`.","docstring_summary":"See base class method `ReportMgrBase.report_step`.","docstring_tokens":["See","base","class","method","ReportMgrBase",".","report_step","."],"function":"def _report_step(self, lr, step, train_stats=None, valid_stats=None):\n \"\"\"\n See base class method `ReportMgrBase.report_step`.\n \"\"\"\n if train_stats is not None:\n self.log('Train perplexity: %g' % train_stats.ppl())\n self.log('Train accuracy: %g' % train_stats.accuracy())\n\n self.maybe_log_tensorboard(train_stats,\n \"train\",\n lr,\n step)\n\n if valid_stats is not None:\n self.log('Validation perplexity: %g' % valid_stats.ppl())\n self.log('Validation accuracy: %g' % valid_stats.accuracy())\n\n self.maybe_log_tensorboard(valid_stats,\n \"valid\",\n lr,\n step)","function_tokens":["def","_report_step","(","self",",","lr",",","step",",","train_stats","=","None",",","valid_stats","=","None",")",":","if","train_stats","is","not","None",":","self",".","log","(","'Train perplexity: %g'","%","train_stats",".","ppl","(",")",")","self",".","log","(","'Train accuracy: %g'","%","train_stats",".","accuracy","(",")",")","self",".","maybe_log_tensorboard","(","train_stats",",","\"train\"",",","lr",",","step",")","if","valid_stats","is","not","None",":","self",".","log","(","'Validation perplexity: %g'","%","valid_stats",".","ppl","(",")",")","self",".","log","(","'Validation accuracy: 
%g'","%","valid_stats",".","accuracy","(",")",")","self",".","maybe_log_tensorboard","(","valid_stats",",","\"valid\"",",","lr",",","step",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/utils\/report_manager.py#L133-L153"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/multi_headed_attn.py","language":"python","identifier":"MultiHeadedAttention.forward","parameters":"(self, key, value, query, mask=None,\n layer_cache=None, type=None)","argument_list":"","return_statement":"return output, top_attn","docstring":"Compute the context vector and the attention vectors.\n\n Args:\n key (`FloatTensor`): set of `key_len`\n key vectors `[batch, key_len, dim]`\n value (`FloatTensor`): set of `key_len`\n value vectors `[batch, key_len, dim]`\n query (`FloatTensor`): set of `query_len`\n query vectors `[batch, query_len, dim]`\n mask: binary mask indicating which keys have\n non-zero attention `[batch, query_len, key_len]`\n Returns:\n (`FloatTensor`, `FloatTensor`) :\n\n * output context vectors `[batch, query_len, dim]`\n * one of the attention vectors `[batch, query_len, key_len]`","docstring_summary":"Compute the context vector and the attention vectors.","docstring_tokens":["Compute","the","context","vector","and","the","attention","vectors","."],"function":"def forward(self, key, value, query, mask=None,\n layer_cache=None, type=None):\n \"\"\"\n Compute the context vector and the attention vectors.\n\n Args:\n key (`FloatTensor`): set of `key_len`\n key vectors `[batch, key_len, dim]`\n value (`FloatTensor`): set of `key_len`\n value vectors `[batch, key_len, dim]`\n query (`FloatTensor`): set of `query_len`\n query vectors `[batch, query_len, dim]`\n mask: binary mask indicating which keys have\n non-zero attention `[batch, query_len, key_len]`\n Returns:\n (`FloatTensor`, `FloatTensor`) :\n\n * output context vectors `[batch, query_len, dim]`\n * one of the attention vectors `[batch, query_len, key_len]`\n \"\"\"\n\n # CHECKS\n # batch, k_len, d = key.size()\n # batch_, k_len_, d_ = value.size()\n # aeq(batch, batch_)\n # aeq(k_len, k_len_)\n # aeq(d, d_)\n # batch_, q_len, d_ = query.size()\n # aeq(batch, batch_)\n # aeq(d, d_)\n # aeq(self.model_dim % 8, 0)\n # if mask is not None:\n # batch_, q_len_, k_len_ = mask.size()\n # aeq(batch_, batch)\n # aeq(k_len_, k_len)\n # aeq(q_len_ == q_len)\n # END CHECKS\n\n batch_size = key.size(0)\n dim_per_head = self.dim_per_head\n head_count = self.head_count\n key_len = key.size(1)\n query_len = query.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(batch_size, -1, head_count, dim_per_head) \\\n .transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous() \\\n .view(batch_size, -1, head_count * dim_per_head)\n\n # 1) Project key, value, and query.\n if layer_cache is not None:\n if type == \"self\":\n query, key, value = self.linear_query(query),\\\n self.linear_keys(query),\\\n self.linear_values(query)\n\n key = shape(key)\n value = shape(value)\n\n if layer_cache is not None:\n device = key.device\n if layer_cache[\"self_keys\"] is not None:\n key = torch.cat(\n (layer_cache[\"self_keys\"].to(device), key),\n dim=2)\n if layer_cache[\"self_values\"] is not None:\n value = torch.cat(\n (layer_cache[\"self_values\"].to(device), value),\n dim=2)\n layer_cache[\"self_keys\"] = key\n layer_cache[\"self_values\"] = value\n elif type == 
\"context\":\n query = self.linear_query(query)\n if layer_cache is not None:\n if layer_cache[\"memory_keys\"] is None:\n key, value = self.linear_keys(key),\\\n self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key, value = layer_cache[\"memory_keys\"],\\\n layer_cache[\"memory_values\"]\n layer_cache[\"memory_keys\"] = key\n layer_cache[\"memory_values\"] = value\n else:\n key, value = self.linear_keys(key),\\\n self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key = self.linear_keys(key)\n value = self.linear_values(value)\n query = self.linear_query(query)\n key = shape(key)\n value = shape(value)\n\n query = shape(query)\n\n key_len = key.size(2)\n query_len = query.size(2)\n\n # 2) Calculate and scale scores.\n query = query \/ math.sqrt(dim_per_head)\n scores = torch.matmul(query, key.transpose(2, 3))\n\n if mask is not None:\n mask = mask.unsqueeze(1).expand_as(scores)\n scores = scores.masked_fill(mask, -1e18)\n\n # 3) Apply attention dropout and compute context vectors.\n attn = self.softmax(scores)\n drop_attn = self.dropout(attn)\n context = unshape(torch.matmul(drop_attn, value))\n\n output = self.final_linear(context)\n # CHECK\n # batch_, q_len_, d_ = output.size()\n # aeq(q_len, q_len_)\n # aeq(batch, batch_)\n # aeq(d, d_)\n\n # Return one attn\n top_attn = attn \\\n .view(batch_size, head_count,\n query_len, key_len)[:, 0, :, :] \\\n .contiguous()\n\n return output, top_attn","function_tokens":["def","forward","(","self",",","key",",","value",",","query",",","mask","=","None",",","layer_cache","=","None",",","type","=","None",")",":","# CHECKS","# batch, k_len, d = key.size()","# batch_, k_len_, d_ = value.size()","# aeq(batch, batch_)","# aeq(k_len, k_len_)","# aeq(d, d_)","# batch_, q_len, d_ = query.size()","# aeq(batch, batch_)","# aeq(d, d_)","# aeq(self.model_dim % 8, 0)","# if mask is not None:","# batch_, q_len_, k_len_ = mask.size()","# aeq(batch_, batch)","# aeq(k_len_, k_len)","# aeq(q_len_ == q_len)","# END CHECKS","batch_size","=","key",".","size","(","0",")","dim_per_head","=","self",".","dim_per_head","head_count","=","self",".","head_count","key_len","=","key",".","size","(","1",")","query_len","=","query",".","size","(","1",")","def","shape","(","x",")",":","\"\"\" projection \"\"\"","return","x",".","view","(","batch_size",",","-","1",",","head_count",",","dim_per_head",")",".","transpose","(","1",",","2",")","def","unshape","(","x",")",":","\"\"\" compute context \"\"\"","return","x",".","transpose","(","1",",","2",")",".","contiguous","(",")",".","view","(","batch_size",",","-","1",",","head_count","*","dim_per_head",")","# 1) Project key, value, and 
query.","if","layer_cache","is","not","None",":","if","type","==","\"self\"",":","query",",","key",",","value","=","self",".","linear_query","(","query",")",",","self",".","linear_keys","(","query",")",",","self",".","linear_values","(","query",")","key","=","shape","(","key",")","value","=","shape","(","value",")","if","layer_cache","is","not","None",":","device","=","key",".","device","if","layer_cache","[","\"self_keys\"","]","is","not","None",":","key","=","torch",".","cat","(","(","layer_cache","[","\"self_keys\"","]",".","to","(","device",")",",","key",")",",","dim","=","2",")","if","layer_cache","[","\"self_values\"","]","is","not","None",":","value","=","torch",".","cat","(","(","layer_cache","[","\"self_values\"","]",".","to","(","device",")",",","value",")",",","dim","=","2",")","layer_cache","[","\"self_keys\"","]","=","key","layer_cache","[","\"self_values\"","]","=","value","elif","type","==","\"context\"",":","query","=","self",".","linear_query","(","query",")","if","layer_cache","is","not","None",":","if","layer_cache","[","\"memory_keys\"","]","is","None",":","key",",","value","=","self",".","linear_keys","(","key",")",",","self",".","linear_values","(","value",")","key","=","shape","(","key",")","value","=","shape","(","value",")","else",":","key",",","value","=","layer_cache","[","\"memory_keys\"","]",",","layer_cache","[","\"memory_values\"","]","layer_cache","[","\"memory_keys\"","]","=","key","layer_cache","[","\"memory_values\"","]","=","value","else",":","key",",","value","=","self",".","linear_keys","(","key",")",",","self",".","linear_values","(","value",")","key","=","shape","(","key",")","value","=","shape","(","value",")","else",":","key","=","self",".","linear_keys","(","key",")","value","=","self",".","linear_values","(","value",")","query","=","self",".","linear_query","(","query",")","key","=","shape","(","key",")","value","=","shape","(","value",")","query","=","shape","(","query",")","key_len","=","key",".","size","(","2",")","query_len","=","query",".","size","(","2",")","# 2) Calculate and scale scores.","query","=","query","\/","math",".","sqrt","(","dim_per_head",")","scores","=","torch",".","matmul","(","query",",","key",".","transpose","(","2",",","3",")",")","if","mask","is","not","None",":","mask","=","mask",".","unsqueeze","(","1",")",".","expand_as","(","scores",")","scores","=","scores",".","masked_fill","(","mask",",","-","1e18",")","# 3) Apply attention dropout and compute context vectors.","attn","=","self",".","softmax","(","scores",")","drop_attn","=","self",".","dropout","(","attn",")","context","=","unshape","(","torch",".","matmul","(","drop_attn",",","value",")",")","output","=","self",".","final_linear","(","context",")","# CHECK","# batch_, q_len_, d_ = output.size()","# aeq(q_len, q_len_)","# aeq(batch, batch_)","# aeq(d, d_)","# Return one attn","top_attn","=","attn",".","view","(","batch_size",",","head_count",",","query_len",",","key_len",")","[",":",",","0",",",":",",",":","]",".","contiguous","(",")","return","output",",","top_attn"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/multi_headed_attn.py#L69-L201"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/position_ffn.py","language":"python","identifier":"PositionwiseFeedForward.forward","parameters":"(self, x)","argument_list":"","return_statement":"return output + x","docstring":"Layer definition.\n\n Args:\n input: [ 
batch_size, input_len, model_dim ]\n\n\n Returns:\n output: [ batch_size, input_len, model_dim ]","docstring_summary":"Layer definition.","docstring_tokens":["Layer","definition","."],"function":"def forward(self, x):\n \"\"\"\n Layer definition.\n\n Args:\n input: [ batch_size, input_len, model_dim ]\n\n\n Returns:\n output: [ batch_size, input_len, model_dim ]\n \"\"\"\n inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))\n output = self.dropout_2(self.w_2(inter))\n return output + x","function_tokens":["def","forward","(","self",",","x",")",":","inter","=","self",".","dropout_1","(","self",".","relu","(","self",".","w_1","(","self",".","layer_norm","(","x",")",")",")",")","output","=","self",".","dropout_2","(","self",".","w_2","(","inter",")",")","return","output","+","x"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/position_ffn.py#L29-L42"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/sparse_activations.py","language":"python","identifier":"threshold_and_support","parameters":"(z, dim=0)","argument_list":"","return_statement":"return tau_z, k_z","docstring":"z: any dimension\n dim: dimension along which to apply the sparsemax","docstring_summary":"z: any dimension\n dim: dimension along which to apply the sparsemax","docstring_tokens":["z",":","any","dimension","dim",":","dimension","along","which","to","apply","the","sparsemax"],"function":"def threshold_and_support(z, dim=0):\n \"\"\"\n z: any dimension\n dim: dimension along which to apply the sparsemax\n \"\"\"\n sorted_z, _ = torch.sort(z, descending=True, dim=dim)\n z_sum = sorted_z.cumsum(dim) - 1 # sort of a misnomer\n k = torch.arange(1, sorted_z.size(dim) + 1, device=z.device).float().view(\n torch.Size([-1] + [1] * (z.dim() - 1))\n ).transpose(0, dim)\n support = k * sorted_z > z_sum\n\n k_z_indices = support.sum(dim=dim).unsqueeze(dim)\n k_z = k_z_indices.float()\n tau_z = z_sum.gather(dim, k_z_indices - 1) \/ k_z\n return tau_z, k_z","function_tokens":["def","threshold_and_support","(","z",",","dim","=","0",")",":","sorted_z",",","_","=","torch",".","sort","(","z",",","descending","=","True",",","dim","=","dim",")","z_sum","=","sorted_z",".","cumsum","(","dim",")","-","1","# sort of a misnomer","k","=","torch",".","arange","(","1",",","sorted_z",".","size","(","dim",")","+","1",",","device","=","z",".","device",")",".","float","(",")",".","view","(","torch",".","Size","(","[","-","1","]","+","[","1","]","*","(","z",".","dim","(",")","-","1",")",")",")",".","transpose","(","0",",","dim",")","support","=","k","*","sorted_z",">","z_sum","k_z_indices","=","support",".","sum","(","dim","=","dim",")",".","unsqueeze","(","dim",")","k_z","=","k_z_indices",".","float","(",")","tau_z","=","z_sum",".","gather","(","dim",",","k_z_indices","-","1",")","\/","k_z","return","tau_z",",","k_z"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/sparse_activations.py#L11-L26"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/sparse_activations.py","language":"python","identifier":"SparsemaxFunction.forward","parameters":"(ctx, input, dim=0)","argument_list":"","return_statement":"return output","docstring":"input (FloatTensor): any shape\n returns (FloatTensor): same shape with sparsemax computed on given 
dim","docstring_summary":"input (FloatTensor): any shape\n returns (FloatTensor): same shape with sparsemax computed on given dim","docstring_tokens":["input","(","FloatTensor",")",":","any","shape","returns","(","FloatTensor",")",":","same","shape","with","sparsemax","computed","on","given","dim"],"function":"def forward(ctx, input, dim=0):\n \"\"\"\n input (FloatTensor): any shape\n returns (FloatTensor): same shape with sparsemax computed on given dim\n \"\"\"\n ctx.dim = dim\n tau_z, k_z = threshold_and_support(input, dim=dim)\n output = torch.clamp(input - tau_z, min=0)\n ctx.save_for_backward(k_z, output)\n return output","function_tokens":["def","forward","(","ctx",",","input",",","dim","=","0",")",":","ctx",".","dim","=","dim","tau_z",",","k_z","=","threshold_and_support","(","input",",","dim","=","dim",")","output","=","torch",".","clamp","(","input","-","tau_z",",","min","=","0",")","ctx",".","save_for_backward","(","k_z",",","output",")","return","output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/sparse_activations.py#L32-L41"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/global_attention.py","language":"python","identifier":"GlobalAttention.mmr_score","parameters":"(self, inputs, output)","argument_list":"","return_statement":"","docstring":":param inputs: inputs sentence matrix\n :param output: output sentence (vector)\n :return: scores of mmr","docstring_summary":"","docstring_tokens":[],"function":"def mmr_score(self, inputs, output):\n '''\n\n :param inputs: inputs sentence matrix\n :param output: output sentence (vector)\n :return: scores of mmr\n '''","function_tokens":["def","mmr_score","(","self",",","inputs",",","output",")",":"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/global_attention.py#L99-L105"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/global_attention.py","language":"python","identifier":"GlobalAttention.score","parameters":"(self, h_t, h_s)","argument_list":"","return_statement":"","docstring":"Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`\n\n Returns:\n :obj:`FloatTensor`:\n raw attention scores (unnormalized) for each src index\n `[batch x tgt_len x src_len]`","docstring_summary":"Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`","docstring_tokens":["Args",":","h_t","(","FloatTensor",")",":","sequence","of","queries","[","batch","x","tgt_len","x","dim","]","h_s","(","FloatTensor",")",":","sequence","of","sources","[","batch","x","src_len","x","dim","]"],"function":"def score(self, h_t, h_s):\n \"\"\"\n Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`\n\n Returns:\n :obj:`FloatTensor`:\n raw attention scores (unnormalized) for each src index\n `[batch x tgt_len x src_len]`\n\n \"\"\"\n # target length is 1 (tgt_len)\n\n # Check input sizes\n src_batch, src_len, src_dim = h_s.size()\n tgt_batch, tgt_len, tgt_dim = h_t.size()\n aeq(src_batch, tgt_batch)\n aeq(src_dim, tgt_dim)\n aeq(self.dim, src_dim)\n\n if self.attn_type in [\"general\", \"dot\"]:\n if 
self.attn_type == \"general\":\n h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)\n h_t_ = self.linear_in(h_t_)\n h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)\n h_s_ = h_s.transpose(1, 2)\n # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)\n # print('tgt_len, src_len...', tgt_len, src_len) tgt_len=1, src_len is various\n return torch.bmm(h_t, h_s_) # Performs a batch matrix-matrix product of matrices\n else: # normal attention\n dim = self.dim\n wq = self.linear_query(h_t.view(-1, dim))\n wq = wq.view(tgt_batch, tgt_len, 1, dim)\n wq = wq.expand(tgt_batch, tgt_len, src_len, dim)\n\n uh = self.linear_context(h_s.contiguous().view(-1, dim))\n uh = uh.view(src_batch, 1, src_len, dim)\n uh = uh.expand(src_batch, tgt_len, src_len, dim)\n\n # (batch, t_len, s_len, d)\n wquh = torch.tanh(wq + uh)\n return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)","function_tokens":["def","score","(","self",",","h_t",",","h_s",")",":","# target length is 1 (tgt_len)","# Check input sizes","src_batch",",","src_len",",","src_dim","=","h_s",".","size","(",")","tgt_batch",",","tgt_len",",","tgt_dim","=","h_t",".","size","(",")","aeq","(","src_batch",",","tgt_batch",")","aeq","(","src_dim",",","tgt_dim",")","aeq","(","self",".","dim",",","src_dim",")","if","self",".","attn_type","in","[","\"general\"",",","\"dot\"","]",":","if","self",".","attn_type","==","\"general\"",":","h_t_","=","h_t",".","view","(","tgt_batch","*","tgt_len",",","tgt_dim",")","h_t_","=","self",".","linear_in","(","h_t_",")","h_t","=","h_t_",".","view","(","tgt_batch",",","tgt_len",",","tgt_dim",")","h_s_","=","h_s",".","transpose","(","1",",","2",")","# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)","# print('tgt_len, src_len...', tgt_len, src_len) tgt_len=1, src_len is various","return","torch",".","bmm","(","h_t",",","h_s_",")","# Performs a batch matrix-matrix product of matrices","else",":","# normal attention","dim","=","self",".","dim","wq","=","self",".","linear_query","(","h_t",".","view","(","-","1",",","dim",")",")","wq","=","wq",".","view","(","tgt_batch",",","tgt_len",",","1",",","dim",")","wq","=","wq",".","expand","(","tgt_batch",",","tgt_len",",","src_len",",","dim",")","uh","=","self",".","linear_context","(","h_s",".","contiguous","(",")",".","view","(","-","1",",","dim",")",")","uh","=","uh",".","view","(","src_batch",",","1",",","src_len",",","dim",")","uh","=","uh",".","expand","(","src_batch",",","tgt_len",",","src_len",",","dim",")","# (batch, t_len, s_len, d)","wquh","=","torch",".","tanh","(","wq","+","uh",")","return","self",".","v","(","wquh",".","view","(","-","1",",","dim",")",")",".","view","(","tgt_batch",",","tgt_len",",","src_len",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/global_attention.py#L109-L151"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/global_attention.py","language":"python","identifier":"GlobalAttention.forward","parameters":"(self, source, memory_bank,memory_lengths=None, coverage=None)","argument_list":"","return_statement":"return attn_h, align_vectors","docstring":"Args:\n source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`\n memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`\n memory_lengths (`LongTensor`): the source context lengths `[batch]`\n coverage (`FloatTensor`): None (not supported yet)\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * 
Computed vector `[tgt_len x batch x dim]`\n          * Attention distributions for each query\n            `[tgt_len x batch x src_len]`","docstring_summary":"","docstring_tokens":[],"function":"def forward(self, source, memory_bank,memory_lengths=None, coverage=None):\n        \"\"\"\n\n        Args:\n          source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`\n          memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`\n          memory_lengths (`LongTensor`): the source context lengths `[batch]`\n          coverage (`FloatTensor`): None (not supported yet)\n\n        Returns:\n          (`FloatTensor`, `FloatTensor`):\n\n          * Computed vector `[tgt_len x batch x dim]`\n          * Attention distributions for each query\n            `[tgt_len x batch x src_len]`\n        \"\"\"\n        # print ('Source..',source.size())\n        # print ('memory_bank..',memory_bank.size())\n        # Source..torch.Size([16, 512])\n        # memory_bank..torch.Size([16, 400, 512])\n\n\n        # one step input\n        if source.dim() == 2:\n            one_step = True\n            source = source.unsqueeze(1)\n        else:\n            one_step = False\n\n        batch, source_l, dim = memory_bank.size()\n        batch_, target_l, dim_ = source.size()\n        aeq(batch, batch_)\n        aeq(dim, dim_)\n        aeq(self.dim, dim)\n        if coverage is not None:\n            batch_, source_l_ = coverage.size()\n            aeq(batch, batch_)\n            aeq(source_l, source_l_)\n\n        if coverage is not None:\n            cover = coverage.view(-1).unsqueeze(1)\n            memory_bank += self.linear_cover(cover).view_as(memory_bank)\n            memory_bank = torch.tanh(memory_bank)\n\n        # compute attention scores, as in Luong et al.\n        align = self.score(source, memory_bank)\n\n        if memory_lengths is not None:\n            #????\n            mask = sequence_mask(memory_lengths, max_len=align.size(-1))\n            mask = mask.unsqueeze(1)  # Make it broadcastable.\n            align.masked_fill_(1 - mask, -float('inf'))\n\n        # Softmax or sparsemax to normalize attention weights\n        if self.attn_func == \"softmax\":\n            align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)\n        else:\n            align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)\n        align_vectors = align_vectors.view(batch, target_l, source_l)\n\n        # each context vector c_t is the weighted average\n        # over all the source hidden states\n        c = torch.bmm(align_vectors, memory_bank)\n\n        # concatenate\n        concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)\n        attn_h = self.linear_out(concat_c).view(batch, target_l, dim)\n        if self.attn_type in [\"general\", \"dot\"]:\n            attn_h = torch.tanh(attn_h)\n\n        if one_step:\n            attn_h = attn_h.squeeze(1)\n            align_vectors = align_vectors.squeeze(1)\n\n            # Check output sizes\n            batch_, dim_ = attn_h.size()\n            aeq(batch, batch_)\n            aeq(dim, dim_)\n            batch_, source_l_ = align_vectors.size()\n            aeq(batch, batch_)\n            aeq(source_l, source_l_)\n\n        else:\n            attn_h = attn_h.transpose(0, 1).contiguous()\n            align_vectors = align_vectors.transpose(0, 1).contiguous()\n            # Check output sizes\n            target_l_, batch_, dim_ = attn_h.size()\n            aeq(target_l, target_l_)\n            aeq(batch, batch_)\n            aeq(dim, dim_)\n            target_l_, batch_, source_l_ = align_vectors.size()\n            aeq(target_l, target_l_)\n            aeq(batch, batch_)\n            aeq(source_l, source_l_)\n\n        # print ('Atten Hidden...',attn_h.size()) # torch.Size([16, 512])\n        # print ('Align...',align_vectors.size()) # torch.Size([16, 400])\n        return attn_h, align_vectors","function_tokens":["def","forward","(","self",",","source",",","memory_bank",",","memory_lengths","=","None",",","coverage","=","None",")",":","# print ('Source..',source.size())","# print ('memory_bank..',memory_bank.size())","# Source..torch.Size([16, 512])","# memory_bank..torch.Size([16, 400, 512])","# one step 
input","if","source",".","dim","(",")","==","2",":","one_step","=","True","source","=","source",".","unsqueeze","(","1",")","else",":","one_step","=","False","batch",",","source_l",",","dim","=","memory_bank",".","size","(",")","batch_",",","target_l",",","dim_","=","source",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","dim",",","dim_",")","aeq","(","self",".","dim",",","dim",")","if","coverage","is","not","None",":","batch_",",","source_l_","=","coverage",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","source_l",",","source_l_",")","if","coverage","is","not","None",":","cover","=","coverage",".","view","(","-","1",")",".","unsqueeze","(","1",")","memory_bank","+=","self",".","linear_cover","(","cover",")",".","view_as","(","memory_bank",")","memory_bank","=","torch",".","tanh","(","memory_bank",")","# compute attention scores, as in Luong et al.","align","=","self",".","score","(","source",",","memory_bank",")","if","memory_lengths","is","not","None",":","#????","mask","=","sequence_mask","(","memory_lengths",",","max_len","=","align",".","size","(","-","1",")",")","mask","=","mask",".","unsqueeze","(","1",")","# Make it broadcastable.","align",".","masked_fill_","(","1","-","mask",",","-","float","(","'inf'",")",")","# Softmax or sparsemax to normalize attention weights","if","self",".","attn_func","==","\"softmax\"",":","align_vectors","=","F",".","softmax","(","align",".","view","(","batch","*","target_l",",","source_l",")",",","-","1",")","else",":","align_vectors","=","sparsemax","(","align",".","view","(","batch","*","target_l",",","source_l",")",",","-","1",")","align_vectors","=","align_vectors",".","view","(","batch",",","target_l",",","source_l",")","# each context vector c_t is the weighted average","# over all the source hidden states","c","=","torch",".","bmm","(","align_vectors",",","memory_bank",")","# concatenate","concat_c","=","torch",".","cat","(","[","c",",","source","]",",","2",")",".","view","(","batch","*","target_l",",","dim","*","2",")","attn_h","=","self",".","linear_out","(","concat_c",")",".","view","(","batch",",","target_l",",","dim",")","if","self",".","attn_type","in","[","\"general\"",",","\"dot\"","]",":","attn_h","=","torch",".","tanh","(","attn_h",")","if","one_step",":","attn_h","=","attn_h",".","squeeze","(","1",")","align_vectors","=","align_vectors",".","squeeze","(","1",")","# Check output sizes","batch_",",","dim_","=","attn_h",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","dim",",","dim_",")","batch_",",","source_l_","=","align_vectors",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","source_l",",","source_l_",")","else",":","attn_h","=","attn_h",".","transpose","(","0",",","1",")",".","contiguous","(",")","align_vectors","=","align_vectors",".","transpose","(","0",",","1",")",".","contiguous","(",")","# Check output sizes","target_l_",",","batch_",",","dim_","=","attn_h",".","size","(",")","aeq","(","target_l",",","target_l_",")","aeq","(","batch",",","batch_",")","aeq","(","dim",",","dim_",")","target_l_",",","batch_",",","source_l_","=","align_vectors",".","size","(",")","aeq","(","target_l",",","target_l_",")","aeq","(","batch",",","batch_",")","aeq","(","source_l",",","source_l_",")","# print ('Atten Hidden...',attn_h.size()) # torch.Size([16, 512])","# print ('Align...',align_vectors.size()) # torch.Size([16, 
400])","return","attn_h",",","align_vectors"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/global_attention.py#L153-L250"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/gate.py","language":"python","identifier":"context_gate_factory","parameters":"(gate_type, embeddings_size, decoder_size,\n attention_size, output_size)","argument_list":"","return_statement":"return gate_types[gate_type](embeddings_size, decoder_size, attention_size,\n output_size)","docstring":"Returns the correct ContextGate class","docstring_summary":"Returns the correct ContextGate class","docstring_tokens":["Returns","the","correct","ContextGate","class"],"function":"def context_gate_factory(gate_type, embeddings_size, decoder_size,\n attention_size, output_size):\n \"\"\"Returns the correct ContextGate class\"\"\"\n\n gate_types = {'source': SourceContextGate,\n 'target': TargetContextGate,\n 'both': BothContextGate}\n\n assert gate_type in gate_types, \"Not valid ContextGate type: {0}\".format(\n gate_type)\n return gate_types[gate_type](embeddings_size, decoder_size, attention_size,\n output_size)","function_tokens":["def","context_gate_factory","(","gate_type",",","embeddings_size",",","decoder_size",",","attention_size",",","output_size",")",":","gate_types","=","{","'source'",":","SourceContextGate",",","'target'",":","TargetContextGate",",","'both'",":","BothContextGate","}","assert","gate_type","in","gate_types",",","\"Not valid ContextGate type: {0}\"",".","format","(","gate_type",")","return","gate_types","[","gate_type","]","(","embeddings_size",",","decoder_size",",","attention_size",",","output_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/gate.py#L6-L17"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/average_attn.py","language":"python","identifier":"AverageAttention.cumulative_average_mask","parameters":"(self, batch_size, inputs_len)","argument_list":"","return_statement":"return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)","docstring":"Builds the mask to compute the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Figure 3\n\n Args:\n batch_size (int): batch size\n inputs_len (int): length of the inputs\n\n Returns:\n (`FloatTensor`):\n\n * A Tensor of shape `[batch_size x input_len x input_len]`","docstring_summary":"Builds the mask to compute the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Figure 3","docstring_tokens":["Builds","the","mask","to","compute","the","cumulative","average","as","described","in","https",":","\/\/","arxiv",".","org","\/","abs","\/","1805",".","00631","--","Figure","3"],"function":"def cumulative_average_mask(self, batch_size, inputs_len):\n \"\"\"\n Builds the mask to compute the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Figure 3\n\n Args:\n batch_size (int): batch size\n inputs_len (int): length of the inputs\n\n Returns:\n (`FloatTensor`):\n\n * A Tensor of shape `[batch_size x input_len x input_len]`\n \"\"\"\n\n triangle = torch.tril(torch.ones(inputs_len, inputs_len))\n weights = torch.ones(1, inputs_len) \/ torch.arange(\n 1, inputs_len + 1, dtype=torch.float)\n mask = triangle * weights.transpose(0, 1)\n\n return 
mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)","function_tokens":["def","cumulative_average_mask","(","self",",","batch_size",",","inputs_len",")",":","triangle","=","torch",".","tril","(","torch",".","ones","(","inputs_len",",","inputs_len",")",")","weights","=","torch",".","ones","(","1",",","inputs_len",")","\/","torch",".","arange","(","1",",","inputs_len","+","1",",","dtype","=","torch",".","float",")","mask","=","triangle","*","weights",".","transpose","(","0",",","1",")","return","mask",".","unsqueeze","(","0",")",".","expand","(","batch_size",",","inputs_len",",","inputs_len",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/average_attn.py#L31-L51"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/average_attn.py","language":"python","identifier":"AverageAttention.cumulative_average","parameters":"(self, inputs, mask_or_step,\n layer_cache=None, step=None)","argument_list":"","return_statement":"","docstring":"Computes the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Equations (1) (5) (6)\n\n Args:\n inputs (`FloatTensor`): sequence to average\n `[batch_size x input_len x dimension]`\n mask_or_step: if cache is set, this is assumed\n to be the current step of the\n dynamic decoding. Otherwise, it is the mask matrix\n used to compute the cumulative average.\n cache: a dictionary containing the cumulative average\n of the previous step.","docstring_summary":"Computes the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Equations (1) (5) (6)","docstring_tokens":["Computes","the","cumulative","average","as","described","in","https",":","\/\/","arxiv",".","org","\/","abs","\/","1805",".","00631","--","Equations","(","1",")","(","5",")","(","6",")"],"function":"def cumulative_average(self, inputs, mask_or_step,\n layer_cache=None, step=None):\n \"\"\"\n Computes the cumulative average as described in\n https:\/\/arxiv.org\/abs\/1805.00631 -- Equations (1) (5) (6)\n\n Args:\n inputs (`FloatTensor`): sequence to average\n `[batch_size x input_len x dimension]`\n mask_or_step: if cache is set, this is assumed\n to be the current step of the\n dynamic decoding. 
Otherwise, it is the mask matrix\n used to compute the cumulative average.\n cache: a dictionary containing the cumulative average\n of the previous step.\n \"\"\"\n if layer_cache is not None:\n step = mask_or_step\n device = inputs.device\n average_attention = (inputs + step *\n layer_cache[\"prev_g\"].to(device)) \/ (step + 1)\n layer_cache[\"prev_g\"] = average_attention\n return average_attention\n else:\n mask = mask_or_step\n return torch.matmul(mask, inputs)","function_tokens":["def","cumulative_average","(","self",",","inputs",",","mask_or_step",",","layer_cache","=","None",",","step","=","None",")",":","if","layer_cache","is","not","None",":","step","=","mask_or_step","device","=","inputs",".","device","average_attention","=","(","inputs","+","step","*","layer_cache","[","\"prev_g\"","]",".","to","(","device",")",")","\/","(","step","+","1",")","layer_cache","[","\"prev_g\"","]","=","average_attention","return","average_attention","else",":","mask","=","mask_or_step","return","torch",".","matmul","(","mask",",","inputs",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/average_attn.py#L53-L78"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/average_attn.py","language":"python","identifier":"AverageAttention.forward","parameters":"(self, inputs, mask=None, layer_cache=None, step=None)","argument_list":"","return_statement":"return gating_outputs, average_outputs","docstring":"Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * gating_outputs `[batch_size x 1 x model_dim]`\n * average_outputs average attention `[batch_size x 1 x model_dim]`","docstring_summary":"Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`","docstring_tokens":["Args",":","inputs","(","FloatTensor",")",":","[","batch_size","x","input_len","x","model_dim","]"],"function":"def forward(self, inputs, mask=None, layer_cache=None, step=None):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * gating_outputs `[batch_size x 1 x model_dim]`\n * average_outputs average attention `[batch_size x 1 x model_dim]`\n \"\"\"\n batch_size = inputs.size(0)\n inputs_len = inputs.size(1)\n\n device = inputs.device\n average_outputs = self.cumulative_average(\n inputs, self.cumulative_average_mask(batch_size,\n inputs_len).to(device).float()\n if layer_cache is None else step, layer_cache=layer_cache)\n average_outputs = self.average_layer(average_outputs)\n gating_outputs = self.gating_layer(torch.cat((inputs,\n average_outputs), -1))\n input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)\n gating_outputs = torch.sigmoid(input_gate) * inputs + \\\n torch.sigmoid(forget_gate) * average_outputs\n\n return gating_outputs, 
average_outputs","function_tokens":["def","forward","(","self",",","inputs",",","mask","=","None",",","layer_cache","=","None",",","step","=","None",")",":","batch_size","=","inputs",".","size","(","0",")","inputs_len","=","inputs",".","size","(","1",")","device","=","inputs",".","device","average_outputs","=","self",".","cumulative_average","(","inputs",",","self",".","cumulative_average_mask","(","batch_size",",","inputs_len",")",".","to","(","device",")",".","float","(",")","if","layer_cache","is","None","else","step",",","layer_cache","=","layer_cache",")","average_outputs","=","self",".","average_layer","(","average_outputs",")","gating_outputs","=","self",".","gating_layer","(","torch",".","cat","(","(","inputs",",","average_outputs",")",",","-","1",")",")","input_gate",",","forget_gate","=","torch",".","chunk","(","gating_outputs",",","2",",","dim","=","2",")","gating_outputs","=","torch",".","sigmoid","(","input_gate",")","*","inputs","+","torch",".","sigmoid","(","forget_gate",")","*","average_outputs","return","gating_outputs",",","average_outputs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/average_attn.py#L80-L106"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/copy_generator.py","language":"python","identifier":"CopyGenerator.forward","parameters":"(self, hidden, attn, src_map)","argument_list":"","return_statement":"return torch.cat([out_prob, copy_prob], 1)","docstring":"Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by compying\n source words.\n\n Args:\n hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`\n attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`\n src_map (`FloatTensor`):\n A sparse indicator matrix mapping each source word to\n its index in the \"extended\" vocab containing.\n `[src_len, batch, extra_words]`","docstring_summary":"Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by compying\n source words.","docstring_tokens":["Compute","a","distribution","over","the","target","dictionary","extended","by","the","dynamic","dictionary","implied","by","compying","source","words","."],"function":"def forward(self, hidden, attn, src_map):\n \"\"\"\n Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by compying\n source words.\n\n Args:\n hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`\n attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`\n src_map (`FloatTensor`):\n A sparse indicator matrix mapping each source word to\n its index in the \"extended\" vocab containing.\n `[src_len, batch, extra_words]`\n \"\"\"\n # CHECKS\n batch_by_tlen, _ = hidden.size()\n batch_by_tlen_, slen = attn.size()\n slen_, batch, cvocab = src_map.size()\n aeq(batch_by_tlen, batch_by_tlen_)\n aeq(slen, slen_)\n\n # Original probabilities.\n logits = self.linear(hidden)\n logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')\n prob = self.softmax(logits)\n\n # Probability of copying p(z=1) batch.\n p_copy = self.sigmoid(self.linear_copy(hidden))\n # Probibility of not copying: p_{word}(w) * (1 - p(z))\n out_prob = torch.mul(prob, 1 - p_copy.expand_as(prob))\n mul_attn = torch.mul(attn, p_copy.expand_as(attn))\n copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)\n .transpose(0, 1),\n src_map.transpose(0, 1)).transpose(0, 1)\n 
copy_prob = copy_prob.contiguous().view(-1, cvocab)\n return torch.cat([out_prob, copy_prob], 1)","function_tokens":["def","forward","(","self",",","hidden",",","attn",",","src_map",")",":","# CHECKS","batch_by_tlen",",","_","=","hidden",".","size","(",")","batch_by_tlen_",",","slen","=","attn",".","size","(",")","slen_",",","batch",",","cvocab","=","src_map",".","size","(",")","aeq","(","batch_by_tlen",",","batch_by_tlen_",")","aeq","(","slen",",","slen_",")","# Original probabilities.","logits","=","self",".","linear","(","hidden",")","logits","[",":",",","self",".","tgt_dict",".","stoi","[","inputters",".","PAD_WORD","]","]","=","-","float","(","'inf'",")","prob","=","self",".","softmax","(","logits",")","# Probability of copying p(z=1) batch.","p_copy","=","self",".","sigmoid","(","self",".","linear_copy","(","hidden",")",")","# Probibility of not copying: p_{word}(w) * (1 - p(z))","out_prob","=","torch",".","mul","(","prob",",","1","-","p_copy",".","expand_as","(","prob",")",")","mul_attn","=","torch",".","mul","(","attn",",","p_copy",".","expand_as","(","attn",")",")","copy_prob","=","torch",".","bmm","(","mul_attn",".","view","(","-","1",",","batch",",","slen",")",".","transpose","(","0",",","1",")",",","src_map",".","transpose","(","0",",","1",")",")",".","transpose","(","0",",","1",")","copy_prob","=","copy_prob",".","contiguous","(",")",".","view","(","-","1",",","cvocab",")","return","torch",".","cat","(","[","out_prob",",","copy_prob","]",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/copy_generator.py#L71-L106"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/copy_generator.py","language":"python","identifier":"CopyGeneratorLossCompute._make_shard_state","parameters":"(self, batch, output, range_, attns)","argument_list":"","return_statement":"return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n \"copy_attn\": attns.get(\"copy\"),\n \"align\": batch.alignment[range_[0] + 1: range_[1]]\n }","docstring":"See base class for args description.","docstring_summary":"See base class for args description.","docstring_tokens":["See","base","class","for","args","description","."],"function":"def _make_shard_state(self, batch, output, range_, attns):\n \"\"\" See base class for args description. 
\"\"\"\n if getattr(batch, \"alignment\", None) is None:\n raise AssertionError(\"using -copy_attn you need to pass in \"\n \"-dynamic_dict during preprocess stage.\")\n\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n \"copy_attn\": attns.get(\"copy\"),\n \"align\": batch.alignment[range_[0] + 1: range_[1]]\n }","function_tokens":["def","_make_shard_state","(","self",",","batch",",","output",",","range_",",","attns",")",":","if","getattr","(","batch",",","\"alignment\"",",","None",")","is","None",":","raise","AssertionError","(","\"using -copy_attn you need to pass in \"","\"-dynamic_dict during preprocess stage.\"",")","return","{","\"output\"",":","output",",","\"target\"",":","batch",".","tgt","[","range_","[","0","]","+","1",":","range_","[","1","]","]",",","\"copy_attn\"",":","attns",".","get","(","\"copy\"",")",",","\"align\"",":","batch",".","alignment","[","range_","[","0","]","+","1",":","range_","[","1","]","]","}"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/copy_generator.py#L163-L174"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/copy_generator.py","language":"python","identifier":"CopyGeneratorLossCompute._compute_loss","parameters":"(self, batch, output, target, copy_attn, align)","argument_list":"","return_statement":"return loss, stats","docstring":"Compute the loss. The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.","docstring_summary":"Compute the loss. The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.","docstring_tokens":["Compute","the","loss",".","The","args","must","match","self",".","_make_shard_state","()",".","Args",":","batch",":","the","current","batch",".","output",":","the","predict","output","from","the","model",".","target",":","the","validate","target","to","compare","output","with",".","copy_attn",":","the","copy","attention","value",".","align",":","the","align","info","."],"function":"def _compute_loss(self, batch, output, target, copy_attn, align):\n \"\"\"\n Compute the loss. 
The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.\n \"\"\"\n target = target.view(-1)\n align = align.view(-1)\n scores = self.generator(self._bottle(output),\n self._bottle(copy_attn),\n batch.src_map)\n loss = self.criterion(scores, align, target)\n scores_data = scores.data.clone()\n scores_data = inputters.TextDataset.collapse_copy_scores(\n self._unbottle(scores_data, batch.batch_size),\n batch, self.tgt_vocab, batch.dataset.src_vocabs)\n scores_data = self._bottle(scores_data)\n\n # Correct target copy token instead of \n # tgt[i] = align[i] + len(tgt_vocab)\n # for i such that tgt[i] == 0 and align[i] != 0\n target_data = target.data.clone()\n correct_mask = target_data.eq(0) * align.data.ne(0)\n correct_copy = (align.data + len(self.tgt_vocab)) * correct_mask.long()\n target_data = target_data + correct_copy\n\n # Compute sum of perplexities for stats\n loss_data = loss.sum().data.clone()\n stats = self._stats(loss_data, scores_data, target_data)\n\n if self.normalize_by_length:\n # Compute Loss as NLL divided by seq length\n # Compute Sequence Lengths\n pad_ix = batch.dataset.fields['tgt'].vocab.stoi[inputters.PAD_WORD]\n tgt_lens = batch.tgt.ne(pad_ix).float().sum(0)\n # Compute Total Loss per sequence in batch\n loss = loss.view(-1, batch.batch_size).sum(0)\n # Divide by length of each sequence and sum\n loss = torch.div(loss, tgt_lens).sum()\n else:\n loss = loss.sum()\n\n return loss, stats","function_tokens":["def","_compute_loss","(","self",",","batch",",","output",",","target",",","copy_attn",",","align",")",":","target","=","target",".","view","(","-","1",")","align","=","align",".","view","(","-","1",")","scores","=","self",".","generator","(","self",".","_bottle","(","output",")",",","self",".","_bottle","(","copy_attn",")",",","batch",".","src_map",")","loss","=","self",".","criterion","(","scores",",","align",",","target",")","scores_data","=","scores",".","data",".","clone","(",")","scores_data","=","inputters",".","TextDataset",".","collapse_copy_scores","(","self",".","_unbottle","(","scores_data",",","batch",".","batch_size",")",",","batch",",","self",".","tgt_vocab",",","batch",".","dataset",".","src_vocabs",")","scores_data","=","self",".","_bottle","(","scores_data",")","# Correct target copy token instead of ","# tgt[i] = align[i] + len(tgt_vocab)","# for i such that tgt[i] == 0 and align[i] != 0","target_data","=","target",".","data",".","clone","(",")","correct_mask","=","target_data",".","eq","(","0",")","*","align",".","data",".","ne","(","0",")","correct_copy","=","(","align",".","data","+","len","(","self",".","tgt_vocab",")",")","*","correct_mask",".","long","(",")","target_data","=","target_data","+","correct_copy","# Compute sum of perplexities for stats","loss_data","=","loss",".","sum","(",")",".","data",".","clone","(",")","stats","=","self",".","_stats","(","loss_data",",","scores_data",",","target_data",")","if","self",".","normalize_by_length",":","# Compute Loss as NLL divided by seq length","# Compute Sequence Lengths","pad_ix","=","batch",".","dataset",".","fields","[","'tgt'","]",".","vocab",".","stoi","[","inputters",".","PAD_WORD","]","tgt_lens","=","batch",".","tgt",".","ne","(","pad_ix",")",".","float","(",")",".","sum","(","0",")","# Compute Total Loss per sequence in 
batch","loss","=","loss",".","view","(","-","1",",","batch",".","batch_size",")",".","sum","(","0",")","# Divide by length of each sequence and sum","loss","=","torch",".","div","(","loss",",","tgt_lens",")",".","sum","(",")","else",":","loss","=","loss",".","sum","(",")","return","loss",",","stats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/copy_generator.py#L176-L222"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.word_lut","parameters":"(self)","argument_list":"","return_statement":"return self.make_embedding[0][0]","docstring":"word look-up table","docstring_summary":"word look-up table","docstring_tokens":["word","look","-","up","table"],"function":"def word_lut(self):\n \"\"\" word look-up table \"\"\"\n return self.make_embedding[0][0]","function_tokens":["def","word_lut","(","self",")",":","return","self",".","make_embedding","[","0","]","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/embeddings.py#L160-L162"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.emb_luts","parameters":"(self)","argument_list":"","return_statement":"return self.make_embedding[0]","docstring":"embedding look-up table","docstring_summary":"embedding look-up table","docstring_tokens":["embedding","look","-","up","table"],"function":"def emb_luts(self):\n \"\"\" embedding look-up table \"\"\"\n return self.make_embedding[0]","function_tokens":["def","emb_luts","(","self",")",":","return","self",".","make_embedding","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/embeddings.py#L165-L167"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.load_pretrained_vectors","parameters":"(self, emb_file, fixed)","argument_list":"","return_statement":"","docstring":"Load in pretrained embeddings.\n\n Args:\n emb_file (str) : path to torch serialized embeddings\n fixed (bool) : if true, embeddings are not updated","docstring_summary":"Load in pretrained embeddings.","docstring_tokens":["Load","in","pretrained","embeddings","."],"function":"def load_pretrained_vectors(self, emb_file, fixed):\n \"\"\"Load in pretrained embeddings.\n\n Args:\n emb_file (str) : path to torch serialized embeddings\n fixed (bool) : if true, embeddings are not updated\n \"\"\"\n if emb_file:\n pretrained = torch.load(emb_file)\n pretrained_vec_size = pretrained.size(1)\n if self.word_vec_size > pretrained_vec_size:\n self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained\n elif self.word_vec_size < pretrained_vec_size:\n self.word_lut.weight.data \\\n .copy_(pretrained[:, :self.word_vec_size])\n else:\n self.word_lut.weight.data.copy_(pretrained)\n if fixed:\n self.word_lut.weight.requires_grad = 
False","function_tokens":["def","load_pretrained_vectors","(","self",",","emb_file",",","fixed",")",":","if","emb_file",":","pretrained","=","torch",".","load","(","emb_file",")","pretrained_vec_size","=","pretrained",".","size","(","1",")","if","self",".","word_vec_size",">","pretrained_vec_size",":","self",".","word_lut",".","weight",".","data","[",":",",",":","pretrained_vec_size","]","=","pretrained","elif","self",".","word_vec_size","<","pretrained_vec_size",":","self",".","word_lut",".","weight",".","data",".","copy_","(","pretrained","[",":",",",":","self",".","word_vec_size","]",")","else",":","self",".","word_lut",".","weight",".","data",".","copy_","(","pretrained",")","if","fixed",":","self",".","word_lut",".","weight",".","requires_grad","=","False"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/embeddings.py#L169-L187"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/embeddings.py","language":"python","identifier":"Embeddings.forward","parameters":"(self, source, step=None)","argument_list":"","return_statement":"return source","docstring":"Computes the embeddings for words and features.\n\n Args:\n source (`LongTensor`): index tensor `[len x batch x nfeat]`\n Return:\n `FloatTensor`: word embeddings `[len x batch x embedding_size]`","docstring_summary":"Computes the embeddings for words and features.","docstring_tokens":["Computes","the","embeddings","for","words","and","features","."],"function":"def forward(self, source, step=None):\n \"\"\"\n Computes the embeddings for words and features.\n\n Args:\n source (`LongTensor`): index tensor `[len x batch x nfeat]`\n Return:\n `FloatTensor`: word embeddings `[len x batch x embedding_size]`\n \"\"\"\n if self.position_encoding:\n for i, module in enumerate(self.make_embedding._modules.values()):\n if i == len(self.make_embedding._modules.values()) - 1:\n source = module(source, step=step)\n else:\n source = module(source)\n else:\n source = self.make_embedding(source)\n\n return source","function_tokens":["def","forward","(","self",",","source",",","step","=","None",")",":","if","self",".","position_encoding",":","for","i",",","module","in","enumerate","(","self",".","make_embedding",".","_modules",".","values","(",")",")",":","if","i","==","len","(","self",".","make_embedding",".","_modules",".","values","(",")",")","-","1",":","source","=","module","(","source",",","step","=","step",")","else",":","source","=","module","(","source",")","else",":","source","=","self",".","make_embedding","(","source",")","return","source"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/embeddings.py#L189-L207"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/sparse_losses.py","language":"python","identifier":"SparsemaxLossFunction.forward","parameters":"(ctx, input, target)","argument_list":"","return_statement":"return torch.clamp(x \/ 2 - z_k + 0.5, min=0.0)","docstring":"input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target classes","docstring_summary":"input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target 
classes","docstring_tokens":["input","(","FloatTensor",")",":","n","x","num_classes","target","(","LongTensor",")",":","n","the","indices","of","the","target","classes"],"function":"def forward(ctx, input, target):\n \"\"\"\n input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target classes\n \"\"\"\n input_batch, classes = input.size()\n target_batch = target.size(0)\n aeq(input_batch, target_batch)\n\n z_k = input.gather(1, target.unsqueeze(1)).squeeze()\n tau_z, support_size = threshold_and_support(input, dim=1)\n support = input > tau_z\n x = torch.where(\n support, input**2 - tau_z**2,\n torch.tensor(0.0, device=input.device)\n ).sum(dim=1)\n ctx.save_for_backward(input, target, tau_z)\n # clamping necessary because of numerical errors: loss should be lower\n # bounded by zero, but negative values near zero are possible without\n # the clamp\n return torch.clamp(x \/ 2 - z_k + 0.5, min=0.0)","function_tokens":["def","forward","(","ctx",",","input",",","target",")",":","input_batch",",","classes","=","input",".","size","(",")","target_batch","=","target",".","size","(","0",")","aeq","(","input_batch",",","target_batch",")","z_k","=","input",".","gather","(","1",",","target",".","unsqueeze","(","1",")",")",".","squeeze","(",")","tau_z",",","support_size","=","threshold_and_support","(","input",",","dim","=","1",")","support","=","input",">","tau_z","x","=","torch",".","where","(","support",",","input","**","2","-","tau_z","**","2",",","torch",".","tensor","(","0.0",",","device","=","input",".","device",")",")",".","sum","(","dim","=","1",")","ctx",".","save_for_backward","(","input",",","target",",","tau_z",")","# clamping necessary because of numerical errors: loss should be lower","# bounded by zero, but negative values near zero are possible without","# the clamp","return","torch",".","clamp","(","x","\/","2","-","z_k","+","0.5",",","min","=","0.0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/sparse_losses.py#L11-L31"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/conv_multi_step_attention.py","language":"python","identifier":"seq_linear","parameters":"(linear, x)","argument_list":"","return_statement":"return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)","docstring":"linear transform for 3-d tensor","docstring_summary":"linear transform for 3-d tensor","docstring_tokens":["linear","transform","for","3","-","d","tensor"],"function":"def seq_linear(linear, x):\n \"\"\" linear transform for 3-d tensor \"\"\"\n batch, hidden_size, length, _ = x.size()\n h = linear(torch.transpose(x, 1, 2).contiguous().view(\n batch * length, hidden_size))\n return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)","function_tokens":["def","seq_linear","(","linear",",","x",")",":","batch",",","hidden_size",",","length",",","_","=","x",".","size","(",")","h","=","linear","(","torch",".","transpose","(","x",",","1",",","2",")",".","contiguous","(",")",".","view","(","batch","*","length",",","hidden_size",")",")","return","torch",".","transpose","(","h",".","view","(","batch",",","length",",","hidden_size",",","1",")",",","1",",","2",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/conv_multi_step_attention.py#L11-L16"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/conv_multi_step_attention.py","language":"python","identifier":"ConvMultiStepAttention.apply_mask","parameters":"(self, mask)","argument_list":"","return_statement":"","docstring":"Apply mask","docstring_summary":"Apply mask","docstring_tokens":["Apply","mask"],"function":"def apply_mask(self, mask):\n \"\"\" Apply mask \"\"\"\n self.mask = mask","function_tokens":["def","apply_mask","(","self",",","mask",")",":","self",".","mask","=","mask"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/conv_multi_step_attention.py#L34-L36"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/conv_multi_step_attention.py","language":"python","identifier":"ConvMultiStepAttention.forward","parameters":"(self, base_target_emb, input_from_dec, encoder_out_top,\n encoder_out_combine)","argument_list":"","return_statement":"return context_output, attn","docstring":"Args:\n base_target_emb: target emb tensor\n input: output of decode conv\n encoder_out_t: the key matrix for calculation of attetion weight,\n which is the top output of encode conv\n encoder_out_combine:\n the value matrix for the attention-weighted sum,\n which is the combination of base emb and top output of encode","docstring_summary":"Args:\n base_target_emb: target emb tensor\n input: output of decode conv\n encoder_out_t: the key matrix for calculation of attetion weight,\n which is the top output of encode conv\n encoder_out_combine:\n the value matrix for the attention-weighted sum,\n which is the combination of base emb and top output of encode","docstring_tokens":["Args",":","base_target_emb",":","target","emb","tensor","input",":","output","of","decode","conv","encoder_out_t",":","the","key","matrix","for","calculation","of","attetion","weight","which","is","the","top","output","of","encode","conv","encoder_out_combine",":","the","value","matrix","for","the","attention","-","weighted","sum","which","is","the","combination","of","base","emb","and","top","output","of","encode"],"function":"def forward(self, base_target_emb, input_from_dec, encoder_out_top,\n encoder_out_combine):\n \"\"\"\n Args:\n base_target_emb: target emb tensor\n input: output of decode conv\n encoder_out_t: the key matrix for calculation of attetion weight,\n which is the top output of encode conv\n encoder_out_combine:\n the value matrix for the attention-weighted sum,\n which is the combination of base emb and top output of encode\n\n \"\"\"\n # checks\n # batch, channel, height, width = base_target_emb.size()\n batch, _, height, _ = base_target_emb.size()\n # batch_, channel_, height_, width_ = input_from_dec.size()\n batch_, _, height_, _ = input_from_dec.size()\n aeq(batch, batch_)\n aeq(height, height_)\n\n # enc_batch, enc_channel, enc_height = encoder_out_top.size()\n enc_batch, _, enc_height = encoder_out_top.size()\n # enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()\n enc_batch_, _, enc_height_ = encoder_out_combine.size()\n\n aeq(enc_batch, enc_batch_)\n aeq(enc_height, enc_height_)\n\n preatt = seq_linear(self.linear_in, input_from_dec)\n target = (base_target_emb + preatt) * SCALE_WEIGHT\n target = torch.squeeze(target, 3)\n target = torch.transpose(target, 1, 2)\n pre_attn = torch.bmm(target, encoder_out_top)\n\n if self.mask is not None:\n 
pre_attn.data.masked_fill_(self.mask, -float('inf'))\n\n pre_attn = pre_attn.transpose(0, 2)\n attn = F.softmax(pre_attn, dim=-1)\n attn = attn.transpose(0, 2).contiguous()\n context_output = torch.bmm(\n attn, torch.transpose(encoder_out_combine, 1, 2))\n context_output = torch.transpose(\n torch.unsqueeze(context_output, 3), 1, 2)\n return context_output, attn","function_tokens":["def","forward","(","self",",","base_target_emb",",","input_from_dec",",","encoder_out_top",",","encoder_out_combine",")",":","# checks","# batch, channel, height, width = base_target_emb.size()","batch",",","_",",","height",",","_","=","base_target_emb",".","size","(",")","# batch_, channel_, height_, width_ = input_from_dec.size()","batch_",",","_",",","height_",",","_","=","input_from_dec",".","size","(",")","aeq","(","batch",",","batch_",")","aeq","(","height",",","height_",")","# enc_batch, enc_channel, enc_height = encoder_out_top.size()","enc_batch",",","_",",","enc_height","=","encoder_out_top",".","size","(",")","# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()","enc_batch_",",","_",",","enc_height_","=","encoder_out_combine",".","size","(",")","aeq","(","enc_batch",",","enc_batch_",")","aeq","(","enc_height",",","enc_height_",")","preatt","=","seq_linear","(","self",".","linear_in",",","input_from_dec",")","target","=","(","base_target_emb","+","preatt",")","*","SCALE_WEIGHT","target","=","torch",".","squeeze","(","target",",","3",")","target","=","torch",".","transpose","(","target",",","1",",","2",")","pre_attn","=","torch",".","bmm","(","target",",","encoder_out_top",")","if","self",".","mask","is","not","None",":","pre_attn",".","data",".","masked_fill_","(","self",".","mask",",","-","float","(","'inf'",")",")","pre_attn","=","pre_attn",".","transpose","(","0",",","2",")","attn","=","F",".","softmax","(","pre_attn",",","dim","=","-","1",")","attn","=","attn",".","transpose","(","0",",","2",")",".","contiguous","(",")","context_output","=","torch",".","bmm","(","attn",",","torch",".","transpose","(","encoder_out_combine",",","1",",","2",")",")","context_output","=","torch",".","transpose","(","torch",".","unsqueeze","(","context_output",",","3",")",",","1",",","2",")","return","context_output",",","attn"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/conv_multi_step_attention.py#L38-L83"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/weight_norm.py","language":"python","identifier":"get_var_maybe_avg","parameters":"(namespace, var_name, training, polyak_decay)","argument_list":"","return_statement":"","docstring":"utility for retrieving polyak averaged params\n Update average","docstring_summary":"utility for retrieving polyak averaged params\n Update average","docstring_tokens":["utility","for","retrieving","polyak","averaged","params","Update","average"],"function":"def get_var_maybe_avg(namespace, var_name, training, polyak_decay):\n \"\"\" utility for retrieving polyak averaged params\n Update average\n \"\"\"\n v = getattr(namespace, var_name)\n v_avg = getattr(namespace, var_name + '_avg')\n v_avg -= (1 - polyak_decay) * (v_avg - v.data)\n\n if training:\n return v\n else:\n return 
v_avg","function_tokens":["def","get_var_maybe_avg","(","namespace",",","var_name",",","training",",","polyak_decay",")",":","v","=","getattr","(","namespace",",","var_name",")","v_avg","=","getattr","(","namespace",",","var_name","+","'_avg'",")","v_avg","-=","(","1","-","polyak_decay",")","*","(","v_avg","-","v",".","data",")","if","training",":","return","v","else",":","return","v_avg"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/weight_norm.py#L8-L19"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/modules\/weight_norm.py","language":"python","identifier":"get_vars_maybe_avg","parameters":"(namespace, var_names, training, polyak_decay)","argument_list":"","return_statement":"return vars","docstring":"utility for retrieving polyak averaged params","docstring_summary":"utility for retrieving polyak averaged params","docstring_tokens":["utility","for","retrieving","polyak","averaged","params"],"function":"def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):\n \"\"\" utility for retrieving polyak averaged params \"\"\"\n vars = []\n for vn in var_names:\n vars.append(get_var_maybe_avg(\n namespace, vn, training, polyak_decay))\n return vars","function_tokens":["def","get_vars_maybe_avg","(","namespace",",","var_names",",","training",",","polyak_decay",")",":","vars","=","[","]","for","vn","in","var_names",":","vars",".","append","(","get_var_maybe_avg","(","namespace",",","vn",",","training",",","polyak_decay",")",")","return","vars"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/modules\/weight_norm.py#L22-L28"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase.__reduce_ex__","parameters":"(self, proto)","argument_list":"","return_statement":"return super(DatasetBase, self).__reduce_ex__()","docstring":"This is a hack. Something is broken with torch pickle.","docstring_summary":"This is a hack. Something is broken with torch pickle.","docstring_tokens":["This","is","a","hack",".","Something","is","broken","with","torch","pickle","."],"function":"def __reduce_ex__(self, proto):\n \"This is a hack. 
Something is broken with torch pickle.\"\n return super(DatasetBase, self).__reduce_ex__()","function_tokens":["def","__reduce_ex__","(","self",",","proto",")",":","return","super","(","DatasetBase",",","self",")",".","__reduce_ex__","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/dataset_base.py#L38-L40"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase.load_fields","parameters":"(self, vocab_dict)","argument_list":"","return_statement":"","docstring":"Load fields from vocab.pt, and set the `fields` attribute.\n\n Args:\n vocab_dict (dict): a dict of loaded vocab from vocab.pt file.","docstring_summary":"Load fields from vocab.pt, and set the `fields` attribute.","docstring_tokens":["Load","fields","from","vocab",".","pt","and","set","the","fields","attribute","."],"function":"def load_fields(self, vocab_dict):\n \"\"\" Load fields from vocab.pt, and set the `fields` attribute.\n\n Args:\n vocab_dict (dict): a dict of loaded vocab from vocab.pt file.\n \"\"\"\n fields = onmt.inputters.inputter.load_fields_from_vocab(\n vocab_dict.items(), self.data_type)\n self.fields = dict([(k, f) for (k, f) in fields.items()\n if k in self.examples[0].__dict__])","function_tokens":["def","load_fields","(","self",",","vocab_dict",")",":","fields","=","onmt",".","inputters",".","inputter",".","load_fields_from_vocab","(","vocab_dict",".","items","(",")",",","self",".","data_type",")","self",".","fields","=","dict","(","[","(","k",",","f",")","for","(","k",",","f",")","in","fields",".","items","(",")","if","k","in","self",".","examples","[","0","]",".","__dict__","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/dataset_base.py#L42-L51"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase.extract_text_features","parameters":"(tokens)","argument_list":"","return_statement":"return tuple(words), features, n_feats - 1","docstring":"Args:\n tokens: A list of tokens, where each token consists of a word,\n optionally followed by u\"\uffe8\"-delimited features.\n Returns:\n A sequence of words, a sequence of features, and num of features.","docstring_summary":"Args:\n tokens: A list of tokens, where each token consists of a word,\n optionally followed by u\"\uffe8\"-delimited features.\n Returns:\n A sequence of words, a sequence of features, and num of features.","docstring_tokens":["Args",":","tokens",":","A","list","of","tokens","where","each","token","consists","of","a","word","optionally","followed","by","u","\uffe8","-","delimited","features",".","Returns",":","A","sequence","of","words","a","sequence","of","features","and","num","of","features","."],"function":"def extract_text_features(tokens):\n \"\"\"\n Args:\n tokens: A list of tokens, where each token consists of a word,\n optionally followed by u\"\uffe8\"-delimited features.\n Returns:\n A sequence of words, a sequence of features, and num of features.\n \"\"\"\n if not tokens:\n return [], [], -1\n\n specials = [PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD]\n words = []\n features = []\n n_feats = None\n\n #TODO We stop here\n for token in tokens:\n split_token = token.split(u\"\uffe8\")\n assert all([special 
!= split_token[0] for special in specials]), \\\n \"Dataset cannot contain Special Tokens\"\n\n if split_token[0]:\n words += [split_token[0]]\n features += [split_token[1:]]\n\n if n_feats is None:\n n_feats = len(split_token)\n else:\n assert len(split_token) == n_feats, \\\n \"all words must have the same number of features\"\n features = list(zip(*features))\n\n return tuple(words), features, n_feats - 1","function_tokens":["def","extract_text_features","(","tokens",")",":","if","not","tokens",":","return","[","]",",","[","]",",","-","1","specials","=","[","PAD_WORD",",","UNK_WORD",",","BOS_WORD",",","EOS_WORD","]","words","=","[","]","features","=","[","]","n_feats","=","None","#TODO We stop here","for","token","in","tokens",":","split_token","=","token",".","split","(","u\"\uffe8\")","","assert","all","(","[","special","!=","split_token","[","0","]","for","special","in","specials","]",")",",","\"Dataset cannot contain Special Tokens\"","if","split_token","[","0","]",":","words","+=","[","split_token","[","0","]","]","features","+=","[","split_token","[","1",":","]","]","if","n_feats","is","None",":","n_feats","=","len","(","split_token",")","else",":","assert","len","(","split_token",")","==","n_feats",",","\"all words must have the same number of features\"","features","=","list","(","zip","(","*","features",")",")","return","tuple","(","words",")",",","features",",","n_feats","-","1"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/dataset_base.py#L54-L87"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase._join_dicts","parameters":"(self, *args)","argument_list":"","return_statement":"return dict(chain(*[d.items() for d in args]))","docstring":"Args:\n dictionaries with disjoint keys.\n\n Returns:\n a single dictionary that has the union of these keys.","docstring_summary":"Args:\n dictionaries with disjoint keys.","docstring_tokens":["Args",":","dictionaries","with","disjoint","keys","."],"function":"def _join_dicts(self, *args):\n \"\"\"\n Args:\n dictionaries with disjoint keys.\n\n Returns:\n a single dictionary that has the union of these keys.\n \"\"\"\n return dict(chain(*[d.items() for d in args]))","function_tokens":["def","_join_dicts","(","self",",","*","args",")",":","return","dict","(","chain","(","*","[","d",".","items","(",")","for","d","in","args","]",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/dataset_base.py#L91-L99"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase._peek","parameters":"(self, seq)","argument_list":"","return_statement":"return first, chain([first], seq)","docstring":"Args:\n seq: an iterator.\n\n Returns:\n the first thing returned by calling next() on the iterator\n and an iterator created by re-chaining that value to the beginning\n of the iterator.","docstring_summary":"Args:\n seq: an iterator.","docstring_tokens":["Args",":","seq",":","an","iterator","."],"function":"def _peek(self, seq):\n \"\"\"\n Args:\n seq: an iterator.\n\n Returns:\n the first thing returned by calling next() on the iterator\n and an iterator created by re-chaining that value to the beginning\n of the iterator.\n \"\"\"\n 
first = next(seq)\n return first, chain([first], seq)","function_tokens":["def","_peek","(","self",",","seq",")",":","first","=","next","(","seq",")","return","first",",","chain","(","[","first","]",",","seq",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/dataset_base.py#L101-L112"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/dataset_base.py","language":"python","identifier":"DatasetBase._construct_example_fromlist","parameters":"(self, data, fields)","argument_list":"","return_statement":"return ex","docstring":"Args:\n data: the data to be set as the value of the attributes of\n the to-be-created `Example`, associating with respective\n `Field` objects with same key.\n fields: a dict of `torchtext.data.Field` objects. The keys\n are attributes of the to-be-created `Example`.\n\n Returns:\n the created `Example` object.","docstring_summary":"Args:\n data: the data to be set as the value of the attributes of\n the to-be-created `Example`, associating with respective\n `Field` objects with same key.\n fields: a dict of `torchtext.data.Field` objects. The keys\n are attributes of the to-be-created `Example`.","docstring_tokens":["Args",":","data",":","the","data","to","be","set","as","the","value","of","the","attributes","of","the","to","-","be","-","created","Example","associating","with","respective","Field","objects","with","same","key",".","fields",":","a","dict","of","torchtext",".","data",".","Field","objects",".","The","keys","are","attributes","of","the","to","-","be","-","created","Example","."],"function":"def _construct_example_fromlist(self, data, fields):\n \"\"\"\n Args:\n data: the data to be set as the value of the attributes of\n the to-be-created `Example`, associating with respective\n `Field` objects with same key.\n fields: a dict of `torchtext.data.Field` objects. The keys\n are attributes of the to-be-created `Example`.\n\n Returns:\n the created `Example` object.\n \"\"\"\n ex = torchtext.data.Example()\n # import pdb;pdb.set_trace()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val))\n else:\n setattr(ex, name, val)\n return ex","function_tokens":["def","_construct_example_fromlist","(","self",",","data",",","fields",")",":","ex","=","torchtext",".","data",".","Example","(",")","# import pdb;pdb.set_trace()","for","(","name",",","field",")",",","val","in","zip","(","fields",",","data",")",":","if","field","is","not","None",":","setattr","(","ex",",","name",",","field",".","preprocess","(","val",")",")","else",":","setattr","(","ex",",","name",",","val",")","return","ex"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/dataset_base.py#L114-L133"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.sort_key","parameters":"(self, ex)","argument_list":"","return_statement":"return len(ex.src)","docstring":"Sort using length of source sentences.","docstring_summary":"Sort using length of source sentences.","docstring_tokens":["Sort","using","length","of","source","sentences","."],"function":"def sort_key(self, ex):\n \"\"\" Sort using length of source sentences. 
\"\"\"\n # Default to a balanced sort, prioritizing tgt len match.\n # TODO: make this configurable.\n if hasattr(ex, \"tgt\"):\n return len(ex.src), len(ex.tgt)\n return len(ex.src)","function_tokens":["def","sort_key","(","self",",","ex",")",":","# Default to a balanced sort, prioritizing tgt len match.","# TODO: make this configurable.","if","hasattr","(","ex",",","\"tgt\"",")",":","return","len","(","ex",".","src",")",",","len","(","ex",".","tgt",")","return","len","(","ex",".","src",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L105-L111"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.collapse_copy_scores","parameters":"(scores, batch, tgt_vocab, src_vocabs)","argument_list":"","return_statement":"return scores","docstring":"Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.","docstring_summary":"Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.","docstring_tokens":["Given","scores","from","an","expanded","dictionary","corresponeding","to","a","batch","sums","together","copies","with","a","dictionary","word","when","it","is","ambigious","."],"function":"def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs):\n \"\"\"\n Given scores from an expanded dictionary\n corresponeding to a batch, sums together copies,\n with a dictionary word when it is ambigious.\n \"\"\"\n offset = len(tgt_vocab)\n for b in range(batch.batch_size):\n blank = []\n fill = []\n index = batch.indices.data[b]\n src_vocab = src_vocabs[index]\n for i in range(1, len(src_vocab)):\n sw = src_vocab.itos[i]\n ti = tgt_vocab.stoi[sw]\n if ti != 0:\n blank.append(offset + i)\n fill.append(ti)\n if blank:\n blank = torch.Tensor(blank).type_as(batch.indices.data)\n fill = torch.Tensor(fill).type_as(batch.indices.data)\n scores[:, b].index_add_(1, fill,\n scores[:, b].index_select(1, blank))\n scores[:, b].index_fill_(1, blank, 1e-10)\n return scores","function_tokens":["def","collapse_copy_scores","(","scores",",","batch",",","tgt_vocab",",","src_vocabs",")",":","offset","=","len","(","tgt_vocab",")","for","b","in","range","(","batch",".","batch_size",")",":","blank","=","[","]","fill","=","[","]","index","=","batch",".","indices",".","data","[","b","]","src_vocab","=","src_vocabs","[","index","]","for","i","in","range","(","1",",","len","(","src_vocab",")",")",":","sw","=","src_vocab",".","itos","[","i","]","ti","=","tgt_vocab",".","stoi","[","sw","]","if","ti","!=","0",":","blank",".","append","(","offset","+","i",")","fill",".","append","(","ti",")","if","blank",":","blank","=","torch",".","Tensor","(","blank",")",".","type_as","(","batch",".","indices",".","data",")","fill","=","torch",".","Tensor","(","fill",")",".","type_as","(","batch",".","indices",".","data",")","scores","[",":",",","b","]",".","index_add_","(","1",",","fill",",","scores","[",":",",","b","]",".","index_select","(","1",",","blank",")",")","scores","[",":",",","b","]",".","index_fill_","(","1",",","blank",",","1e-10",")","return","scores"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L114-L138"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.make_text_examples_nfeats_tpl","parameters":"(text_iter, text_path, truncate, side)","argument_list":"","return_statement":"return (examples_iter, num_feats)","docstring":"Args:\n text_iter(iterator): an iterator (or None) that we can loop over\n to read examples.\n It may be an openned file, a string list etc...\n text_path(str): path to file or None\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Returns:\n (example_dict iterator, num_feats) tuple.","docstring_summary":"Args:\n text_iter(iterator): an iterator (or None) that we can loop over\n to read examples.\n It may be an openned file, a string list etc...\n text_path(str): path to file or None\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".","docstring_tokens":["Args",":","text_iter","(","iterator",")",":","an","iterator","(","or","None",")","that","we","can","loop","over","to","read","examples",".","It","may","be","an","openned","file","a","string","list","etc","...","text_path","(","str",")",":","path","to","file","or","None","path","(","str",")",":","location","of","a","src","or","tgt","file",".","truncate","(","int",")",":","maximum","sequence","length","(","0","for","unlimited",")",".","side","(","str",")",":","src","or","tgt","."],"function":"def make_text_examples_nfeats_tpl(text_iter, text_path, truncate, side):\n \"\"\"\n Args:\n text_iter(iterator): an iterator (or None) that we can loop over\n to read examples.\n It may be an openned file, a string list etc...\n text_path(str): path to file or None\n path (str): location of a src or tgt file.\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Returns:\n (example_dict iterator, num_feats) tuple.\n \"\"\"\n assert side in ['src', 'tgt']\n\n if text_iter is None:\n if text_path is not None:\n text_iter = TextDataset.make_text_iterator_from_file(text_path)\n else:\n return (None, 0)\n\n # All examples have same number of features, so we peek first one\n # to get the num_feats.\n examples_nfeats_iter = \\\n TextDataset.make_examples(text_iter, truncate, side)\n\n first_ex = next(examples_nfeats_iter)\n num_feats = first_ex[1]\n\n # Chain back the first element - we only want to peek it.\n examples_nfeats_iter = chain([first_ex], examples_nfeats_iter)\n examples_iter = (ex for ex, nfeats in examples_nfeats_iter)\n\n return (examples_iter, num_feats)","function_tokens":["def","make_text_examples_nfeats_tpl","(","text_iter",",","text_path",",","truncate",",","side",")",":","assert","side","in","[","'src'",",","'tgt'","]","if","text_iter","is","None",":","if","text_path","is","not","None",":","text_iter","=","TextDataset",".","make_text_iterator_from_file","(","text_path",")","else",":","return","(","None",",","0",")","# All examples have same number of features, so we peek first one","# to get the num_feats.","examples_nfeats_iter","=","TextDataset",".","make_examples","(","text_iter",",","truncate",",","side",")","first_ex","=","next","(","examples_nfeats_iter",")","num_feats","=","first_ex","[","1","]","# Chain back the first element - we only want to peek 
it.","examples_nfeats_iter","=","chain","(","[","first_ex","]",",","examples_nfeats_iter",")","examples_iter","=","(","ex","for","ex",",","nfeats","in","examples_nfeats_iter",")","return","(","examples_iter",",","num_feats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L141-L175"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.make_examples","parameters":"(text_iter, truncate, side)","argument_list":"","return_statement":"","docstring":"Args:\n text_iter (iterator): iterator of text sequences\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Yields:\n (word, features, nfeat) triples for each line.","docstring_summary":"Args:\n text_iter (iterator): iterator of text sequences\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".","docstring_tokens":["Args",":","text_iter","(","iterator",")",":","iterator","of","text","sequences","truncate","(","int",")",":","maximum","sequence","length","(","0","for","unlimited",")",".","side","(","str",")",":","src","or","tgt","."],"function":"def make_examples(text_iter, truncate, side):\n \"\"\"\n Args:\n text_iter (iterator): iterator of text sequences\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\".\n\n Yields:\n (word, features, nfeat) triples for each line.\n \"\"\"\n for i, line in enumerate(text_iter):\n # print('*' * 10)\n line = line.strip().split()\n\n if truncate:\n line = line[:truncate]\n\n words, feats, n_feats = \\\n TextDataset.extract_text_features(line)\n\n # print (line)\n # print (words)\n\n example_dict = {side: words, \"indices\": i}\n if feats:\n prefix = side + \"_feat_\"\n example_dict.update((prefix + str(j), f)\n for j, f in enumerate(feats))\n yield example_dict, n_feats","function_tokens":["def","make_examples","(","text_iter",",","truncate",",","side",")",":","for","i",",","line","in","enumerate","(","text_iter",")",":","# print('*' * 10)","line","=","line",".","strip","(",")",".","split","(",")","if","truncate",":","line","=","line","[",":","truncate","]","words",",","feats",",","n_feats","=","TextDataset",".","extract_text_features","(","line",")","# print (line)","# print (words)","example_dict","=","{","side",":","words",",","\"indices\"",":","i","}","if","feats",":","prefix","=","side","+","\"_feat_\"","example_dict",".","update","(","(","prefix","+","str","(","j",")",",","f",")","for","j",",","f","in","enumerate","(","feats",")",")","yield","example_dict",",","n_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L178-L206"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.get_fields","parameters":"(n_src_features, n_tgt_features)","argument_list":"","return_statement":"return fields","docstring":"Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field 
objects.","docstring_summary":"Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","n_src_features","(","int",")",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features","(","int",")",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features (int): the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features (int): the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n\n\n\n fields = {}\n\n\n fields[\"src\"] = torchtext.data.Field(\n pad_token=PAD_WORD,\n include_lengths=True)\n\n\n for j in range(n_src_features):\n fields[\"src_feat_\" + str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\" + str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n def make_src(data, vocab):\n \"\"\" ? \"\"\"\n #pdb.set_trace()\n src_size = max([t.size(0) for t in data])\n \n src_vocab_size = int(max([t.max() for t in data])) + 1\n \n try:\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n except:\n\n print(src_size)\n print(len(data))\n print(src_vocab_size)\n\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab):\n \"\"\" ? \"\"\"\n\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n sequential=False)\n\n def make_sents(data, vocab):\n \"\"\" ? 
\"\"\"\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(len(data),tgt_size).long()\n\n for i, sent in enumerate(data):\n alignment[i,:sent.size(0)] = sent\n return alignment\n\n fields[\"src_sents\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long, postprocessing=make_sents,sequential=False)\n fields[\"tgt_sents\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long, postprocessing=make_sents,sequential=False)\n\n return fields","function_tokens":["def","get_fields","(","n_src_features",",","n_tgt_features",")",":","fields","=","{","}","fields","[","\"src\"","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",",","include_lengths","=","True",")","for","j","in","range","(","n_src_features",")",":","fields","[","\"src_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",")","fields","[","\"tgt\"","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","for","j","in","range","(","n_tgt_features",")",":","fields","[","\"tgt_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","def","make_src","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","#pdb.set_trace()","src_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","src_vocab_size","=","int","(","max","(","[","t",".","max","(",")","for","t","in","data","]",")",")","+","1","try",":","alignment","=","torch",".","zeros","(","src_size",",","len","(","data",")",",","src_vocab_size",")","except",":","print","(","src_size",")","print","(","len","(","data",")",")","print","(","src_vocab_size",")","for","i",",","sent","in","enumerate","(","data",")",":","for","j",",","t","in","enumerate","(","sent",")",":","alignment","[","j",",","i",",","t","]","=","1","return","alignment","fields","[","\"src_map\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_src",",","sequential","=","False",")","def","make_tgt","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","tgt_size",",","len","(","data",")",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[",":","sent",".","size","(","0",")",",","i","]","=","sent","return","alignment","fields","[","\"alignment\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_tgt",",","sequential","=","False",")","fields","[","\"indices\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","sequential","=","False",")","def","make_sents","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","len","(","data",")",",","tgt_size",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[","i",",",":","sent",".","size","(","0",")","]","=","sent","return","alignment","fields","[","\"src_sents\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_sents",",","sequential","=","False",")","fields","[","\"tgt_sents\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_sents",",","sequential","=","False",")","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L215-L308"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"TextDataset.get_num_features","parameters":"(corpus_file, side)","argument_list":"","return_statement":"return num_feats","docstring":"Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.","docstring_summary":"Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.","docstring_tokens":["Peek","one","line","and","get","number","of","features","of","it",".","(","All","lines","must","have","same","number","of","features",")",".","For","text","corpus","both","sides","are","in","text","form","thus","it","works","the","same","."],"function":"def get_num_features(corpus_file, side):\n \"\"\"\n Peek one line and get number of features of it.\n (All lines must have same number of features).\n For text corpus, both sides are in text form, thus\n it works the same.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = TextDataset.extract_text_features(f_line)\n\n return num_feats","function_tokens":["def","get_num_features","(","corpus_file",",","side",")",":","with","codecs",".","open","(","corpus_file",",","\"r\"",",","\"utf-8\"",")","as","cf",":","f_line","=","cf",".","readline","(",")",".","strip","(",")",".","split","(",")","_",",","_",",","num_feats","=","TextDataset",".","extract_text_features","(","f_line",")","return","num_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L311-L329"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.__init__","parameters":"(self, corpus_path, line_truncate, side, shard_size,\n assoc_iter=None)","argument_list":"","return_statement":"","docstring":"Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum length of a line to read.\n 0 for 
unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.","docstring_summary":"Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum length of a line to read.\n 0 for unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.","docstring_tokens":["Args",":","corpus_path",":","the","corpus","file","path",".","line_truncate",":","the","maximum","length","of","a","line","to","read",".","0","for","unlimited",".","side",":","src","or","tgt",".","shard_size",":","the","shard","size","0","means","not","sharding","the","file",".","assoc_iter",":","if","not","None","it","is","the","associate","iterator","that","this","iterator","should","align","its","step","with","."],"function":"def __init__(self, corpus_path, line_truncate, side, shard_size,\n assoc_iter=None):\n \"\"\"\n Args:\n corpus_path: the corpus file path.\n line_truncate: the maximum length of a line to read.\n 0 for unlimited.\n side: \"src\" or \"tgt\".\n shard_size: the shard size, 0 means not sharding the file.\n assoc_iter: if not None, it is the associate iterator that\n this iterator should align its step with.\n \"\"\"\n try:\n # The codecs module seems to have bugs with seek()\/tell(),\n # so we use io.open().\n self.corpus = io.open(corpus_path, \"r\", encoding=\"utf-8\")\n except IOError:\n sys.stderr.write(\"Failed to open corpus file: %s\" % corpus_path)\n sys.exit(1)\n\n self.line_truncate = line_truncate\n self.side = side\n self.shard_size = shard_size\n self.assoc_iter = assoc_iter\n self.last_pos = 0\n self.line_index = -1\n self.eof = False","function_tokens":["def","__init__","(","self",",","corpus_path",",","line_truncate",",","side",",","shard_size",",","assoc_iter","=","None",")",":","try",":","# The codecs module seems to have bugs with seek()\/tell(),","# so we use io.open().","self",".","corpus","=","io",".","open","(","corpus_path",",","\"r\"",",","encoding","=","\"utf-8\"",")","except","IOError",":","sys",".","stderr",".","write","(","\"Failed to open corpus file: %s\"","%","corpus_path",")","sys",".","exit","(","1",")","self",".","line_truncate","=","line_truncate","self",".","side","=","side","self",".","shard_size","=","shard_size","self",".","assoc_iter","=","assoc_iter","self",".","last_pos","=","0","self",".","line_index","=","-","1","self",".","eof","=","False"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L406-L432"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.__iter__","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates `self.shard_size`.","docstring_summary":"Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates 
`self.shard_size`.","docstring_tokens":["Iterator","of","(","example_dict","nfeats",")",".","On","each","call","it","iterates","over","as","many","(","example_dict","nfeats",")","tuples","until","this","shard","s","size","equals","to","or","approximates","self",".","shard_size","."],"function":"def __iter__(self):\n \"\"\"\n Iterator of (example_dict, nfeats).\n On each call, it iterates over as many (example_dict, nfeats) tuples\n until this shard's size equals to or approximates `self.shard_size`.\n \"\"\"\n iteration_index = -1\n if self.assoc_iter is not None:\n # We have associate iterator, just yields tuples\n # util we run parallel with it.\n while self.line_index < self.assoc_iter.line_index:\n line = self.corpus.readline()\n if line == '':\n raise AssertionError(\n \"Two corpuses must have same number of lines!\")\n\n self.line_index += 1\n iteration_index += 1\n yield self._example_dict_iter(line, iteration_index)\n\n if self.assoc_iter.eof:\n self.eof = True\n self.corpus.close()\n else:\n # Yield tuples util this shard's size reaches the threshold.\n self.corpus.seek(self.last_pos)\n while True:\n if self.shard_size != 0 and self.line_index % 64 == 0:\n # This part of check is time consuming on Py2 (but\n # it is quite fast on Py3, weird!). So we don't bother\n # to check for very line. Instead we chekc every 64\n # lines. Thus we are not dividing exactly per\n # `shard_size`, but it is not too much difference.\n cur_pos = self.corpus.tell()\n if cur_pos >= self.last_pos + self.shard_size:\n self.last_pos = cur_pos\n return\n\n line = self.corpus.readline()\n if line == '':\n self.eof = True\n self.corpus.close()\n return\n\n self.line_index += 1\n iteration_index += 1\n yield self._example_dict_iter(line, iteration_index)","function_tokens":["def","__iter__","(","self",")",":","iteration_index","=","-","1","if","self",".","assoc_iter","is","not","None",":","# We have associate iterator, just yields tuples","# util we run parallel with it.","while","self",".","line_index","<","self",".","assoc_iter",".","line_index",":","line","=","self",".","corpus",".","readline","(",")","if","line","==","''",":","raise","AssertionError","(","\"Two corpuses must have same number of lines!\"",")","self",".","line_index","+=","1","iteration_index","+=","1","yield","self",".","_example_dict_iter","(","line",",","iteration_index",")","if","self",".","assoc_iter",".","eof",":","self",".","eof","=","True","self",".","corpus",".","close","(",")","else",":","# Yield tuples util this shard's size reaches the threshold.","self",".","corpus",".","seek","(","self",".","last_pos",")","while","True",":","if","self",".","shard_size","!=","0","and","self",".","line_index","%","64","==","0",":","# This part of check is time consuming on Py2 (but","# it is quite fast on Py3, weird!). So we don't bother","# to check for very line. Instead we chekc every 64","# lines. 
Thus we are not dividing exactly per","# `shard_size`, but it is not too much difference.","cur_pos","=","self",".","corpus",".","tell","(",")","if","cur_pos",">=","self",".","last_pos","+","self",".","shard_size",":","self",".","last_pos","=","cur_pos","return","line","=","self",".","corpus",".","readline","(",")","if","line","==","''",":","self",".","eof","=","True","self",".","corpus",".","close","(",")","return","self",".","line_index","+=","1","iteration_index","+=","1","yield","self",".","_example_dict_iter","(","line",",","iteration_index",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L434-L480"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.hit_end","parameters":"(self)","argument_list":"","return_statement":"return self.eof","docstring":"?","docstring_summary":"?","docstring_tokens":["?"],"function":"def hit_end(self):\n \"\"\" ? \"\"\"\n return self.eof","function_tokens":["def","hit_end","(","self",")",":","return","self",".","eof"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L482-L484"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/text_dataset.py","language":"python","identifier":"ShardedTextCorpusIterator.num_feats","parameters":"(self)","argument_list":"","return_statement":"return self.n_feats","docstring":"We peek the first line and seek back to\n the beginning of the file.","docstring_summary":"We peek the first line and seek back to\n the beginning of the file.","docstring_tokens":["We","peek","the","first","line","and","seek","back","to","the","beginning","of","the","file","."],"function":"def num_feats(self):\n \"\"\"\n We peek the first line and seek back to\n the beginning of the file.\n \"\"\"\n saved_pos = self.corpus.tell()\n\n line = self.corpus.readline().split()\n if self.line_truncate:\n line = line[:self.line_truncate]\n _, _, self.n_feats = TextDataset.extract_text_features(line)\n\n self.corpus.seek(saved_pos)\n\n return self.n_feats","function_tokens":["def","num_feats","(","self",")",":","saved_pos","=","self",".","corpus",".","tell","(",")","line","=","self",".","corpus",".","readline","(",")",".","split","(",")","if","self",".","line_truncate",":","line","=","line","[",":","self",".","line_truncate","]","_",",","_",",","self",".","n_feats","=","TextDataset",".","extract_text_features","(","line",")","self",".","corpus",".","seek","(","saved_pos",")","return","self",".","n_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/text_dataset.py#L487-L501"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.sort_key","parameters":"(self, ex)","argument_list":"","return_statement":"return (ex.src.size(2), ex.src.size(1))","docstring":"Sort using the size of the image: (width, height).","docstring_summary":"Sort using the size of the image: (width, height).","docstring_tokens":["Sort","using","the","size","of","the","image",":","(","width","height",")","."],"function":"def sort_key(self, ex):\n \"\"\" Sort 
using the size of the image: (width, height).\"\"\"\n return (ex.src.size(2), ex.src.size(1))","function_tokens":["def","sort_key","(","self",",","ex",")",":","return","(","ex",".","src",".","size","(","2",")",",","ex",".","src",".","size","(","1",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/image_dataset.py#L80-L82"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.make_image_examples_nfeats_tpl","parameters":"(img_iter, img_path, img_dir,\n image_channel_size=3)","argument_list":"","return_statement":"return (examples_iter, num_feats)","docstring":"Note: one of img_iter and img_path must be not None\n Args:\n img_iter(iterator): an iterator that yields pairs (img, filename)\n (or None)\n img_path(str): location of a src file containing image paths\n (or None)\n src_dir (str): location of source images\n\n Returns:\n (example_dict iterator, num_feats) tuple","docstring_summary":"Note: one of img_iter and img_path must be not None\n Args:\n img_iter(iterator): an iterator that yields pairs (img, filename)\n (or None)\n img_path(str): location of a src file containing image paths\n (or None)\n src_dir (str): location of source images","docstring_tokens":["Note",":","one","of","img_iter","and","img_path","must","be","not","None","Args",":","img_iter","(","iterator",")",":","an","iterator","that","yields","pairs","(","img","filename",")","(","or","None",")","img_path","(","str",")",":","location","of","a","src","file","containing","image","paths","(","or","None",")","src_dir","(","str",")",":","location","of","source","images"],"function":"def make_image_examples_nfeats_tpl(img_iter, img_path, img_dir,\n image_channel_size=3):\n \"\"\"\n Note: one of img_iter and img_path must be not None\n Args:\n img_iter(iterator): an iterator that yields pairs (img, filename)\n (or None)\n img_path(str): location of a src file containing image paths\n (or None)\n src_dir (str): location of source images\n\n Returns:\n (example_dict iterator, num_feats) tuple\n \"\"\"\n if img_iter is None:\n if img_path is not None:\n img_iter = ImageDataset. 
\\\n make_img_iterator_from_file(img_path,\n img_dir,\n image_channel_size)\n else:\n raise ValueError(\"\"\"One of 'img_iter' and 'img_path'\n must be not None\"\"\")\n examples_iter = ImageDataset.make_examples(img_iter, img_dir, 'src')\n num_feats = 0 # Source side(img) has no features.\n\n return (examples_iter, num_feats)","function_tokens":["def","make_image_examples_nfeats_tpl","(","img_iter",",","img_path",",","img_dir",",","image_channel_size","=","3",")",":","if","img_iter","is","None",":","if","img_path","is","not","None",":","img_iter","=","ImageDataset",".","make_img_iterator_from_file","(","img_path",",","img_dir",",","image_channel_size",")","else",":","raise","ValueError","(","\"\"\"One of 'img_iter' and 'img_path'\n must be not None\"\"\"",")","examples_iter","=","ImageDataset",".","make_examples","(","img_iter",",","img_dir",",","'src'",")","num_feats","=","0","# Source side(img) has no features.","return","(","examples_iter",",","num_feats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/image_dataset.py#L85-L111"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.make_examples","parameters":"(img_iter, src_dir, side, truncate=None)","argument_list":"","return_statement":"","docstring":"Args:\n path (str): location of a src file containing image paths\n src_dir (str): location of source images\n side (str): 'src' or 'tgt'\n truncate: maximum img size ((0,0) or None for unlimited)\n\n Yields:\n a dictionary containing image data, path and index for each line.","docstring_summary":"Args:\n path (str): location of a src file containing image paths\n src_dir (str): location of source images\n side (str): 'src' or 'tgt'\n truncate: maximum img size ((0,0) or None for unlimited)","docstring_tokens":["Args",":","path","(","str",")",":","location","of","a","src","file","containing","image","paths","src_dir","(","str",")",":","location","of","source","images","side","(","str",")",":","src","or","tgt","truncate",":","maximum","img","size","((","0","0",")","or","None","for","unlimited",")"],"function":"def make_examples(img_iter, src_dir, side, truncate=None):\n \"\"\"\n Args:\n path (str): location of a src file containing image paths\n src_dir (str): location of source images\n side (str): 'src' or 'tgt'\n truncate: maximum img size ((0,0) or None for unlimited)\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n assert (src_dir is not None) and os.path.exists(src_dir), \\\n 'src_dir must be a valid directory if data_type is img'\n\n for index, (img, filename) in enumerate(img_iter):\n if truncate and truncate != (0, 0):\n if not (img.size(1) <= truncate[0]\n and img.size(2) <= truncate[1]):\n continue\n\n example_dict = {side: img,\n side + '_path': filename,\n 'indices': index}\n yield example_dict","function_tokens":["def","make_examples","(","img_iter",",","src_dir",",","side",",","truncate","=","None",")",":","assert","(","src_dir","is","not","None",")","and","os",".","path",".","exists","(","src_dir",")",",","'src_dir must be a valid directory if data_type is 
img'","for","index",",","(","img",",","filename",")","in","enumerate","(","img_iter",")",":","if","truncate","and","truncate","!=","(","0",",","0",")",":","if","not","(","img",".","size","(","1",")","<=","truncate","[","0","]","and","img",".","size","(","2",")","<=","truncate","[","1","]",")",":","continue","example_dict","=","{","side",":","img",",","side","+","'_path'",":","filename",",","'indices'",":","index","}","yield","example_dict"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/image_dataset.py#L114-L137"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.make_img_iterator_from_file","parameters":"(path, src_dir, image_channel_size=3)","argument_list":"","return_statement":"","docstring":"Args:\n path(str):\n src_dir(str):\n\n Yields:\n img: and image tensor\n filename(str): the image filename","docstring_summary":"Args:\n path(str):\n src_dir(str):","docstring_tokens":["Args",":","path","(","str",")",":","src_dir","(","str",")",":"],"function":"def make_img_iterator_from_file(path, src_dir, image_channel_size=3):\n \"\"\"\n Args:\n path(str):\n src_dir(str):\n\n Yields:\n img: and image tensor\n filename(str): the image filename\n \"\"\"\n from PIL import Image\n from torchvision import transforms\n\n with codecs.open(path, \"r\", \"utf-8\") as corpus_file:\n for line in corpus_file:\n filename = line.strip()\n img_path = os.path.join(src_dir, filename)\n if not os.path.exists(img_path):\n img_path = line\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % (line.strip())\n\n if (image_channel_size == 1):\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = transforms.ToTensor()(Image.open(img_path))\n\n yield img, filename","function_tokens":["def","make_img_iterator_from_file","(","path",",","src_dir",",","image_channel_size","=","3",")",":","from","PIL","import","Image","from","torchvision","import","transforms","with","codecs",".","open","(","path",",","\"r\"",",","\"utf-8\"",")","as","corpus_file",":","for","line","in","corpus_file",":","filename","=","line",".","strip","(",")","img_path","=","os",".","path",".","join","(","src_dir",",","filename",")","if","not","os",".","path",".","exists","(","img_path",")",":","img_path","=","line","assert","os",".","path",".","exists","(","img_path",")",",","'img path %s not found'","%","(","line",".","strip","(",")",")","if","(","image_channel_size","==","1",")",":","img","=","transforms",".","ToTensor","(",")","(","Image",".","fromarray","(","cv2",".","imread","(","img_path",",","0",")",")",")","else",":","img","=","transforms",".","ToTensor","(",")","(","Image",".","open","(","img_path",")",")","yield","img",",","filename"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/image_dataset.py#L140-L169"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.get_fields","parameters":"(n_src_features, n_tgt_features)","argument_list":"","return_statement":"return fields","docstring":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` 
for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.","docstring_summary":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","n_src_features",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n fields = {}\n\n def make_img(data, vocab):\n \"\"\" ? \"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n fields[\"src\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_img, sequential=False)\n\n for j in range(n_src_features):\n fields[\"src_feat_\" + str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\" + str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n def make_src(data, vocab):\n \"\"\" ? \"\"\"\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab):\n \"\"\" ? \"\"\"\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n sequential=False)\n\n return fields","function_tokens":["def","get_fields","(","n_src_features",",","n_tgt_features",")",":","fields","=","{","}","def","make_img","(","data",",","vocab",")",":","\"\"\" ? 
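The `make_img` postprocessing hook inside `get_fields` batches variable-sized images by padding every tensor to the batch-wide maximum height and width, with pad value 1 (white under `ToTensor` scaling). A runnable sketch of exactly that padding:

```python
import torch

def make_img_batch(data):
    """Pad a list of (C, H, W) tensors to the batch max height/width,
    as the make_img hook above does (fill value 1 = white)."""
    c = data[0].size(0)
    h = max(t.size(1) for t in data)
    w = max(t.size(2) for t in data)
    imgs = torch.zeros(len(data), c, h, w).fill_(1)
    for i, img in enumerate(data):
        imgs[i, :, :img.size(1), :img.size(2)] = img
    return imgs

batch = make_img_batch([torch.rand(3, 5, 7), torch.rand(3, 9, 4)])
assert batch.shape == (2, 3, 9, 7)  # padded to max H and max W
```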
\"\"\"","c","=","data","[","0","]",".","size","(","0",")","h","=","max","(","[","t",".","size","(","1",")","for","t","in","data","]",")","w","=","max","(","[","t",".","size","(","2",")","for","t","in","data","]",")","imgs","=","torch",".","zeros","(","len","(","data",")",",","c",",","h",",","w",")",".","fill_","(","1",")","for","i",",","img","in","enumerate","(","data",")",":","imgs","[","i",",",":",",","0",":","img",".","size","(","1",")",",","0",":","img",".","size","(","2",")","]","=","img","return","imgs","fields","[","\"src\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_img",",","sequential","=","False",")","for","j","in","range","(","n_src_features",")",":","fields","[","\"src_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",")","fields","[","\"tgt\"","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","for","j","in","range","(","n_tgt_features",")",":","fields","[","\"tgt_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","def","make_src","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","src_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","src_vocab_size","=","max","(","[","t",".","max","(",")","for","t","in","data","]",")","+","1","alignment","=","torch",".","zeros","(","src_size",",","len","(","data",")",",","src_vocab_size",")","for","i",",","sent","in","enumerate","(","data",")",":","for","j",",","t","in","enumerate","(","sent",")",":","alignment","[","j",",","i",",","t","]","=","1","return","alignment","fields","[","\"src_map\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_src",",","sequential","=","False",")","def","make_tgt","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","tgt_size",",","len","(","data",")",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[",":","sent",".","size","(","0",")",",","i","]","=","sent","return","alignment","fields","[","\"alignment\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_tgt",",","sequential","=","False",")","fields","[","\"indices\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","sequential","=","False",")","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/image_dataset.py#L172-L243"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/image_dataset.py","language":"python","identifier":"ImageDataset.get_num_features","parameters":"(corpus_file, side)","argument_list":"","return_statement":"return num_feats","docstring":"For image corpus, source side is in form of image, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.","docstring_summary":"For image corpus, source side is in form of image, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.","docstring_tokens":["For","image","corpus","source","side","is","in","form","of","image","thus","no","feature",";","while","target","side","is","in","form","of","text","thus","we","can","extract","its","text","features","."],"function":"def get_num_features(corpus_file, side):\n \"\"\"\n For image corpus, source side is in form of image, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n if side == 'src':\n num_feats = 0\n else:\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = ImageDataset.extract_text_features(f_line)\n\n return num_feats","function_tokens":["def","get_num_features","(","corpus_file",",","side",")",":","if","side","==","'src'",":","num_feats","=","0","else",":","with","codecs",".","open","(","corpus_file",",","\"r\"",",","\"utf-8\"",")","as","cf",":","f_line","=","cf",".","readline","(",")",".","strip","(",")",".","split","(",")","_",",","_",",","num_feats","=","ImageDataset",".","extract_text_features","(","f_line",")","return","num_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/image_dataset.py#L246-L266"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.sort_key","parameters":"(self, ex)","argument_list":"","return_statement":"return ex.src.size(1)","docstring":"Sort using duration time of the sound spectrogram.","docstring_summary":"Sort using duration time of the sound 
spectrogram.","docstring_tokens":["Sort","using","duration","time","of","the","sound","spectrogram","."],"function":"def sort_key(self, ex):\n \"\"\" Sort using duration time of the sound spectrogram. \"\"\"\n return ex.src.size(1)","function_tokens":["def","sort_key","(","self",",","ex",")",":","return","ex",".","src",".","size","(","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py#L90-L92"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.make_audio_examples_nfeats_tpl","parameters":"(path, audio_dir,\n sample_rate, window_size,\n window_stride, window,\n normalize_audio, truncate=None)","argument_list":"","return_statement":"return (examples_iter, num_feats)","docstring":"Args:\n path (str): location of a src file containing audio paths.\n audio_dir (str): location of source audio files.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Returns:\n (example_dict iterator, num_feats) tuple","docstring_summary":"Args:\n path (str): location of a src file containing audio paths.\n audio_dir (str): location of source audio files.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).","docstring_tokens":["Args",":","path","(","str",")",":","location","of","a","src","file","containing","audio","paths",".","audio_dir","(","str",")",":","location","of","source","audio","files",".","sample_rate","(","int",")",":","sample_rate",".","window_size","(","float",")",":","window","size","for","spectrogram","in","seconds",".","window_stride","(","float",")",":","window","stride","for","spectrogram","in","seconds",".","window","(","str",")",":","window","type","for","spectrogram","generation",".","normalize_audio","(","bool",")",":","subtract","spectrogram","by","mean","and","divide","by","std","or","not",".","truncate","(","int",")",":","maximum","audio","length","(","0","or","None","for","unlimited",")","."],"function":"def make_audio_examples_nfeats_tpl(path, audio_dir,\n sample_rate, window_size,\n window_stride, window,\n normalize_audio, truncate=None):\n \"\"\"\n Args:\n path (str): location of a src file containing audio paths.\n audio_dir (str): location of source audio files.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Returns:\n (example_dict iterator, num_feats) tuple\n \"\"\"\n examples_iter = AudioDataset.read_audio_file(\n path, audio_dir, \"src\", sample_rate,\n window_size, window_stride, 
window,\n normalize_audio, truncate)\n num_feats = 0 # Source side(audio) has no features.\n\n return (examples_iter, num_feats)","function_tokens":["def","make_audio_examples_nfeats_tpl","(","path",",","audio_dir",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","truncate","=","None",")",":","examples_iter","=","AudioDataset",".","read_audio_file","(","path",",","audio_dir",",","\"src\"",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","truncate",")","num_feats","=","0","# Source side(audio) has no features.","return","(","examples_iter",",","num_feats",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py#L95-L120"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.read_audio_file","parameters":"(path, src_dir, side, sample_rate, window_size,\n window_stride, window, normalize_audio,\n truncate=None)","argument_list":"","return_statement":"","docstring":"Args:\n path (str): location of a src file containing audio paths.\n src_dir (str): location of source audio files.\n side (str): 'src' or 'tgt'.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Yields:\n a dictionary containing audio data for each line.","docstring_summary":"Args:\n path (str): location of a src file containing audio paths.\n src_dir (str): location of source audio files.\n side (str): 'src' or 'tgt'.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).","docstring_tokens":["Args",":","path","(","str",")",":","location","of","a","src","file","containing","audio","paths",".","src_dir","(","str",")",":","location","of","source","audio","files",".","side","(","str",")",":","src","or","tgt",".","sample_rate","(","int",")",":","sample_rate",".","window_size","(","float",")",":","window","size","for","spectrogram","in","seconds",".","window_stride","(","float",")",":","window","stride","for","spectrogram","in","seconds",".","window","(","str",")",":","window","type","for","spectrogram","generation",".","normalize_audio","(","bool",")",":","subtract","spectrogram","by","mean","and","divide","by","std","or","not",".","truncate","(","int",")",":","maximum","audio","length","(","0","or","None","for","unlimited",")","."],"function":"def read_audio_file(path, src_dir, side, sample_rate, window_size,\n window_stride, window, normalize_audio,\n truncate=None):\n \"\"\"\n Args:\n path (str): location of a src file containing audio paths.\n src_dir (str): location of source audio files.\n side (str): 'src' or 'tgt'.\n sample_rate (int): sample_rate.\n window_size (float) : window size for spectrogram in seconds.\n window_stride (float): window stride for spectrogram in seconds.\n 
window (str): window type for spectrogram generation.\n normalize_audio (bool): subtract spectrogram by mean and divide\n by std or not.\n truncate (int): maximum audio length (0 or None for unlimited).\n\n Yields:\n a dictionary containing audio data for each line.\n \"\"\"\n assert (src_dir is not None) and os.path.exists(src_dir),\\\n \"src_dir must be a valid directory if data_type is audio\"\n\n import torchaudio\n import librosa\n import numpy as np\n\n with codecs.open(path, \"r\", \"utf-8\") as corpus_file:\n index = 0\n for line in corpus_file:\n audio_path = os.path.join(src_dir, line.strip())\n if not os.path.exists(audio_path):\n audio_path = line\n\n assert os.path.exists(audio_path), \\\n 'audio path %s not found' % (line.strip())\n\n sound, sample_rate = torchaudio.load(audio_path)\n if truncate and truncate > 0:\n if sound.size(0) > truncate:\n continue\n\n assert sample_rate == sample_rate, \\\n 'Sample rate of %s != -sample_rate (%d vs %d)' \\\n % (audio_path, sample_rate, sample_rate)\n\n sound = sound.numpy()\n if len(sound.shape) > 1:\n if sound.shape[1] == 1:\n sound = sound.squeeze()\n else:\n sound = sound.mean(axis=1) # average multiple channels\n\n n_fft = int(sample_rate * window_size)\n win_length = n_fft\n hop_length = int(sample_rate * window_stride)\n # STFT\n d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=window)\n spect, _ = librosa.magphase(d)\n spect = np.log1p(spect)\n spect = torch.FloatTensor(spect)\n if normalize_audio:\n mean = spect.mean()\n std = spect.std()\n spect.add_(-mean)\n spect.div_(std)\n\n example_dict = {side: spect,\n side + '_path': line.strip(),\n 'indices': index}\n index += 1\n\n yield example_dict","function_tokens":["def","read_audio_file","(","path",",","src_dir",",","side",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","truncate","=","None",")",":","assert","(","src_dir","is","not","None",")","and","os",".","path",".","exists","(","src_dir",")",",","\"src_dir must be a valid directory if data_type is audio\"","import","torchaudio","import","librosa","import","numpy","as","np","with","codecs",".","open","(","path",",","\"r\"",",","\"utf-8\"",")","as","corpus_file",":","index","=","0","for","line","in","corpus_file",":","audio_path","=","os",".","path",".","join","(","src_dir",",","line",".","strip","(",")",")","if","not","os",".","path",".","exists","(","audio_path",")",":","audio_path","=","line","assert","os",".","path",".","exists","(","audio_path",")",",","'audio path %s not found'","%","(","line",".","strip","(",")",")","sound",",","sample_rate","=","torchaudio",".","load","(","audio_path",")","if","truncate","and","truncate",">","0",":","if","sound",".","size","(","0",")",">","truncate",":","continue","assert","sample_rate","==","sample_rate",",","'Sample rate of %s != -sample_rate (%d vs %d)'","%","(","audio_path",",","sample_rate",",","sample_rate",")","sound","=","sound",".","numpy","(",")","if","len","(","sound",".","shape",")",">","1",":","if","sound",".","shape","[","1","]","==","1",":","sound","=","sound",".","squeeze","(",")","else",":","sound","=","sound",".","mean","(","axis","=","1",")","# average multiple channels","n_fft","=","int","(","sample_rate","*","window_size",")","win_length","=","n_fft","hop_length","=","int","(","sample_rate","*","window_stride",")","# 
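`read_audio_file` above is the core of the audio path: load, optionally truncate, mix down to mono, then STFT, magnitude, `log1p`, and per-utterance mean/std normalisation. Note that the extracted function's `assert sample_rate == sample_rate` compares the loaded rate against itself, so the intended mismatch check can never fire; the sketch below therefore takes the expected rate as an explicit argument:

```python
import numpy as np
import librosa
import torch

def to_spectrogram(sound, sample_rate, window_size=0.02,
                   window_stride=0.01, window="hamming",
                   normalize_audio=True):
    """sound: mono float waveform -> (n_fft // 2 + 1, T) FloatTensor,
    mirroring the pipeline in read_audio_file above."""
    n_fft = int(sample_rate * window_size)
    hop_length = int(sample_rate * window_stride)
    d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                     win_length=n_fft, window=window)
    spect, _ = librosa.magphase(d)          # keep magnitude, drop phase
    spect = torch.FloatTensor(np.log1p(spect))
    if normalize_audio:
        spect = (spect - spect.mean()) / spect.std()
    return spect

spect = to_spectrogram(np.random.randn(16000).astype(np.float32), 16000)
assert spect.shape[0] == 161  # n_fft // 2 + 1 bins for n_fft = 320
```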
STFT","d","=","librosa",".","stft","(","sound",",","n_fft","=","n_fft",",","hop_length","=","hop_length",",","win_length","=","win_length",",","window","=","window",")","spect",",","_","=","librosa",".","magphase","(","d",")","spect","=","np",".","log1p","(","spect",")","spect","=","torch",".","FloatTensor","(","spect",")","if","normalize_audio",":","mean","=","spect",".","mean","(",")","std","=","spect",".","std","(",")","spect",".","add_","(","-","mean",")","spect",".","div_","(","std",")","example_dict","=","{","side",":","spect",",","side","+","'_path'",":","line",".","strip","(",")",",","'indices'",":","index","}","index","+=","1","yield","example_dict"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py#L123-L195"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.get_fields","parameters":"(n_src_features, n_tgt_features)","argument_list":"","return_statement":"return fields","docstring":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.","docstring_summary":"Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","n_src_features",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(n_src_features, n_tgt_features):\n \"\"\"\n Args:\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values\n are the corresponding Field objects.\n \"\"\"\n fields = {}\n\n def make_audio(data, vocab):\n \"\"\" ? \"\"\"\n nfft = data[0].size(0)\n t = max([t.size(1) for t in data])\n sounds = torch.zeros(len(data), 1, nfft, t)\n for i, spect in enumerate(data):\n sounds[i, :, :, 0:spect.size(1)] = spect\n return sounds\n\n fields[\"src\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_audio, sequential=False)\n\n for j in range(n_src_features):\n fields[\"src_feat_\" + str(j)] = \\\n torchtext.data.Field(pad_token=PAD_WORD)\n\n fields[\"tgt\"] = torchtext.data.Field(\n init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n for j in range(n_tgt_features):\n fields[\"tgt_feat_\" + str(j)] = \\\n torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,\n pad_token=PAD_WORD)\n\n def make_src(data, vocab):\n \"\"\" ? \"\"\"\n src_size = max([t.size(0) for t in data])\n src_vocab_size = max([t.max() for t in data]) + 1\n alignment = torch.zeros(src_size, len(data), src_vocab_size)\n for i, sent in enumerate(data):\n for j, t in enumerate(sent):\n alignment[j, i, t] = 1\n return alignment\n\n fields[\"src_map\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=make_src, sequential=False)\n\n def make_tgt(data, vocab):\n \"\"\" ? 
\"\"\"\n tgt_size = max([t.size(0) for t in data])\n alignment = torch.zeros(tgt_size, len(data)).long()\n for i, sent in enumerate(data):\n alignment[:sent.size(0), i] = sent\n return alignment\n\n fields[\"alignment\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n postprocessing=make_tgt, sequential=False)\n\n fields[\"indices\"] = torchtext.data.Field(\n use_vocab=False, dtype=torch.long,\n sequential=False)\n\n return fields","function_tokens":["def","get_fields","(","n_src_features",",","n_tgt_features",")",":","fields","=","{","}","def","make_audio","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","nfft","=","data","[","0","]",".","size","(","0",")","t","=","max","(","[","t",".","size","(","1",")","for","t","in","data","]",")","sounds","=","torch",".","zeros","(","len","(","data",")",",","1",",","nfft",",","t",")","for","i",",","spect","in","enumerate","(","data",")",":","sounds","[","i",",",":",",",":",",","0",":","spect",".","size","(","1",")","]","=","spect","return","sounds","fields","[","\"src\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_audio",",","sequential","=","False",")","for","j","in","range","(","n_src_features",")",":","fields","[","\"src_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","pad_token","=","PAD_WORD",")","fields","[","\"tgt\"","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","for","j","in","range","(","n_tgt_features",")",":","fields","[","\"tgt_feat_\"","+","str","(","j",")","]","=","torchtext",".","data",".","Field","(","init_token","=","BOS_WORD",",","eos_token","=","EOS_WORD",",","pad_token","=","PAD_WORD",")","def","make_src","(","data",",","vocab",")",":","\"\"\" ? \"\"\"","src_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","src_vocab_size","=","max","(","[","t",".","max","(",")","for","t","in","data","]",")","+","1","alignment","=","torch",".","zeros","(","src_size",",","len","(","data",")",",","src_vocab_size",")","for","i",",","sent","in","enumerate","(","data",")",":","for","j",",","t","in","enumerate","(","sent",")",":","alignment","[","j",",","i",",","t","]","=","1","return","alignment","fields","[","\"src_map\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","float",",","postprocessing","=","make_src",",","sequential","=","False",")","def","make_tgt","(","data",",","vocab",")",":","\"\"\" ? 
\"\"\"","tgt_size","=","max","(","[","t",".","size","(","0",")","for","t","in","data","]",")","alignment","=","torch",".","zeros","(","tgt_size",",","len","(","data",")",")",".","long","(",")","for","i",",","sent","in","enumerate","(","data",")",":","alignment","[",":","sent",".","size","(","0",")",",","i","]","=","sent","return","alignment","fields","[","\"alignment\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","postprocessing","=","make_tgt",",","sequential","=","False",")","fields","[","\"indices\"","]","=","torchtext",".","data",".","Field","(","use_vocab","=","False",",","dtype","=","torch",".","long",",","sequential","=","False",")","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py#L198-L268"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py","language":"python","identifier":"AudioDataset.get_num_features","parameters":"(corpus_file, side)","argument_list":"","return_statement":"return num_feats","docstring":"For audio corpus, source side is in form of audio, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.","docstring_summary":"For audio corpus, source side is in form of audio, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.","docstring_tokens":["For","audio","corpus","source","side","is","in","form","of","audio","thus","no","feature",";","while","target","side","is","in","form","of","text","thus","we","can","extract","its","text","features","."],"function":"def get_num_features(corpus_file, side):\n \"\"\"\n For audio corpus, source side is in form of audio, thus\n no feature; while target side is in form of text, thus\n we can extract its text features.\n\n Args:\n corpus_file (str): file path to get the features.\n side (str): 'src' or 'tgt'.\n\n Returns:\n number of features on `side`.\n \"\"\"\n if side == 'src':\n num_feats = 0\n else:\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = AudioDataset.extract_text_features(f_line)\n\n return num_feats","function_tokens":["def","get_num_features","(","corpus_file",",","side",")",":","if","side","==","'src'",":","num_feats","=","0","else",":","with","codecs",".","open","(","corpus_file",",","\"r\"",",","\"utf-8\"",")","as","cf",":","f_line","=","cf",".","readline","(",")",".","strip","(",")",".","split","(",")","_",",","_",",","num_feats","=","AudioDataset",".","extract_text_features","(","f_line",")","return","num_feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/audio_dataset.py#L271-L291"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"get_fields","parameters":"(data_type, n_src_features, n_tgt_features)","argument_list":"","return_statement":"","docstring":"Args:\n data_type: type of the source input. 
Options are [text|img|audio].\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values are the\n corresponding Field objects.","docstring_summary":"Args:\n data_type: type of the source input. Options are [text|img|audio].\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.","docstring_tokens":["Args",":","data_type",":","type","of","the","source","input",".","Options","are","[","text|img|audio","]",".","n_src_features",":","the","number","of","source","features","to","create","torchtext",".","data",".","Field","for",".","n_tgt_features",":","the","number","of","target","features","to","create","torchtext",".","data",".","Field","for","."],"function":"def get_fields(data_type, n_src_features, n_tgt_features):\n \"\"\"\n Args:\n data_type: type of the source input. Options are [text|img|audio].\n n_src_features: the number of source features to\n create `torchtext.data.Field` for.\n n_tgt_features: the number of target features to\n create `torchtext.data.Field` for.\n\n Returns:\n A dictionary whose keys are strings and whose values are the\n corresponding Field objects.\n \"\"\"\n if data_type == 'text':\n return TextDataset.get_fields(n_src_features, n_tgt_features)\n elif data_type == 'img':\n return ImageDataset.get_fields(n_src_features, n_tgt_features)\n elif data_type == 'audio':\n return AudioDataset.get_fields(n_src_features, n_tgt_features)\n else:\n raise ValueError(\"Data type not implemented\")","function_tokens":["def","get_fields","(","data_type",",","n_src_features",",","n_tgt_features",")",":","if","data_type","==","'text'",":","return","TextDataset",".","get_fields","(","n_src_features",",","n_tgt_features",")","elif","data_type","==","'img'",":","return","ImageDataset",".","get_fields","(","n_src_features",",","n_tgt_features",")","elif","data_type","==","'audio'",":","return","AudioDataset",".","get_fields","(","n_src_features",",","n_tgt_features",")","else",":","raise","ValueError","(","\"Data type not implemented\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L36-L56"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"load_fields_from_vocab","parameters":"(vocab, data_type=\"text\")","argument_list":"","return_statement":"return fields","docstring":"Load Field objects from `vocab.pt` file.","docstring_summary":"Load Field objects from `vocab.pt` file.","docstring_tokens":["Load","Field","objects","from","vocab",".","pt","file","."],"function":"def load_fields_from_vocab(vocab, data_type=\"text\"):\n \"\"\"\n Load Field objects from `vocab.pt` file.\n \"\"\"\n vocab = dict(vocab)\n n_src_features = len(collect_features(vocab, 'src'))\n n_tgt_features = len(collect_features(vocab, 'tgt'))\n\n\n fields = get_fields(data_type, n_src_features, n_tgt_features)\n\n\n\n for k, v in vocab.items():\n # Hack. 
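The `# Hack. Can't pickle defaultdict` comment in `load_fields_from_vocab` reflects a real pickling limitation: a `defaultdict` whose `default_factory` is a lambda cannot be serialised, so vocabularies are stored with a plain dict-backed `stoi` and rewrapped on load. A runnable demonstration of the workaround:

```python
import pickle
from collections import defaultdict

stoi = defaultdict(lambda: 0, {"<unk>": 0, "hello": 4})
payload = pickle.dumps(dict(stoi))                 # plain dict pickles fine
restored = defaultdict(lambda: 0, pickle.loads(payload))
assert restored["never-seen"] == 0                 # unknowns fall back to <unk>
```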
Can't pickle defaultdict :(\n v.stoi = defaultdict(lambda: 0, v.stoi)\n fields[k].vocab = v\n # TODO: until here, fields has 'tgt_sents'\n\n\n return fields","function_tokens":["def","load_fields_from_vocab","(","vocab",",","data_type","=","\"text\"",")",":","vocab","=","dict","(","vocab",")","n_src_features","=","len","(","collect_features","(","vocab",",","'src'",")",")","n_tgt_features","=","len","(","collect_features","(","vocab",",","'tgt'",")",")","fields","=","get_fields","(","data_type",",","n_src_features",",","n_tgt_features",")","for","k",",","v","in","vocab",".","items","(",")",":","# Hack. Can't pickle defaultdict :(","v",".","stoi","=","defaultdict","(","lambda",":","0",",","v",".","stoi",")","fields","[","k","]",".","vocab","=","v","# TODO: until here, fields has 'tgt_sents'","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L59-L79"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"save_fields_to_vocab","parameters":"(fields)","argument_list":"","return_statement":"return vocab","docstring":"Save Vocab objects in Field objects to `vocab.pt` file.","docstring_summary":"Save Vocab objects in Field objects to `vocab.pt` file.","docstring_tokens":["Save","Vocab","objects","in","Field","objects","to","vocab",".","pt","file","."],"function":"def save_fields_to_vocab(fields):\n \"\"\"\n Save Vocab objects in Field objects to `vocab.pt` file.\n \"\"\"\n vocab = []\n for k, f in fields.items():\n if f is not None and 'vocab' in f.__dict__:\n f.vocab.stoi = f.vocab.stoi\n vocab.append((k, f.vocab))\n return vocab","function_tokens":["def","save_fields_to_vocab","(","fields",")",":","vocab","=","[","]","for","k",",","f","in","fields",".","items","(",")",":","if","f","is","not","None","and","'vocab'","in","f",".","__dict__",":","f",".","vocab",".","stoi","=","f",".","vocab",".","stoi","vocab",".","append","(","(","k",",","f",".","vocab",")",")","return","vocab"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L82-L91"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"merge_vocabs","parameters":"(vocabs, vocab_size=None)","argument_list":"","return_statement":"return torchtext.vocab.Vocab(merged,\n specials=[UNK_WORD, PAD_WORD,\n BOS_WORD, EOS_WORD],\n max_size=vocab_size)","docstring":"Merge individual vocabularies (assumed to be generated from disjoint\n documents) into a larger vocabulary.\n\n Args:\n vocabs: `torchtext.vocab.Vocab` vocabularies to be merged\n vocab_size: `int` the final vocabulary size. `None` for no limit.\n Return:\n `torchtext.vocab.Vocab`","docstring_summary":"Merge individual vocabularies (assumed to be generated from disjoint\n documents) into a larger vocabulary.","docstring_tokens":["Merge","individual","vocabularies","(","assumed","to","be","generated","from","disjoint","documents",")","into","a","larger","vocabulary","."],"function":"def merge_vocabs(vocabs, vocab_size=None):\n \"\"\"\n Merge individual vocabularies (assumed to be generated from disjoint\n documents) into a larger vocabulary.\n\n Args:\n vocabs: `torchtext.vocab.Vocab` vocabularies to be merged\n vocab_size: `int` the final vocabulary size. 
`None` for no limit.\n Return:\n `torchtext.vocab.Vocab`\n \"\"\"\n merged = sum([vocab.freqs for vocab in vocabs], Counter())\n return torchtext.vocab.Vocab(merged,\n specials=[UNK_WORD, PAD_WORD,\n BOS_WORD, EOS_WORD],\n max_size=vocab_size)","function_tokens":["def","merge_vocabs","(","vocabs",",","vocab_size","=","None",")",":","merged","=","sum","(","[","vocab",".","freqs","for","vocab","in","vocabs","]",",","Counter","(",")",")","return","torchtext",".","vocab",".","Vocab","(","merged",",","specials","=","[","UNK_WORD",",","PAD_WORD",",","BOS_WORD",",","EOS_WORD","]",",","max_size","=","vocab_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L94-L109"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"get_num_features","parameters":"(data_type, corpus_file, side)","argument_list":"","return_statement":"","docstring":"Args:\n data_type (str): type of the source input.\n Options are [text|img|audio].\n corpus_file (str): file path to get the features.\n side (str): for source or for target.\n\n Returns:\n number of features on `side`.","docstring_summary":"Args:\n data_type (str): type of the source input.\n Options are [text|img|audio].\n corpus_file (str): file path to get the features.\n side (str): for source or for target.","docstring_tokens":["Args",":","data_type","(","str",")",":","type","of","the","source","input",".","Options","are","[","text|img|audio","]",".","corpus_file","(","str",")",":","file","path","to","get","the","features",".","side","(","str",")",":","for","source","or","for","target","."],"function":"def get_num_features(data_type, corpus_file, side):\n \"\"\"\n Args:\n data_type (str): type of the source input.\n Options are [text|img|audio].\n corpus_file (str): file path to get the features.\n side (str): for source or for target.\n\n Returns:\n number of features on `side`.\n \"\"\"\n assert side in [\"src\", \"tgt\"]\n\n if data_type == 'text':\n return TextDataset.get_num_features(corpus_file, side)\n elif data_type == 'img':\n return ImageDataset.get_num_features(corpus_file, side)\n elif data_type == 'audio':\n return AudioDataset.get_num_features(corpus_file, side)\n else:\n raise ValueError(\"Data type not implemented\")","function_tokens":["def","get_num_features","(","data_type",",","corpus_file",",","side",")",":","assert","side","in","[","\"src\"",",","\"tgt\"","]","if","data_type","==","'text'",":","return","TextDataset",".","get_num_features","(","corpus_file",",","side",")","elif","data_type","==","'img'",":","return","ImageDataset",".","get_num_features","(","corpus_file",",","side",")","elif","data_type","==","'audio'",":","return","AudioDataset",".","get_num_features","(","corpus_file",",","side",")","else",":","raise","ValueError","(","\"Data type not implemented\"",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L112-L132"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"make_features","parameters":"(batch, side, data_type='text')","argument_list":"","return_statement":"","docstring":"Args:\n batch (Tensor): a batch of source or target data.\n side (str): for source or for target.\n data_type (str): type 
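The heart of `merge_vocabs` is a single `Counter` sum over the per-vocabulary frequency tables, after which one (optionally size-capped) `Vocab` is rebuilt with the special tokens pinned first. The merge itself, in miniature:

```python
from collections import Counter

freqs = [Counter({"the": 5, "cat": 2}), Counter({"the": 3, "dog": 4})]
merged = sum(freqs, Counter())  # element-wise frequency addition
assert merged == Counter({"the": 8, "dog": 4, "cat": 2})
```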
of the source input.\n Options are [text|img|audio].\n Returns:\n A sequence of src\/tgt tensors with optional feature tensors\n of size (len x batch).","docstring_summary":"Args:\n batch (Tensor): a batch of source or target data.\n side (str): for source or for target.\n data_type (str): type of the source input.\n Options are [text|img|audio].\n Returns:\n A sequence of src\/tgt tensors with optional feature tensors\n of size (len x batch).","docstring_tokens":["Args",":","batch","(","Tensor",")",":","a","batch","of","source","or","target","data",".","side","(","str",")",":","for","source","or","for","target",".","data_type","(","str",")",":","type","of","the","source","input",".","Options","are","[","text|img|audio","]",".","Returns",":","A","sequence","of","src","\/","tgt","tensors","with","optional","feature","tensors","of","size","(","len","x","batch",")","."],"function":"def make_features(batch, side, data_type='text'):\n \"\"\"\n Args:\n batch (Tensor): a batch of source or target data.\n side (str): for source or for target.\n data_type (str): type of the source input.\n Options are [text|img|audio].\n Returns:\n A sequence of src\/tgt tensors with optional feature tensors\n of size (len x batch).\n \"\"\"\n assert side in ['src', 'tgt']\n if isinstance(batch.__dict__[side], tuple):\n data = batch.__dict__[side][0]\n else:\n data = batch.__dict__[side]\n\n feat_start = side + \"_feat_\"\n keys = sorted([k for k in batch.__dict__ if feat_start in k])\n features = [batch.__dict__[k] for k in keys]\n levels = [data] + features\n\n if data_type == 'text':\n return torch.cat([level.unsqueeze(2) for level in levels], 2)\n else:\n return levels[0]","function_tokens":["def","make_features","(","batch",",","side",",","data_type","=","'text'",")",":","assert","side","in","[","'src'",",","'tgt'","]","if","isinstance","(","batch",".","__dict__","[","side","]",",","tuple",")",":","data","=","batch",".","__dict__","[","side","]","[","0","]","else",":","data","=","batch",".","__dict__","[","side","]","feat_start","=","side","+","\"_feat_\"","keys","=","sorted","(","[","k","for","k","in","batch",".","__dict__","if","feat_start","in","k","]",")","features","=","[","batch",".","__dict__","[","k","]","for","k","in","keys","]","levels","=","[","data","]","+","features","if","data_type","==","'text'",":","return","torch",".","cat","(","[","level",".","unsqueeze","(","2",")","for","level","in","levels","]",",","2",")","else",":","return","levels","[","0","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L135-L160"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"collect_features","parameters":"(fields, side=\"src\")","argument_list":"","return_statement":"return feats","docstring":"Collect features from Field object.","docstring_summary":"Collect features from Field object.","docstring_tokens":["Collect","features","from","Field","object","."],"function":"def collect_features(fields, side=\"src\"):\n \"\"\"\n Collect features from Field object.\n \"\"\"\n assert side in [\"src\", \"tgt\"]\n feats = []\n for j in count():\n key = side + \"_feat_\" + str(j)\n if key not in fields:\n break\n feats.append(key)\n return 
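For `data_type == 'text'`, `make_features` stacks the base token indices with every word-feature tensor along a new third dimension, yielding `(len, batch, n_feats + 1)`. A shape-checked sketch:

```python
import torch

seq_len, batch = 7, 2
words = torch.randint(0, 100, (seq_len, batch))   # token indices
feat0 = torch.randint(0, 10, (seq_len, batch))    # one word feature
out = torch.cat([lvl.unsqueeze(2) for lvl in (words, feat0)], 2)
assert out.shape == (7, 2, 2)  # (len, batch, 1 + n_features)
```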
feats","function_tokens":["def","collect_features","(","fields",",","side","=","\"src\"",")",":","assert","side","in","[","\"src\"",",","\"tgt\"","]","feats","=","[","]","for","j","in","count","(",")",":","key","=","side","+","\"_feat_\"","+","str","(","j",")","if","key","not","in","fields",":","break","feats",".","append","(","key",")","return","feats"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L163-L174"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"collect_feature_vocabs","parameters":"(fields, side)","argument_list":"","return_statement":"return feature_vocabs","docstring":"Collect feature Vocab objects from Field object.","docstring_summary":"Collect feature Vocab objects from Field object.","docstring_tokens":["Collect","feature","Vocab","objects","from","Field","object","."],"function":"def collect_feature_vocabs(fields, side):\n \"\"\"\n Collect feature Vocab objects from Field object.\n \"\"\"\n assert side in ['src', 'tgt']\n feature_vocabs = []\n for j in count():\n key = side + \"_feat_\" + str(j)\n if key not in fields:\n break\n feature_vocabs.append(fields[key].vocab)\n return feature_vocabs","function_tokens":["def","collect_feature_vocabs","(","fields",",","side",")",":","assert","side","in","[","'src'",",","'tgt'","]","feature_vocabs","=","[","]","for","j","in","count","(",")",":","key","=","side","+","\"_feat_\"","+","str","(","j",")","if","key","not","in","fields",":","break","feature_vocabs",".","append","(","fields","[","key","]",".","vocab",")","return","feature_vocabs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L177-L188"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"build_dataset","parameters":"(fields, data_type, src_data_iter=None, src_path=None,\n src_dir=None, tgt_data_iter=None, tgt_path=None,\n src_seq_length=0, tgt_seq_length=0,\n src_seq_length_trunc=0, tgt_seq_length_trunc=0,\n dynamic_dict=True, sample_rate=0,\n window_size=0, window_stride=0, window=None,\n normalize_audio=True, use_filter_pred=True,\n image_channel_size=3)","argument_list":"","return_statement":"return dataset","docstring":"Build src\/tgt examples iterator from corpus files, also extract\n number of features.","docstring_summary":"Build src\/tgt examples iterator from corpus files, also extract\n number of features.","docstring_tokens":["Build","src","\/","tgt","examples","iterator","from","corpus","files","also","extract","number","of","features","."],"function":"def build_dataset(fields, data_type, src_data_iter=None, src_path=None,\n src_dir=None, tgt_data_iter=None, tgt_path=None,\n src_seq_length=0, tgt_seq_length=0,\n src_seq_length_trunc=0, tgt_seq_length_trunc=0,\n dynamic_dict=True, sample_rate=0,\n window_size=0, window_stride=0, window=None,\n normalize_audio=True, use_filter_pred=True,\n image_channel_size=3):\n \"\"\"\n Build src\/tgt examples iterator from corpus files, also extract\n number of features.\n \"\"\"\n\n def _make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,\n src_seq_length_trunc, sample_rate,\n window_size, window_stride,\n window, normalize_audio,\n image_channel_size=3):\n \"\"\"\n Process the 
corpus into (example_dict iterator, num_feats) tuple\n on source side for different 'data_type'.\n \"\"\"\n\n if data_type == 'text':\n src_examples_iter, num_src_feats = \\\n TextDataset.make_text_examples_nfeats_tpl(\n src_data_iter, src_path, src_seq_length_trunc, \"src\")\n\n elif data_type == 'img':\n src_examples_iter, num_src_feats = \\\n ImageDataset.make_image_examples_nfeats_tpl(\n src_data_iter, src_path, src_dir, image_channel_size)\n\n elif data_type == 'audio':\n if src_data_iter:\n raise ValueError(\"\"\"Data iterator for AudioDataset isn't\n implemented\"\"\")\n\n if src_path is None:\n raise ValueError(\"AudioDataset requires a non None path\")\n src_examples_iter, num_src_feats = \\\n AudioDataset.make_audio_examples_nfeats_tpl(\n src_path, src_dir, sample_rate,\n window_size, window_stride, window,\n normalize_audio)\n\n return src_examples_iter, num_src_feats\n\n src_examples_iter, num_src_feats = \\\n _make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,\n src_seq_length_trunc, sample_rate,\n window_size, window_stride,\n window, normalize_audio,\n image_channel_size=image_channel_size)\n\n # For all data types, the tgt side corpus is in form of text.\n tgt_examples_iter, num_tgt_feats = \\\n TextDataset.make_text_examples_nfeats_tpl(\n tgt_data_iter, tgt_path, tgt_seq_length_trunc, \"tgt\")\n\n if data_type == 'text':\n dataset = TextDataset(fields, src_examples_iter, tgt_examples_iter,\n num_src_feats, num_tgt_feats,\n src_seq_length=src_seq_length,\n tgt_seq_length=tgt_seq_length,\n dynamic_dict=dynamic_dict,\n use_filter_pred=use_filter_pred)\n\n elif data_type == 'img':\n dataset = ImageDataset(fields, src_examples_iter, tgt_examples_iter,\n num_src_feats, num_tgt_feats,\n tgt_seq_length=tgt_seq_length,\n use_filter_pred=use_filter_pred,\n image_channel_size=image_channel_size)\n\n elif data_type == 'audio':\n dataset = AudioDataset(fields, src_examples_iter, tgt_examples_iter,\n num_src_feats, num_tgt_feats,\n tgt_seq_length=tgt_seq_length,\n sample_rate=sample_rate,\n window_size=window_size,\n window_stride=window_stride,\n window=window,\n normalize_audio=normalize_audio,\n use_filter_pred=use_filter_pred)\n\n return dataset","function_tokens":["def","build_dataset","(","fields",",","data_type",",","src_data_iter","=","None",",","src_path","=","None",",","src_dir","=","None",",","tgt_data_iter","=","None",",","tgt_path","=","None",",","src_seq_length","=","0",",","tgt_seq_length","=","0",",","src_seq_length_trunc","=","0",",","tgt_seq_length_trunc","=","0",",","dynamic_dict","=","True",",","sample_rate","=","0",",","window_size","=","0",",","window_stride","=","0",",","window","=","None",",","normalize_audio","=","True",",","use_filter_pred","=","True",",","image_channel_size","=","3",")",":","def","_make_examples_nfeats_tpl","(","data_type",",","src_data_iter",",","src_path",",","src_dir",",","src_seq_length_trunc",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","image_channel_size","=","3",")",":","\"\"\"\n Process the corpus into (example_dict iterator, num_feats) tuple\n on source side for different 'data_type'.\n 
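Structurally, `build_dataset` above is a three-way dispatch on `data_type`, once for the source example iterator and once for the dataset constructor. The same shape as a table lookup, with placeholder lambdas standing in for the real `TextDataset`/`ImageDataset`/`AudioDataset` classes:

```python
_builders = {
    "text": lambda **kw: ("TextDataset", kw),
    "img": lambda **kw: ("ImageDataset", kw),
    "audio": lambda **kw: ("AudioDataset", kw),
}

def build(data_type, **kwargs):
    if data_type not in _builders:
        raise ValueError("Data type not implemented")
    return _builders[data_type](**kwargs)

assert build("img", image_channel_size=1)[0] == "ImageDataset"
```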
\"\"\"","if","data_type","==","'text'",":","src_examples_iter",",","num_src_feats","=","TextDataset",".","make_text_examples_nfeats_tpl","(","src_data_iter",",","src_path",",","src_seq_length_trunc",",","\"src\"",")","elif","data_type","==","'img'",":","src_examples_iter",",","num_src_feats","=","ImageDataset",".","make_image_examples_nfeats_tpl","(","src_data_iter",",","src_path",",","src_dir",",","image_channel_size",")","elif","data_type","==","'audio'",":","if","src_data_iter",":","raise","ValueError","(","\"\"\"Data iterator for AudioDataset isn't\n implemented\"\"\"",")","if","src_path","is","None",":","raise","ValueError","(","\"AudioDataset requires a non None path\"",")","src_examples_iter",",","num_src_feats","=","AudioDataset",".","make_audio_examples_nfeats_tpl","(","src_path",",","src_dir",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",")","return","src_examples_iter",",","num_src_feats","src_examples_iter",",","num_src_feats","=","_make_examples_nfeats_tpl","(","data_type",",","src_data_iter",",","src_path",",","src_dir",",","src_seq_length_trunc",",","sample_rate",",","window_size",",","window_stride",",","window",",","normalize_audio",",","image_channel_size","=","image_channel_size",")","# For all data types, the tgt side corpus is in form of text.","tgt_examples_iter",",","num_tgt_feats","=","TextDataset",".","make_text_examples_nfeats_tpl","(","tgt_data_iter",",","tgt_path",",","tgt_seq_length_trunc",",","\"tgt\"",")","if","data_type","==","'text'",":","dataset","=","TextDataset","(","fields",",","src_examples_iter",",","tgt_examples_iter",",","num_src_feats",",","num_tgt_feats",",","src_seq_length","=","src_seq_length",",","tgt_seq_length","=","tgt_seq_length",",","dynamic_dict","=","dynamic_dict",",","use_filter_pred","=","use_filter_pred",")","elif","data_type","==","'img'",":","dataset","=","ImageDataset","(","fields",",","src_examples_iter",",","tgt_examples_iter",",","num_src_feats",",","num_tgt_feats",",","tgt_seq_length","=","tgt_seq_length",",","use_filter_pred","=","use_filter_pred",",","image_channel_size","=","image_channel_size",")","elif","data_type","==","'audio'",":","dataset","=","AudioDataset","(","fields",",","src_examples_iter",",","tgt_examples_iter",",","num_src_feats",",","num_tgt_feats",",","tgt_seq_length","=","tgt_seq_length",",","sample_rate","=","sample_rate",",","window_size","=","window_size",",","window_stride","=","window_stride",",","window","=","window",",","normalize_audio","=","normalize_audio",",","use_filter_pred","=","use_filter_pred",")","return","dataset"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L191-L277"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"build_vocab","parameters":"(train_dataset_files, fields, data_type, share_vocab,\n src_vocab_path, src_vocab_size, src_words_min_frequency,\n tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency)","argument_list":"","return_statement":"return fields","docstring":"Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict): fields to build vocab for.\n data_type: \"text\", \"img\" or \"audio\"?\n share_vocab(bool): share source and target vocabulary?\n src_vocab_path(string): Path to src vocabulary file.\n src_vocab_size(int): size of the source vocabulary.\n src_words_min_frequency(int): the 
minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path(string): Path to tgt vocabulary file.\n tgt_vocab_size(int): size of the target vocabulary.\n tgt_words_min_frequency(int): the minimum frequency needed to\n include a target word in the vocabulary.\n\n Returns:\n Dict of Fields","docstring_summary":"Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict): fields to build vocab for.\n data_type: \"text\", \"img\" or \"audio\"?\n share_vocab(bool): share source and target vocabulary?\n src_vocab_path(string): Path to src vocabulary file.\n src_vocab_size(int): size of the source vocabulary.\n src_words_min_frequency(int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path(string): Path to tgt vocabulary file.\n tgt_vocab_size(int): size of the target vocabulary.\n tgt_words_min_frequency(int): the minimum frequency needed to\n include a target word in the vocabulary.","docstring_tokens":["Args",":","train_dataset_files",":","a","list","of","train","dataset","pt","file",".","fields","(","dict",")",":","fields","to","build","vocab","for",".","data_type",":","text","img","or","audio","?","share_vocab","(","bool",")",":","share","source","and","target","vocabulary?","src_vocab_path","(","string",")",":","Path","to","src","vocabulary","file",".","src_vocab_size","(","int",")",":","size","of","the","source","vocabulary",".","src_words_min_frequency","(","int",")",":","the","minimum","frequency","needed","to","include","a","source","word","in","the","vocabulary",".","tgt_vocab_path","(","string",")",":","Path","to","tgt","vocabulary","file",".","tgt_vocab_size","(","int",")",":","size","of","the","target","vocabulary",".","tgt_words_min_frequency","(","int",")",":","the","minimum","frequency","needed","to","include","a","target","word","in","the","vocabulary","."],"function":"def build_vocab(train_dataset_files, fields, data_type, share_vocab,\n src_vocab_path, src_vocab_size, src_words_min_frequency,\n tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency):\n \"\"\"\n Args:\n train_dataset_files: a list of train dataset pt file.\n fields (dict): fields to build vocab for.\n data_type: \"text\", \"img\" or \"audio\"?\n share_vocab(bool): share source and target vocabulary?\n src_vocab_path(string): Path to src vocabulary file.\n src_vocab_size(int): size of the source vocabulary.\n src_words_min_frequency(int): the minimum frequency needed to\n include a source word in the vocabulary.\n tgt_vocab_path(string): Path to tgt vocabulary file.\n tgt_vocab_size(int): size of the target vocabulary.\n tgt_words_min_frequency(int): the minimum frequency needed to\n include a target word in the vocabulary.\n\n Returns:\n Dict of Fields\n \"\"\"\n counter = {}\n\n # Prop src from field to get lower memory using when training with image\n if data_type == 'img':\n fields.pop(\"src\")\n\n for k in fields:\n counter[k] = Counter()\n\n # Load vocabulary\n src_vocab = load_vocabulary(src_vocab_path, tag=\"source\")\n tgt_vocab = load_vocabulary(tgt_vocab_path, tag=\"target\")\n\n for index, path in enumerate(train_dataset_files):\n dataset = torch.load(path)\n logger.info(\" * reloading %s.\" % path)\n for ex in dataset.examples:\n for k in fields:\n val = getattr(ex, k, None)\n if val is not None and not fields[k].sequential:\n val = [val]\n elif k == 'src' and src_vocab:\n val = [item for item in val if item in src_vocab]\n elif k == 'tgt' and tgt_vocab:\n val = [item for item in val if item in tgt_vocab]\n 
counter[k].update(val)\n\n # Drop the none-using from memory but keep the last\n if (index < len(train_dataset_files) - 1):\n dataset.examples = None\n gc.collect()\n del dataset.examples\n gc.collect()\n del dataset\n gc.collect()\n\n _build_field_vocab(fields[\"tgt\"], counter[\"tgt\"],\n max_size=tgt_vocab_size,\n min_freq=tgt_words_min_frequency)\n logger.info(\" * tgt vocab size: %d.\" % len(fields[\"tgt\"].vocab))\n\n # All datasets have same num of n_tgt_features,\n # getting the last one is OK.\n for j in range(dataset.n_tgt_feats):\n key = \"tgt_feat_\" + str(j)\n _build_field_vocab(fields[key], counter[key])\n logger.info(\" * %s vocab size: %d.\" % (key,\n len(fields[key].vocab)))\n\n if data_type == 'text':\n _build_field_vocab(fields[\"src\"], counter[\"src\"],\n max_size=src_vocab_size,\n min_freq=src_words_min_frequency)\n logger.info(\" * src vocab size: %d.\" % len(fields[\"src\"].vocab))\n\n # All datasets have same num of n_src_features,\n # getting the last one is OK.\n for j in range(dataset.n_src_feats):\n key = \"src_feat_\" + str(j)\n _build_field_vocab(fields[key], counter[key])\n logger.info(\" * %s vocab size: %d.\" %\n (key, len(fields[key].vocab)))\n\n # Merge the input and output vocabularies.\n if share_vocab:\n # `tgt_vocab_size` is ignored when sharing vocabularies\n logger.info(\" * merging src and tgt vocab...\")\n merged_vocab = merge_vocabs(\n [fields[\"src\"].vocab, fields[\"tgt\"].vocab],\n vocab_size=src_vocab_size)\n fields[\"src\"].vocab = merged_vocab\n fields[\"tgt\"].vocab = merged_vocab\n\n return fields","function_tokens":["def","build_vocab","(","train_dataset_files",",","fields",",","data_type",",","share_vocab",",","src_vocab_path",",","src_vocab_size",",","src_words_min_frequency",",","tgt_vocab_path",",","tgt_vocab_size",",","tgt_words_min_frequency",")",":","counter","=","{","}","# Prop src from field to get lower memory using when training with image","if","data_type","==","'img'",":","fields",".","pop","(","\"src\"",")","for","k","in","fields",":","counter","[","k","]","=","Counter","(",")","# Load vocabulary","src_vocab","=","load_vocabulary","(","src_vocab_path",",","tag","=","\"source\"",")","tgt_vocab","=","load_vocabulary","(","tgt_vocab_path",",","tag","=","\"target\"",")","for","index",",","path","in","enumerate","(","train_dataset_files",")",":","dataset","=","torch",".","load","(","path",")","logger",".","info","(","\" * reloading %s.\"","%","path",")","for","ex","in","dataset",".","examples",":","for","k","in","fields",":","val","=","getattr","(","ex",",","k",",","None",")","if","val","is","not","None","and","not","fields","[","k","]",".","sequential",":","val","=","[","val","]","elif","k","==","'src'","and","src_vocab",":","val","=","[","item","for","item","in","val","if","item","in","src_vocab","]","elif","k","==","'tgt'","and","tgt_vocab",":","val","=","[","item","for","item","in","val","if","item","in","tgt_vocab","]","counter","[","k","]",".","update","(","val",")","# Drop the none-using from memory but keep the last","if","(","index","<","len","(","train_dataset_files",")","-","1",")",":","dataset",".","examples","=","None","gc",".","collect","(",")","del","dataset",".","examples","gc",".","collect","(",")","del","dataset","gc",".","collect","(",")","_build_field_vocab","(","fields","[","\"tgt\"","]",",","counter","[","\"tgt\"","]",",","max_size","=","tgt_vocab_size",",","min_freq","=","tgt_words_min_frequency",")","logger",".","info","(","\" * tgt vocab size: 
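The loop above counts one shard at a time and then releases it. Isolated from the surrounding function, the pattern is essentially the following sketch, with `shard_paths` standing in for `train_dataset_files` and only source tokens counted for brevity.

```python
# Count-then-release sketch: only one shard's examples are alive at a time.
# The last shard is kept because its n_src_feats/n_tgt_feats are read later.
import gc
from collections import Counter

import torch

counter = Counter()
for index, path in enumerate(shard_paths):      # shard_paths: list of .pt files
    dataset = torch.load(path)
    for ex in dataset.examples:
        counter.update(ex.src)                  # count source tokens
    if index < len(shard_paths) - 1:
        dataset.examples = None                 # free the bulk of the memory
        gc.collect()
        del dataset
        gc.collect()
```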
%d.\"","%","len","(","fields","[","\"tgt\"","]",".","vocab",")",")","# All datasets have same num of n_tgt_features,","# getting the last one is OK.","for","j","in","range","(","dataset",".","n_tgt_feats",")",":","key","=","\"tgt_feat_\"","+","str","(","j",")","_build_field_vocab","(","fields","[","key","]",",","counter","[","key","]",")","logger",".","info","(","\" * %s vocab size: %d.\"","%","(","key",",","len","(","fields","[","key","]",".","vocab",")",")",")","if","data_type","==","'text'",":","_build_field_vocab","(","fields","[","\"src\"","]",",","counter","[","\"src\"","]",",","max_size","=","src_vocab_size",",","min_freq","=","src_words_min_frequency",")","logger",".","info","(","\" * src vocab size: %d.\"","%","len","(","fields","[","\"src\"","]",".","vocab",")",")","# All datasets have same num of n_src_features,","# getting the last one is OK.","for","j","in","range","(","dataset",".","n_src_feats",")",":","key","=","\"src_feat_\"","+","str","(","j",")","_build_field_vocab","(","fields","[","key","]",",","counter","[","key","]",")","logger",".","info","(","\" * %s vocab size: %d.\"","%","(","key",",","len","(","fields","[","key","]",".","vocab",")",")",")","# Merge the input and output vocabularies.","if","share_vocab",":","# `tgt_vocab_size` is ignored when sharing vocabularies","logger",".","info","(","\" * merging src and tgt vocab...\"",")","merged_vocab","=","merge_vocabs","(","[","fields","[","\"src\"","]",".","vocab",",","fields","[","\"tgt\"","]",".","vocab","]",",","vocab_size","=","src_vocab_size",")","fields","[","\"src\"","]",".","vocab","=","merged_vocab","fields","[","\"tgt\"","]",".","vocab","=","merged_vocab","return","fields"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L288-L382"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"load_vocabulary","parameters":"(vocabulary_path, tag=\"\")","argument_list":"","return_statement":"return vocabulary","docstring":"Loads a vocabulary from the given path.\n :param vocabulary_path: path to load vocabulary from\n :param tag: tag for vocabulary (only used for logging)\n :return: vocabulary or None if path is null","docstring_summary":"Loads a vocabulary from the given path.\n :param vocabulary_path: path to load vocabulary from\n :param tag: tag for vocabulary (only used for logging)\n :return: vocabulary or None if path is null","docstring_tokens":["Loads","a","vocabulary","from","the","given","path",".",":","param","vocabulary_path",":","path","to","load","vocabulary","from",":","param","tag",":","tag","for","vocabulary","(","only","used","for","logging",")",":","return",":","vocabulary","or","None","if","path","is","null"],"function":"def load_vocabulary(vocabulary_path, tag=\"\"):\n \"\"\"\n Loads a vocabulary from the given path.\n :param vocabulary_path: path to load vocabulary from\n :param tag: tag for vocabulary (only used for logging)\n :return: vocabulary or None if path is null\n \"\"\"\n vocabulary = None\n if vocabulary_path:\n vocabulary = set([])\n logger.info(\"Loading {} vocabulary from {}\".format(tag,\n vocabulary_path))\n\n if not os.path.exists(vocabulary_path):\n raise RuntimeError(\n \"{} vocabulary not found at {}!\".format(tag, vocabulary_path))\n else:\n with open(vocabulary_path) as f:\n for line in f:\n if len(line.strip()) == 0:\n continue\n word = line.strip().split()[0]\n 
vocabulary.add(word)\n return vocabulary","function_tokens":["def","load_vocabulary","(","vocabulary_path",",","tag","=","\"\"",")",":","vocabulary","=","None","if","vocabulary_path",":","vocabulary","=","set","(","[","]",")","logger",".","info","(","\"Loading {} vocabulary from {}\"",".","format","(","tag",",","vocabulary_path",")",")","if","not","os",".","path",".","exists","(","vocabulary_path",")",":","raise","RuntimeError","(","\"{} vocabulary not found at {}!\"",".","format","(","tag",",","vocabulary_path",")",")","else",":","with","open","(","vocabulary_path",")","as","f",":","for","line","in","f",":","if","len","(","line",".","strip","(",")",")","==","0",":","continue","word","=","line",".","strip","(",")",".","split","(",")","[","0","]","vocabulary",".","add","(","word",")","return","vocabulary"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L385-L408"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"build_dataset_iter","parameters":"(datasets, fields, opt, is_train=True)","argument_list":"","return_statement":"return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,\n device, is_train)","docstring":"This returns user-defined train\/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.","docstring_summary":"This returns user-defined train\/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.","docstring_tokens":["This","returns","user","-","defined","train","\/","validate","data","iterator","for","the","trainer","to","iterate","over",".","We","implement","simple","ordered","iterator","strategy","here","but","more","sophisticated","strategy","like","curriculum","learning","is","ok","too","."],"function":"def build_dataset_iter(datasets, fields, opt, is_train=True):\n \"\"\"\n This returns user-defined train\/validate data iterator for the trainer\n to iterate over. We implement simple ordered iterator strategy here,\n but more sophisticated strategy like curriculum learning is ok too.\n \"\"\"\n batch_size = opt.batch_size if is_train else opt.valid_batch_size\n if is_train and opt.batch_type == \"tokens\":\n def batch_size_fn(new, count, sofar):\n \"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src\/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"\n # Maintains the longest src and tgt length in the current batch\n global max_src_in_batch, max_tgt_in_batch\n # Reset current longest length at a new batch (count=1)\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n # Src: w1 ... wN \n max_src_in_batch = max(max_src_in_batch, len(new.src) + 2)\n # Tgt: w1 ... 
wN \n max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt) + 1)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n else:\n batch_size_fn = None\n\n if opt.gpu_ranks:\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,\n device, is_train)","function_tokens":["def","build_dataset_iter","(","datasets",",","fields",",","opt",",","is_train","=","True",")",":","batch_size","=","opt",".","batch_size","if","is_train","else","opt",".","valid_batch_size","if","is_train","and","opt",".","batch_type","==","\"tokens\"",":","def","batch_size_fn","(","new",",","count",",","sofar",")",":","\"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src\/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"","# Maintains the longest src and tgt length in the current batch","global","max_src_in_batch",",","max_tgt_in_batch","# Reset current longest length at a new batch (count=1)","if","count","==","1",":","max_src_in_batch","=","0","max_tgt_in_batch","=","0","# Src: w1 ... wN ","max_src_in_batch","=","max","(","max_src_in_batch",",","len","(","new",".","src",")","+","2",")","# Tgt: w1 ... wN ","max_tgt_in_batch","=","max","(","max_tgt_in_batch",",","len","(","new",".","tgt",")","+","1",")","src_elements","=","count","*","max_src_in_batch","tgt_elements","=","count","*","max_tgt_in_batch","return","max","(","src_elements",",","tgt_elements",")","else",":","batch_size_fn","=","None","if","opt",".","gpu_ranks",":","device","=","\"cuda\"","else",":","device","=","\"cpu\"","return","DatasetLazyIter","(","datasets",",","fields",",","batch_size",",","batch_size_fn",",","device",",","is_train",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L506-L542"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"lazily_load_dataset","parameters":"(corpus_type, opt)","argument_list":"","return_statement":"","docstring":"Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.","docstring_summary":"Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.","docstring_tokens":["Dataset","generator",".","Don","t","do","extra","stuff","here","like","printing","because","they","will","be","postponed","to","the","first","loading","time","."],"function":"def lazily_load_dataset(corpus_type, opt):\n \"\"\"\n Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.\n \"\"\"\n assert corpus_type in [\"train\", \"valid\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n # logger.info('Loading %s dataset from %s, number of examples: %d' %\n # (corpus_type, pt_file, len(dataset)))\n # import pdb;\n # pdb.set_trace()\n\n return dataset\n\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(opt.data + '.' 
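To make the token-batching rule above concrete, here is a small numeric check. The +2/+1 allowances mirror the start/end-marker padding in the function's comments; the batch size of 4096 tokens is an invented figure.

```python
def batch_cost(count, max_src_len, max_tgt_len):
    # Mirrors batch_size_fn: padding-inclusive token count of a batch.
    return max(count * (max_src_len + 2), count * (max_tgt_len + 1))

# With batch_size = 4096 tokens and a longest source of 48 tokens (50 after
# the +2 allowance), 81 sequences cost 81 * 50 = 4050 <= 4096, while an 82nd
# would push the cost to 82 * 50 = 4100 and so belongs to the next batch.
assert batch_cost(81, 48, 30) == 4050
assert batch_cost(82, 48, 30) > 4096
```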
+ corpus_type + '.[0-9]*.pt'))\n if pts:\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = opt.data + '.' + corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)","function_tokens":["def","lazily_load_dataset","(","corpus_type",",","opt",")",":","assert","corpus_type","in","[","\"train\"",",","\"valid\"","]","def","_lazy_dataset_loader","(","pt_file",",","corpus_type",")",":","dataset","=","torch",".","load","(","pt_file",")","# logger.info('Loading %s dataset from %s, number of examples: %d' %","# (corpus_type, pt_file, len(dataset)))","# import pdb;","# pdb.set_trace()","return","dataset","# Sort the glob output by file name (by increasing indexes).","pts","=","sorted","(","glob",".","glob","(","opt",".","data","+","'.'","+","corpus_type","+","'.[0-9]*.pt'",")",")","if","pts",":","for","pt","in","pts",":","yield","_lazy_dataset_loader","(","pt",",","corpus_type",")","else",":","# Only one inputters.*Dataset, simple!","pt","=","opt",".","data","+","'.'","+","corpus_type","+","'.pt'","yield","_lazy_dataset_loader","(","pt",",","corpus_type",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L545-L575"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/inputters\/inputter.py","language":"python","identifier":"OrderedIterator.create_batches","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Create batches","docstring_summary":"Create batches","docstring_tokens":["Create","batches"],"function":"def create_batches(self):\n \"\"\" Create batches \"\"\"\n if self.train:\n def _pool(data, random_shuffler):\n for p in torchtext.data.batch(data, self.batch_size * 100):\n p_batch = torchtext.data.batch(\n sorted(p, key=self.sort_key),\n self.batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n\n self.batches = _pool(self.data(), self.random_shuffler)\n else:\n self.batches = []\n for b in torchtext.data.batch(self.data(), self.batch_size,\n self.batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))","function_tokens":["def","create_batches","(","self",")",":","if","self",".","train",":","def","_pool","(","data",",","random_shuffler",")",":","for","p","in","torchtext",".","data",".","batch","(","data",",","self",".","batch_size","*","100",")",":","p_batch","=","torchtext",".","data",".","batch","(","sorted","(","p",",","key","=","self",".","sort_key",")",",","self",".","batch_size",",","self",".","batch_size_fn",")","for","b","in","random_shuffler","(","list","(","p_batch",")",")",":","yield","b","self",".","batches","=","_pool","(","self",".","data","(",")",",","self",".","random_shuffler",")","else",":","self",".","batches","=","[","]","for","b","in","torchtext",".","data",".","batch","(","self",".","data","(",")",",","self",".","batch_size",",","self",".","batch_size_fn",")",":","self",".","batches",".","append","(","sorted","(","b",",","key","=","self",".","sort_key",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/inputters\/inputter.py#L414-L430"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.start","parameters":"(self, 
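Putting the two pieces together, a training loop would typically feed the lazy shard generator straight into `build_dataset_iter`; a minimal sketch under the assumption that `fields` and `opt` are already prepared elsewhere.

```python
# Lazy shard consumption: torch.load happens one shard at a time, only when
# the iterator actually reaches that shard.
train_iter = build_dataset_iter(
    lazily_load_dataset("train", opt), fields, opt, is_train=True)
for batch in train_iter:
    pass  # training step goes here
```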
config_file)","argument_list":"","return_statement":"","docstring":"Read the config file and pre-\/load the models","docstring_summary":"Read the config file and pre-\/load the models","docstring_tokens":["Read","the","config","file","and","pre","-","\/","load","the","models"],"function":"def start(self, config_file):\n \"\"\"Read the config file and pre-\/load the models\n \"\"\"\n self.config_file = config_file\n with open(self.config_file) as f:\n self.confs = json.load(f)\n\n self.models_root = self.confs.get('models_root', '.\/available_models')\n for i, conf in enumerate(self.confs[\"models\"]):\n if \"models\" not in conf:\n if \"model\" in conf:\n # backwards compatibility for confs\n conf[\"models\"] = [conf[\"model\"]]\n else:\n raise ValueError(\"\"\"Incorrect config file: missing 'models'\n parameter for model #%d\"\"\" % i)\n kwargs = {'timeout': conf.get('timeout', None),\n 'load': conf.get('load', None),\n 'tokenizer_opt': conf.get('tokenizer', None),\n 'on_timeout': conf.get('on_timeout', None),\n 'model_root': conf.get('model_root', self.models_root)\n }\n kwargs = {k: v for (k, v) in kwargs.items() if v is not None}\n model_id = conf.get(\"id\", None)\n opt = conf[\"opt\"]\n opt[\"models\"] = conf[\"models\"]\n self.preload_model(opt, model_id=model_id, **kwargs)","function_tokens":["def","start","(","self",",","config_file",")",":","self",".","config_file","=","config_file","with","open","(","self",".","config_file",")","as","f",":","self",".","confs","=","json",".","load","(","f",")","self",".","models_root","=","self",".","confs",".","get","(","'models_root'",",","'.\/available_models'",")","for","i",",","conf","in","enumerate","(","self",".","confs","[","\"models\"","]",")",":","if","\"models\"","not","in","conf",":","if","\"model\"","in","conf",":","# backwards compatibility for confs","conf","[","\"models\"","]","=","[","conf","[","\"model\"","]","]","else",":","raise","ValueError","(","\"\"\"Incorrect config file: missing 'models'\n parameter for model #%d\"\"\"","%","i",")","kwargs","=","{","'timeout'",":","conf",".","get","(","'timeout'",",","None",")",",","'load'",":","conf",".","get","(","'load'",",","None",")",",","'tokenizer_opt'",":","conf",".","get","(","'tokenizer'",",","None",")",",","'on_timeout'",":","conf",".","get","(","'on_timeout'",",","None",")",",","'model_root'",":","conf",".","get","(","'model_root'",",","self",".","models_root",")","}","kwargs","=","{","k",":","v","for","(","k",",","v",")","in","kwargs",".","items","(",")","if","v","is","not","None","}","model_id","=","conf",".","get","(","\"id\"",",","None",")","opt","=","conf","[","\"opt\"","]","opt","[","\"models\"","]","=","conf","[","\"models\"","]","self",".","preload_model","(","opt",",","model_id","=","model_id",",","*","*","kwargs",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L54-L80"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.clone_model","parameters":"(self, model_id, opt, timeout=-1)","argument_list":"","return_statement":"","docstring":"Clone a model `model_id`.\n Different options may be passed. If `opt` is None, it will use the\n same set of options","docstring_summary":"Clone a model `model_id`.\n Different options may be passed. 
If `opt` is None, it will use the\n same set of options","docstring_tokens":["Clone","a","model","model_id",".","Different","options","may","be","passed",".","If","opt","is","None","it","will","use","the","same","set","of","options"],"function":"def clone_model(self, model_id, opt, timeout=-1):\n \"\"\"Clone a model `model_id`.\n Different options may be passed. If `opt` is None, it will use the\n same set of options\n \"\"\"\n if model_id in self.models:\n if opt is None:\n opt = self.models[model_id].user_opt\n opt[\"models\"] = self.models[model_id].opt.models\n return self.load_model(opt, timeout)\n else:\n raise ServerModelError(\"No such model '%s'\" % str(model_id))","function_tokens":["def","clone_model","(","self",",","model_id",",","opt",",","timeout","=","-","1",")",":","if","model_id","in","self",".","models",":","if","opt","is","None",":","opt","=","self",".","models","[","model_id","]",".","user_opt","opt","[","\"models\"","]","=","self",".","models","[","model_id","]",".","opt",".","models","return","self",".","load_model","(","opt",",","timeout",")","else",":","raise","ServerModelError","(","\"No such model '%s'\"","%","str","(","model_id",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L82-L93"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.load_model","parameters":"(self, opt, model_id=None, **model_kwargs)","argument_list":"","return_statement":"return model_id, load_time","docstring":"Loading a model given a set of options","docstring_summary":"Loading a model given a set of options","docstring_tokens":["Loading","a","model","given","a","set","of","options"],"function":"def load_model(self, opt, model_id=None, **model_kwargs):\n \"\"\"Loading a model given a set of options\n \"\"\"\n model_id = self.preload_model(opt, model_id=model_id, **model_kwargs)\n load_time = self.models[model_id].load_time\n\n return model_id, load_time","function_tokens":["def","load_model","(","self",",","opt",",","model_id","=","None",",","*","*","model_kwargs",")",":","model_id","=","self",".","preload_model","(","opt",",","model_id","=","model_id",",","*","*","model_kwargs",")","load_time","=","self",".","models","[","model_id","]",".","load_time","return","model_id",",","load_time"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L95-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.preload_model","parameters":"(self, opt, model_id=None, **model_kwargs)","argument_list":"","return_statement":"return model_id","docstring":"Preloading the model: updating internal datastructure\n It will effectively load the model if `load` is set","docstring_summary":"Preloading the model: updating internal datastructure\n It will effectively load the model if `load` is set","docstring_tokens":["Preloading","the","model",":","updating","internal","datastructure","It","will","effectively","load","the","model","if","load","is","set"],"function":"def preload_model(self, opt, model_id=None, **model_kwargs):\n \"\"\"Preloading the model: updating internal datastructure\n It will effectively load 
the model if `load` is set\n \"\"\"\n if model_id is not None:\n if model_id in self.models.keys():\n raise ValueError(\"Model ID %d already exists\" % model_id)\n else:\n model_id = self.next_id\n while model_id in self.models.keys():\n model_id += 1\n self.next_id = model_id + 1\n print(\"Pre-loading model %d\" % model_id)\n model = ServerModel(opt, model_id, **model_kwargs)\n self.models[model_id] = model\n\n return model_id","function_tokens":["def","preload_model","(","self",",","opt",",","model_id","=","None",",","*","*","model_kwargs",")",":","if","model_id","is","not","None",":","if","model_id","in","self",".","models",".","keys","(",")",":","raise","ValueError","(","\"Model ID %d already exists\"","%","model_id",")","else",":","model_id","=","self",".","next_id","while","model_id","in","self",".","models",".","keys","(",")",":","model_id","+=","1","self",".","next_id","=","model_id","+","1","print","(","\"Pre-loading model %d\"","%","model_id",")","model","=","ServerModel","(","opt",",","model_id",",","*","*","model_kwargs",")","self",".","models","[","model_id","]","=","model","return","model_id"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L103-L119"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.run","parameters":"(self, inputs)","argument_list":"","return_statement":"","docstring":"Translate `inputs`\n We keep the same format as the Lua version i.e.\n [{\"id\": model_id, \"src\": \"sequence to translate\"},{ ...}]\n\n We use inputs[0][\"id\"] as the model id","docstring_summary":"Translate `inputs`\n We keep the same format as the Lua version i.e.\n [{\"id\": model_id, \"src\": \"sequence to translate\"},{ ...}]","docstring_tokens":["Translate","inputs","We","keep","the","same","format","as","the","Lua","version","i",".","e",".","[","{","id",":","model_id","src",":","sequence","to","translate","}","{","...","}","]"],"function":"def run(self, inputs):\n \"\"\"Translate `inputs`\n We keep the same format as the Lua version i.e.\n [{\"id\": model_id, \"src\": \"sequence to translate\"},{ ...}]\n\n We use inputs[0][\"id\"] as the model id\n \"\"\"\n model_id = inputs[0].get(\"id\", 0)\n if model_id in self.models and self.models[model_id] is not None:\n return self.models[model_id].run(inputs)\n else:\n print(\"Error No such model '%s'\" % str(model_id))\n raise ServerModelError(\"No such model '%s'\" % str(model_id))","function_tokens":["def","run","(","self",",","inputs",")",":","model_id","=","inputs","[","0","]",".","get","(","\"id\"",",","0",")","if","model_id","in","self",".","models","and","self",".","models","[","model_id","]","is","not","None",":","return","self",".","models","[","model_id","]",".","run","(","inputs",")","else",":","print","(","\"Error No such model '%s'\"","%","str","(","model_id",")",")","raise","ServerModelError","(","\"No such model '%s'\"","%","str","(","model_id",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L121-L133"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.unload_model","parameters":"(self, 
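The request format for `TranslationServer.run` is the Lua-compatible list of dicts described in its docstring; a minimal sketch, assuming a started `server` with a model registered under id 100.

```python
# One model id per request; it is read from the first item only.
inputs = [{"id": 100, "src": "this is a test ."},
          {"id": 100, "src": "another sentence ."}]
results, scores, n_best, times = server.run(inputs)
```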
model_id)","argument_list":"","return_statement":"","docstring":"Manually unload a model.\n It will free the memory and cancel the timer","docstring_summary":"Manually unload a model.\n It will free the memory and cancel the timer","docstring_tokens":["Manually","unload","a","model",".","It","will","free","the","memory","and","cancel","the","timer"],"function":"def unload_model(self, model_id):\n \"\"\"Manually unload a model.\n It will free the memory and cancel the timer\n \"\"\"\n if model_id in self.models and self.models[model_id] is not None:\n self.models[model_id].unload()\n else:\n raise ServerModelError(\"No such model '%s'\" % str(model_id))","function_tokens":["def","unload_model","(","self",",","model_id",")",":","if","model_id","in","self",".","models","and","self",".","models","[","model_id","]","is","not","None",":","self",".","models","[","model_id","]",".","unload","(",")","else",":","raise","ServerModelError","(","\"No such model '%s'\"","%","str","(","model_id",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L135-L142"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"TranslationServer.list_models","parameters":"(self)","argument_list":"","return_statement":"return models","docstring":"Return the list of available models","docstring_summary":"Return the list of available models","docstring_tokens":["Return","the","list","of","available","models"],"function":"def list_models(self):\n \"\"\"Return the list of available models\n \"\"\"\n models = []\n for _, model in self.models.items():\n models += [model.to_dict()]\n return models","function_tokens":["def","list_models","(","self",")",":","models","=","[","]","for","_",",","model","in","self",".","models",".","items","(",")",":","models","+=","[","model",".","to_dict","(",")","]","return","models"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L144-L150"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.__init__","parameters":"(self, opt, model_id, tokenizer_opt=None, load=False,\n timeout=-1, on_timeout=\"to_cpu\", model_root=\".\/\")","argument_list":"","return_statement":"","docstring":"Args:\n opt: (dict) options for the Translator\n model_id: (int) model id\n tokenizer_opt: (dict) options for the tokenizer or None\n load: (bool) whether to load the model during __init__\n timeout: (int) seconds before running `do_timeout`\n Negative values means no timeout\n on_timeout: (str) in [\"to_cpu\", \"unload\"] set what to do on\n timeout (see function `do_timeout`)\n model_root: (str) path to the model directory\n it must contain de model and tokenizer file","docstring_summary":"Args:\n opt: (dict) options for the Translator\n model_id: (int) model id\n tokenizer_opt: (dict) options for the tokenizer or None\n load: (bool) whether to load the model during __init__\n timeout: (int) seconds before running `do_timeout`\n Negative values means no timeout\n on_timeout: (str) in [\"to_cpu\", \"unload\"] set what to do on\n timeout (see function `do_timeout`)\n model_root: (str) path to the model directory\n it must contain de model and 
tokenizer file","docstring_tokens":["Args",":","opt",":","(","dict",")","options","for","the","Translator","model_id",":","(","int",")","model","id","tokenizer_opt",":","(","dict",")","options","for","the","tokenizer","or","None","load",":","(","bool",")","whether","to","load","the","model","during","__init__","timeout",":","(","int",")","seconds","before","running","do_timeout","Negative","values","means","no","timeout","on_timeout",":","(","str",")","in","[","to_cpu","unload","]","set","what","to","do","on","timeout","(","see","function","do_timeout",")","model_root",":","(","str",")","path","to","the","model","directory","it","must","contain","de","model","and","tokenizer","file"],"function":"def __init__(self, opt, model_id, tokenizer_opt=None, load=False,\n timeout=-1, on_timeout=\"to_cpu\", model_root=\".\/\"):\n \"\"\"\n Args:\n opt: (dict) options for the Translator\n model_id: (int) model id\n tokenizer_opt: (dict) options for the tokenizer or None\n load: (bool) whether to load the model during __init__\n timeout: (int) seconds before running `do_timeout`\n Negative values means no timeout\n on_timeout: (str) in [\"to_cpu\", \"unload\"] set what to do on\n timeout (see function `do_timeout`)\n model_root: (str) path to the model directory\n it must contain de model and tokenizer file\n\n \"\"\"\n self.model_root = model_root\n self.opt = self.parse_opt(opt)\n if self.opt.n_best > 1:\n raise ValueError(\"Values of n_best > 1 are not supported\")\n\n self.model_id = model_id\n self.tokenizer_opt = tokenizer_opt\n self.timeout = timeout\n self.on_timeout = on_timeout\n\n self.unload_timer = None\n self.user_opt = opt\n self.tokenizer = None\n self.logger = init_logger(self.opt.log_file)\n self.loading_lock = threading.Event()\n self.loading_lock.set()\n\n if load:\n self.load()","function_tokens":["def","__init__","(","self",",","opt",",","model_id",",","tokenizer_opt","=","None",",","load","=","False",",","timeout","=","-","1",",","on_timeout","=","\"to_cpu\"",",","model_root","=","\".\/\"",")",":","self",".","model_root","=","model_root","self",".","opt","=","self",".","parse_opt","(","opt",")","if","self",".","opt",".","n_best",">","1",":","raise","ValueError","(","\"Values of n_best > 1 are not supported\"",")","self",".","model_id","=","model_id","self",".","tokenizer_opt","=","tokenizer_opt","self",".","timeout","=","timeout","self",".","on_timeout","=","on_timeout","self",".","unload_timer","=","None","self",".","user_opt","=","opt","self",".","tokenizer","=","None","self",".","logger","=","init_logger","(","self",".","opt",".","log_file",")","self",".","loading_lock","=","threading",".","Event","(",")","self",".","loading_lock",".","set","(",")","if","load",":","self",".","load","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L154-L188"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.parse_opt","parameters":"(self, opt)","argument_list":"","return_statement":"return opt","docstring":"Parse the option set passed by the user using `onmt.opts`\n Args:\n opt: (dict) options passed by the user\n\n Returns:\n opt: (Namespace) full set of options for the Translator","docstring_summary":"Parse the option set passed by the user using `onmt.opts`\n Args:\n opt: (dict) options passed by the 
user","docstring_tokens":["Parse","the","option","set","passed","by","the","user","using","onmt",".","opts","Args",":","opt",":","(","dict",")","options","passed","by","the","user"],"function":"def parse_opt(self, opt):\n \"\"\"Parse the option set passed by the user using `onmt.opts`\n Args:\n opt: (dict) options passed by the user\n\n Returns:\n opt: (Namespace) full set of options for the Translator\n \"\"\"\n prec_argv = sys.argv\n sys.argv = sys.argv[:1]\n parser = argparse.ArgumentParser()\n onmt.opts.translate_opts(parser)\n\n models = opt['models']\n if not isinstance(models, (list, tuple)):\n models = [models]\n opt['models'] = [os.path.join(self.model_root, model)\n for model in models]\n opt['src'] = \"dummy_src\"\n\n for (k, v) in opt.items():\n if k == 'models':\n sys.argv += ['-model']\n sys.argv += [str(model) for model in v]\n elif type(v) == bool:\n sys.argv += ['-%s' % k]\n else:\n sys.argv += ['-%s' % k, str(v)]\n\n opt = parser.parse_args()\n opt.cuda = opt.gpu > -1\n\n sys.argv = prec_argv\n return opt","function_tokens":["def","parse_opt","(","self",",","opt",")",":","prec_argv","=","sys",".","argv","sys",".","argv","=","sys",".","argv","[",":","1","]","parser","=","argparse",".","ArgumentParser","(",")","onmt",".","opts",".","translate_opts","(","parser",")","models","=","opt","[","'models'","]","if","not","isinstance","(","models",",","(","list",",","tuple",")",")",":","models","=","[","models","]","opt","[","'models'","]","=","[","os",".","path",".","join","(","self",".","model_root",",","model",")","for","model","in","models","]","opt","[","'src'","]","=","\"dummy_src\"","for","(","k",",","v",")","in","opt",".","items","(",")",":","if","k","==","'models'",":","sys",".","argv","+=","[","'-model'","]","sys",".","argv","+=","[","str","(","model",")","for","model","in","v","]","elif","type","(","v",")","==","bool",":","sys",".","argv","+=","[","'-%s'","%","k","]","else",":","sys",".","argv","+=","[","'-%s'","%","k",",","str","(","v",")","]","opt","=","parser",".","parse_args","(",")","opt",".","cuda","=","opt",".","gpu",">","-","1","sys",".","argv","=","prec_argv","return","opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L190-L223"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.run","parameters":"(self, inputs)","argument_list":"","return_statement":"return results, scores, self.opt.n_best, timer.times","docstring":"Translate `inputs` using this model\n\n Args:\n inputs: [{\"src\": \"...\"},{\"src\": ...}]\n\n Returns:\n result: (list) translations\n times: (dict) containing times","docstring_summary":"Translate `inputs` using this model","docstring_tokens":["Translate","inputs","using","this","model"],"function":"def run(self, inputs):\n \"\"\"Translate `inputs` using this model\n\n Args:\n inputs: [{\"src\": \"...\"},{\"src\": ...}]\n\n Returns:\n result: (list) translations\n times: (dict) containing times\n \"\"\"\n self.stop_unload_timer()\n\n timer = Timer()\n timer.start()\n self.logger.info(\"Running translation using %d\" % self.model_id)\n\n if not self.loading_lock.is_set():\n self.logger.info(\n \"Model #%d is being loaded by another thread, waiting\"\n % self.model_id)\n if not self.loading_lock.wait(timeout=30):\n raise ServerModelError(\"Model %d loading timeout\"\n % self.model_id)\n\n else:\n if not 
self.loaded:\n self.load()\n timer.tick(name=\"load\")\n elif self.opt.cuda:\n self.to_gpu()\n timer.tick(name=\"to_gpu\")\n\n texts = []\n head_spaces = []\n tail_spaces = []\n sslength = []\n for i, inp in enumerate(inputs):\n src = inp['src']\n if src.strip() == \"\":\n head_spaces.append(src)\n texts.append(\"\")\n tail_spaces.append(\"\")\n else:\n whitespaces_before, whitespaces_after = \"\", \"\"\n match_before = re.search(r'^\\s+', src)\n match_after = re.search(r'\\s+$', src)\n if match_before is not None:\n whitespaces_before = match_before.group(0)\n if match_after is not None:\n whitespaces_after = match_after.group(0)\n head_spaces.append(whitespaces_before)\n tok = self.maybe_tokenize(src.strip())\n texts.append(tok)\n sslength.append(len(tok.split()))\n tail_spaces.append(whitespaces_after)\n\n empty_indices = [i for i, x in enumerate(texts) if x == \"\"]\n texts_to_translate = [x for x in texts if x != \"\"]\n\n scores = []\n predictions = []\n if len(texts_to_translate) > 0:\n try:\n scores, predictions = self.translator.translate(\n src_data_iter=texts_to_translate,\n batch_size=self.opt.batch_size)\n except RuntimeError as e:\n raise ServerModelError(\"Runtime Error: %s\" % str(e))\n\n timer.tick(name=\"translation\")\n self.logger.info(\"\"\"Using model #%d\\t%d inputs\n \\ttranslation time: %f\"\"\" % (self.model_id, len(texts),\n timer.times['translation']))\n self.reset_unload_timer()\n\n # NOTE: translator returns lists of `n_best` list\n # we can ignore that (i.e. flatten lists) only because\n # we restrict `n_best=1`\n def flatten_list(_list): return sum(_list, [])\n results = flatten_list(predictions)\n scores = [score_tensor.item()\n for score_tensor in flatten_list(scores)]\n\n results = [self.maybe_detokenize(item)\n for item in results]\n\n # build back results with empty texts\n for i in empty_indices:\n results.insert(i, \"\")\n scores.insert(i, 0)\n\n results = [\"\".join(items)\n for items in zip(head_spaces, results, tail_spaces)]\n\n self.logger.info(\"Translation Results: %d\", len(results))\n\n return results, scores, self.opt.n_best, timer.times","function_tokens":["def","run","(","self",",","inputs",")",":","self",".","stop_unload_timer","(",")","timer","=","Timer","(",")","timer",".","start","(",")","self",".","logger",".","info","(","\"Running translation using %d\"","%","self",".","model_id",")","if","not","self",".","loading_lock",".","is_set","(",")",":","self",".","logger",".","info","(","\"Model #%d is being loaded by another thread, waiting\"","%","self",".","model_id",")","if","not","self",".","loading_lock",".","wait","(","timeout","=","30",")",":","raise","ServerModelError","(","\"Model %d loading 
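The whitespace bookkeeping in `run` above preserves surrounding spaces across the tokenize/translate/detokenize round trip. Isolated, with a placeholder translation standing in for the model output:

```python
import re

src = "  Hello world \n"
m_before = re.search(r'^\s+', src)
m_after = re.search(r'\s+$', src)
head = m_before.group(0) if m_before else ""
tail = m_after.group(0) if m_after else ""

translated = "Bonjour le monde"              # stand-in for the model output
restored = "".join([head, translated, tail])
assert restored == "  Bonjour le monde \n"
```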
timeout\"","%","self",".","model_id",")","else",":","if","not","self",".","loaded",":","self",".","load","(",")","timer",".","tick","(","name","=","\"load\"",")","elif","self",".","opt",".","cuda",":","self",".","to_gpu","(",")","timer",".","tick","(","name","=","\"to_gpu\"",")","texts","=","[","]","head_spaces","=","[","]","tail_spaces","=","[","]","sslength","=","[","]","for","i",",","inp","in","enumerate","(","inputs",")",":","src","=","inp","[","'src'","]","if","src",".","strip","(",")","==","\"\"",":","head_spaces",".","append","(","src",")","texts",".","append","(","\"\"",")","tail_spaces",".","append","(","\"\"",")","else",":","whitespaces_before",",","whitespaces_after","=","\"\"",",","\"\"","match_before","=","re",".","search","(","r'^\\s+'",",","src",")","match_after","=","re",".","search","(","r'\\s+$'",",","src",")","if","match_before","is","not","None",":","whitespaces_before","=","match_before",".","group","(","0",")","if","match_after","is","not","None",":","whitespaces_after","=","match_after",".","group","(","0",")","head_spaces",".","append","(","whitespaces_before",")","tok","=","self",".","maybe_tokenize","(","src",".","strip","(",")",")","texts",".","append","(","tok",")","sslength",".","append","(","len","(","tok",".","split","(",")",")",")","tail_spaces",".","append","(","whitespaces_after",")","empty_indices","=","[","i","for","i",",","x","in","enumerate","(","texts",")","if","x","==","\"\"","]","texts_to_translate","=","[","x","for","x","in","texts","if","x","!=","\"\"","]","scores","=","[","]","predictions","=","[","]","if","len","(","texts_to_translate",")",">","0",":","try",":","scores",",","predictions","=","self",".","translator",".","translate","(","src_data_iter","=","texts_to_translate",",","batch_size","=","self",".","opt",".","batch_size",")","except","RuntimeError","as","e",":","raise","ServerModelError","(","\"Runtime Error: %s\"","%","str","(","e",")",")","timer",".","tick","(","name","=","\"translation\"",")","self",".","logger",".","info","(","\"\"\"Using model #%d\\t%d inputs\n \\ttranslation time: %f\"\"\"","%","(","self",".","model_id",",","len","(","texts",")",",","timer",".","times","[","'translation'","]",")",")","self",".","reset_unload_timer","(",")","# NOTE: translator returns lists of `n_best` list","# we can ignore that (i.e. 
flatten lists) only because","# we restrict `n_best=1`","def","flatten_list","(","_list",")",":","return","sum","(","_list",",","[","]",")","results","=","flatten_list","(","predictions",")","scores","=","[","score_tensor",".","item","(",")","for","score_tensor","in","flatten_list","(","scores",")","]","results","=","[","self",".","maybe_detokenize","(","item",")","for","item","in","results","]","# build back results with empty texts","for","i","in","empty_indices",":","results",".","insert","(","i",",","\"\"",")","scores",".","insert","(","i",",","0",")","results","=","[","\"\"",".","join","(","items",")","for","items","in","zip","(","head_spaces",",","results",",","tail_spaces",")","]","self",".","logger",".","info","(","\"Translation Results: %d\"",",","len","(","results",")",")","return","results",",","scores",",","self",".","opt",".","n_best",",","timer",".","times"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L286-L382"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.do_timeout","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Timeout function that free GPU memory by moving the model to CPU\n or unloading it; depending on `self.on_timemout` value","docstring_summary":"Timeout function that free GPU memory by moving the model to CPU\n or unloading it; depending on `self.on_timemout` value","docstring_tokens":["Timeout","function","that","free","GPU","memory","by","moving","the","model","to","CPU","or","unloading","it",";","depending","on","self",".","on_timemout","value"],"function":"def do_timeout(self):\n \"\"\"Timeout function that free GPU memory by moving the model to CPU\n or unloading it; depending on `self.on_timemout` value\n \"\"\"\n if self.on_timeout == \"unload\":\n self.logger.info(\"Timeout: unloading model %d\" % self.model_id)\n self.unload()\n if self.on_timeout == \"to_cpu\":\n self.logger.info(\"Timeout: sending model %d to CPU\"\n % self.model_id)\n self.to_cpu()","function_tokens":["def","do_timeout","(","self",")",":","if","self",".","on_timeout","==","\"unload\"",":","self",".","logger",".","info","(","\"Timeout: unloading model %d\"","%","self",".","model_id",")","self",".","unload","(",")","if","self",".","on_timeout","==","\"to_cpu\"",":","self",".","logger",".","info","(","\"Timeout: sending model %d to CPU\"","%","self",".","model_id",")","self",".","to_cpu","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L384-L394"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.to_cpu","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Move the model to CPU and clear CUDA cache","docstring_summary":"Move the model to CPU and clear CUDA cache","docstring_tokens":["Move","the","model","to","CPU","and","clear","CUDA","cache"],"function":"def to_cpu(self):\n \"\"\"Move the model to CPU and clear CUDA cache\n \"\"\"\n self.translator.model.cpu()\n if self.opt.cuda:\n 
torch.cuda.empty_cache()","function_tokens":["def","to_cpu","(","self",")",":","self",".","translator",".","model",".","cpu","(",")","if","self",".","opt",".","cuda",":","torch",".","cuda",".","empty_cache","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L428-L433"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.to_gpu","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Move the model to GPU","docstring_summary":"Move the model to GPU","docstring_tokens":["Move","the","model","to","GPU"],"function":"def to_gpu(self):\n \"\"\"Move the model to GPU\n \"\"\"\n torch.cuda.set_device(self.opt.gpu)\n self.translator.model.cuda()","function_tokens":["def","to_gpu","(","self",")",":","torch",".","cuda",".","set_device","(","self",".","opt",".","gpu",")","self",".","translator",".","model",".","cuda","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L435-L439"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.maybe_tokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return sequence","docstring":"Tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`","docstring_summary":"Tokenize the sequence (or not)","docstring_tokens":["Tokenize","the","sequence","(","or","not",")"],"function":"def maybe_tokenize(self, sequence):\n \"\"\"Tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`\n \"\"\"\n if self.tokenizer_opt is not None:\n return self.tokenize(sequence)\n return sequence","function_tokens":["def","maybe_tokenize","(","self",",","sequence",")",":","if","self",".","tokenizer_opt","is","not","None",":","return","self",".","tokenize","(","sequence",")","return","sequence"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L441-L448"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.tokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return tok","docstring":"Tokenize a single sequence\n\n Args:\n sequence: (str) the sequence to tokenize\n\n Returns:\n tok: (str) the tokenized sequence","docstring_summary":"Tokenize a single sequence","docstring_tokens":["Tokenize","a","single","sequence"],"function":"def tokenize(self, sequence):\n \"\"\"Tokenize a single sequence\n\n Args:\n sequence: (str) the sequence to tokenize\n\n Returns:\n tok: (str) the tokenized sequence\n\n \"\"\"\n if self.tokenizer is None:\n raise ValueError(\"No tokenizer loaded\")\n\n if self.tokenizer_opt[\"type\"] == \"sentencepiece\":\n tok = self.tokenizer.EncodeAsPieces(sequence)\n tok = \" \".join(tok)\n elif self.tokenizer_opt[\"type\"] == \"pyonmttok\":\n tok, _ = self.tokenizer.tokenize(sequence)\n tok = \" \".join(tok)\n return 
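A sketch of the sentencepiece branch of `tokenize`/`detokenize` as named in the records here, assuming a pre-trained model file `sp.model` exists (the file name is an assumption).

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("sp.model")                                # assumed pre-trained model

tok = " ".join(sp.EncodeAsPieces("Hello world"))   # space-joined pieces
detok = sp.DecodePieces(tok.split())               # inverse, as in detokenize
```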
tok","function_tokens":["def","tokenize","(","self",",","sequence",")",":","if","self",".","tokenizer","is","None",":","raise","ValueError","(","\"No tokenizer loaded\"",")","if","self",".","tokenizer_opt","[","\"type\"","]","==","\"sentencepiece\"",":","tok","=","self",".","tokenizer",".","EncodeAsPieces","(","sequence",")","tok","=","\" \"",".","join","(","tok",")","elif","self",".","tokenizer_opt","[","\"type\"","]","==","\"pyonmttok\"",":","tok",",","_","=","self",".","tokenizer",".","tokenize","(","sequence",")","tok","=","\" \"",".","join","(","tok",")","return","tok"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L450-L469"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.maybe_detokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return sequence","docstring":"De-tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`","docstring_summary":"De-tokenize the sequence (or not)","docstring_tokens":["De","-","tokenize","the","sequence","(","or","not",")"],"function":"def maybe_detokenize(self, sequence):\n \"\"\"De-tokenize the sequence (or not)\n\n Same args\/returns as `tokenize`\n \"\"\"\n if self.tokenizer_opt is not None and ''.join(sequence.split()) != '':\n return self.detokenize(sequence)\n return sequence","function_tokens":["def","maybe_detokenize","(","self",",","sequence",")",":","if","self",".","tokenizer_opt","is","not","None","and","''",".","join","(","sequence",".","split","(",")",")","!=","''",":","return","self",".","detokenize","(","sequence",")","return","sequence"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L471-L478"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation_server.py","language":"python","identifier":"ServerModel.detokenize","parameters":"(self, sequence)","argument_list":"","return_statement":"return detok","docstring":"Detokenize a single sequence\n\n Same args\/returns as `tokenize`","docstring_summary":"Detokenize a single sequence","docstring_tokens":["Detokenize","a","single","sequence"],"function":"def detokenize(self, sequence):\n \"\"\"Detokenize a single sequence\n\n Same args\/returns as `tokenize`\n \"\"\"\n if self.tokenizer is None:\n raise ValueError(\"No tokenizer loaded\")\n\n if self.tokenizer_opt[\"type\"] == \"sentencepiece\":\n detok = self.tokenizer.DecodePieces(sequence.split())\n elif self.tokenizer_opt[\"type\"] == \"pyonmttok\":\n detok = self.tokenizer.detokenize(sequence.split())\n\n return detok","function_tokens":["def","detokenize","(","self",",","sequence",")",":","if","self",".","tokenizer","is","None",":","raise","ValueError","(","\"No tokenizer 
loaded\"",")","if","self",".","tokenizer_opt","[","\"type\"","]","==","\"sentencepiece\"",":","detok","=","self",".","tokenizer",".","DecodePieces","(","sequence",".","split","(",")",")","elif","self",".","tokenizer_opt","[","\"type\"","]","==","\"pyonmttok\"",":","detok","=","self",".","tokenizer",".","detokenize","(","sequence",".","split","(",")",")","return","detok"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation_server.py#L480-L493"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.coverage_wu","parameters":"(self, beam, cov, beta=0.)","argument_list":"","return_statement":"return beta * penalty","docstring":"NMT coverage re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_summary":"NMT coverage re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_tokens":["NMT","coverage","re","-","ranking","score","from","Google","s","Neural","Machine","Translation","System",":","cite",":","wu2016google","."],"function":"def coverage_wu(self, beam, cov, beta=0.):\n \"\"\"\n NMT coverage re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.\n \"\"\"\n penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)\n return beta * penalty","function_tokens":["def","coverage_wu","(","self",",","beam",",","cov",",","beta","=","0.",")",":","penalty","=","-","torch",".","min","(","cov",",","cov",".","clone","(",")",".","fill_","(","1.0",")",")",".","log","(",")",".","sum","(","1",")","return","beta","*","penalty"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/penalties.py#L38-L44"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.coverage_summary","parameters":"(self, beam, cov, beta=0.)","argument_list":"","return_statement":"return beta * penalty","docstring":"Our summary penalty.","docstring_summary":"Our summary penalty.","docstring_tokens":["Our","summary","penalty","."],"function":"def coverage_summary(self, beam, cov, beta=0.):\n \"\"\"\n Our summary penalty.\n \"\"\"\n penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(1)\n penalty -= cov.size(1)\n return beta * penalty","function_tokens":["def","coverage_summary","(","self",",","beam",",","cov",",","beta","=","0.",")",":","penalty","=","torch",".","max","(","cov",",","cov",".","clone","(",")",".","fill_","(","1.0",")",")",".","sum","(","1",")","penalty","-=","cov",".","size","(","1",")","return","beta","*","penalty"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/penalties.py#L46-L52"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.coverage_none","parameters":"(self, beam, cov, beta=0.)","argument_list":"","return_statement":"return beam.scores.clone().fill_(0.0)","docstring":"returns zero as penalty","docstring_summary":"returns zero as penalty","docstring_tokens":["returns","zero","as","penalty"],"function":"def coverage_none(self, beam, 
cov, beta=0.):\n \"\"\"\n returns zero as penalty\n \"\"\"\n return beam.scores.clone().fill_(0.0)","function_tokens":["def","coverage_none","(","self",",","beam",",","cov",",","beta","=","0.",")",":","return","beam",".","scores",".","clone","(",")",".","fill_","(","0.0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/penalties.py#L54-L58"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.length_wu","parameters":"(self, beam, logprobs, alpha=0.)","argument_list":"","return_statement":"return (logprobs \/ modifier)","docstring":"NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_summary":"NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.","docstring_tokens":["NMT","length","re","-","ranking","score","from","Google","s","Neural","Machine","Translation","System",":","cite",":","wu2016google","."],"function":"def length_wu(self, beam, logprobs, alpha=0.):\n \"\"\"\n NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.\n \"\"\"\n\n modifier = (((5 + len(beam.next_ys)) ** alpha) \/\n ((5 + 1) ** alpha))\n return (logprobs \/ modifier)","function_tokens":["def","length_wu","(","self",",","beam",",","logprobs",",","alpha","=","0.",")",":","modifier","=","(","(","(","5","+","len","(","beam",".","next_ys",")",")","**","alpha",")","\/","(","(","5","+","1",")","**","alpha",")",")","return","(","logprobs","\/","modifier",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/penalties.py#L60-L68"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.length_average","parameters":"(self, beam, logprobs, alpha=0.)","argument_list":"","return_statement":"return logprobs \/ len(beam.next_ys)","docstring":"Returns the average probability of tokens in a sequence.","docstring_summary":"Returns the average probability of tokens in a sequence.","docstring_tokens":["Returns","the","average","probability","of","tokens","in","a","sequence","."],"function":"def length_average(self, beam, logprobs, alpha=0.):\n \"\"\"\n Returns the average probability of tokens in a sequence.\n \"\"\"\n return logprobs \/ len(beam.next_ys)","function_tokens":["def","length_average","(","self",",","beam",",","logprobs",",","alpha","=","0.",")",":","return","logprobs","\/","len","(","beam",".","next_ys",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/penalties.py#L70-L74"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/penalties.py","language":"python","identifier":"PenaltyBuilder.length_none","parameters":"(self, beam, logprobs, alpha=0., beta=0.)","argument_list":"","return_statement":"return logprobs","docstring":"Returns unmodified scores.","docstring_summary":"Returns unmodified scores.","docstring_tokens":["Returns","unmodified","scores","."],"function":"def length_none(self, beam, logprobs, alpha=0., beta=0.):\n \"\"\"\n Returns unmodified scores.\n \"\"\"\n return 
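A matching check for `length_wu`: with alpha = 0.6 (a value commonly used with GNMT-style decoding, not one taken from this repo), a 20-token hypothesis is divided by roughly 2.35, which offsets the bias of summed log-probabilities toward short outputs.

```python
alpha = 0.6
modifier = ((5 + 20) ** alpha) / ((5 + 1) ** alpha)
print(f"{modifier:.2f}")   # 2.35
```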
logprobs","function_tokens":["def","length_none","(","self",",","beam",",","logprobs",",","alpha","=","0.",",","beta","=","0.",")",":","return","logprobs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/penalties.py#L76-L80"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translator.py","language":"python","identifier":"Translator.translate","parameters":"(self,\n src_path=None,\n src_data_iter=None,\n tgt_path=None,\n tgt_data_iter=None,\n src_dir=None,\n batch_size=None,\n attn_debug=False)","argument_list":"","return_statement":"return all_scores, all_predictions","docstring":"Translate content of `src_data_iter` (if not None) or `src_path`\n and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.\n\n Note: batch_size must not be None\n Note: one of ('src_path', 'src_data_iter') must not be None\n\n Args:\n src_path (str): filepath of source data\n src_data_iter (iterator): an interator generating source data\n e.g. it may be a list or an openned file\n tgt_path (str): filepath of target data\n tgt_data_iter (iterator): an interator generating target data\n src_dir (str): source directory path\n (used for Audio and Image datasets)\n batch_size (int): size of examples per mini-batch\n attn_debug (bool): enables the attention logging\n\n Returns:\n (`list`, `list`)\n\n * all_scores is a list of `batch_size` lists of `n_best` scores\n * all_predictions is a list of `batch_size` lists\n of `n_best` predictions","docstring_summary":"Translate content of `src_data_iter` (if not None) or `src_path`\n and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.","docstring_tokens":["Translate","content","of","src_data_iter","(","if","not","None",")","or","src_path","and","get","gold","scores","if","one","of","tgt_data_iter","or","tgt_path","is","set","."],"function":"def translate(self,\n src_path=None,\n src_data_iter=None,\n tgt_path=None,\n tgt_data_iter=None,\n src_dir=None,\n batch_size=None,\n attn_debug=False):\n \"\"\"\n Translate content of `src_data_iter` (if not None) or `src_path`\n and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.\n\n Note: batch_size must not be None\n Note: one of ('src_path', 'src_data_iter') must not be None\n\n Args:\n src_path (str): filepath of source data\n src_data_iter (iterator): an interator generating source data\n e.g. it may be a list or an openned file\n tgt_path (str): filepath of target data\n tgt_data_iter (iterator): an interator generating target data\n src_dir (str): source directory path\n (used for Audio and Image datasets)\n batch_size (int): size of examples per mini-batch\n attn_debug (bool): enables the attention logging\n\n Returns:\n (`list`, `list`)\n\n * all_scores is a list of `batch_size` lists of `n_best` scores\n * all_predictions is a list of `batch_size` lists\n of `n_best` predictions\n \"\"\"\n\n assert src_data_iter is not None or src_path is not None\n\n if batch_size is None:\n raise ValueError(\"batch_size must be set\")\n data = inputters. 
\\\n build_dataset(self.fields,\n self.data_type,\n src_path=src_path,\n src_data_iter=src_data_iter,\n tgt_path=tgt_path,\n tgt_data_iter=tgt_data_iter,\n src_dir=src_dir,\n sample_rate=self.sample_rate,\n window_size=self.window_size,\n window_stride=self.window_stride,\n window=self.window,\n use_filter_pred=self.use_filter_pred,\n image_channel_size=self.image_channel_size)\n\n if self.cuda:\n cur_device = \"cuda\"\n else:\n cur_device = \"cpu\"\n\n data_iter = inputters.OrderedIterator(\n dataset=data, device=cur_device,\n batch_size=batch_size, train=False, sort=False,\n sort_within_batch=True, shuffle=False)\n\n builder = onmt.translate.TranslationBuilder(\n data, self.fields,\n self.n_best, self.replace_unk, tgt_path)\n\n # Statistics\n counter = count(1)\n pred_score_total, pred_words_total = 0, 0\n gold_score_total, gold_words_total = 0, 0\n\n all_scores = []\n all_predictions = []\n\n for batch in data_iter:\n\n\n\n batch_data = self.translate_batch(batch, data, fast=self.fast)\n\n\n translations = builder.from_batch(batch_data)\n\n\n\n for trans in translations:\n all_scores += [trans.pred_scores[:self.n_best]]\n pred_score_total += trans.pred_scores[0]\n pred_words_total += len(trans.pred_sents[0])\n if tgt_path is not None:\n gold_score_total += trans.gold_score\n gold_words_total += len(trans.gold_sent) + 1\n\n n_best_preds = [\" \".join(pred)\n for pred in trans.pred_sents[:self.n_best]]\n all_predictions += [n_best_preds]\n self.out_file.write('\\n'.join(n_best_preds) + '\\n')\n self.out_file.flush()\n\n if self.verbose:\n sent_number = next(counter)\n output = trans.log(sent_number)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n # Debug attention.\n if attn_debug:\n srcs = trans.src_raw\n preds = trans.pred_sents[0]\n preds.append('<\/s>')\n attns = trans.attns[0].tolist()\n header_format = \"{:>10.10} \" + \"{:>10.7} \" * len(srcs)\n row_format = \"{:>10.10} \" + \"{:>10.7f} \" * len(srcs)\n output = header_format.format(\"\", *trans.src_raw) + '\\n'\n for word, row in zip(preds, attns):\n max_index = row.index(max(row))\n row_format = row_format.replace(\n \"{:>10.7f} \", \"{:*>10.7f} \", max_index + 1)\n row_format = row_format.replace(\n \"{:*>10.7f} \", \"{:>10.7f} \", max_index)\n output += row_format.format(word, *row) + '\\n'\n row_format = \"{:>10.10} \" + \"{:>10.7f} \" * len(srcs)\n os.write(1, output.encode('utf-8'))\n\n #TODO change back\n #if self.report_score:\n # msg = self._report_score('PRED', pred_score_total,\n # pred_words_total)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n # if tgt_path is not None:\n # msg = self._report_score('GOLD', gold_score_total,\n # gold_words_total)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n # if self.report_bleu:\n # msg = self._report_bleu(tgt_path)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n # if self.report_rouge:\n # msg = self._report_rouge(tgt_path)\n # if self.logger:\n # self.logger.info(msg)\n # else:\n # print(msg)\n\n if self.dump_beam:\n import json\n json.dump(self.translator.beam_accum,\n codecs.open(self.dump_beam, 'w', 'utf-8'))\n return all_scores, 
all_predictions","function_tokens":["def","translate","(","self",",","src_path","=","None",",","src_data_iter","=","None",",","tgt_path","=","None",",","tgt_data_iter","=","None",",","src_dir","=","None",",","batch_size","=","None",",","attn_debug","=","False",")",":","assert","src_data_iter","is","not","None","or","src_path","is","not","None","if","batch_size","is","None",":","raise","ValueError","(","\"batch_size must be set\"",")","data","=","inputters",".","build_dataset","(","self",".","fields",",","self",".","data_type",",","src_path","=","src_path",",","src_data_iter","=","src_data_iter",",","tgt_path","=","tgt_path",",","tgt_data_iter","=","tgt_data_iter",",","src_dir","=","src_dir",",","sample_rate","=","self",".","sample_rate",",","window_size","=","self",".","window_size",",","window_stride","=","self",".","window_stride",",","window","=","self",".","window",",","use_filter_pred","=","self",".","use_filter_pred",",","image_channel_size","=","self",".","image_channel_size",")","if","self",".","cuda",":","cur_device","=","\"cuda\"","else",":","cur_device","=","\"cpu\"","data_iter","=","inputters",".","OrderedIterator","(","dataset","=","data",",","device","=","cur_device",",","batch_size","=","batch_size",",","train","=","False",",","sort","=","False",",","sort_within_batch","=","True",",","shuffle","=","False",")","builder","=","onmt",".","translate",".","TranslationBuilder","(","data",",","self",".","fields",",","self",".","n_best",",","self",".","replace_unk",",","tgt_path",")","# Statistics","counter","=","count","(","1",")","pred_score_total",",","pred_words_total","=","0",",","0","gold_score_total",",","gold_words_total","=","0",",","0","all_scores","=","[","]","all_predictions","=","[","]","for","batch","in","data_iter",":","batch_data","=","self",".","translate_batch","(","batch",",","data",",","fast","=","self",".","fast",")","translations","=","builder",".","from_batch","(","batch_data",")","for","trans","in","translations",":","all_scores","+=","[","trans",".","pred_scores","[",":","self",".","n_best","]","]","pred_score_total","+=","trans",".","pred_scores","[","0","]","pred_words_total","+=","len","(","trans",".","pred_sents","[","0","]",")","if","tgt_path","is","not","None",":","gold_score_total","+=","trans",".","gold_score","gold_words_total","+=","len","(","trans",".","gold_sent",")","+","1","n_best_preds","=","[","\" \"",".","join","(","pred",")","for","pred","in","trans",".","pred_sents","[",":","self",".","n_best","]","]","all_predictions","+=","[","n_best_preds","]","self",".","out_file",".","write","(","'\\n'",".","join","(","n_best_preds",")","+","'\\n'",")","self",".","out_file",".","flush","(",")","if","self",".","verbose",":","sent_number","=","next","(","counter",")","output","=","trans",".","log","(","sent_number",")","if","self",".","logger",":","self",".","logger",".","info","(","output",")","else",":","os",".","write","(","1",",","output",".","encode","(","'utf-8'",")",")","# Debug attention.","if","attn_debug",":","srcs","=","trans",".","src_raw","preds","=","trans",".","pred_sents","[","0","]","preds",".","append","(","'<\/s>'",")","attns","=","trans",".","attns","[","0","]",".","tolist","(",")","header_format","=","\"{:>10.10} \"","+","\"{:>10.7} \"","*","len","(","srcs",")","row_format","=","\"{:>10.10} \"","+","\"{:>10.7f} 
\"","*","len","(","srcs",")","output","=","header_format",".","format","(","\"\"",",","*","trans",".","src_raw",")","+","'\\n'","for","word",",","row","in","zip","(","preds",",","attns",")",":","max_index","=","row",".","index","(","max","(","row",")",")","row_format","=","row_format",".","replace","(","\"{:>10.7f} \"",",","\"{:*>10.7f} \"",",","max_index","+","1",")","row_format","=","row_format",".","replace","(","\"{:*>10.7f} \"",",","\"{:>10.7f} \"",",","max_index",")","output","+=","row_format",".","format","(","word",",","*","row",")","+","'\\n'","row_format","=","\"{:>10.10} \"","+","\"{:>10.7f} \"","*","len","(","srcs",")","os",".","write","(","1",",","output",".","encode","(","'utf-8'",")",")","#TODO change back","#if self.report_score:","# msg = self._report_score('PRED', pred_score_total,","# pred_words_total)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","# if tgt_path is not None:","# msg = self._report_score('GOLD', gold_score_total,","# gold_words_total)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","# if self.report_bleu:","# msg = self._report_bleu(tgt_path)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","# if self.report_rouge:","# msg = self._report_rouge(tgt_path)","# if self.logger:","# self.logger.info(msg)","# else:","# print(msg)","if","self",".","dump_beam",":","import","json","json",".","dump","(","self",".","translator",".","beam_accum",",","codecs",".","open","(","self",".","dump_beam",",","'w'",",","'utf-8'",")",")","return","all_scores",",","all_predictions"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translator.py#L154-L313"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translator.py","language":"python","identifier":"Translator.translate_batch","parameters":"(self, batch, data, fast=False)","argument_list":"","return_statement":"","docstring":"Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.","docstring_summary":"Translate a batch of sentences.","docstring_tokens":["Translate","a","batch","of","sentences","."],"function":"def translate_batch(self, batch, data, fast=False):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n fast (bool): enables fast beam search (may not support all features)\n\n Todo:\n Shouldn't need the original dataset.\n \"\"\"\n\n\n with torch.no_grad():\n if fast:\n return self._fast_translate_batch(\n batch,\n data,\n self.max_length,\n min_length=self.min_length,\n n_best=self.n_best,\n return_attention=self.replace_unk)\n else:\n # 2333: go here\n return self._translate_batch(batch, data)","function_tokens":["def","translate_batch","(","self",",","batch",",","data",",","fast","=","False",")",":","with","torch",".","no_grad","(",")",":","if","fast",":","return","self",".","_fast_translate_batch","(","batch",",","data",",","self",".","max_length",",","min_length","=","self",".","min_length",",","n_best","=","self",".","n_best",",","return_attention","=","self",".","replace_unk",")","else",":","# 2333: go 
here","return","self",".","_translate_batch","(","batch",",","data",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translator.py#L315-L342"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.get_current_state","parameters":"(self)","argument_list":"","return_statement":"return self.next_ys[-1]","docstring":"Get the outputs for the current timestep.","docstring_summary":"Get the outputs for the current timestep.","docstring_tokens":["Get","the","outputs","for","the","current","timestep","."],"function":"def get_current_state(self):\n \"Get the outputs for the current timestep.\"\n return self.next_ys[-1]","function_tokens":["def","get_current_state","(","self",")",":","return","self",".","next_ys","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L68-L70"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.get_current_origin","parameters":"(self)","argument_list":"","return_statement":"return self.prev_ks[-1]","docstring":"Get the backpointers for the current timestep.","docstring_summary":"Get the backpointers for the current timestep.","docstring_tokens":["Get","the","backpointers","for","the","current","timestep","."],"function":"def get_current_origin(self):\n \"Get the backpointers for the current timestep.\"\n return self.prev_ks[-1]","function_tokens":["def","get_current_origin","(","self",")",":","return","self",".","prev_ks","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L72-L74"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.advance","parameters":"(self, word_probs, attn_out)","argument_list":"","return_statement":"","docstring":"Given prob over words for every last beam `wordLk` and attention\n `attn_out`: Compute and update the beam search.\n\n Parameters:\n\n * `word_probs`- probs of advancing from the last step (K x words)\n * `attn_out`- attention at the last step\n\n Returns: True if beam search is complete.","docstring_summary":"Given prob over words for every last beam `wordLk` and attention\n `attn_out`: Compute and update the beam search.","docstring_tokens":["Given","prob","over","words","for","every","last","beam","wordLk","and","attention","attn_out",":","Compute","and","update","the","beam","search","."],"function":"def advance(self, word_probs, attn_out):\n \"\"\"\n Given prob over words for every last beam `wordLk` and attention\n `attn_out`: Compute and update the beam search.\n\n Parameters:\n\n * `word_probs`- probs of advancing from the last step (K x words)\n * `attn_out`- attention at the last step\n\n Returns: True if beam search is complete.\n \"\"\"\n num_words = word_probs.size(1)\n if self.stepwise_penalty:\n self.global_scorer.update_score(self, attn_out)\n # force the output to be longer than self.min_length\n cur_len = len(self.next_ys)\n if cur_len < self.min_length:\n word_probs[:, self._eos] = -1e20\n # Sum the previous scores.\n if len(self.prev_ks) > 0:\n beam_scores = word_probs + \\\n 
self.scores.unsqueeze(1).expand_as(word_probs)\n # Don't let EOS have children. \n beam_scores[self.next_ys[-1] == self._eos] = -1e20\n # Block ngram repeats\n if self.block_ngram_repeat > 0:\n ngrams = []\n le = len(self.next_ys)\n for j in range(self.next_ys[-1].size(0)):\n hyp, _ = self.get_hyp(le - 1, j, requires_attn=False)\n ngrams = set()\n fail = False\n gram = []\n for i in range(le - 1):\n # Last n tokens, n = block_ngram_repeat\n gram = (gram +\n [hyp[i]])[-self.block_ngram_repeat:]\n # Skip the blocking if it is in the exclusion list\n if set(gram) & self.exclusion_tokens:\n continue\n if tuple(gram) in ngrams:\n fail = True\n ngrams.add(tuple(gram))\n if fail:\n beam_scores[j] = -10e20\n else:\n beam_scores = word_probs[0]\n flat_beam_scores = beam_scores.view(-1)\n best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,\n True, True)\n\n self.all_scores.append(self.scores)\n self.scores = best_scores\n\n # best_scores_id is flattened beam x word array, so calculate which\n # word and beam each score came from\n prev_k = best_scores_id \/ num_words\n self.prev_ks.append(prev_k)\n self.prev_ks_cpu.append(prev_k.tolist())\n\n self.next_ys.append((best_scores_id - prev_k * num_words))\n self.next_ys_cpu.append((best_scores_id - prev_k * num_words).tolist())\n\n self.attn.append(attn_out.index_select(0, prev_k))\n self.global_scorer.update_global_state(self)\n \n eos_indicator = self.next_ys[-1] == self._eos\n if eos_indicator.any():\n global_scores = self.global_scorer.score(self, self.scores)\n global_scores_eos = global_scores[eos_indicator]\n i_indexes = torch.where(eos_indicator)[0]\n for s, i, in zip(global_scores_eos.tolist(), i_indexes.tolist()):\n self.finished.append((s, len(self.next_ys) - 1, i))\n \n# for i in range(self.next_ys[-1].size(0)):\n# if self.next_ys[-1][i] == self._eos:\n# global_scores = self.global_scorer.score(self, self.scores)\n# s = global_scores[i]\n# self.finished.append((s, len(self.next_ys) - 1, i))\n\n # End condition is when top-of-beam is EOS and no global score.\n if self.next_ys[-1][0] == self._eos:\n self.all_scores.append(self.scores)\n self.eos_top = True","function_tokens":["def","advance","(","self",",","word_probs",",","attn_out",")",":","num_words","=","word_probs",".","size","(","1",")","if","self",".","stepwise_penalty",":","self",".","global_scorer",".","update_score","(","self",",","attn_out",")","# force the output to be longer than self.min_length","cur_len","=","len","(","self",".","next_ys",")","if","cur_len","<","self",".","min_length",":","word_probs","[",":",",","self",".","_eos","]","=","-","1e20","# Sum the previous scores.","if","len","(","self",".","prev_ks",")",">","0",":","beam_scores","=","word_probs","+","self",".","scores",".","unsqueeze","(","1",")",".","expand_as","(","word_probs",")","# Don't let EOS have children. 
","beam_scores","[","self",".","next_ys","[","-","1","]","==","self",".","_eos","]","=","-","1e20","# Block ngram repeats","if","self",".","block_ngram_repeat",">","0",":","ngrams","=","[","]","le","=","len","(","self",".","next_ys",")","for","j","in","range","(","self",".","next_ys","[","-","1","]",".","size","(","0",")",")",":","hyp",",","_","=","self",".","get_hyp","(","le","-","1",",","j",",","requires_attn","=","False",")","ngrams","=","set","(",")","fail","=","False","gram","=","[","]","for","i","in","range","(","le","-","1",")",":","# Last n tokens, n = block_ngram_repeat","gram","=","(","gram","+","[","hyp","[","i","]","]",")","[","-","self",".","block_ngram_repeat",":","]","# Skip the blocking if it is in the exclusion list","if","set","(","gram",")","&","self",".","exclusion_tokens",":","continue","if","tuple","(","gram",")","in","ngrams",":","fail","=","True","ngrams",".","add","(","tuple","(","gram",")",")","if","fail",":","beam_scores","[","j","]","=","-","10e20","else",":","beam_scores","=","word_probs","[","0","]","flat_beam_scores","=","beam_scores",".","view","(","-","1",")","best_scores",",","best_scores_id","=","flat_beam_scores",".","topk","(","self",".","size",",","0",",","True",",","True",")","self",".","all_scores",".","append","(","self",".","scores",")","self",".","scores","=","best_scores","# best_scores_id is flattened beam x word array, so calculate which","# word and beam each score came from","prev_k","=","best_scores_id","\/","num_words","self",".","prev_ks",".","append","(","prev_k",")","self",".","prev_ks_cpu",".","append","(","prev_k",".","tolist","(",")",")","self",".","next_ys",".","append","(","(","best_scores_id","-","prev_k","*","num_words",")",")","self",".","next_ys_cpu",".","append","(","(","best_scores_id","-","prev_k","*","num_words",")",".","tolist","(",")",")","self",".","attn",".","append","(","attn_out",".","index_select","(","0",",","prev_k",")",")","self",".","global_scorer",".","update_global_state","(","self",")","eos_indicator","=","self",".","next_ys","[","-","1","]","==","self",".","_eos","if","eos_indicator",".","any","(",")",":","global_scores","=","self",".","global_scorer",".","score","(","self",",","self",".","scores",")","global_scores_eos","=","global_scores","[","eos_indicator","]","i_indexes","=","torch",".","where","(","eos_indicator",")","[","0","]","for","s",",","i",",","in","zip","(","global_scores_eos",".","tolist","(",")",",","i_indexes",".","tolist","(",")",")",":","self",".","finished",".","append","(","(","s",",","len","(","self",".","next_ys",")","-","1",",","i",")",")","# for i in range(self.next_ys[-1].size(0)):","# if self.next_ys[-1][i] == self._eos:","# global_scores = self.global_scorer.score(self, self.scores)","# s = global_scores[i]","# self.finished.append((s, len(self.next_ys) - 1, i))","# End condition is when top-of-beam is EOS and no global score.","if","self",".","next_ys","[","-","1","]","[","0","]","==","self",".","_eos",":","self",".","all_scores",".","append","(","self",".","scores",")","self",".","eos_top","=","True"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L76-L160"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"Beam.get_hyp","parameters":"(self, timestep, k, requires_attn=True)","argument_list":"","return_statement":"return hyp[::-1], attn","docstring":"Walk back to construct the 
full hypothesis.","docstring_summary":"Walk back to construct the full hypothesis.","docstring_tokens":["Walk","back","to","construct","the","full","hypothesis","."],"function":"def get_hyp(self, timestep, k, requires_attn=True):\n \"\"\"\n Walk back to construct the full hypothesis.\n \"\"\"\n hyp, attn = [], []\n for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):\n hyp.append(self.next_ys_cpu[j + 1][k])\n if requires_attn:\n attn.append(self.attn[j][k])\n k = self.prev_ks_cpu[j][k]\n\n if requires_attn:\n attn = torch.stack(attn[::-1])\n return hyp[::-1], attn","function_tokens":["def","get_hyp","(","self",",","timestep",",","k",",","requires_attn","=","True",")",":","hyp",",","attn","=","[","]",",","[","]","for","j","in","range","(","len","(","self",".","prev_ks","[",":","timestep","]",")","-","1",",","-","1",",","-","1",")",":","hyp",".","append","(","self",".","next_ys_cpu","[","j","+","1","]","[","k","]",")","if","requires_attn",":","attn",".","append","(","self",".","attn","[","j","]","[","k","]",")","k","=","self",".","prev_ks_cpu","[","j","]","[","k","]","if","requires_attn",":","attn","=","torch",".","stack","(","attn","[",":",":","-","1","]",")","return","hyp","[",":",":","-","1","]",",","attn"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L180-L193"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"GNMTGlobalScorer.score","parameters":"(self, beam, logprobs)","argument_list":"","return_statement":"return normalized_probs","docstring":"Rescores a prediction based on penalty functions","docstring_summary":"Rescores a prediction based on penalty functions","docstring_tokens":["Rescores","a","prediction","based","on","penalty","functions"],"function":"def score(self, beam, logprobs):\n \"\"\"\n Rescores a prediction based on penalty functions\n \"\"\"\n normalized_probs = self.length_penalty(beam,\n logprobs,\n self.alpha)\n if not beam.stepwise_penalty:\n penalty = self.cov_penalty(beam,\n beam.global_state[\"coverage\"],\n self.beta)\n normalized_probs -= penalty\n\n return normalized_probs","function_tokens":["def","score","(","self",",","beam",",","logprobs",")",":","normalized_probs","=","self",".","length_penalty","(","beam",",","logprobs",",","self",".","alpha",")","if","not","beam",".","stepwise_penalty",":","penalty","=","self",".","cov_penalty","(","beam",",","beam",".","global_state","[","\"coverage\"","]",",","self",".","beta",")","normalized_probs","-=","penalty","return","normalized_probs"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L216-L229"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"GNMTGlobalScorer.update_score","parameters":"(self, beam, attn)","argument_list":"","return_statement":"","docstring":"Function to update scores of a Beam that is not finished","docstring_summary":"Function to update scores of a Beam that is not finished","docstring_tokens":["Function","to","update","scores","of","a","Beam","that","is","not","finished"],"function":"def update_score(self, beam, attn):\n \"\"\"\n Function to update scores of a Beam that is not finished\n \"\"\"\n if \"prev_penalty\" in beam.global_state.keys():\n 
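
Beam.get_hyp reconstructs a finished hypothesis by walking the prev_ks backpointers from the given timestep down to step 0, collecting the matching next_ys entries, and reversing at the end. The same walk over plain Python lists (toy backpointer tables; the attention collection is omitted):

# next_ys[t][k]: token chosen at step t in beam slot k
# prev_ks[t][k]: slot at step t-1 that this choice extended
next_ys = [[0, 0, 0], [7, 4, 9], [2, 7, 1]]
prev_ks = [[1, 0, 2], [2, 0, 1]]

def get_hyp(timestep, k):
    hyp = []
    for j in range(len(prev_ks[:timestep]) - 1, -1, -1):
        hyp.append(next_ys[j + 1][k])
        k = prev_ks[j][k]
    return hyp[::-1]

print(get_hyp(timestep=2, k=0))  # [9, 2]
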
beam.scores.add_(beam.global_state[\"prev_penalty\"])\n penalty = self.cov_penalty(beam,\n beam.global_state[\"coverage\"] + attn,\n self.beta)\n beam.scores.sub_(penalty)","function_tokens":["def","update_score","(","self",",","beam",",","attn",")",":","if","\"prev_penalty\"","in","beam",".","global_state",".","keys","(",")",":","beam",".","scores",".","add_","(","beam",".","global_state","[","\"prev_penalty\"","]",")","penalty","=","self",".","cov_penalty","(","beam",",","beam",".","global_state","[","\"coverage\"","]","+","attn",",","self",".","beta",")","beam",".","scores",".","sub_","(","penalty",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L231-L240"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/beam.py","language":"python","identifier":"GNMTGlobalScorer.update_global_state","parameters":"(self, beam)","argument_list":"","return_statement":"","docstring":"Keeps the coverage vector as sum of attentions","docstring_summary":"Keeps the coverage vector as sum of attentions","docstring_tokens":["Keeps","the","coverage","vector","as","sum","of","attentions"],"function":"def update_global_state(self, beam):\n \"Keeps the coverage vector as sum of attentions\"\n if len(beam.prev_ks) == 1:\n beam.global_state[\"prev_penalty\"] = beam.scores.clone().fill_(0.0)\n beam.global_state[\"coverage\"] = beam.attn[-1]\n self.cov_total = beam.attn[-1].sum(1)\n else:\n self.cov_total += torch.min(beam.attn[-1],\n beam.global_state['coverage']).sum(1)\n beam.global_state[\"coverage\"] = beam.global_state[\"coverage\"] \\\n .index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])\n\n prev_penalty = self.cov_penalty(beam,\n beam.global_state[\"coverage\"],\n self.beta)\n beam.global_state[\"prev_penalty\"] = prev_penalty","function_tokens":["def","update_global_state","(","self",",","beam",")",":","if","len","(","beam",".","prev_ks",")","==","1",":","beam",".","global_state","[","\"prev_penalty\"","]","=","beam",".","scores",".","clone","(",")",".","fill_","(","0.0",")","beam",".","global_state","[","\"coverage\"","]","=","beam",".","attn","[","-","1","]","self",".","cov_total","=","beam",".","attn","[","-","1","]",".","sum","(","1",")","else",":","self",".","cov_total","+=","torch",".","min","(","beam",".","attn","[","-","1","]",",","beam",".","global_state","[","'coverage'","]",")",".","sum","(","1",")","beam",".","global_state","[","\"coverage\"","]","=","beam",".","global_state","[","\"coverage\"","]",".","index_select","(","0",",","beam",".","prev_ks","[","-","1","]",")",".","add","(","beam",".","attn","[","-","1","]",")","prev_penalty","=","self",".","cov_penalty","(","beam",",","beam",".","global_state","[","\"coverage\"","]",",","self",".","beta",")","beam",".","global_state","[","\"prev_penalty\"","]","=","prev_penalty"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/beam.py#L242-L257"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/translate\/translation.py","language":"python","identifier":"Translation.log","parameters":"(self, sent_number)","argument_list":"","return_statement":"return output","docstring":"Log translation.","docstring_summary":"Log translation.","docstring_tokens":["Log","translation","."],"function":"def log(self, sent_number):\n \"\"\"\n Log translation.\n 
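
GNMTGlobalScorer.update_global_state keeps the coverage vector as a running sum of attention: at each step it reorders the per-beam sums through the surviving backpointers with index_select, then adds the newest attention. A minimal tensor sketch of that update (all shapes are illustrative):

import torch

beam_size, src_len = 3, 4
coverage = torch.rand(beam_size, src_len)   # attention summed so far, per beam
attn = torch.rand(beam_size, src_len)       # attention at the newest step
prev_k = torch.tensor([2, 0, 1])            # old beam each new beam extends

coverage = coverage.index_select(0, prev_k).add(attn)
print(coverage.shape)  # torch.Size([3, 4])
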
\"\"\"\n\n output = '\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n output += 'PRED {}: {}\\n'.format(sent_number, pred_sent)\n output += \"PRED SCORE: {:.4f}\\n\".format(best_score)\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n output += 'GOLD {}: {}\\n'.format(sent_number, tgt_sent)\n output += (\"GOLD SCORE: {:.4f}\\n\".format(self.gold_score))\n if len(self.pred_sents) > 1:\n output += '\\nBEST HYP:\\n'\n for score, sent in zip(self.pred_scores, self.pred_sents):\n output += \"[{:.4f}] {}\\n\".format(score, sent)\n\n return output","function_tokens":["def","log","(","self",",","sent_number",")",":","output","=","'\\nSENT {}: {}\\n'",".","format","(","sent_number",",","self",".","src_raw",")","best_pred","=","self",".","pred_sents","[","0","]","best_score","=","self",".","pred_scores","[","0","]","pred_sent","=","' '",".","join","(","best_pred",")","output","+=","'PRED {}: {}\\n'",".","format","(","sent_number",",","pred_sent",")","output","+=","\"PRED SCORE: {:.4f}\\n\"",".","format","(","best_score",")","if","self",".","gold_sent","is","not","None",":","tgt_sent","=","' '",".","join","(","self",".","gold_sent",")","output","+=","'GOLD {}: {}\\n'",".","format","(","sent_number",",","tgt_sent",")","output","+=","(","\"GOLD SCORE: {:.4f}\\n\"",".","format","(","self",".","gold_score",")",")","if","len","(","self",".","pred_sents",")",">","1",":","output","+=","'\\nBEST HYP:\\n'","for","score",",","sent","in","zip","(","self",".","pred_scores",",","self",".","pred_sents",")",":","output","+=","\"[{:.4f}] {}\\n\"",".","format","(","score",",","sent",")","return","output"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/translate\/translation.py#L134-L156"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderBase.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None,sent_encoder=None,src_sents=None,dec=None)","argument_list":"","return_statement":"return decoder_outputs, state, attns","docstring":"Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.","docstring_summary":"Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over 
src at each tgt\n `[tgt_len x batch x src_len]`.","docstring_tokens":["Args",":","tgt","(","LongTensor",")",":","sequences","of","padded","tokens","[","tgt_len","x","batch","x","nfeats","]",".","memory_bank","(","FloatTensor",")",":","vectors","from","the","encoder","[","src_len","x","batch","x","hidden","]",".","state","(",":","obj",":","onmt",".","models",".","DecoderState",")",":","decoder","state","object","to","initialize","the","decoder","memory_lengths","(","LongTensor",")",":","the","padded","source","lengths","[","batch","]",".","Returns",":","(","FloatTensor",":","obj",":","onmt",".","Models",".","DecoderState","FloatTensor",")",":","*","decoder_outputs",":","output","from","the","decoder","(","after","attn",")","[","tgt_len","x","batch","x","hidden","]",".","*","decoder_state",":","final","hidden","state","from","the","decoder","*","attns",":","distribution","over","src","at","each","tgt","[","tgt_len","x","batch","x","src_len","]","."],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None,sent_encoder=None,src_sents=None,dec=None):\n \"\"\"\n Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.\n \"\"\"\n # Check\n assert isinstance(state, RNNDecoderState)\n # tgt.size() returns tgt length and batch\n _, tgt_batch, _ = tgt.size()\n _, memory_batch, _ = memory_bank.size()\n aeq(tgt_batch, memory_batch)\n # END\n\n\n # 23333: TODO I changed this return value 'sent_decoder'\n\n # Run the forward pass of the RNN.\n decoder_final, decoder_outputs, attns = self._run_forward_pass(\n tgt, memory_bank, state, memory_lengths=memory_lengths,sent_encoder=sent_encoder,src_sents=src_sents,dec=dec)\n\n # Update the state with the result.\n final_output = decoder_outputs[-1]\n coverage = None\n if \"coverage\" in attns:\n coverage = attns[\"coverage\"][-1].unsqueeze(0)\n state.update_state(decoder_final, final_output.unsqueeze(0), coverage)\n\n # Concatenates sequence of tensors along a new dimension.\n # NOTE: v0.3 to 0.4: decoder_outputs \/ attns[*] may not be list\n # (in particular in case of SRU) it was not raising error in 0.3\n # since stack(Variable) was allowed.\n # In 0.4, SRU returns a tensor that shouldn't be stacke\n\n\n if type(decoder_outputs) == list:\n decoder_outputs = torch.stack(decoder_outputs)\n\n for k in attns:\n if type(attns[k]) == list:\n\n attns[k] = torch.stack(attns[k])\n\n return decoder_outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",",","sent_encoder","=","None",",","src_sents","=","None",",","dec","=","None",")",":","# Check","assert","isinstance","(","state",",","RNNDecoderState",")","# tgt.size() returns tgt length and batch","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","_",",","memory_batch",",","_","=","memory_bank",".","size","(",")","aeq","(","tgt_batch",",","memory_batch",")","# END","# 23333: TODO I changed this return 
value 'sent_decoder'","# Run the forward pass of the RNN.","decoder_final",",","decoder_outputs",",","attns","=","self",".","_run_forward_pass","(","tgt",",","memory_bank",",","state",",","memory_lengths","=","memory_lengths",",","sent_encoder","=","sent_encoder",",","src_sents","=","src_sents",",","dec","=","dec",")","# Update the state with the result.","final_output","=","decoder_outputs","[","-","1","]","coverage","=","None","if","\"coverage\"","in","attns",":","coverage","=","attns","[","\"coverage\"","]","[","-","1","]",".","unsqueeze","(","0",")","state",".","update_state","(","decoder_final",",","final_output",".","unsqueeze","(","0",")",",","coverage",")","# Concatenates sequence of tensors along a new dimension.","# NOTE: v0.3 to 0.4: decoder_outputs \/ attns[*] may not be list","# (in particular in case of SRU) it was not raising error in 0.3","# since stack(Variable) was allowed.","# In 0.4, SRU returns a tensor that shouldn't be stacke","if","type","(","decoder_outputs",")","==","list",":","decoder_outputs","=","torch",".","stack","(","decoder_outputs",")","for","k","in","attns",":","if","type","(","attns","[","k","]",")","==","list",":","attns","[","k","]","=","torch",".","stack","(","attns","[","k","]",")","return","decoder_outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L115-L172"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderBase.init_decoder_state","parameters":"(self, src, memory_bank, encoder_final,\n with_cache=False)","argument_list":"","return_statement":"","docstring":"Init decoder state with last state of the encoder","docstring_summary":"Init decoder state with last state of the encoder","docstring_tokens":["Init","decoder","state","with","last","state","of","the","encoder"],"function":"def init_decoder_state(self, src, memory_bank, encoder_final,\n with_cache=False):\n \"\"\" Init decoder state with last state of the encoder \"\"\"\n def _fix_enc_hidden(hidden):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n hidden = torch.cat([hidden[0:hidden.size(0):2],\n hidden[1:hidden.size(0):2]], 2)\n return hidden\n\n if isinstance(encoder_final, tuple): # LSTM\n return RNNDecoderState(self.hidden_size,\n tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final]))\n else: # GRU\n return RNNDecoderState(self.hidden_size,\n _fix_enc_hidden(encoder_final))","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","encoder_final",",","with_cache","=","False",")",":","def","_fix_enc_hidden","(","hidden",")",":","# The encoder hidden is (layers*directions) x batch x dim.","# We need to convert it to layers x batch x (directions*dim).","if","self",".","bidirectional_encoder",":","hidden","=","torch",".","cat","(","[","hidden","[","0",":","hidden",".","size","(","0",")",":","2","]",",","hidden","[","1",":","hidden",".","size","(","0",")",":","2","]","]",",","2",")","return","hidden","if","isinstance","(","encoder_final",",","tuple",")",":","# LSTM","return","RNNDecoderState","(","self",".","hidden_size",",","tuple","(","[","_fix_enc_hidden","(","enc_hid",")","for","enc_hid","in","encoder_final","]",")",")","else",":","# 
GRU","return","RNNDecoderState","(","self",".","hidden_size",",","_fix_enc_hidden","(","encoder_final",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L174-L191"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"StdRNNDecoder._run_forward_pass","parameters":"(self, tgt, memory_bank, state, memory_lengths=None, dec=False)","argument_list":"","return_statement":"return decoder_final, decoder_outputs, attns","docstring":"Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.","docstring_summary":"Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.","docstring_tokens":["Private","helper","for","running","the","specific","RNN","forward","pass",".","Must","be","overriden","by","all","subclasses",".","Args",":","tgt","(","LongTensor",")",":","a","sequence","of","input","tokens","tensors","[","len","x","batch","x","nfeats","]",".","memory_bank","(","FloatTensor",")",":","output","(","tensor","sequence",")","from","the","encoder","RNN","of","size","(","src_len","x","batch","x","hidden_size",")",".","state","(","FloatTensor",")",":","hidden","state","from","the","encoder","RNN","for","initializing","the","decoder",".","memory_lengths","(","LongTensor",")",":","the","source","memory_bank","lengths",".","Returns",":","decoder_final","(","Tensor",")",":","final","hidden","state","from","the","decoder",".","decoder_outputs","(","[","FloatTensor","]",")",":","an","array","of","output","of","every","time","step","from","the","decoder",".","attns","(","dict","of","(","str","[","FloatTensor","]",")",":","a","dictionary","of","different","type","of","attention","Tensor","array","of","every","time","step","from","the","decoder","."],"function":"def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None, dec=False):\n \"\"\"\n Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x 
batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Tensor): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.\n \"\"\"\n assert not self._copy # TODO, no support yet.\n assert not self._coverage # TODO, no support yet.\n\n # Initialize local and return variables.\n attns = {}\n emb = self.embeddings(tgt)\n\n # Run the forward pass of the RNN.\n if isinstance(self.rnn, nn.GRU):\n rnn_output, decoder_final = self.rnn(emb, state.hidden[0])\n else:\n rnn_output, decoder_final = self.rnn(emb, state.hidden)\n\n # Check\n tgt_len, tgt_batch, _ = tgt.size()\n output_len, output_batch, _ = rnn_output.size()\n aeq(tgt_len, output_len)\n aeq(tgt_batch, output_batch)\n # END\n\n # Calculate the attention.\n decoder_outputs, p_attn = self.attn(\n rnn_output.transpose(0, 1).contiguous(),\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths\n )\n attns[\"std\"] = p_attn\n\n # Calculate the context gate.\n if self.context_gate is not None:\n decoder_outputs = self.context_gate(\n emb.view(-1, emb.size(2)),\n rnn_output.view(-1, rnn_output.size(2)),\n decoder_outputs.view(-1, decoder_outputs.size(2))\n )\n decoder_outputs = \\\n decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)\n\n decoder_outputs = self.dropout(decoder_outputs)\n\n\n return decoder_final, decoder_outputs, attns","function_tokens":["def","_run_forward_pass","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","dec","=","False",")",":","assert","not","self",".","_copy","# TODO, no support yet.","assert","not","self",".","_coverage","# TODO, no support yet.","# Initialize local and return variables.","attns","=","{","}","emb","=","self",".","embeddings","(","tgt",")","# Run the forward pass of the RNN.","if","isinstance","(","self",".","rnn",",","nn",".","GRU",")",":","rnn_output",",","decoder_final","=","self",".","rnn","(","emb",",","state",".","hidden","[","0","]",")","else",":","rnn_output",",","decoder_final","=","self",".","rnn","(","emb",",","state",".","hidden",")","# Check","tgt_len",",","tgt_batch",",","_","=","tgt",".","size","(",")","output_len",",","output_batch",",","_","=","rnn_output",".","size","(",")","aeq","(","tgt_len",",","output_len",")","aeq","(","tgt_batch",",","output_batch",")","# END","# Calculate the attention.","decoder_outputs",",","p_attn","=","self",".","attn","(","rnn_output",".","transpose","(","0",",","1",")",".","contiguous","(",")",",","memory_bank",".","transpose","(","0",",","1",")",",","memory_lengths","=","memory_lengths",")","attns","[","\"std\"","]","=","p_attn","# Calculate the context 
gate.","if","self",".","context_gate","is","not","None",":","decoder_outputs","=","self",".","context_gate","(","emb",".","view","(","-","1",",","emb",".","size","(","2",")",")",",","rnn_output",".","view","(","-","1",",","rnn_output",".","size","(","2",")",")",",","decoder_outputs",".","view","(","-","1",",","decoder_outputs",".","size","(","2",")",")",")","decoder_outputs","=","decoder_outputs",".","view","(","tgt_len",",","tgt_batch",",","self",".","hidden_size",")","decoder_outputs","=","self",".","dropout","(","decoder_outputs",")","return","decoder_final",",","decoder_outputs",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L210-L271"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"StdRNNDecoder._input_size","parameters":"(self)","argument_list":"","return_statement":"return self.embeddings.embedding_size","docstring":"Private helper returning the number of expected features.","docstring_summary":"Private helper returning the number of expected features.","docstring_tokens":["Private","helper","returning","the","number","of","expected","features","."],"function":"def _input_size(self):\n \"\"\"\n Private helper returning the number of expected features.\n \"\"\"\n return self.embeddings.embedding_size","function_tokens":["def","_input_size","(","self",")",":","return","self",".","embeddings",".","embedding_size"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L278-L282"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._run_mmr","parameters":"(self,sent_encoder,sent_decoder,src_sents, input_step)","argument_list":"","return_statement":"return mmr_among_words","docstring":"# sent_encoder: size (sent_len=9,batch=2,dim=512)\n # sent_decoder: size (sent_len=1,batch=2,dim=512)\n # src_sents: size (batch=2,sent_len=9)\n function to calculate mmr\n :param sent_encoder:\n :param sent_decoder:\n :param src_sents:\n :return:","docstring_summary":"# sent_encoder: size (sent_len=9,batch=2,dim=512)\n # sent_decoder: size (sent_len=1,batch=2,dim=512)\n # src_sents: size (batch=2,sent_len=9)\n function to calculate mmr\n :param sent_encoder:\n :param sent_decoder:\n :param src_sents:\n :return:","docstring_tokens":["#","sent_encoder",":","size","(","sent_len","=","9","batch","=","2","dim","=","512",")","#","sent_decoder",":","size","(","sent_len","=","1","batch","=","2","dim","=","512",")","#","src_sents",":","size","(","batch","=","2","sent_len","=","9",")","function","to","calculate","mmr",":","param","sent_encoder",":",":","param","sent_decoder",":",":","param","src_sents",":",":","return",":"],"function":"def _run_mmr(self,sent_encoder,sent_decoder,src_sents, input_step):\n '''\n # sent_encoder: size (sent_len=9,batch=2,dim=512)\n # sent_decoder: size (sent_len=1,batch=2,dim=512)\n # src_sents: size (batch=2,sent_len=9)\n function to calculate mmr\n :param sent_encoder:\n :param sent_decoder:\n :param src_sents:\n :return:\n '''\n pdist = nn.PairwiseDistance(p=2)\n sent_decoder=sent_decoder.permute(1,0,2) # (2,1,512)\n\n scores =[]\n # define sent matrix and current vector distance as the Euclidean distance\n for sent in sent_encoder: # 
iterate over each batch sample\n # distance: https:\/\/pytorch.org\/docs\/stable\/_modules\/torch\/nn\/modules\/distance.html\n\n # import pdb;\n # pdb.set_trace()\n\n # sim1=torch.sum(pdist(sent_encoder.permute(1,0,2),sent.unsqueeze(1)),1).unsqueeze(1) # -> this is sim2 on my equation, note this is distance!\n\n sim1 = 1 - torch.mean(pdist(sent_encoder.permute(1, 0, 2), sent.unsqueeze(1)), 1).unsqueeze(1) # this is a similarity function\n # sim1 shape: (batch_size,1)\n\n sim2=torch.bmm(self.mmr_W(sent_decoder),sent.unsqueeze(2)).squeeze(2) # (2,1) -> this is sim1 on my equation\n\n # scores.append(sim1-sim2)\n scores.append(sim2 - sim1)\n\n\n sent_ranking_att = torch.t(torch.cat(scores,1)) #(sent_len=9,batch_size)\n sent_ranking_att = torch.softmax(sent_ranking_att, dim=0).permute(1,0) #(sent_len=9,batch_size)\n # scores is a list of score (sent_len=9, tensor shape (batch_size, 1))\n mmr_among_words = [] # should be (batch=2,input_step=200)\n for batch_id in range(sent_ranking_att.size()[0]):\n # iterate each batch, create zero weight on the input steps\n # mmr= torch.zeros([input_step], dtype=torch.float32).cuda()\n\n tmp = []\n for id,position in enumerate(src_sents[batch_id]):\n\n for x in range(position):\n tmp.append(sent_ranking_att[batch_id][id])\n\n\n mmr = torch.stack(tmp) # make to 1-d\n\n\n if len(mmr) < input_step: # pad with 0\n tmp = torch.zeros(input_step - len(mmr)).float().cuda()\n # for x in range(input_step-len(mmr)):\n mmr = torch.cat((mmr, tmp), 0)\n else:\n mmr = mmr[:input_step]\n\n mmr_among_words.append(mmr.unsqueeze(0))\n\n mmr_among_words = torch.cat(mmr_among_words,0)\n\n # shape: (batch=2, input_step=200)\n\n return mmr_among_words","function_tokens":["def","_run_mmr","(","self",",","sent_encoder",",","sent_decoder",",","src_sents",",","input_step",")",":","pdist","=","nn",".","PairwiseDistance","(","p","=","2",")","sent_decoder","=","sent_decoder",".","permute","(","1",",","0",",","2",")","# (2,1,512)","scores","=","[","]","# define sent matrix and current vector distance as the Euclidean distance","for","sent","in","sent_encoder",":","# iterate over each batch sample","# distance: https:\/\/pytorch.org\/docs\/stable\/_modules\/torch\/nn\/modules\/distance.html","# import pdb;","# pdb.set_trace()","# sim1=torch.sum(pdist(sent_encoder.permute(1,0,2),sent.unsqueeze(1)),1).unsqueeze(1) # -> this is sim2 on my equation, note this is distance!","sim1","=","1","-","torch",".","mean","(","pdist","(","sent_encoder",".","permute","(","1",",","0",",","2",")",",","sent",".","unsqueeze","(","1",")",")",",","1",")",".","unsqueeze","(","1",")","# this is a similarity function","# sim1 shape: (batch_size,1)","sim2","=","torch",".","bmm","(","self",".","mmr_W","(","sent_decoder",")",",","sent",".","unsqueeze","(","2",")",")",".","squeeze","(","2",")","# (2,1) -> this is sim1 on my equation","# scores.append(sim1-sim2)","scores",".","append","(","sim2","-","sim1",")","sent_ranking_att","=","torch",".","t","(","torch",".","cat","(","scores",",","1",")",")","#(sent_len=9,batch_size)","sent_ranking_att","=","torch",".","softmax","(","sent_ranking_att",",","dim","=","0",")",".","permute","(","1",",","0",")","#(sent_len=9,batch_size)","# scores is a list of score (sent_len=9, tensor shape (batch_size, 1))","mmr_among_words","=","[","]","# should be (batch=2,input_step=200)","for","batch_id","in","range","(","sent_ranking_att",".","size","(",")","[","0","]",")",":","# iterate each batch, create zero weight on the input steps","# mmr= torch.zeros([input_step], 
dtype=torch.float32).cuda()","tmp","=","[","]","for","id",",","position","in","enumerate","(","src_sents","[","batch_id","]",")",":","for","x","in","range","(","position",")",":","tmp",".","append","(","sent_ranking_att","[","batch_id","]","[","id","]",")","mmr","=","torch",".","stack","(","tmp",")","# make to 1-d","if","len","(","mmr",")","<","input_step",":","# pad with 0","tmp","=","torch",".","zeros","(","input_step","-","len","(","mmr",")",")",".","float","(",")",".","cuda","(",")","# for x in range(input_step-len(mmr)):","mmr","=","torch",".","cat","(","(","mmr",",","tmp",")",",","0",")","else",":","mmr","=","mmr","[",":","input_step","]","mmr_among_words",".","append","(","mmr",".","unsqueeze","(","0",")",")","mmr_among_words","=","torch",".","cat","(","mmr_among_words",",","0",")","# shape: (batch=2, input_step=200)","return","mmr_among_words"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L315-L379"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._run_forward_pass","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,sent_encoder=None,src_sents=None,dec=None)","argument_list":"","return_statement":"return hidden, decoder_outputs, attns","docstring":"See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n TODO: added a new param: sent_encoder, from model.py, this is the sentence matrix; add attns[\"mmr\"] = [].","docstring_summary":"See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n TODO: added a new param: sent_encoder, from model.py, this is the sentence matrix; add attns[\"mmr\"] = [].","docstring_tokens":["See","StdRNNDecoder",".","_run_forward_pass","()","for","description","of","arguments","and","return","values",".","TODO",":","added","a","new","param",":","sent_encoder","from","model",".","py","this","is","the","sentence","matrix",";","add","attns","[","mmr","]","=","[]","."],"function":"def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None,sent_encoder=None,src_sents=None,dec=None):\n \"\"\"\n See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n TODO: added a new param: sent_encoder, from model.py, this is the sentence matrix; add attns[\"mmr\"] = [].\n\n \"\"\"\n\n # Additional args check.\n input_feed = state.input_feed.squeeze(0)\n #print(\"input feed size: {}\\n\".format(input_feed.size()))\n input_feed_batch, _ = input_feed.size()\n _, tgt_batch, _ = tgt.size()\n aeq(tgt_batch, input_feed_batch)\n # END Additional args check.\n\n # Initialize local and return variables.\n decoder_outputs = []\n attns = {\"std\": []}\n\n if self._copy:\n attns[\"copy\"] = []\n if self._coverage:\n attns[\"coverage\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n hidden = state.hidden\n coverage = state.coverage.squeeze(0) \\\n if state.coverage is not None else None\n\n # Input feed concatenates hidden state with\n # input at every time step.\n\n #print(\"emb size: {}\\n\".format(emb.size()));exit()\n for _, emb_t in enumerate(emb.split(1)):\n # for each output time step in the loop\n\n emb_t = emb_t.squeeze(0)\n decoder_input = torch.cat([emb_t, input_feed], 1)\n\n # TODO: the following is where we get attention!\n rnn_output, hidden = self.rnn(decoder_input, 
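
The _run_mmr record above scores each source sentence as a learned bilinear similarity to the current decoder state (sim2) minus its average similarity to all sentences (sim1, a redundancy term), softmaxes over sentences, and later broadcasts each sentence's weight across its words. A simplified sketch of the scoring step only (mmr_W and every shape are illustrative; the word-level expansion and padding from the record are omitted):

import torch
import torch.nn as nn

n_sents, batch, dim = 9, 2, 512
sent_encoder = torch.randn(n_sents, batch, dim)  # one vector per source sentence
sent_decoder = torch.randn(batch, 1, dim)        # current decoder summary
mmr_W = nn.Linear(dim, dim, bias=False)          # learned map (assumed init)

pdist = nn.PairwiseDistance(p=2)
scores = []
for sent in sent_encoder:                        # sent: (batch, dim)
    # redundancy: 1 - mean distance of this sentence to all sentences
    sim1 = 1 - torch.mean(
        pdist(sent_encoder.permute(1, 0, 2), sent.unsqueeze(1)), 1).unsqueeze(1)
    # relevance: bilinear similarity to the decoder state
    sim2 = torch.bmm(mmr_W(sent_decoder), sent.unsqueeze(2)).squeeze(2)
    scores.append(sim2 - sim1)                   # (batch, 1)

sent_att = torch.softmax(torch.cat(scores, 1), dim=1)  # (batch, n_sents)
print(sent_att.shape)
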
hidden)\n decoder_output, p_attn = self.attn(\n rnn_output,\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths)\n # p_attn: size (batch=2,input_step=200)\n\n if self.context_gate is not None:\n # TODO: context gate should be employed (not me)\n # instead of second RNN transform.\n decoder_output = self.context_gate(\n decoder_input, rnn_output, decoder_output\n )\n decoder_output = self.dropout(decoder_output)\n input_feed = decoder_output\n\n decoder_outputs += [decoder_output]\n attns[\"std\"] += [p_attn]\n\n\n # Update the coverage attention.\n if self._coverage:\n coverage = coverage + p_attn \\\n if coverage is not None else p_attn\n attns[\"coverage\"] += [coverage]\n\n # Run the forward pass of the copy attention layer.\n #\n\n if self._copy and not self._reuse_copy_attn:\n\n _, copy_attn = self.copy_attn(decoder_output, memory_bank.transpose(0, 1))\n attns[\"copy\"] += [copy_attn]\n elif self._copy:\n attns[\"copy\"] = attns[\"std\"] # attns[\"copy\"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]\n\n\n if not dec: #if this is not dec?\n attns[\"mmr\"] = []\n # 2333: TODO : the sentence representation for decoder\n sent_decoder = decoder_outputs[-1].unsqueeze(0) # shape: (1, batch_size=2,dim=512)\n\n # Return result.\n # 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)\n\n # 2333: TODO: compute mmr attention here:\n\n mmr_among_words = self._run_mmr(sent_encoder, sent_decoder, src_sents,attns[\"std\"][0].size()[-1])\n\n # 2333: TODO: bring mmr to attention...\n\n for output_step in attns[\"std\"]:\n attention_weight = output_step\n # pairwise multiplication\n attention_weight = torch.mul(mmr_among_words,attention_weight)\n attns[\"mmr\"].append(attention_weight.cuda())\n # pdb.set_trace()\n\n attns[\"std\"] = attns[\"mmr\"]\n\n # decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)\n return hidden, decoder_outputs, attns","function_tokens":["def","_run_forward_pass","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","sent_encoder","=","None",",","src_sents","=","None",",","dec","=","None",")",":","# Additional args check.","input_feed","=","state",".","input_feed",".","squeeze","(","0",")","#print(\"input feed size: {}\\n\".format(input_feed.size()))","input_feed_batch",",","_","=","input_feed",".","size","(",")","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","aeq","(","tgt_batch",",","input_feed_batch",")","# END Additional args check.","# Initialize local and return variables.","decoder_outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","if","self",".","_coverage",":","attns","[","\"coverage\"","]","=","[","]","emb","=","self",".","embeddings","(","tgt",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","hidden","=","state",".","hidden","coverage","=","state",".","coverage",".","squeeze","(","0",")","if","state",".","coverage","is","not","None","else","None","# Input feed concatenates hidden state with","# input at every time step.","#print(\"emb size: {}\\n\".format(emb.size()));exit()","for","_",",","emb_t","in","enumerate","(","emb",".","split","(","1",")",")",":","# for each output time step in the loop","emb_t","=","emb_t",".","squeeze","(","0",")","decoder_input","=","torch",".","cat","(","[","emb_t",",","input_feed","]",",","1",")","# TODO: the following is where 
we get attention!","rnn_output",",","hidden","=","self",".","rnn","(","decoder_input",",","hidden",")","decoder_output",",","p_attn","=","self",".","attn","(","rnn_output",",","memory_bank",".","transpose","(","0",",","1",")",",","memory_lengths","=","memory_lengths",")","# p_attn: size (batch=2,input_step=200)","if","self",".","context_gate","is","not","None",":","# TODO: context gate should be employed (not me)","# instead of second RNN transform.","decoder_output","=","self",".","context_gate","(","decoder_input",",","rnn_output",",","decoder_output",")","decoder_output","=","self",".","dropout","(","decoder_output",")","input_feed","=","decoder_output","decoder_outputs","+=","[","decoder_output","]","attns","[","\"std\"","]","+=","[","p_attn","]","# Update the coverage attention.","if","self",".","_coverage",":","coverage","=","coverage","+","p_attn","if","coverage","is","not","None","else","p_attn","attns","[","\"coverage\"","]","+=","[","coverage","]","# Run the forward pass of the copy attention layer.","#","if","self",".","_copy","and","not","self",".","_reuse_copy_attn",":","_",",","copy_attn","=","self",".","copy_attn","(","decoder_output",",","memory_bank",".","transpose","(","0",",","1",")",")","attns","[","\"copy\"","]","+=","[","copy_attn","]","elif","self",".","_copy",":","attns","[","\"copy\"","]","=","attns","[","\"std\"","]","# attns[\"copy\"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]","if","not","dec",":","#if this is not dec?","attns","[","\"mmr\"","]","=","[","]","# 2333: TODO : the sentence representation for decoder","sent_decoder","=","decoder_outputs","[","-","1","]",".","unsqueeze","(","0",")","# shape: (1, batch_size=2,dim=512)","# Return result.","# 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)","# 2333: TODO: compute mmr attention here:","mmr_among_words","=","self",".","_run_mmr","(","sent_encoder",",","sent_decoder",",","src_sents",",","attns","[","\"std\"","]","[","0","]",".","size","(",")","[","-","1","]",")","# 2333: TODO: bring mmr to attention...","for","output_step","in","attns","[","\"std\"","]",":","attention_weight","=","output_step","# pairwise multiplication","attention_weight","=","torch",".","mul","(","mmr_among_words",",","attention_weight",")","attns","[","\"mmr\"","]",".","append","(","attention_weight",".","cuda","(",")",")","# pdb.set_trace()","attns","[","\"std\"","]","=","attns","[","\"mmr\"","]","# decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)","return","hidden",",","decoder_outputs",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L381-L485"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"InputFeedRNNDecoder._input_size","parameters":"(self)","argument_list":"","return_statement":"return self.embeddings.embedding_size + self.hidden_size","docstring":"Using input feed by concatenating input with attention vectors.","docstring_summary":"Using input feed by concatenating input with attention vectors.","docstring_tokens":["Using","input","feed","by","concatenating","input","with","attention","vectors","."],"function":"def _input_size(self):\n \"\"\"\n Using input feed by concatenating input with attention vectors.\n \"\"\"\n return 
self.embeddings.embedding_size + self.hidden_size","function_tokens":["def","_input_size","(","self",")",":","return","self",".","embeddings",".","embedding_size","+","self",".","hidden_size"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L499-L503"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"DecoderState.detach","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Need to document this","docstring_summary":"Need to document this","docstring_tokens":["Need","to","document","this"],"function":"def detach(self):\n \"\"\" Need to document this \"\"\"\n self.hidden = tuple([_.detach() for _ in self.hidden])\n self.input_feed = self.input_feed.detach()","function_tokens":["def","detach","(","self",")",":","self",".","hidden","=","tuple","(","[","_",".","detach","(",")","for","_","in","self",".","hidden","]",")","self",".","input_feed","=","self",".","input_feed",".","detach","(",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L514-L517"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"DecoderState.beam_update","parameters":"(self, idx, positions, beam_size)","argument_list":"","return_statement":"","docstring":"Need to document this","docstring_summary":"Need to document this","docstring_tokens":["Need","to","document","this"],"function":"def beam_update(self, idx, positions, beam_size):\n \"\"\" Need to document this \"\"\"\n for e in self._all:\n sizes = e.size()\n br = sizes[1]\n if len(sizes) == 3:\n sent_states = e.view(sizes[0], beam_size, br \/\/ beam_size,\n sizes[2])[:, :, idx]\n else:\n sent_states = e.view(sizes[0], beam_size,\n br \/\/ beam_size,\n sizes[2],\n sizes[3])[:, :, idx]\n\n sent_states.data.copy_(\n sent_states.data.index_select(1, positions))","function_tokens":["def","beam_update","(","self",",","idx",",","positions",",","beam_size",")",":","for","e","in","self",".","_all",":","sizes","=","e",".","size","(",")","br","=","sizes","[","1","]","if","len","(","sizes",")","==","3",":","sent_states","=","e",".","view","(","sizes","[","0","]",",","beam_size",",","br","\/\/","beam_size",",","sizes","[","2","]",")","[",":",",",":",",","idx","]","else",":","sent_states","=","e",".","view","(","sizes","[","0","]",",","beam_size",",","br","\/\/","beam_size",",","sizes","[","2","]",",","sizes","[","3","]",")","[",":",",",":",",","idx","]","sent_states",".","data",".","copy_","(","sent_states",".","data",".","index_select","(","1",",","positions",")",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L519-L534"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderState.__init__","parameters":"(self, hidden_size, rnnstate)","argument_list":"","return_statement":"","docstring":"Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).","docstring_summary":"Args:\n hidden_size (int): the size of hidden 
layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).","docstring_tokens":["Args",":","hidden_size","(","int",")",":","the","size","of","hidden","layer","of","the","decoder",".","rnnstate",":","final","hidden","state","from","the","encoder",".","transformed","to","shape",":","layers","x","batch","x","(","directions","*","dim",")","."],"function":"def __init__(self, hidden_size, rnnstate):\n \"\"\"\n Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).\n \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.coverage = None\n\n # Init the input feed.\n batch_size = self.hidden[0].size(1)\n h_size = (batch_size, hidden_size)\n self.input_feed = self.hidden[0].data.new(*h_size).zero_() \\\n .unsqueeze(0)","function_tokens":["def","__init__","(","self",",","hidden_size",",","rnnstate",")",":","if","not","isinstance","(","rnnstate",",","tuple",")",":","self",".","hidden","=","(","rnnstate",",",")","else",":","self",".","hidden","=","rnnstate","self",".","coverage","=","None","# Init the input feed.","batch_size","=","self",".","hidden","[","0","]",".","size","(","1",")","h_size","=","(","batch_size",",","hidden_size",")","self",".","input_feed","=","self",".","hidden","[","0","]",".","data",".","new","(","*","h_size",")",".","zero_","(",")",".","unsqueeze","(","0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L543-L560"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderState.update_state","parameters":"(self, rnnstate, input_feed, coverage)","argument_list":"","return_statement":"","docstring":"Update decoder state","docstring_summary":"Update decoder state","docstring_tokens":["Update","decoder","state"],"function":"def update_state(self, rnnstate, input_feed, coverage):\n \"\"\" Update decoder state \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.input_feed = input_feed\n self.coverage = coverage","function_tokens":["def","update_state","(","self",",","rnnstate",",","input_feed",",","coverage",")",":","if","not","isinstance","(","rnnstate",",","tuple",")",":","self",".","hidden","=","(","rnnstate",",",")","else",":","self",".","hidden","=","rnnstate","self",".","input_feed","=","input_feed","self",".","coverage","=","coverage"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L566-L573"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/decoder.py","language":"python","identifier":"RNNDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n vars = [e.data.repeat(1, beam_size, 1)\n for e in self._all]\n self.hidden = tuple(vars[:-1])\n self.input_feed = vars[-1]","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","vars","=","[","e",".","data",".","repeat","(","1",",","beam_size",",","1",")","for","e","in","self",".","_all","]","self",".","hidden","=","tuple","(","vars","[",":","-","1","]",")","self",".","input_feed","=","vars","[","-","1","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/decoder.py#L575-L580"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/ensemble.py","language":"python","identifier":"load_test_model","parameters":"(opt, dummy_opt)","argument_list":"","return_statement":"return shared_fields, ensemble_model, shared_model_opt","docstring":"Read in multiple models for ensemble","docstring_summary":"Read in multiple models for ensemble","docstring_tokens":["Read","in","multiple","models","for","ensemble"],"function":"def load_test_model(opt, dummy_opt):\n \"\"\" Read in multiple models for ensemble \"\"\"\n shared_fields = None\n shared_model_opt = None\n models = []\n for model_path in opt.models:\n fields, model, model_opt = \\\n onmt.model_builder.load_test_model(opt,\n dummy_opt,\n model_path=model_path)\n import pdb;pdb.set_trace()\n if shared_fields is None:\n shared_fields = fields\n else:\n for key, field in fields.items():\n if field is not None and 'vocab' in field.__dict__:\n assert field.vocab.stoi == shared_fields[key].vocab.stoi, \\\n 'Ensemble models must use the same preprocessed data'\n models.append(model)\n if shared_model_opt is None:\n shared_model_opt = model_opt\n ensemble_model = EnsembleModel(models)\n return shared_fields, ensemble_model, shared_model_opt","function_tokens":["def","load_test_model","(","opt",",","dummy_opt",")",":","shared_fields","=","None","shared_model_opt","=","None","models","=","[","]","for","model_path","in","opt",".","models",":","fields",",","model",",","model_opt","=","onmt",".","model_builder",".","load_test_model","(","opt",",","dummy_opt",",","model_path","=","model_path",")","import","pdb","pdb",".","set_trace","(",")","if","shared_fields","is","None",":","shared_fields","=","fields","else",":","for","key",",","field","in","fields",".","items","(",")",":","if","field","is","not","None","and","'vocab'","in","field",".","__dict__",":","assert","field",".","vocab",".","stoi","==","shared_fields","[","key","]",".","vocab",".","stoi",",","'Ensemble models must use the same preprocessed data'","models",".","append","(","model",")","if","shared_model_opt","is","None",":","shared_model_opt","=","model_opt","ensemble_model","=","EnsembleModel","(","models",")","return","shared_fields",",","ensemble_model",",","shared_model_opt"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/ensemble.py#L135-L157"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch 
dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. \"\"\"\n for model_state in self.model_decoder_states:\n model_state.repeat_beam_size_times(beam_size)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","for","model_state","in","self",".","model_decoder_states",":","model_state",".","repeat_beam_size_times","(","beam_size",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/ensemble.py#L27-L30"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoderOutput.squeeze","parameters":"(self, dim=None)","argument_list":"","return_statement":"return EnsembleDecoderOutput([\n x.squeeze(dim) for x in self.model_outputs])","docstring":"Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`","docstring_summary":"Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`","docstring_tokens":["Delegate","squeeze","to","avoid","modifying",":","obj",":","Translator",".","translate_batch","()"],"function":"def squeeze(self, dim=None):\n \"\"\"\n Delegate squeeze to avoid modifying\n :obj:`Translator.translate_batch()`\n \"\"\"\n return EnsembleDecoderOutput([\n x.squeeze(dim) for x in self.model_outputs])","function_tokens":["def","squeeze","(","self",",","dim","=","None",")",":","return","EnsembleDecoderOutput","(","[","x",".","squeeze","(","dim",")","for","x","in","self",".","model_outputs","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/ensemble.py#L41-L47"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None,\n step=None)","argument_list":"","return_statement":"return (EnsembleDecoderOutput(outputs),\n EnsembleDecoderState(states),\n mean_attns)","docstring":"See :obj:`RNNDecoderBase.forward()`","docstring_summary":"See :obj:`RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None):\n \"\"\" See :obj:`RNNDecoderBase.forward()` \"\"\"\n # Memory_lengths is a single tensor shared between all models.\n # This assumption will not hold if Translator is modified\n # to calculate memory_lengths as something other than the length\n # of the input.\n outputs, states, attns = zip(*[\n model_decoder.forward(\n tgt, memory_bank[i], state[i], memory_lengths, step=step)\n for (i, model_decoder)\n in enumerate(self.model_decoders)])\n mean_attns = self.combine_attns(attns)\n return (EnsembleDecoderOutput(outputs),\n EnsembleDecoderState(states),\n mean_attns)","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# Memory_lengths is a single tensor shared between all models.","# This assumption will not hold if Translator is modified","# to calculate memory_lengths as something other than the length","# of the 
input.","outputs",",","states",",","attns","=","zip","(","*","[","model_decoder",".","forward","(","tgt",",","memory_bank","[","i","]",",","state","[","i","]",",","memory_lengths",",","step","=","step",")","for","(","i",",","model_decoder",")","in","enumerate","(","self",".","model_decoders",")","]",")","mean_attns","=","self",".","combine_attns","(","attns",")","return","(","EnsembleDecoderOutput","(","outputs",")",",","EnsembleDecoderState","(","states",")",",","mean_attns",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/ensemble.py#L72-L87"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleDecoder.init_decoder_state","parameters":"(self, src, memory_bank, enc_hidden)","argument_list":"","return_statement":"return EnsembleDecoderState(\n [model_decoder.init_decoder_state(src,\n memory_bank[i],\n enc_hidden[i])\n for (i, model_decoder) in enumerate(self.model_decoders)])","docstring":"See :obj:`RNNDecoderBase.init_decoder_state()`","docstring_summary":"See :obj:`RNNDecoderBase.init_decoder_state()`","docstring_tokens":["See",":","obj",":","RNNDecoderBase",".","init_decoder_state","()"],"function":"def init_decoder_state(self, src, memory_bank, enc_hidden):\n \"\"\" See :obj:`RNNDecoderBase.init_decoder_state()` \"\"\"\n return EnsembleDecoderState(\n [model_decoder.init_decoder_state(src,\n memory_bank[i],\n enc_hidden[i])\n for (i, model_decoder) in enumerate(self.model_decoders)])","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","enc_hidden",")",":","return","EnsembleDecoderState","(","[","model_decoder",".","init_decoder_state","(","src",",","memory_bank","[","i","]",",","enc_hidden","[","i","]",")","for","(","i",",","model_decoder",")","in","enumerate","(","self",".","model_decoders",")","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/ensemble.py#L95-L101"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/ensemble.py","language":"python","identifier":"EnsembleGenerator.forward","parameters":"(self, hidden)","argument_list":"","return_statement":"return torch.stack(distributions).mean(0)","docstring":"Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.","docstring_summary":"Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.","docstring_tokens":["Compute","a","distribution","over","the","target","dictionary","by","averaging","distributions","from","models","in","the","ensemble",".","All","models","in","the","ensemble","must","share","a","target","vocabulary","."],"function":"def forward(self, hidden):\n \"\"\"\n Compute a distribution over the target dictionary\n by averaging distributions from models in the ensemble.\n All models in the ensemble must share a target vocabulary.\n \"\"\"\n distributions = [model_generator.forward(hidden[i])\n for (i, model_generator)\n in enumerate(self.model_generators)]\n return 
torch.stack(distributions).mean(0)","function_tokens":["def","forward","(","self",",","hidden",")",":","distributions","=","[","model_generator",".","forward","(","hidden","[","i","]",")","for","(","i",",","model_generator",")","in","enumerate","(","self",".","model_generators",")","]","return","torch",".","stack","(","distributions",")",".","mean","(","0",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/ensemble.py#L113-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderLayer.forward","parameters":"(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n previous_input=None, layer_cache=None, step=None)","argument_list":"","return_statement":"return output, attn, all_input","docstring":"Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`, `FloatTensor`):\n\n * output `[batch_size x 1 x model_dim]`\n * attn `[batch_size x 1 x src_len]`\n * all_input `[batch_size x current_step x model_dim]`","docstring_summary":"Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`","docstring_tokens":["Args",":","inputs","(","FloatTensor",")",":","[","batch_size","x","1","x","model_dim","]","memory_bank","(","FloatTensor",")",":","[","batch_size","x","src_len","x","model_dim","]","src_pad_mask","(","LongTensor",")",":","[","batch_size","x","1","x","src_len","]","tgt_pad_mask","(","LongTensor",")",":","[","batch_size","x","1","x","1","]"],"function":"def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n previous_input=None, layer_cache=None, step=None):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`, `FloatTensor`):\n\n * output `[batch_size x 1 x model_dim]`\n * attn `[batch_size x 1 x src_len]`\n * all_input `[batch_size x current_step x model_dim]`\n\n \"\"\"\n dec_mask = torch.gt(tgt_pad_mask +\n self.mask[:, :tgt_pad_mask.size(1),\n :tgt_pad_mask.size(1)], 0)\n input_norm = self.layer_norm_1(inputs)\n all_input = input_norm\n if previous_input is not None:\n all_input = torch.cat((previous_input, input_norm), dim=1)\n dec_mask = None\n\n if self.self_attn_type == \"scaled-dot\":\n query, attn = self.self_attn(all_input, all_input, input_norm,\n mask=dec_mask,\n layer_cache=layer_cache,\n type=\"self\")\n elif self.self_attn_type == \"average\":\n query, attn = self.self_attn(input_norm, mask=dec_mask,\n layer_cache=layer_cache, step=step)\n\n query = self.drop(query) + inputs\n\n query_norm = self.layer_norm_2(query)\n mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,\n mask=src_pad_mask,\n layer_cache=layer_cache,\n type=\"context\")\n output = self.feed_forward(self.drop(mid) + query)\n\n return output, attn, 
all_input","function_tokens":["def","forward","(","self",",","inputs",",","memory_bank",",","src_pad_mask",",","tgt_pad_mask",",","previous_input","=","None",",","layer_cache","=","None",",","step","=","None",")",":","dec_mask","=","torch",".","gt","(","tgt_pad_mask","+","self",".","mask","[",":",",",":","tgt_pad_mask",".","size","(","1",")",",",":","tgt_pad_mask",".","size","(","1",")","]",",","0",")","input_norm","=","self",".","layer_norm_1","(","inputs",")","all_input","=","input_norm","if","previous_input","is","not","None",":","all_input","=","torch",".","cat","(","(","previous_input",",","input_norm",")",",","dim","=","1",")","dec_mask","=","None","if","self",".","self_attn_type","==","\"scaled-dot\"",":","query",",","attn","=","self",".","self_attn","(","all_input",",","all_input",",","input_norm",",","mask","=","dec_mask",",","layer_cache","=","layer_cache",",","type","=","\"self\"",")","elif","self",".","self_attn_type","==","\"average\"",":","query",",","attn","=","self",".","self_attn","(","input_norm",",","mask","=","dec_mask",",","layer_cache","=","layer_cache",",","step","=","step",")","query","=","self",".","drop","(","query",")","+","inputs","query_norm","=","self",".","layer_norm_2","(","query",")","mid",",","attn","=","self",".","context_attn","(","memory_bank",",","memory_bank",",","query_norm",",","mask","=","src_pad_mask",",","layer_cache","=","layer_cache",",","type","=","\"context\"",")","output","=","self",".","feed_forward","(","self",".","drop","(","mid",")","+","query",")","return","output",",","attn",",","all_input"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L53-L97"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderLayer._get_attn_subsequent_mask","parameters":"(self, size)","argument_list":"","return_statement":"return subsequent_mask","docstring":"Get an attention mask to avoid using the subsequent info.\n\n Args:\n size: int\n\n Returns:\n (`LongTensor`):\n\n * subsequent_mask `[1 x size x size]`","docstring_summary":"Get an attention mask to avoid using the subsequent info.","docstring_tokens":["Get","an","attention","mask","to","avoid","using","the","subsequent","info","."],"function":"def _get_attn_subsequent_mask(self, size):\n \"\"\"\n Get an attention mask to avoid using the subsequent info.\n\n Args:\n size: int\n\n Returns:\n (`LongTensor`):\n\n * subsequent_mask `[1 x size x size]`\n \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n subsequent_mask = torch.from_numpy(subsequent_mask)\n return subsequent_mask","function_tokens":["def","_get_attn_subsequent_mask","(","self",",","size",")",":","attn_shape","=","(","1",",","size",",","size",")","subsequent_mask","=","np",".","triu","(","np",".","ones","(","attn_shape",")",",","k","=","1",")",".","astype","(","'uint8'",")","subsequent_mask","=","torch",".","from_numpy","(","subsequent_mask",")","return","subsequent_mask"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L99-L114"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoder.forward","parameters":"(self, tgt, 
memory_bank, state, memory_lengths=None,\n step=None, cache=None)","argument_list":"","return_statement":"return outputs, state, attns","docstring":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None,\n step=None, cache=None):\n \"\"\"\n See :obj:`onmt.modules.RNNDecoderBase.forward()`\n \"\"\"\n src = state.src\n src_words = src[:, :, 0].transpose(0, 1)\n tgt_words = tgt[:, :, 0].transpose(0, 1)\n src_batch, src_len = src_words.size()\n tgt_batch, tgt_len = tgt_words.size()\n\n # Initialize return variables.\n outputs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n\n # Run the forward pass of the TransformerDecoder.\n emb = self.embeddings(tgt, step=step)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n output = emb.transpose(0, 1).contiguous()\n src_memory_bank = memory_bank.transpose(0, 1).contiguous()\n\n padding_idx = self.embeddings.word_padding_idx\n src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(src_batch, tgt_len, src_len)\n tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(tgt_batch, tgt_len, tgt_len)\n\n if state.cache is None:\n saved_inputs = []\n\n for i in range(self.num_layers):\n prev_layer_input = None\n if state.cache is None:\n if state.previous_input is not None:\n prev_layer_input = state.previous_layer_inputs[i]\n output, attn, all_input \\\n = self.transformer_layers[i](\n output, src_memory_bank,\n src_pad_mask, tgt_pad_mask,\n previous_input=prev_layer_input,\n layer_cache=state.cache[\"layer_{}\".format(i)]\n if state.cache is not None else None,\n step=step)\n if state.cache is None:\n saved_inputs.append(all_input)\n\n if state.cache is None:\n saved_inputs = torch.stack(saved_inputs)\n\n output = self.layer_norm(output)\n\n # Process the result and update the attentions.\n outputs = output.transpose(0, 1).contiguous()\n attn = attn.transpose(0, 1).contiguous()\n\n attns[\"std\"] = attn\n if self._copy:\n attns[\"copy\"] = attn\n\n if state.cache is None:\n state = state.update_state(tgt, saved_inputs)\n\n return outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",",","cache","=","None",")",":","src","=","state",".","src","src_words","=","src","[",":",",",":",",","0","]",".","transpose","(","0",",","1",")","tgt_words","=","tgt","[",":",",",":",",","0","]",".","transpose","(","0",",","1",")","src_batch",",","src_len","=","src_words",".","size","(",")","tgt_batch",",","tgt_len","=","tgt_words",".","size","(",")","# Initialize return variables.","outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","# Run the forward pass of the TransformerDecoder.","emb","=","self",".","embeddings","(","tgt",",","step","=","step",")","assert","emb",".","dim","(",")","==","3","# len x batch x 
embedding_dim","output","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","src_memory_bank","=","memory_bank",".","transpose","(","0",",","1",")",".","contiguous","(",")","padding_idx","=","self",".","embeddings",".","word_padding_idx","src_pad_mask","=","src_words",".","data",".","eq","(","padding_idx",")",".","unsqueeze","(","1",")",".","expand","(","src_batch",",","tgt_len",",","src_len",")","tgt_pad_mask","=","tgt_words",".","data",".","eq","(","padding_idx",")",".","unsqueeze","(","1",")",".","expand","(","tgt_batch",",","tgt_len",",","tgt_len",")","if","state",".","cache","is","None",":","saved_inputs","=","[","]","for","i","in","range","(","self",".","num_layers",")",":","prev_layer_input","=","None","if","state",".","cache","is","None",":","if","state",".","previous_input","is","not","None",":","prev_layer_input","=","state",".","previous_layer_inputs","[","i","]","output",",","attn",",","all_input","=","self",".","transformer_layers","[","i","]","(","output",",","src_memory_bank",",","src_pad_mask",",","tgt_pad_mask",",","previous_input","=","prev_layer_input",",","layer_cache","=","state",".","cache","[","\"layer_{}\"",".","format","(","i",")","]","if","state",".","cache","is","not","None","else","None",",","step","=","step",")","if","state",".","cache","is","None",":","saved_inputs",".","append","(","all_input",")","if","state",".","cache","is","None",":","saved_inputs","=","torch",".","stack","(","saved_inputs",")","output","=","self",".","layer_norm","(","output",")","# Process the result and update the attentions.","outputs","=","output",".","transpose","(","0",",","1",")",".","contiguous","(",")","attn","=","attn",".","transpose","(","0",",","1",")",".","contiguous","(",")","attns","[","\"std\"","]","=","attn","if","self",".","_copy",":","attns","[","\"copy\"","]","=","attn","if","state",".","cache","is","None",":","state","=","state",".","update_state","(","tgt",",","saved_inputs",")","return","outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L172-L237"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoder.init_decoder_state","parameters":"(self, src, memory_bank, enc_hidden,\n with_cache=False)","argument_list":"","return_statement":"return state","docstring":"Init decoder state","docstring_summary":"Init decoder state","docstring_tokens":["Init","decoder","state"],"function":"def init_decoder_state(self, src, memory_bank, enc_hidden,\n with_cache=False):\n \"\"\" Init decoder state \"\"\"\n state = TransformerDecoderState(src)\n if with_cache:\n state._init_cache(memory_bank, self.num_layers,\n self.self_attn_type)\n return state","function_tokens":["def","init_decoder_state","(","self",",","src",",","memory_bank",",","enc_hidden",",","with_cache","=","False",")",":","state","=","TransformerDecoderState","(","src",")","if","with_cache",":","state",".","_init_cache","(","memory_bank",",","self",".","num_layers",",","self",".","self_attn_type",")","return","state"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L239-L246"} 
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderState.__init__","parameters":"(self, src)","argument_list":"","return_statement":"","docstring":"Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).","docstring_summary":"Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).","docstring_tokens":["Args",":","src","(","FloatTensor",")",":","a","sequence","of","source","words","tensors","with","optional","feature","tensors","of","size","(","len","x","batch",")","."],"function":"def __init__(self, src):\n \"\"\"\n Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).\n \"\"\"\n self.src = src\n self.previous_input = None\n self.previous_layer_inputs = None\n self.cache = None","function_tokens":["def","__init__","(","self",",","src",")",":","self",".","src","=","src","self",".","previous_input","=","None","self",".","previous_layer_inputs","=","None","self",".","cache","=","None"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L252-L261"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderState._all","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Contains attributes that need to be updated in self.beam_update().","docstring_summary":"Contains attributes that need to be updated in self.beam_update().","docstring_tokens":["Contains","attributes","that","need","to","be","updated","in","self",".","beam_update","()","."],"function":"def _all(self):\n \"\"\"\n Contains attributes that need to be updated in self.beam_update().\n \"\"\"\n if (self.previous_input is not None\n and self.previous_layer_inputs is not None):\n return (self.previous_input,\n self.previous_layer_inputs,\n self.src)\n else:\n return (self.src,)","function_tokens":["def","_all","(","self",")",":","if","(","self",".","previous_input","is","not","None","and","self",".","previous_layer_inputs","is","not","None",")",":","return","(","self",".","previous_input",",","self",".","previous_layer_inputs",",","self",".","src",")","else",":","return","(","self",".","src",",",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L264-L274"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/transformer.py","language":"python","identifier":"TransformerDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n self.src = self.src.data.repeat(1, beam_size, 1)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","self",".","src","=","self",".","src",".","data",".","repeat","(","1",",","beam_size",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/transformer.py#L309-L311"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoder.forward","parameters":"(self, tgt, memory_bank, state, memory_lengths=None, step=None)","argument_list":"","return_statement":"return outputs, state, attns","docstring":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_summary":"See :obj:`onmt.modules.RNNDecoderBase.forward()`","docstring_tokens":["See",":","obj",":","onmt",".","modules",".","RNNDecoderBase",".","forward","()"],"function":"def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):\n \"\"\" See :obj:`onmt.modules.RNNDecoderBase.forward()`\"\"\"\n # NOTE: memory_lengths is only here for compatibility reasons\n # with onmt.modules.RNNDecoderBase.forward()\n # CHECKS\n assert isinstance(state, CNNDecoderState)\n _, tgt_batch, _ = tgt.size()\n _, contxt_batch, _ = memory_bank.size()\n aeq(tgt_batch, contxt_batch)\n # END CHECKS\n\n if state.previous_input is not None:\n tgt = torch.cat([state.previous_input, tgt], 0)\n\n # Initialize return variables.\n outputs = []\n attns = {\"std\": []}\n assert not self._copy, \"Copy mechanism not yet tested in conv2conv\"\n if self._copy:\n attns[\"copy\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n tgt_emb = emb.transpose(0, 1).contiguous()\n # The output of CNNEncoder.\n src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()\n # The combination of output of CNNEncoder and source embeddings.\n src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()\n\n # Run the forward pass of the CNNDecoder.\n emb_reshape = tgt_emb.contiguous().view(\n tgt_emb.size(0) * tgt_emb.size(1), -1)\n linear_out = self.linear(emb_reshape)\n x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)\n x = shape_transform(x)\n\n pad = torch.zeros(x.size(0), x.size(1),\n self.cnn_kernel_width - 1, 1)\n\n pad = pad.type_as(x)\n base_target_emb = x\n\n for conv, attention in zip(self.conv_layers, self.attn_layers):\n new_target_input = torch.cat([pad, x], 2)\n out = conv(new_target_input)\n c, attn = attention(base_target_emb, out,\n src_memory_bank_t, src_memory_bank_c)\n x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT\n output = x.squeeze(3).transpose(1, 2)\n\n # Process the result and update the attentions.\n outputs = output.transpose(0, 1).contiguous()\n if state.previous_input is not None:\n outputs = outputs[state.previous_input.size(0):]\n attn = attn[:, state.previous_input.size(0):].squeeze()\n attn = torch.stack([attn])\n attns[\"std\"] = attn\n if self._copy:\n attns[\"copy\"] = attn\n\n # Update the state.\n state.update_state(tgt)\n\n return outputs, state, attns","function_tokens":["def","forward","(","self",",","tgt",",","memory_bank",",","state",",","memory_lengths","=","None",",","step","=","None",")",":","# NOTE: memory_lengths is only here for compatibility reasons","# with onmt.modules.RNNDecoderBase.forward()","# 
CHECKS","assert","isinstance","(","state",",","CNNDecoderState",")","_",",","tgt_batch",",","_","=","tgt",".","size","(",")","_",",","contxt_batch",",","_","=","memory_bank",".","size","(",")","aeq","(","tgt_batch",",","contxt_batch",")","# END CHECKS","if","state",".","previous_input","is","not","None",":","tgt","=","torch",".","cat","(","[","state",".","previous_input",",","tgt","]",",","0",")","# Initialize return variables.","outputs","=","[","]","attns","=","{","\"std\"",":","[","]","}","assert","not","self",".","_copy",",","\"Copy mechanism not yet tested in conv2conv\"","if","self",".","_copy",":","attns","[","\"copy\"","]","=","[","]","emb","=","self",".","embeddings","(","tgt",")","assert","emb",".","dim","(",")","==","3","# len x batch x embedding_dim","tgt_emb","=","emb",".","transpose","(","0",",","1",")",".","contiguous","(",")","# The output of CNNEncoder.","src_memory_bank_t","=","memory_bank",".","transpose","(","0",",","1",")",".","contiguous","(",")","# The combination of output of CNNEncoder and source embeddings.","src_memory_bank_c","=","state",".","init_src",".","transpose","(","0",",","1",")",".","contiguous","(",")","# Run the forward pass of the CNNDecoder.","emb_reshape","=","tgt_emb",".","contiguous","(",")",".","view","(","tgt_emb",".","size","(","0",")","*","tgt_emb",".","size","(","1",")",",","-","1",")","linear_out","=","self",".","linear","(","emb_reshape",")","x","=","linear_out",".","view","(","tgt_emb",".","size","(","0",")",",","tgt_emb",".","size","(","1",")",",","-","1",")","x","=","shape_transform","(","x",")","pad","=","torch",".","zeros","(","x",".","size","(","0",")",",","x",".","size","(","1",")",",","self",".","cnn_kernel_width","-","1",",","1",")","pad","=","pad",".","type_as","(","x",")","base_target_emb","=","x","for","conv",",","attention","in","zip","(","self",".","conv_layers",",","self",".","attn_layers",")",":","new_target_input","=","torch",".","cat","(","[","pad",",","x","]",",","2",")","out","=","conv","(","new_target_input",")","c",",","attn","=","attention","(","base_target_emb",",","out",",","src_memory_bank_t",",","src_memory_bank_c",")","x","=","(","x","+","(","c","+","out",")","*","SCALE_WEIGHT",")","*","SCALE_WEIGHT","output","=","x",".","squeeze","(","3",")",".","transpose","(","1",",","2",")","# Process the result and update the attentions.","outputs","=","output",".","transpose","(","0",",","1",")",".","contiguous","(",")","if","state",".","previous_input","is","not","None",":","outputs","=","outputs","[","state",".","previous_input",".","size","(","0",")",":","]","attn","=","attn","[",":",",","state",".","previous_input",".","size","(","0",")",":","]",".","squeeze","(",")","attn","=","torch",".","stack","(","[","attn","]",")","attns","[","\"std\"","]","=","attn","if","self",".","_copy",":","attns","[","\"copy\"","]","=","attn","# Update the state.","state",".","update_state","(","tgt",")","return","outputs",",","state",",","attns"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py#L58-L122"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoder.init_decoder_state","parameters":"(self, _, memory_bank, enc_hidden, with_cache=False)","argument_list":"","return_statement":"return CNNDecoderState(memory_bank, enc_hidden)","docstring":"Init decoder state.","docstring_summary":"Init decoder 
state.","docstring_tokens":["Init","decoder","state","."],"function":"def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):\n \"\"\"\n Init decoder state.\n \"\"\"\n return CNNDecoderState(memory_bank, enc_hidden)","function_tokens":["def","init_decoder_state","(","self",",","_",",","memory_bank",",","enc_hidden",",","with_cache","=","False",")",":","return","CNNDecoderState","(","memory_bank",",","enc_hidden",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py#L124-L128"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState._all","parameters":"(self)","argument_list":"","return_statement":"return (self.previous_input,)","docstring":"Contains attributes that need to be updated in self.beam_update().","docstring_summary":"Contains attributes that need to be updated in self.beam_update().","docstring_tokens":["Contains","attributes","that","need","to","be","updated","in","self",".","beam_update","()","."],"function":"def _all(self):\n \"\"\"\n Contains attributes that need to be updated in self.beam_update().\n \"\"\"\n return (self.previous_input,)","function_tokens":["def","_all","(","self",")",":","return","(","self",".","previous_input",",",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py#L141-L145"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState.update_state","parameters":"(self, new_input)","argument_list":"","return_statement":"","docstring":"Called for every decoder forward pass.","docstring_summary":"Called for every decoder forward pass.","docstring_tokens":["Called","for","every","decoder","forward","pass","."],"function":"def update_state(self, new_input):\n \"\"\" Called for every decoder forward pass. \"\"\"\n self.previous_input = new_input","function_tokens":["def","update_state","(","self",",","new_input",")",":","self",".","previous_input","=","new_input"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py#L150-L152"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py","language":"python","identifier":"CNNDecoderState.repeat_beam_size_times","parameters":"(self, beam_size)","argument_list":"","return_statement":"","docstring":"Repeat beam_size times along batch dimension.","docstring_summary":"Repeat beam_size times along batch dimension.","docstring_tokens":["Repeat","beam_size","times","along","batch","dimension","."],"function":"def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n self.init_src = self.init_src.data.repeat(1, beam_size, 1)","function_tokens":["def","repeat_beam_size_times","(","self",",","beam_size",")",":","self",".","init_src","=","self",".","init_src",".","data",".","repeat","(","1",",","beam_size",",","1",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/code\/Hi_MAP\/onmt\/decoders\/cnn_decoder.py#L154-L156"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments._tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return self._en(text, disable = [\"tagger\", \"parser\", \"ner\", \"textcat\"])","docstring":"Tokenizes input using the fastest possible SpaCy configuration.\n This is optional, can be disabled in constructor.","docstring_summary":"","docstring_tokens":[],"function":"def _tokenize(self, text):\n\n \"\"\"\n\n Tokenizes input using the fastest possible SpaCy configuration.\n This is optional, can be disabled in constructor.\n\n \"\"\"\n\n return self._en(text, disable = [\"tagger\", \"parser\", \"ner\", \"textcat\"])","function_tokens":["def","_tokenize","(","self",",","text",")",":","return","self",".","_en","(","text",",","disable","=","[","\"tagger\"",",","\"parser\"",",","\"ner\"",",","\"textcat\"","]",")"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L49-L58"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments._normalize","parameters":"(self, tokens, case = False)","argument_list":"","return_statement":"return [\n str(t).lower()\n if not case\n else str(t)\n for t in tokens\n ]","docstring":"Lowercases and turns tokens into distinct words.","docstring_summary":"","docstring_tokens":[],"function":"def _normalize(self, tokens, case = False):\n\n \"\"\"\n\n Lowercases and turns tokens into distinct words.\n\n \"\"\"\n\n return [\n str(t).lower()\n if not case\n else str(t)\n for t in tokens\n ]","function_tokens":["def","_normalize","(","self",",","tokens",",","case","=","False",")",":","return","[","str","(","t",")",".","lower","(",")","if","not","case","else","str","(","t",")","for","t","in","tokens","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L61-L74"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments.overlaps","parameters":"(self)","argument_list":"","return_statement":"return self._matches","docstring":"Return a list of Fragments.Match objects between summary and text.\n This is a list of named tuples of the form (summary, text, length):\n\n - summary (int): the start index of the match in the summary\n - text (int): the start index of the match in the reference\n - length (int): the length of the extractive fragment","docstring_summary":"","docstring_tokens":[],"function":"def overlaps(self):\n\n \"\"\"\n\n Return a list of Fragments.Match objects between summary and text.\n This is a list of named tuples of the form (summary, text, length):\n\n - summary (int): the start index of the match in the summary\n - text (int): the start index of the match in the reference\n - length (int): the length of the extractive fragment\n\n \"\"\"\n\n 
return self._matches","function_tokens":["def","overlaps","(","self",")",":","return","self",".","_matches"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L77-L90"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments.strings","parameters":"(self, min_length = 0, raw = None, summary_base = True)","argument_list":"","return_statement":"return strings","docstring":"Return a list of explicit match strings between the summary and reference.\n Note that this will be in the same format as the strings are input. This is\n important to remember if tokenization is done manually. If tokenization is\n specified automatically on the raw strings, raw strings will automatically\n be returned rather than SpaCy tokenized sequences.\n\n Arguments:\n\n - min_length (int): filter out overlaps shorter than this (default = 0)\n - raw (bool): return raw input rather than stringified\n - (default = False if automatic tokenization, True otherwise)\n - summary_base (true): strings are based of summary text (default = True)\n\n Returns:\n\n - list of overlaps, where overlaps are strings or token sequences","docstring_summary":"","docstring_tokens":[],"function":"def strings(self, min_length = 0, raw = None, summary_base = True):\n\n \"\"\"\n\n Return a list of explicit match strings between the summary and reference.\n Note that this will be in the same format as the strings are input. This is\n important to remember if tokenization is done manually. If tokenization is\n specified automatically on the raw strings, raw strings will automatically\n be returned rather than SpaCy tokenized sequences.\n\n Arguments:\n\n - min_length (int): filter out overlaps shorter than this (default = 0)\n - raw (bool): return raw input rather than stringified\n - (default = False if automatic tokenization, True otherwise)\n - summary_base (true): strings are based of summary text (default = True)\n\n Returns:\n\n - list of overlaps, where overlaps are strings or token sequences\n\n \"\"\"\n\n # Compute the strings against the summary or the text?\n\n base = self.summary if summary_base else self.text\n\n # Generate strings, filtering out strings below the minimum length.\n\n strings = [\n base[i : i + length]\n for i, j, length\n in self.overlaps()\n if length > min_length\n ]\n\n # By default, we just return the tokenization being used.\n # But if they user wants a raw string, then we convert.\n # Mostly, this will be used along with spacy.\n\n if self._tokens and raw:\n\n for i, s in enumerate(strings):\n strings[i] = str(s)\n\n # Return the list of strings.\n\n return strings","function_tokens":["def","strings","(","self",",","min_length","=","0",",","raw","=","None",",","summary_base","=","True",")",":","# Compute the strings against the summary or the text?","base","=","self",".","summary","if","summary_base","else","self",".","text","# Generate strings, filtering out strings below the minimum length.","strings","=","[","base","[","i",":","i","+","length","]","for","i",",","j",",","length","in","self",".","overlaps","(",")","if","length",">","min_length","]","# By default, we just return the tokenization being used.","# But if they user wants a raw string, then we convert.","# Mostly, this will be used along with 
spacy.","if","self",".","_tokens","and","raw",":","for","i",",","s","in","enumerate","(","strings",")",":","strings","[","i","]","=","str","(","s",")","# Return the list of strings.","return","strings"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L93-L140"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments.coverage","parameters":"(self, summary_base = True)","argument_list":"","return_statement":"","docstring":"Return the COVERAGE score of the summary and text.\n\n        Arguments:\n\n        - summary_base (bool): use summary as numerator (default = True)\n\n        Returns:\n\n        - decimal COVERAGE score within [0, 1]","docstring_summary":"","docstring_tokens":[],"function":"def coverage(self, summary_base = True):\n\n        \"\"\"\n\n        Return the COVERAGE score of the summary and text.\n\n        Arguments:\n\n        - summary_base (bool): use summary as numerator (default = True)\n\n        Returns:\n\n        - decimal COVERAGE score within [0, 1]\n\n        \"\"\"\n\n        numerator = sum(o.length for o in self.overlaps())\n\n        if summary_base: denominator = len(self.summary)\n        else: denominator = len(self.text)\n\n        if denominator == 0: return 0\n        else: return numerator \/ denominator","function_tokens":["def","coverage","(","self",",","summary_base","=","True",")",":","numerator","=","sum","(","o",".","length","for","o","in","self",".","overlaps","(",")",")","if","summary_base",":","denominator","=","len","(","self",".","summary",")","else",":","denominator","=","len","(","self",".","text",")","if","denominator","==","0",":","return","0","else",":","return","numerator","\/","denominator"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L143-L165"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments.density","parameters":"(self, summary_base = True)","argument_list":"","return_statement":"","docstring":"Return the DENSITY score of the summary and text.\n\n        Arguments:\n\n        - summary_base (bool): use summary as numerator (default = True)\n\n        Returns:\n\n        - decimal DENSITY score within [0, ...]","docstring_summary":"","docstring_tokens":[],"function":"def density(self, summary_base = True):\n\n        \"\"\"\n\n        Return the DENSITY score of the summary and text.\n\n        Arguments:\n\n        - summary_base (bool): use summary as numerator (default = True)\n\n        Returns:\n\n        - decimal DENSITY score within [0, ...]\n\n        \"\"\"\n\n        numerator = sum(o.length ** 2 for o in self.overlaps())\n\n        if summary_base: denominator = len(self.summary)\n        else: denominator = len(self.text)\n\n        if denominator == 0: return 0\n        else: return numerator \/ denominator","function_tokens":["def","density","(","self",",","summary_base","=","True",")",":","numerator","=","sum","(","o",".","length","**","2","for","o","in","self",".","overlaps","(",")",")","if","summary_base",":","denominator","=","len","(","self",".","summary",")","else",":","denominator","=","len","(","self",".","text",")","if","denominator","==","0",":","return","0","else",":","return","numerator","\/","denominator"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L168-L190"}
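The coverage and density statistics above, together with compression, are simple arithmetic over the greedy fragment matching implemented in Fragments._match (recorded below). The following self-contained sketch reproduces the same computation end to end under simplifying assumptions: hypothetical toy strings, plain whitespace tokenization instead of spaCy, and a standalone function in place of the class method.

from collections import namedtuple

Match = namedtuple("Match", ("summary", "text", "length"))

def greedy_fragments(a, b):
    # Greedy longest-match pass of the summary tokens (a) over the text
    # tokens (b), mirroring the control flow of Fragments._match.
    matches = []
    a_start = 0
    while a_start < len(a):
        best_match, best_match_length, b_start = None, 0, 0
        while b_start < len(b):
            if a[a_start] == b[b_start]:
                a_end, b_end = a_start, b_start
                while a_end < len(a) and b_end < len(b) and a[a_end] == b[b_end]:
                    a_end += 1
                    b_end += 1
                if a_end - a_start > best_match_length:
                    best_match_length = a_end - a_start
                    best_match = Match(a_start, b_start, best_match_length)
                b_start = b_end
            else:
                b_start += 1
        if best_match:
            matches.append(best_match)
            a_start += best_match_length
        else:
            a_start += 1
    return matches

# Hypothetical toy inputs, whitespace-tokenized and lowercased.
text = "the quick brown fox jumps over the lazy dog".split()
summary = "the quick brown fox is lazy".split()

fragments = greedy_fragments(summary, text)
coverage = sum(m.length for m in fragments) / len(summary)      # fraction of summary tokens copied
density = sum(m.length ** 2 for m in fragments) / len(summary)  # rewards long copied spans
compression = len(text) / len(summary)                          # text/summary length ratio

print(fragments)  # [Match(summary=0, text=0, length=4), Match(summary=5, text=7, length=1)]
print(coverage, density, compression)  # 0.8333..., 2.8333..., 1.5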
{"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments.compression","parameters":"(self, text_to_summary = True)","argument_list":"","return_statement":"","docstring":"Return compression ratio between summary and text.\n\n Arguments:\n\n - text_to_summary (bool): compute text\/summary ratio (default = True)\n\n Returns:\n\n - decimal compression score within [0, ...]","docstring_summary":"","docstring_tokens":[],"function":"def compression(self, text_to_summary = True):\n\n \"\"\"\n\n Return compression ratio between summary and text.\n\n Arguments:\n\n - text_to_summary (bool): compute text\/summary ratio (default = True)\n\n Returns:\n\n - decimal compression score within [0, ...]\n\n \"\"\"\n\n ratio = [len(self.text), len(self.summary)]\n\n try:\n\n if text_to_summary: return ratio[0] \/ ratio[1]\n else: return ratio[1] \/ ratio[0]\n\n except ZeroDivisionError:\n\n return 0","function_tokens":["def","compression","(","self",",","text_to_summary","=","True",")",":","ratio","=","[","len","(","self",".","text",")",",","len","(","self",".","summary",")","]","try",":","if","text_to_summary",":","return","ratio","[","0","]","\/","ratio","[","1","]","else",":","return","ratio","[","1","]","\/","ratio","[","0","]","except","ZeroDivisionError",":","return","0"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L193-L218"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments._match","parameters":"(self, a, b)","argument_list":"","return_statement":"","docstring":"Raw procedure for matching summary in text, described in paper.","docstring_summary":"","docstring_tokens":[],"function":"def _match(self, a, b):\n\n \"\"\"\n\n Raw procedure for matching summary in text, described in paper.\n\n \"\"\"\n\n self._matches = []\n\n a_start = b_start = 0\n\n while a_start < len(a):\n\n best_match = None\n best_match_length = 0\n\n while b_start < len(b):\n\n if a[a_start] == b[b_start]:\n\n a_end = a_start\n b_end = b_start\n\n while a_end < len(a) and b_end < len(b) \\\n and b[b_end] == a[a_end]:\n\n b_end += 1\n a_end += 1\n\n length = a_end - a_start\n\n if length > best_match_length:\n\n best_match = Fragments.Match(a_start, b_start, length)\n best_match_length = length\n\n b_start = b_end\n\n else:\n\n b_start += 1\n\n b_start = 0\n\n if best_match:\n\n if best_match_length > 0:\n self._matches.append(best_match)\n\n a_start += best_match_length\n\n else:\n\n a_start += 
1","function_tokens":["def","_match","(","self",",","a",",","b",")",":","self",".","_matches","=","[","]","a_start","=","b_start","=","0","while","a_start","<","len","(","a",")",":","best_match","=","None","best_match_length","=","0","while","b_start","<","len","(","b",")",":","if","a","[","a_start","]","==","b","[","b_start","]",":","a_end","=","a_start","b_end","=","b_start","while","a_end","<","len","(","a",")","and","b_end","<","len","(","b",")","and","b","[","b_end","]","==","a","[","a_end","]",":","b_end","+=","1","a_end","+=","1","length","=","a_end","-","a_start","if","length",">","best_match_length",":","best_match","=","Fragments",".","Match","(","a_start",",","b_start",",","length",")","best_match_length","=","length","b_start","=","b_end","else",":","b_start","+=","1","b_start","=","0","if","best_match",":","if","best_match_length",">","0",":","self",".","_matches",".","append","(","best_match",")","a_start","+=","best_match_length","else",":","a_start","+=","1"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L221-L275"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments._htmltokens","parameters":"(self, tokens)","argument_list":"","return_statement":"return [\n            [\n                _html.escape(t.text).replace(\"\\n\", \"\"),\n                _html.escape(t.whitespace_).replace(\"\\n\", \"\")\n            ]\n\n            for t in tokens\n        ]","docstring":"Carefully process tokens to handle whitespace and HTML characters.","docstring_summary":"","docstring_tokens":[],"function":"def _htmltokens(self, tokens):\n\n        \"\"\"\n\n        Carefully process tokens to handle whitespace and HTML characters.\n\n        \"\"\"\n\n        return [\n            [\n                _html.escape(t.text).replace(\"\\n\", \"\"),\n                _html.escape(t.whitespace_).replace(\"\\n\", \"\")\n            ]\n\n            for t in tokens\n        ]","function_tokens":["def","_htmltokens","(","self",",","tokens",")",":","return","[","[","_html",".","escape","(","t",".","text",")",".","replace","(","\"\\n\"",",","\"\"",")",",","_html",".","escape","(","t",".","whitespace_",")",".","replace","(","\"\\n\"",",","\"\"",")","]","for","t","in","tokens","]"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L278-L293"} {"nwo":"Alex-Fabbri\/Multi-News","sha":"f6476d1f114662eb93db32e9b704b7c4fe047217","path":"data\/scripts\/fragments.py","language":"python","identifier":"Fragments.annotate","parameters":"(self, min_length = 0, text_truncation = None, novel_italics = False)","argument_list":"","return_statement":"return summary, text","docstring":"Used to annotate fragments for website visualization.\n\n        Arguments:\n\n        - min_length (int): minimum length overlap to count (default = 0)\n        - text_truncation (int): truncated text length (default = None)\n        - novel_italics (bool): italicize novel words (default = False)\n\n        Returns:\n\n        - a tuple of strings: (summary HTML, text HTML)","docstring_summary":"","docstring_tokens":[],"function":"def annotate(self, min_length = 0, text_truncation = None, novel_italics = False):\n\n        \"\"\"\n\n        Used to annotate fragments for website visualization.\n\n        Arguments:\n\n        - min_length (int): minimum length overlap to count (default = 0)\n        - text_truncation (int): truncated text length (default = None)\n        - novel_italics (bool): italicize novel words (default = False)\n\n        Returns:\n\n        - a tuple of strings: (summary HTML, text HTML)\n\n        \"\"\"\n\n        start = 
\"\"\"\n            <u style=\"color: {color};\" data-ref=\"{ref}\" title=\"Length: {length}\">\n        \"\"\".strip()\n\n        end = \"\"\"\n            <\/u>\n        \"\"\".strip()\n\n        # Here we tokenize carefully to preserve sane-looking whitespace.\n        # (This part does require text to use a SpaCy tokenization.)\n\n        summary = self._htmltokens(self.summary)\n        text = self._htmltokens(self.text)\n\n        # Compute novel word set, if requested.\n\n        if novel_italics:\n\n            novel = set(self._norm_summary) - set(self._norm_text)\n\n            for word_whitespace in summary:\n\n                if word_whitespace[0].lower() in novel:\n                    word_whitespace[0] = \"<em>\" + word_whitespace[0] + \"<\/em>\"\n\n        # Truncate text, if requested.\n        # Must be careful later on with this.\n\n        if text_truncation is not None:\n            text = text[:text_truncation]\n\n        # March through overlaps, replacing tokens with HTML-tagged strings.\n\n        colors = self._itercolors()\n\n        for overlap in self.overlaps():\n\n            # Skip overlaps that are too short.\n\n            if overlap.length < min_length:\n                continue\n\n            # Reference ID for JavaScript highlighting.\n            # This is random, but shared between corresponding fragments.\n\n            ref = _random.randint(0, 1e10)\n            color = next(colors)\n\n            # Summary starting tag.\n\n            summary[overlap.summary][0] = start.format(\n                color = color,\n                ref = ref,\n                length = overlap.length,\n            ) + summary[overlap.summary][0]\n\n            # Text starting tag.\n\n            text[overlap.text][0] = start.format(\n                color = color,\n                ref = ref,\n                length = overlap.length,\n            ) + text[overlap.text][0]\n\n            # Summary ending tag.\n\n            summary[overlap.summary + overlap.length - 1][0] += end\n\n            # Text ending tag.\n\n            text[overlap.text + overlap.length - 1][0] += end\n\n        # Carefully join tokens and whitespace to reconstruct the string.\n\n        summary = \" \".join(\"\".join(\"\".join(tw) for tw in summary).split())\n        text = \" \".join(\"\".join(\"\".join(tw) for tw in text).split())\n\n        # Return the tuple.\n\n        return summary, text","function_tokens":["def","annotate","(","self",",","min_length","=","0",",","text_truncation","=","None",",","novel_italics","=","False",")",":","start","=","\"\"\"\n            <u style=\"color: {color};\" data-ref=\"{ref}\" title=\"Length: {length}\">\n        \"\"\"",".","strip","(",")","end","=","\"\"\"\n            <\/u>\n        \"\"\"",".","strip","(",")","# Here we tokenize carefully to preserve sane-looking whitespace.","# (This part does require text to use a SpaCy tokenization.)","summary","=","self",".","_htmltokens","(","self",".","summary",")","text","=","self",".","_htmltokens","(","self",".","text",")","# Compute novel word set, if requested.","if","novel_italics",":","novel","=","set","(","self",".","_norm_summary",")","-","set","(","self",".","_norm_text",")","for","word_whitespace","in","summary",":","if","word_whitespace","[","0","]",".","lower","(",")","in","novel",":","word_whitespace","[","0","]","=","\"<em>\"","+","word_whitespace","[","0","]","+","\"<\/em>\"","# Truncate text, if requested.","# Must be careful later on with this.","if","text_truncation","is","not","None",":","text","=","text","[",":","text_truncation","]","# March through overlaps, replacing tokens with HTML-tagged strings.","colors","=","self",".","_itercolors","(",")","for","overlap","in","self",".","overlaps","(",")",":","# Skip overlaps that are too short.","if","overlap",".","length","<","min_length",":","continue","# Reference ID for JavaScript highlighting.","# This is random, but shared between corresponding fragments.","ref","=","_random",".","randint","(","0",",","1e10",")","color","=","next","(","colors",")","# Summary starting 
tag.","summary","[","overlap",".","summary","]","[","0","]","=","start",".","format","(","color","=","color",",","ref","=","ref",",","length","=","overlap",".","length",",",")","+","summary","[","overlap",".","summary","]","[","0","]","# Text starting tag.","text","[","overlap",".","text","]","[","0","]","=","start",".","format","(","color","=","color",",","ref","=","ref",",","length","=","overlap",".","length",",",")","+","text","[","overlap",".","text","]","[","0","]","# Summary ending tag.","summary","[","overlap",".","summary","+","overlap",".","length","-","1","]","[","0","]","+=","end","# Text ending tag.","text","[","overlap",".","text","+","overlap",".","length","-","1","]","[","0","]","+=","end","# Carefully join tokens and whitespace to reconstruct the string.","summary","=","\" \"",".","join","(","\"\"",".","join","(","\"\"",".","join","(","tw",")","for","tw","in","summary",")",".","split","(",")",")","text","=","\" \"",".","join","(","\"\"",".","join","(","\"\"",".","join","(","tw",")","for","tw","in","text",")",".","split","(",")",")","# Return the tuple.","return","summary",",","text"],"url":"https:\/\/github.com\/Alex-Fabbri\/Multi-News\/blob\/f6476d1f114662eb93db32e9b704b7c4fe047217\/data\/scripts\/fragments.py#L296-L396"}
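For visualization, the annotate method above wraps each matched span in a <u> element carrying a color, the span length, and a random reference ID shared between the corresponding summary and text fragments so JavaScript can highlight both on hover. Below is a stripped-down sketch of that tagging logic for a single side only; the token list, colors, and <u> attribute names are illustrative assumptions, and the whitespace-preserving reconstruction performed by the real method is omitted.

import html
import random
from collections import namedtuple

Match = namedtuple("Match", ("summary", "text", "length"))

def tag_side(tokens, fragments, side, colors):
    # Wrap each matched span in a <u> tag, following the structure of
    # Fragments.annotate (attribute names here are illustrative).
    out = [html.escape(t) for t in tokens]
    for fragment, color in zip(fragments, colors):
        i = getattr(fragment, side)        # span start index on this side
        ref = random.randint(0, 10 ** 10)  # ID for JavaScript highlighting
        tag = '<u style="color: {color};" data-ref="{ref}" title="Length: {length}">'
        out[i] = tag.format(color=color, ref=ref, length=fragment.length) + out[i]
        out[i + fragment.length - 1] += "</u>"
    return " ".join(out)

summary = "the quick brown fox is lazy".split()
fragments = [Match(0, 0, 4), Match(5, 7, 1)]  # toy overlaps from the earlier sketch
print(tag_side(summary, fragments, "summary", ["#fdd", "#dfd"]))
# <u style="color: #fdd;" ...>the quick brown fox</u> is <u style="color: #dfd;" ...>lazy</u>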