{"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"eval_utils.py","language":"python","identifier":"normalize_answer","parameters":"(s)","argument_list":"","return_statement":"return white_space_fix(remove_articles(remove_punc(lower(s))))","docstring":"Lower text and remove punctuation, articles and extra whitespace.","docstring_summary":"Lower text and remove punctuation, articles and extra whitespace.","docstring_tokens":["Lower","text","and","remove","punctuation","articles","and","extra","whitespace","."],"function":"def normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))","function_tokens":["def","normalize_answer","(","s",")",":","def","remove_articles","(","text",")",":","return","re",".","sub","(","r'\\b(a|an|the)\\b'",",","' '",",","text",")","def","white_space_fix","(","text",")",":","return","' '",".","join","(","text",".","split","(",")",")","def","remove_punc","(","text",")",":","exclude","=","set","(","string",".","punctuation",")","return","''",".","join","(","ch","for","ch","in","text","if","ch","not","in","exclude",")","def","lower","(","text",")",":","return","text",".","lower","(",")","return","white_space_fix","(","remove_articles","(","remove_punc","(","lower","(","s",")",")",")",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/eval_utils.py#L13-L28"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"sequential_sentence_selector\/run_sequential_sentence_selector.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, max_seq_length, max_sent_num, max_sf_num, tokenizer, train=False)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, max_seq_length, max_sent_num, max_sf_num, tokenizer, train=False):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n DUMMY = [0] * max_seq_length\n DUMMY_ = [0.0] * max_sent_num\n features = []\n logger.info('#### Constructing features... 
####')\n for (ex_index, example) in enumerate(tqdm(examples, desc='Example')):\n\n tokens_q = tokenizer.tokenize(\n 'Q: {} A: {}'.format(example.question, example.answer))\n tokens_q = ['[CLS]'] + tokens_q + ['[SEP]']\n\n input_ids = []\n input_masks = []\n segment_ids = []\n\n for title in example.titles:\n sents = example.context[title]\n for (i, s) in enumerate(sents):\n\n if len(input_ids) == max_sent_num:\n break\n\n tokens_s = tokenizer.tokenize(\n s)[:max_seq_length-len(tokens_q)-1]\n tokens_s = tokens_s + ['[SEP]']\n\n padding = [0] * (max_seq_length -\n len(tokens_s) - len(tokens_q))\n\n input_ids_ = tokenizer.convert_tokens_to_ids(\n tokens_q + tokens_s)\n input_masks_ = [1] * len(input_ids_)\n segment_ids_ = [0] * len(tokens_q) + [1] * len(tokens_s)\n\n input_ids_ += padding\n input_ids.append(input_ids_)\n\n input_masks_ += padding\n input_masks.append(input_masks_)\n\n segment_ids_ += padding\n segment_ids.append(segment_ids_)\n\n assert len(input_ids_) == max_seq_length\n assert len(input_masks_) == max_seq_length\n assert len(segment_ids_) == max_seq_length\n\n target_ids = []\n target_offset = 0\n\n for title in example.titles:\n sfs = example.supporting_facts[title]\n for i in sfs:\n if i < len(example.context[title]) and i+target_offset < len(input_ids):\n target_ids.append(i+target_offset)\n else:\n logger.warning('')\n logger.warning('Invalid annotation: {}'.format(sfs))\n logger.warning('Invalid annotation: {}'.format(\n example.context[title]))\n\n target_offset += len(example.context[title])\n\n assert len(input_ids) <= max_sent_num\n assert len(target_ids) <= max_sf_num\n\n num_sents = len(input_ids)\n num_sfs = len(target_ids)\n\n output_masks = [([1.0] * len(input_ids) + [0.0] * (max_sent_num -\n len(input_ids) + 1)) for _ in range(max_sent_num + 2)]\n\n if train:\n\n for i in range(len(target_ids)):\n for j in range(len(target_ids)):\n if i == j:\n continue\n\n output_masks[i][target_ids[j]] = 0.0\n\n for i in range(len(output_masks)):\n if i >= num_sfs+1:\n for j in range(len(output_masks[i])):\n output_masks[i][j] = 0.0\n\n else:\n for i in range(len(input_ids)):\n output_masks[i+1][i] = 0.0\n\n target_ids += [0] * (max_sf_num - len(target_ids))\n\n padding = [DUMMY] * (max_sent_num - len(input_ids))\n input_ids += padding\n input_masks += padding\n segment_ids += padding\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_masks=input_masks,\n segment_ids=segment_ids,\n target_ids=target_ids,\n output_masks=output_masks,\n num_sents=num_sents,\n num_sfs=num_sfs,\n ex_index=ex_index))\n\n logger.info('Done!')\n\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","max_seq_length",",","max_sent_num",",","max_sf_num",",","tokenizer",",","train","=","False",")",":","DUMMY","=","[","0","]","*","max_seq_length","DUMMY_","=","[","0.0","]","*","max_sent_num","features","=","[","]","logger",".","info","(","'#### Constructing features... 
####'",")","for","(","ex_index",",","example",")","in","enumerate","(","tqdm","(","examples",",","desc","=","'Example'",")",")",":","tokens_q","=","tokenizer",".","tokenize","(","'Q: {} A: {}'",".","format","(","example",".","question",",","example",".","answer",")",")","tokens_q","=","[","'[CLS]'","]","+","tokens_q","+","[","'[SEP]'","]","input_ids","=","[","]","input_masks","=","[","]","segment_ids","=","[","]","for","title","in","example",".","titles",":","sents","=","example",".","context","[","title","]","for","(","i",",","s",")","in","enumerate","(","sents",")",":","if","len","(","input_ids",")","==","max_sent_num",":","break","tokens_s","=","tokenizer",".","tokenize","(","s",")","[",":","max_seq_length","-","len","(","tokens_q",")","-","1","]","tokens_s","=","tokens_s","+","[","'[SEP]'","]","padding","=","[","0","]","*","(","max_seq_length","-","len","(","tokens_s",")","-","len","(","tokens_q",")",")","input_ids_","=","tokenizer",".","convert_tokens_to_ids","(","tokens_q","+","tokens_s",")","input_masks_","=","[","1","]","*","len","(","input_ids_",")","segment_ids_","=","[","0","]","*","len","(","tokens_q",")","+","[","1","]","*","len","(","tokens_s",")","input_ids_","+=","padding","input_ids",".","append","(","input_ids_",")","input_masks_","+=","padding","input_masks",".","append","(","input_masks_",")","segment_ids_","+=","padding","segment_ids",".","append","(","segment_ids_",")","assert","len","(","input_ids_",")","==","max_seq_length","assert","len","(","input_masks_",")","==","max_seq_length","assert","len","(","segment_ids_",")","==","max_seq_length","target_ids","=","[","]","target_offset","=","0","for","title","in","example",".","titles",":","sfs","=","example",".","supporting_facts","[","title","]","for","i","in","sfs",":","if","i","<","len","(","example",".","context","[","title","]",")","and","i","+","target_offset","<","len","(","input_ids",")",":","target_ids",".","append","(","i","+","target_offset",")","else",":","logger",".","warning","(","''",")","logger",".","warning","(","'Invalid annotation: {}'",".","format","(","sfs",")",")","logger",".","warning","(","'Invalid annotation: 
{}'",".","format","(","example",".","context","[","title","]",")",")","target_offset","+=","len","(","example",".","context","[","title","]",")","assert","len","(","input_ids",")","<=","max_sent_num","assert","len","(","target_ids",")","<=","max_sf_num","num_sents","=","len","(","input_ids",")","num_sfs","=","len","(","target_ids",")","output_masks","=","[","(","[","1.0","]","*","len","(","input_ids",")","+","[","0.0","]","*","(","max_sent_num","-","len","(","input_ids",")","+","1",")",")","for","_","in","range","(","max_sent_num","+","2",")","]","if","train",":","for","i","in","range","(","len","(","target_ids",")",")",":","for","j","in","range","(","len","(","target_ids",")",")",":","if","i","==","j",":","continue","output_masks","[","i","]","[","target_ids","[","j","]","]","=","0.0","for","i","in","range","(","len","(","output_masks",")",")",":","if","i",">=","num_sfs","+","1",":","for","j","in","range","(","len","(","output_masks","[","i","]",")",")",":","output_masks","[","i","]","[","j","]","=","0.0","else",":","for","i","in","range","(","len","(","input_ids",")",")",":","output_masks","[","i","+","1","]","[","i","]","=","0.0","target_ids","+=","[","0","]","*","(","max_sf_num","-","len","(","target_ids",")",")","padding","=","[","DUMMY","]","*","(","max_sent_num","-","len","(","input_ids",")",")","input_ids","+=","padding","input_masks","+=","padding","segment_ids","+=","padding","features",".","append","(","InputFeatures","(","input_ids","=","input_ids",",","input_masks","=","input_masks",",","segment_ids","=","segment_ids",",","target_ids","=","target_ids",",","output_masks","=","output_masks",",","num_sents","=","num_sents",",","num_sfs","=","num_sfs",",","ex_index","=","ex_index",")",")","logger",".","info","(","'Done!'",")","return","features"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/sequential_sentence_selector\/run_sequential_sentence_selector.py#L87-L198"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"graph_retriever\/utils.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer, train = False)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer, train = False):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n if not train and graph_retriever_config.db_save_path is not None:\n max_para_num = graph_retriever_config.max_context_size\n graph_retriever_config.max_para_num = max(graph_retriever_config.max_para_num, max_para_num)\n \n max_steps = graph_retriever_config.max_select_num\n \n DUMMY = [0] * max_seq_length\n features = []\n\n logger.info('#### Converting examples to features... 
####')\n for (ex_index, example) in enumerate(tqdm(examples, desc='Example')):\n tokens_q = tokenize_question(example.question, tokenizer)\n \n ##############\n # Short gold #\n ##############\n title2index = {}\n input_ids = []\n input_masks = []\n segment_ids = []\n\n # Append gold and non-gold paragraphs from context\n if train and graph_retriever_config.use_redundant and len(example.redundant_gold) > 0:\n if graph_retriever_config.use_multiple_redundant:\n titles_list = example.short_gold + [redundant[0] for redundant in example.all_redundant_gold] + list(example.context.keys())\n else:\n titles_list = example.short_gold + [example.redundant_gold[0]] + list(example.context.keys())\n else:\n titles_list = example.short_gold + list(example.context.keys())\n for p in titles_list:\n\n if len(input_ids) == max_para_num:\n break\n\n # Avoid appending gold paragraphs as negative\n if p in title2index:\n continue\n\n # fullwiki eval\n # Gold paragraphs are not always in context\n if not train and graph_retriever_config.open and p not in example.context:\n continue\n \n title2index[p] = len(title2index)\n example.title_order.append(p)\n p = example.context[p]\n\n input_ids_, input_masks_, segment_ids_ = tokenize_paragraph(p, tokens_q, max_seq_length, tokenizer)\n input_ids.append(input_ids_)\n input_masks.append(input_masks_)\n segment_ids.append(segment_ids_)\n\n # Open-domain setting\n if graph_retriever_config.open:\n num_paragraphs_no_links = len(input_ids)\n \n for p_ in example.context:\n\n if not train and graph_retriever_config.db_save_path is not None:\n break\n\n if len(input_ids) == max_para_num:\n break\n\n if p_ not in example.all_linked_paras_dic:\n continue\n \n for l in example.all_linked_paras_dic[p_]:\n\n if len(input_ids) == max_para_num:\n break\n \n if l in title2index:\n continue\n\n title2index[l] = len(title2index)\n example.title_order.append(l)\n p = example.all_linked_paras_dic[p_][l]\n\n input_ids_, input_masks_, segment_ids_ = tokenize_paragraph(p, tokens_q, max_seq_length, tokenizer)\n input_ids.append(input_ids_)\n input_masks.append(input_masks_)\n segment_ids.append(segment_ids_)\n \n assert len(input_ids) <= max_para_num\n \n num_paragraphs = len(input_ids)\n num_steps = len(example.short_gold)+1 # 1 for EOE\n\n if train:\n assert num_steps <= max_steps\n \n output_masks = [([1.0] * len(input_ids) + [0.0] * (max_para_num - len(input_ids) + 1)) for _ in range(max_para_num + 2)]\n\n if (not train) and graph_retriever_config.open:\n assert len(example.context) == num_paragraphs_no_links\n for i in range(len(output_masks[0])):\n if i >= num_paragraphs_no_links:\n output_masks[0][i] = 0.0\n \n for i in range(len(input_ids)):\n output_masks[i+1][i] = 0.0 \n\n if train:\n size = num_steps-1\n\n for i in range(size):\n for j in range(size):\n if i != j:\n output_masks[i][j] = 0.0\n\n for i in range(size):\n output_masks[size][i] = 0.0\n \n for i in range(max_steps):\n if i > size:\n for j in range(len(output_masks[i])):\n output_masks[i][j] = 0.0\n\n # Use REDUNDANT setting\n # Avoid treating the redundant paragraph as a negative example at the first step\n if graph_retriever_config.use_redundant and len(example.redundant_gold) > 0:\n if graph_retriever_config.use_multiple_redundant:\n for redundant in example.all_redundant_gold:\n output_masks[0][title2index[redundant[0]]] = 0.0\n else:\n output_masks[0][title2index[example.redundant_gold[0]]] = 0.0\n \n padding = [DUMMY] * (max_para_num - len(input_ids))\n input_ids += padding\n input_masks += padding\n segment_ids 
+= padding\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_masks=input_masks,\n segment_ids=segment_ids,\n output_masks = output_masks,\n num_paragraphs = num_paragraphs,\n num_steps = num_steps,\n ex_index = ex_index))\n\n if not train or not graph_retriever_config.use_redundant or len(example.redundant_gold) == 0:\n continue\n\n\n ##################\n # Redundant gold #\n ##################\n for redundant_gold in example.all_redundant_gold:\n hist = set()\n input_ids_r = []\n input_masks_r = []\n segment_ids_r = []\n\n # Append gold and non-gold paragraphs from context\n for p in redundant_gold + list(example.context.keys()):\n\n if len(input_ids_r) == max_para_num:\n break\n\n #assert p in title2index\n if p not in title2index:\n assert p not in redundant_gold\n continue\n\n if p in hist:\n continue\n hist.add(p)\n\n index = title2index[p]\n input_ids_r.append(input_ids[index])\n input_masks_r.append(input_masks[index])\n segment_ids_r.append(segment_ids[index])\n\n # Open-domain setting (mainly for HotpotQA fullwiki)\n if graph_retriever_config.open:\n\n for p in title2index:\n\n if len(input_ids_r) == max_para_num:\n break\n\n if p in hist:\n continue\n hist.add(p)\n\n index = title2index[p]\n input_ids_r.append(input_ids[index])\n input_masks_r.append(input_masks[index])\n segment_ids_r.append(segment_ids[index])\n\n assert len(input_ids_r) <= max_para_num\n\n num_paragraphs_r = len(input_ids_r)\n num_steps_r = len(redundant_gold)+1\n\n assert num_steps_r <= max_steps\n\n output_masks_r = [([1.0] * len(input_ids_r) + [0.0] * (max_para_num - len(input_ids_r) + 1)) for _ in range(max_para_num + 2)]\n\n size = num_steps_r-1\n\n for i in range(size):\n for j in range(size):\n if i != j:\n output_masks_r[i][j] = 0.0\n\n if i > 0:\n output_masks_r[i][0] = 1.0\n\n for i in range(size): #size-1\n output_masks_r[size][i] = 0.0\n\n for i in range(max_steps):\n if i > size:\n for j in range(len(output_masks_r[i])):\n output_masks_r[i][j] = 0.0\n\n padding = [DUMMY] * (max_para_num - len(input_ids_r))\n input_ids_r += padding\n input_masks_r += padding\n segment_ids_r += padding\n\n features.append(\n InputFeatures(input_ids=input_ids_r,\n input_masks=input_masks_r,\n segment_ids=segment_ids_r,\n output_masks = output_masks_r,\n num_paragraphs = num_paragraphs_r,\n num_steps = num_steps_r,\n ex_index = None))\n\n if not graph_retriever_config.use_multiple_redundant:\n break\n\n logger.info('Done!')\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","max_seq_length",",","max_para_num",",","graph_retriever_config",",","tokenizer",",","train","=","False",")",":","if","not","train","and","graph_retriever_config",".","db_save_path","is","not","None",":","max_para_num","=","graph_retriever_config",".","max_context_size","graph_retriever_config",".","max_para_num","=","max","(","graph_retriever_config",".","max_para_num",",","max_para_num",")","max_steps","=","graph_retriever_config",".","max_select_num","DUMMY","=","[","0","]","*","max_seq_length","features","=","[","]","logger",".","info","(","'#### Converting examples to features... 
####'",")","for","(","ex_index",",","example",")","in","enumerate","(","tqdm","(","examples",",","desc","=","'Example'",")",")",":","tokens_q","=","tokenize_question","(","example",".","question",",","tokenizer",")","##############","# Short gold #","##############","title2index","=","{","}","input_ids","=","[","]","input_masks","=","[","]","segment_ids","=","[","]","# Append gold and non-gold paragraphs from context","if","train","and","graph_retriever_config",".","use_redundant","and","len","(","example",".","redundant_gold",")",">","0",":","if","graph_retriever_config",".","use_multiple_redundant",":","titles_list","=","example",".","short_gold","+","[","redundant","[","0","]","for","redundant","in","example",".","all_redundant_gold","]","+","list","(","example",".","context",".","keys","(",")",")","else",":","titles_list","=","example",".","short_gold","+","[","example",".","redundant_gold","[","0","]","]","+","list","(","example",".","context",".","keys","(",")",")","else",":","titles_list","=","example",".","short_gold","+","list","(","example",".","context",".","keys","(",")",")","for","p","in","titles_list",":","if","len","(","input_ids",")","==","max_para_num",":","break","# Avoid appending gold paragraphs as negative","if","p","in","title2index",":","continue","# fullwiki eval","# Gold paragraphs are not always in context","if","not","train","and","graph_retriever_config",".","open","and","p","not","in","example",".","context",":","continue","title2index","[","p","]","=","len","(","title2index",")","example",".","title_order",".","append","(","p",")","p","=","example",".","context","[","p","]","input_ids_",",","input_masks_",",","segment_ids_","=","tokenize_paragraph","(","p",",","tokens_q",",","max_seq_length",",","tokenizer",")","input_ids",".","append","(","input_ids_",")","input_masks",".","append","(","input_masks_",")","segment_ids",".","append","(","segment_ids_",")","# Open-domain setting","if","graph_retriever_config",".","open",":","num_paragraphs_no_links","=","len","(","input_ids",")","for","p_","in","example",".","context",":","if","not","train","and","graph_retriever_config",".","db_save_path","is","not","None",":","break","if","len","(","input_ids",")","==","max_para_num",":","break","if","p_","not","in","example",".","all_linked_paras_dic",":","continue","for","l","in","example",".","all_linked_paras_dic","[","p_","]",":","if","len","(","input_ids",")","==","max_para_num",":","break","if","l","in","title2index",":","continue","title2index","[","l","]","=","len","(","title2index",")","example",".","title_order",".","append","(","l",")","p","=","example",".","all_linked_paras_dic","[","p_","]","[","l","]","input_ids_",",","input_masks_",",","segment_ids_","=","tokenize_paragraph","(","p",",","tokens_q",",","max_seq_length",",","tokenizer",")","input_ids",".","append","(","input_ids_",")","input_masks",".","append","(","input_masks_",")","segment_ids",".","append","(","segment_ids_",")","assert","len","(","input_ids",")","<=","max_para_num","num_paragraphs","=","len","(","input_ids",")","num_steps","=","len","(","example",".","short_gold",")","+","1","# 1 for 
EOE","if","train",":","assert","num_steps","<=","max_steps","output_masks","=","[","(","[","1.0","]","*","len","(","input_ids",")","+","[","0.0","]","*","(","max_para_num","-","len","(","input_ids",")","+","1",")",")","for","_","in","range","(","max_para_num","+","2",")","]","if","(","not","train",")","and","graph_retriever_config",".","open",":","assert","len","(","example",".","context",")","==","num_paragraphs_no_links","for","i","in","range","(","len","(","output_masks","[","0","]",")",")",":","if","i",">=","num_paragraphs_no_links",":","output_masks","[","0","]","[","i","]","=","0.0","for","i","in","range","(","len","(","input_ids",")",")",":","output_masks","[","i","+","1","]","[","i","]","=","0.0","if","train",":","size","=","num_steps","-","1","for","i","in","range","(","size",")",":","for","j","in","range","(","size",")",":","if","i","!=","j",":","output_masks","[","i","]","[","j","]","=","0.0","for","i","in","range","(","size",")",":","output_masks","[","size","]","[","i","]","=","0.0","for","i","in","range","(","max_steps",")",":","if","i",">","size",":","for","j","in","range","(","len","(","output_masks","[","i","]",")",")",":","output_masks","[","i","]","[","j","]","=","0.0","# Use REDUNDANT setting","# Avoid treating the redundant paragraph as a negative example at the first step","if","graph_retriever_config",".","use_redundant","and","len","(","example",".","redundant_gold",")",">","0",":","if","graph_retriever_config",".","use_multiple_redundant",":","for","redundant","in","example",".","all_redundant_gold",":","output_masks","[","0","]","[","title2index","[","redundant","[","0","]","]","]","=","0.0","else",":","output_masks","[","0","]","[","title2index","[","example",".","redundant_gold","[","0","]","]","]","=","0.0","padding","=","[","DUMMY","]","*","(","max_para_num","-","len","(","input_ids",")",")","input_ids","+=","padding","input_masks","+=","padding","segment_ids","+=","padding","features",".","append","(","InputFeatures","(","input_ids","=","input_ids",",","input_masks","=","input_masks",",","segment_ids","=","segment_ids",",","output_masks","=","output_masks",",","num_paragraphs","=","num_paragraphs",",","num_steps","=","num_steps",",","ex_index","=","ex_index",")",")","if","not","train","or","not","graph_retriever_config",".","use_redundant","or","len","(","example",".","redundant_gold",")","==","0",":","continue","##################","# Redundant gold #","##################","for","redundant_gold","in","example",".","all_redundant_gold",":","hist","=","set","(",")","input_ids_r","=","[","]","input_masks_r","=","[","]","segment_ids_r","=","[","]","# Append gold and non-gold paragraphs from context","for","p","in","redundant_gold","+","list","(","example",".","context",".","keys","(",")",")",":","if","len","(","input_ids_r",")","==","max_para_num",":","break","#assert p in title2index","if","p","not","in","title2index",":","assert","p","not","in","redundant_gold","continue","if","p","in","hist",":","continue","hist",".","add","(","p",")","index","=","title2index","[","p","]","input_ids_r",".","append","(","input_ids","[","index","]",")","input_masks_r",".","append","(","input_masks","[","index","]",")","segment_ids_r",".","append","(","segment_ids","[","index","]",")","# Open-domain setting (mainly for HotpotQA 
fullwiki)","if","graph_retriever_config",".","open",":","for","p","in","title2index",":","if","len","(","input_ids_r",")","==","max_para_num",":","break","if","p","in","hist",":","continue","hist",".","add","(","p",")","index","=","title2index","[","p","]","input_ids_r",".","append","(","input_ids","[","index","]",")","input_masks_r",".","append","(","input_masks","[","index","]",")","segment_ids_r",".","append","(","segment_ids","[","index","]",")","assert","len","(","input_ids_r",")","<=","max_para_num","num_paragraphs_r","=","len","(","input_ids_r",")","num_steps_r","=","len","(","redundant_gold",")","+","1","assert","num_steps_r","<=","max_steps","output_masks_r","=","[","(","[","1.0","]","*","len","(","input_ids_r",")","+","[","0.0","]","*","(","max_para_num","-","len","(","input_ids_r",")","+","1",")",")","for","_","in","range","(","max_para_num","+","2",")","]","size","=","num_steps_r","-","1","for","i","in","range","(","size",")",":","for","j","in","range","(","size",")",":","if","i","!=","j",":","output_masks_r","[","i","]","[","j","]","=","0.0","if","i",">","0",":","output_masks_r","[","i","]","[","0","]","=","1.0","for","i","in","range","(","size",")",":","#size-1","output_masks_r","[","size","]","[","i","]","=","0.0","for","i","in","range","(","max_steps",")",":","if","i",">","size",":","for","j","in","range","(","len","(","output_masks_r","[","i","]",")",")",":","output_masks_r","[","i","]","[","j","]","=","0.0","padding","=","[","DUMMY","]","*","(","max_para_num","-","len","(","input_ids_r",")",")","input_ids_r","+=","padding","input_masks_r","+=","padding","segment_ids_r","+=","padding","features",".","append","(","InputFeatures","(","input_ids","=","input_ids_r",",","input_masks","=","input_masks_r",",","segment_ids","=","segment_ids_r",",","output_masks","=","output_masks_r",",","num_paragraphs","=","num_paragraphs_r",",","num_steps","=","num_steps_r",",","ex_index","=","None",")",")","if","not","graph_retriever_config",".","use_multiple_redundant",":","break","logger",".","info","(","'Done!'",")","return","features"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/graph_retriever\/utils.py#L470-L710"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"graph_retriever\/utils.py","language":"python","identifier":"DataProcessor._create_examples","parameters":"(self, file_name, graph_retriever_config, set_type)","argument_list":"","return_statement":"return examples","docstring":"Limit the number of examples used.\n This is mainly for sanity-chacking new settings.","docstring_summary":"Limit the number of examples used.\n This is mainly for sanity-chacking new settings.","docstring_tokens":["Limit","the","number","of","examples","used",".","This","is","mainly","for","sanity","-","chacking","new","settings","."],"function":"def _create_examples(self, file_name, graph_retriever_config, set_type):\n\n task = graph_retriever_config.task\n jsn = json.load(open(file_name, 'r'))\n \n examples = []\n\n '''\n Limit the number of examples used.\n This is mainly for sanity-chacking new settings.\n '''\n if graph_retriever_config.example_limit is not None:\n random.shuffle(jsn)\n jsn = sorted(jsn, key = lambda x: x['q_id'])\n jsn = jsn[:graph_retriever_config.example_limit]\n\n '''\n Find the mximum size of the initial context (links are not included)\n '''\n graph_retriever_config.max_context_size = 0\n \n logger.info('#### Loading examples... 
from {} ####'.format(file_name))\n for (_, data) in enumerate(tqdm(jsn, desc='Example')):\n\n guid = data['q_id']\n question = data['question']\n context = data['context'] # {context title: paragraph}\n all_linked_paras_dic = data['all_linked_paras_dic'] # {context title: {linked title: paragraph}}\n short_gold = data['short_gold'] # [title 1, title 2] (Both are gold)\n redundant_gold = data['redundant_gold'] # [title 1, title 2, title 3] (\"title 1\" is not gold)\n all_redundant_gold = data['all_redundant_gold']\n\n '''\n Limit the number of redundant examples\n '''\n all_redundant_gold = all_redundant_gold[:graph_retriever_config.max_redundant_num]\n\n '''\n Control the size of the initial TF-IDF retrieved paragraphs\n *** Training time: to take a blalance between TF-IDF-based and link-based negative examples ***\n '''\n if graph_retriever_config.tfidf_limit is not None:\n new_context = {}\n for title in context:\n if len(new_context) == graph_retriever_config.tfidf_limit:\n break\n new_context[title] = context[title]\n context = new_context\n\n '''\n Use TagMe-based context at test time.\n '''\n if set_type == 'dev' and task == 'nq' and graph_retriever_config.tagme:\n assert 'tagged_context' in data\n\n '''\n Reformat \"tagged_context\" if needed (c.f. the \"context\" case above)\n '''\n if type(data['tagged_context']) == list:\n tagged_context = {c[0]: c[1] for c in data['tagged_context']}\n data['tagged_context'] = tagged_context\n\n '''\n Append valid paragraphs from \"tagged_context\" to \"context\"\n '''\n for tagged_title in data['tagged_context']:\n tagged_text = data['tagged_context'][tagged_title]\n if tagged_title not in context and tagged_title is not None and tagged_title.strip() != '' and tagged_text is not None and tagged_text.strip() != '':\n context[tagged_title] = tagged_text\n\n '''\n Clean \"context\" by removing invalid paragraphs\n '''\n removed_keys = []\n for title in context:\n if title is None or title.strip() == '' or context[title] is None or context[title].strip() == '':\n removed_keys.append(title)\n for key in removed_keys:\n context.pop(key)\n\n if task in ['squad', 'nq'] and set_type == 'train':\n new_context = {}\n\n orig_title = list(context.keys())[0].split('_')[0]\n \n orig_titles = []\n other_titles = []\n\n for title in context:\n title_ = title.split('_')[0]\n\n if title_ == orig_title:\n orig_titles.append(title)\n else:\n other_titles.append(title)\n\n orig_index = 0\n other_index = 0\n\n while orig_index < len(orig_titles) or other_index < len(other_titles):\n if orig_index < len(orig_titles):\n new_context[orig_titles[orig_index]] = context[orig_titles[orig_index]]\n orig_index += 1\n\n if other_index < len(other_titles):\n new_context[other_titles[other_index]] = context[other_titles[other_index]]\n other_index += 1\n\n context = new_context\n\n\n '''\n Convert link format\n '''\n new_all_linked_paras_dic = {} # {context title: {linked title: paragraph}}\n\n all_linked_paras_dic # {linked_title: paragraph} or mixed\n all_linked_para_title_dic = data['all_linked_para_title_dic'] # {context_title: [linked_title_1, linked_title_2, ...]}\n\n removed_keys = []\n tmp = {}\n for key in all_linked_paras_dic:\n if type(all_linked_paras_dic[key]) == dict:\n removed_keys.append(key)\n\n for linked_title in all_linked_paras_dic[key]:\n if linked_title not in all_linked_paras_dic:\n tmp[linked_title] = all_linked_paras_dic[key][linked_title]\n\n if key in all_linked_para_title_dic:\n all_linked_para_title_dic[key].append(linked_title)\n else:\n 
all_linked_para_title_dic[key] = [linked_title]\n\n for key in removed_keys:\n all_linked_paras_dic.pop(key)\n\n for key in tmp:\n if key not in all_linked_paras_dic:\n all_linked_paras_dic[key] = tmp[key]\n\n for context_title in context:\n if context_title not in all_linked_para_title_dic:\n continue\n\n new_entry = {}\n\n for linked_title in all_linked_para_title_dic[context_title]:\n if linked_title not in all_linked_paras_dic:\n continue\n\n new_entry[linked_title] = all_linked_paras_dic[linked_title]\n\n if len(new_entry) > 0:\n new_all_linked_paras_dic[context_title] = new_entry\n\n all_linked_paras_dic = new_all_linked_paras_dic\n\n if set_type == 'dev':\n '''\n Clean \"all_linked_paras_dic\" by removing invalid paragraphs\n '''\n for c in all_linked_paras_dic:\n removed_keys = []\n links = all_linked_paras_dic[c]\n for title in links:\n if title is None or title.strip() == '' or links[title] is None or type(links[title]) != str or links[title].strip() == '':\n removed_keys.append(title)\n for key in removed_keys:\n links.pop(key)\n\n all_paras = {}\n for title in context:\n all_paras[title] = context[title]\n\n if not graph_retriever_config.open:\n continue\n \n if title not in all_linked_paras_dic:\n continue\n for title_ in all_linked_paras_dic[title]:\n if title_ not in all_paras:\n all_paras[title_] = all_linked_paras_dic[title][title_]\n else:\n all_paras = None\n\n if set_type == 'dev' and graph_retriever_config.expand_links:\n expand_links(context, all_linked_paras_dic, all_paras)\n\n if set_type == 'dev' and graph_retriever_config.no_links:\n all_linked_paras_dic = {}\n \n graph_retriever_config.max_context_size = max(graph_retriever_config.max_context_size, len(context))\n\n '''\n Ensure that all the gold paragraphs are included in \"context\"\n '''\n if set_type == 'train':\n for t in short_gold + redundant_gold:\n assert t in context\n \n examples.append(InputExample(guid = guid,\n q = question,\n c = context,\n para_dic = all_linked_paras_dic,\n s_g = short_gold,\n r_g = redundant_gold,\n all_r_g = all_redundant_gold,\n all_paras = all_paras))\n\n if set_type == 'dev':\n examples = sorted(examples, key = lambda x: len(x.all_paras))\n logger.info('Done!')\n \n return examples","function_tokens":["def","_create_examples","(","self",",","file_name",",","graph_retriever_config",",","set_type",")",":","task","=","graph_retriever_config",".","task","jsn","=","json",".","load","(","open","(","file_name",",","'r'",")",")","examples","=","[","]","if","graph_retriever_config",".","example_limit","is","not","None",":","random",".","shuffle","(","jsn",")","jsn","=","sorted","(","jsn",",","key","=","lambda","x",":","x","[","'q_id'","]",")","jsn","=","jsn","[",":","graph_retriever_config",".","example_limit","]","'''\n Find the mximum size of the initial context (links are not included)\n '''","graph_retriever_config",".","max_context_size","=","0","logger",".","info","(","'#### Loading examples... 
from {} ####'",".","format","(","file_name",")",")","for","(","_",",","data",")","in","enumerate","(","tqdm","(","jsn",",","desc","=","'Example'",")",")",":","guid","=","data","[","'q_id'","]","question","=","data","[","'question'","]","context","=","data","[","'context'","]","# {context title: paragraph}","all_linked_paras_dic","=","data","[","'all_linked_paras_dic'","]","# {context title: {linked title: paragraph}}","short_gold","=","data","[","'short_gold'","]","# [title 1, title 2] (Both are gold)","redundant_gold","=","data","[","'redundant_gold'","]","# [title 1, title 2, title 3] (\"title 1\" is not gold)","all_redundant_gold","=","data","[","'all_redundant_gold'","]","'''\n Limit the number of redundant examples\n '''","all_redundant_gold","=","all_redundant_gold","[",":","graph_retriever_config",".","max_redundant_num","]","'''\n Control the size of the initial TF-IDF retrieved paragraphs\n *** Training time: to take a blalance between TF-IDF-based and link-based negative examples ***\n '''","if","graph_retriever_config",".","tfidf_limit","is","not","None",":","new_context","=","{","}","for","title","in","context",":","if","len","(","new_context",")","==","graph_retriever_config",".","tfidf_limit",":","break","new_context","[","title","]","=","context","[","title","]","context","=","new_context","'''\n Use TagMe-based context at test time.\n '''","if","set_type","==","'dev'","and","task","==","'nq'","and","graph_retriever_config",".","tagme",":","assert","'tagged_context'","in","data","'''\n Reformat \"tagged_context\" if needed (c.f. the \"context\" case above)\n '''","if","type","(","data","[","'tagged_context'","]",")","==","list",":","tagged_context","=","{","c","[","0","]",":","c","[","1","]","for","c","in","data","[","'tagged_context'","]","}","data","[","'tagged_context'","]","=","tagged_context","'''\n Append valid paragraphs from \"tagged_context\" to \"context\"\n '''","for","tagged_title","in","data","[","'tagged_context'","]",":","tagged_text","=","data","[","'tagged_context'","]","[","tagged_title","]","if","tagged_title","not","in","context","and","tagged_title","is","not","None","and","tagged_title",".","strip","(",")","!=","''","and","tagged_text","is","not","None","and","tagged_text",".","strip","(",")","!=","''",":","context","[","tagged_title","]","=","tagged_text","'''\n Clean \"context\" by removing invalid paragraphs\n 
'''","removed_keys","=","[","]","for","title","in","context",":","if","title","is","None","or","title",".","strip","(",")","==","''","or","context","[","title","]","is","None","or","context","[","title","]",".","strip","(",")","==","''",":","removed_keys",".","append","(","title",")","for","key","in","removed_keys",":","context",".","pop","(","key",")","if","task","in","[","'squad'",",","'nq'","]","and","set_type","==","'train'",":","new_context","=","{","}","orig_title","=","list","(","context",".","keys","(",")",")","[","0","]",".","split","(","'_'",")","[","0","]","orig_titles","=","[","]","other_titles","=","[","]","for","title","in","context",":","title_","=","title",".","split","(","'_'",")","[","0","]","if","title_","==","orig_title",":","orig_titles",".","append","(","title",")","else",":","other_titles",".","append","(","title",")","orig_index","=","0","other_index","=","0","while","orig_index","<","len","(","orig_titles",")","or","other_index","<","len","(","other_titles",")",":","if","orig_index","<","len","(","orig_titles",")",":","new_context","[","orig_titles","[","orig_index","]","]","=","context","[","orig_titles","[","orig_index","]","]","orig_index","+=","1","if","other_index","<","len","(","other_titles",")",":","new_context","[","other_titles","[","other_index","]","]","=","context","[","other_titles","[","other_index","]","]","other_index","+=","1","context","=","new_context","'''\n Convert link format\n '''","new_all_linked_paras_dic","=","{","}","# {context title: {linked title: paragraph}}","all_linked_paras_dic","# {linked_title: paragraph} or mixed","all_linked_para_title_dic","=","data","[","'all_linked_para_title_dic'","]","# {context_title: [linked_title_1, linked_title_2, ...]}","removed_keys","=","[","]","tmp","=","{","}","for","key","in","all_linked_paras_dic",":","if","type","(","all_linked_paras_dic","[","key","]",")","==","dict",":","removed_keys",".","append","(","key",")","for","linked_title","in","all_linked_paras_dic","[","key","]",":","if","linked_title","not","in","all_linked_paras_dic",":","tmp","[","linked_title","]","=","all_linked_paras_dic","[","key","]","[","linked_title","]","if","key","in","all_linked_para_title_dic",":","all_linked_para_title_dic","[","key","]",".","append","(","linked_title",")","else",":","all_linked_para_title_dic","[","key","]","=","[","linked_title","]","for","key","in","removed_keys",":","all_linked_paras_dic",".","pop","(","key",")","for","key","in","tmp",":","if","key","not","in","all_linked_paras_dic",":","all_linked_paras_dic","[","key","]","=","tmp","[","key","]","for","context_title","in","context",":","if","context_title","not","in","all_linked_para_title_dic",":","continue","new_entry","=","{","}","for","linked_title","in","all_linked_para_title_dic","[","context_title","]",":","if","linked_title","not","in","all_linked_paras_dic",":","continue","new_entry","[","linked_title","]","=","all_linked_paras_dic","[","linked_title","]","if","len","(","new_entry",")",">","0",":","new_all_linked_paras_dic","[","context_title","]","=","new_entry","all_linked_paras_dic","=","new_all_linked_paras_dic","if","set_type","==","'dev'",":","'''\n Clean \"all_linked_paras_dic\" by removing invalid paragraphs\n 
'''","for","c","in","all_linked_paras_dic",":","removed_keys","=","[","]","links","=","all_linked_paras_dic","[","c","]","for","title","in","links",":","if","title","is","None","or","title",".","strip","(",")","==","''","or","links","[","title","]","is","None","or","type","(","links","[","title","]",")","!=","str","or","links","[","title","]",".","strip","(",")","==","''",":","removed_keys",".","append","(","title",")","for","key","in","removed_keys",":","links",".","pop","(","key",")","all_paras","=","{","}","for","title","in","context",":","all_paras","[","title","]","=","context","[","title","]","if","not","graph_retriever_config",".","open",":","continue","if","title","not","in","all_linked_paras_dic",":","continue","for","title_","in","all_linked_paras_dic","[","title","]",":","if","title_","not","in","all_paras",":","all_paras","[","title_","]","=","all_linked_paras_dic","[","title","]","[","title_","]","else",":","all_paras","=","None","if","set_type","==","'dev'","and","graph_retriever_config",".","expand_links",":","expand_links","(","context",",","all_linked_paras_dic",",","all_paras",")","if","set_type","==","'dev'","and","graph_retriever_config",".","no_links",":","all_linked_paras_dic","=","{","}","graph_retriever_config",".","max_context_size","=","max","(","graph_retriever_config",".","max_context_size",",","len","(","context",")",")","'''\n Ensure that all the gold paragraphs are included in \"context\"\n '''","if","set_type","==","'train'",":","for","t","in","short_gold","+","redundant_gold",":","assert","t","in","context","examples",".","append","(","InputExample","(","guid","=","guid",",","q","=","question",",","c","=","context",",","para_dic","=","all_linked_paras_dic",",","s_g","=","short_gold",",","r_g","=","redundant_gold",",","all_r_g","=","all_redundant_gold",",","all_paras","=","all_paras",")",")","if","set_type","==","'dev'",":","examples","=","sorted","(","examples",",","key","=","lambda","x",":","len","(","x",".","all_paras",")",")","logger",".","info","(","'Done!'",")","return","examples"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/graph_retriever\/utils.py#L228-L442"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_tfidf.py","language":"python","identifier":"count","parameters":"(ngram, hash_size, multi_para, doc_id)","argument_list":"","return_statement":"return row, col, data","docstring":"Fetch the text of a document and compute hashed ngrams counts.","docstring_summary":"Fetch the text of a document and compute hashed ngrams counts.","docstring_tokens":["Fetch","the","text","of","a","document","and","compute","hashed","ngrams","counts","."],"function":"def count(ngram, hash_size, multi_para, doc_id):\n \"\"\"Fetch the text of a document and compute hashed ngrams counts.\"\"\"\n global DOC2IDX\n # FIXME: remove hard coding.\n row, col, data = [], [], []\n # Tokenize\n \n if multi_para is True:\n # 1. if multi_para is true, the doc contains multiple paragraphs separated by \\n\\n and with links.\n tokens = tokenize(fetch_text_multi_para(doc_id))\n else:\n # 2. 
if not, only intro docs are retrieved and the sentences are separated by \\t.\n # remove sentence separations (\"\\t\") (only for HotpotQA).\n tokens = tokenize(fetch_text(doc_id).replace(\"\\t\", \"\"))\n\n # Get ngrams from tokens, with stopword\/punctuation filtering.\n ngrams = tokens.ngrams(\n n=ngram, uncased=True, filter_fn=filter_ngram\n )\n\n # Hash ngrams and count occurences\n counts = Counter([hash(gram, hash_size)\n for gram in ngrams])\n\n # Return in sparse matrix data format.\n row.extend(counts.keys())\n col.extend([DOC2IDX[doc_id]] * len(counts))\n data.extend(counts.values())\n return row, col, data","function_tokens":["def","count","(","ngram",",","hash_size",",","multi_para",",","doc_id",")",":","global","DOC2IDX","# FIXME: remove hard coding.","row",",","col",",","data","=","[","]",",","[","]",",","[","]","# Tokenize","if","multi_para","is","True",":","# 1. if multi_para is true, the doc contains multiple paragraphs separated by \\n\\n and with links.","tokens","=","tokenize","(","fetch_text_multi_para","(","doc_id",")",")","else",":","# 2. if not, only intro docs are retrieved and the sentences are separated by \\t.","# remove sentence separations (\"\\t\") (only for HotpotQA).","tokens","=","tokenize","(","fetch_text","(","doc_id",")",".","replace","(","\"\\t\"",",","\"\"",")",")","# Get ngrams from tokens, with stopword\/punctuation filtering.","ngrams","=","tokens",".","ngrams","(","n","=","ngram",",","uncased","=","True",",","filter_fn","=","filter_ngram",")","# Hash ngrams and count occurences","counts","=","Counter","(","[","hash","(","gram",",","hash_size",")","for","gram","in","ngrams","]",")","# Return in sparse matrix data format.","row",".","extend","(","counts",".","keys","(",")",")","col",".","extend","(","[","DOC2IDX","[","doc_id","]","]","*","len","(","counts",")",")","data",".","extend","(","counts",".","values","(",")",")","return","row",",","col",",","data"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_tfidf.py#L88-L116"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_tfidf.py","language":"python","identifier":"get_count_matrix","parameters":"(args, db, db_opts)","argument_list":"","return_statement":"return count_matrix, (DOC2IDX, doc_ids)","docstring":"Form a sparse word to document count matrix (inverted index).\n\n M[i, j] = # times word i appears in document j.","docstring_summary":"Form a sparse word to document count matrix (inverted index).","docstring_tokens":["Form","a","sparse","word","to","document","count","matrix","(","inverted","index",")","."],"function":"def get_count_matrix(args, db, db_opts):\n \"\"\"Form a sparse word to document count matrix (inverted index).\n\n M[i, j] = # times word i appears in document j.\n \"\"\"\n # Map doc_ids to indexes\n global DOC2IDX\n db_class = get_class(db)\n with db_class(**db_opts) as doc_db:\n doc_ids = doc_db.get_doc_ids()\n DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}\n\n # Setup worker pool\n # TODO: Add tokenizer's choice.\n tok_class = SimpleTokenizer\n workers = ProcessPool(\n args.num_workers,\n initializer=init,\n initargs=(tok_class, db_class, db_opts)\n )\n\n # Compute the count matrix in steps (to keep in memory)\n logger.info('Mapping...')\n row, col, data = [], [], []\n step = max(int(len(doc_ids) \/ 10), 1)\n batches = [doc_ids[i:i + step] for i in range(0, len(doc_ids), step)]\n _count = 
partial(count, args.ngram, args.hash_size, args.multi_para)\n for i, batch in enumerate(batches):\n logger.info('-' * 25 + 'Batch %d\/%d' %\n (i + 1, len(batches)) + '-' * 25)\n for b_row, b_col, b_data in workers.imap_unordered(_count, batch):\n row.extend(b_row)\n col.extend(b_col)\n data.extend(b_data)\n workers.close()\n workers.join()\n\n logger.info('Creating sparse matrix...')\n count_matrix = sp.csr_matrix(\n (data, (row, col)), shape=(args.hash_size, len(doc_ids))\n )\n count_matrix.sum_duplicates()\n return count_matrix, (DOC2IDX, doc_ids)","function_tokens":["def","get_count_matrix","(","args",",","db",",","db_opts",")",":","# Map doc_ids to indexes","global","DOC2IDX","db_class","=","get_class","(","db",")","with","db_class","(","*","*","db_opts",")","as","doc_db",":","doc_ids","=","doc_db",".","get_doc_ids","(",")","DOC2IDX","=","{","doc_id",":","i","for","i",",","doc_id","in","enumerate","(","doc_ids",")","}","# Setup worker pool","# TODO: Add tokenizer's choice.","tok_class","=","SimpleTokenizer","workers","=","ProcessPool","(","args",".","num_workers",",","initializer","=","init",",","initargs","=","(","tok_class",",","db_class",",","db_opts",")",")","# Compute the count matrix in steps (to keep in memory)","logger",".","info","(","'Mapping...'",")","row",",","col",",","data","=","[","]",",","[","]",",","[","]","step","=","max","(","int","(","len","(","doc_ids",")","\/","10",")",",","1",")","batches","=","[","doc_ids","[","i",":","i","+","step","]","for","i","in","range","(","0",",","len","(","doc_ids",")",",","step",")","]","_count","=","partial","(","count",",","args",".","ngram",",","args",".","hash_size",",","args",".","multi_para",")","for","i",",","batch","in","enumerate","(","batches",")",":","logger",".","info","(","'-'","*","25","+","'Batch %d\/%d'","%","(","i","+","1",",","len","(","batches",")",")","+","'-'","*","25",")","for","b_row",",","b_col",",","b_data","in","workers",".","imap_unordered","(","_count",",","batch",")",":","row",".","extend","(","b_row",")","col",".","extend","(","b_col",")","data",".","extend","(","b_data",")","workers",".","close","(",")","workers",".","join","(",")","logger",".","info","(","'Creating sparse matrix...'",")","count_matrix","=","sp",".","csr_matrix","(","(","data",",","(","row",",","col",")",")",",","shape","=","(","args",".","hash_size",",","len","(","doc_ids",")",")",")","count_matrix",".","sum_duplicates","(",")","return","count_matrix",",","(","DOC2IDX",",","doc_ids",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_tfidf.py#L119-L161"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_tfidf.py","language":"python","identifier":"get_tfidf_matrix","parameters":"(cnts)","argument_list":"","return_statement":"return tfidfs","docstring":"Convert the word count matrix into tfidf one.\n\n tfidf = log(tf + 1) * log((N - Nt + 0.5) \/ (Nt + 0.5))\n * tf = term frequency in document\n * N = number of documents\n * Nt = number of occurences of term in all documents","docstring_summary":"Convert the word count matrix into tfidf one.","docstring_tokens":["Convert","the","word","count","matrix","into","tfidf","one","."],"function":"def get_tfidf_matrix(cnts):\n \"\"\"Convert the word count matrix into tfidf one.\n\n tfidf = log(tf + 1) * log((N - Nt + 0.5) \/ (Nt + 0.5))\n * tf = term frequency in document\n * N = number of documents\n * Nt = number of 
occurences of term in all documents\n \"\"\"\n Ns = get_doc_freqs(cnts)\n idfs = np.log((cnts.shape[1] - Ns + 0.5) \/ (Ns + 0.5))\n idfs[idfs < 0] = 0\n idfs = sp.diags(idfs, 0)\n tfs = cnts.log1p()\n tfidfs = idfs.dot(tfs)\n return tfidfs","function_tokens":["def","get_tfidf_matrix","(","cnts",")",":","Ns","=","get_doc_freqs","(","cnts",")","idfs","=","np",".","log","(","(","cnts",".","shape","[","1","]","-","Ns","+","0.5",")","\/","(","Ns","+","0.5",")",")","idfs","[","idfs","<","0","]","=","0","idfs","=","sp",".","diags","(","idfs",",","0",")","tfs","=","cnts",".","log1p","(",")","tfidfs","=","idfs",".","dot","(","tfs",")","return","tfidfs"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_tfidf.py#L169-L183"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_tfidf.py","language":"python","identifier":"get_doc_freqs","parameters":"(cnts)","argument_list":"","return_statement":"return freqs","docstring":"Return word --> # of docs it appears in.","docstring_summary":"Return word --> # of docs it appears in.","docstring_tokens":["Return","word","--",">","#","of","docs","it","appears","in","."],"function":"def get_doc_freqs(cnts):\n \"\"\"Return word --> # of docs it appears in.\"\"\"\n binary = (cnts > 0).astype(int)\n freqs = np.array(binary.sum(1)).squeeze()\n return freqs","function_tokens":["def","get_doc_freqs","(","cnts",")",":","binary","=","(","cnts",">","0",")",".","astype","(","int",")","freqs","=","np",".","array","(","binary",".","sum","(","1",")",")",".","squeeze","(",")","return","freqs"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_tfidf.py#L186-L190"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/doc_db.py","language":"python","identifier":"DocDB.close","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Close the connection to the database.","docstring_summary":"Close the connection to the database.","docstring_tokens":["Close","the","connection","to","the","database","."],"function":"def close(self):\n \"\"\"Close the connection to the database.\"\"\"\n self.connection.close()","function_tokens":["def","close","(","self",")",":","self",".","connection",".","close","(",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/doc_db.py#L25-L27"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/doc_db.py","language":"python","identifier":"DocDB.get_doc_ids","parameters":"(self)","argument_list":"","return_statement":"return results","docstring":"Fetch all ids of docs stored in the db.","docstring_summary":"Fetch all ids of docs stored in the db.","docstring_tokens":["Fetch","all","ids","of","docs","stored","in","the","db","."],"function":"def get_doc_ids(self):\n \"\"\"Fetch all ids of docs stored in the db.\"\"\"\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results","function_tokens":["def","get_doc_ids","(","self",")",":","cursor","=","self",".","connection",".","cursor","(",")","cursor",".","execute","(","\"SELECT id FROM 
documents\"",")","results","=","[","r","[","0","]","for","r","in","cursor",".","fetchall","(",")","]","cursor",".","close","(",")","return","results"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/doc_db.py#L29-L35"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/doc_db.py","language":"python","identifier":"DocDB.get_doc_text","parameters":"(self, doc_id)","argument_list":"","return_statement":"return result if result is None else result[0]","docstring":"Fetch the raw text of the doc for 'doc_id'.","docstring_summary":"Fetch the raw text of the doc for 'doc_id'.","docstring_tokens":["Fetch","the","raw","text","of","the","doc","for","doc_id","."],"function":"def get_doc_text(self, doc_id):\n \"\"\"Fetch the raw text of the doc for 'doc_id'.\"\"\"\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT text FROM documents WHERE id = ?\",\n (doc_id,)\n )\n result = cursor.fetchone()\n cursor.close()\n return result if result is None else result[0]","function_tokens":["def","get_doc_text","(","self",",","doc_id",")",":","cursor","=","self",".","connection",".","cursor","(",")","cursor",".","execute","(","\"SELECT text FROM documents WHERE id = ?\"",",","(","doc_id",",",")",")","result","=","cursor",".","fetchone","(",")","cursor",".","close","(",")","return","result","if","result","is","None","else","result","[","0","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/doc_db.py#L37-L46"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/doc_db.py","language":"python","identifier":"DocDB.get_hyper_linked","parameters":"(self, doc_id)","argument_list":"","return_statement":"return result if (result is None or len(result[0]) == 0) else [normalize(title) for title in result[0].split(\"\\t\")]","docstring":"Fetch the hyper-linked titles of the doc for 'doc_id'.","docstring_summary":"Fetch the hyper-linked titles of the doc for 'doc_id'.","docstring_tokens":["Fetch","the","hyper","-","linked","titles","of","the","doc","for","doc_id","."],"function":"def get_hyper_linked(self, doc_id):\n \"\"\"Fetch the hyper-linked titles of the doc for 'doc_id'.\"\"\"\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT linked_title FROM documents WHERE id = ?\",\n (doc_id,)\n )\n result = cursor.fetchone()\n cursor.close()\n return result if (result is None or len(result[0]) == 0) else [normalize(title) for title in result[0].split(\"\\t\")]","function_tokens":["def","get_hyper_linked","(","self",",","doc_id",")",":","cursor","=","self",".","connection",".","cursor","(",")","cursor",".","execute","(","\"SELECT linked_title FROM documents WHERE id = ?\"",",","(","doc_id",",",")",")","result","=","cursor",".","fetchone","(",")","cursor",".","close","(",")","return","result","if","(","result","is","None","or","len","(","result","[","0","]",")","==","0",")","else","[","normalize","(","title",")","for","title","in","result","[","0","]",".","split","(","\"\\t\"",")","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/doc_db.py#L48-L57"} 
{"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/doc_db.py","language":"python","identifier":"DocDB.get_original_title","parameters":"(self, doc_id)","argument_list":"","return_statement":"return result if result is None else result[0]","docstring":"Fetch the original title name of the doc.","docstring_summary":"Fetch the original title name of the doc.","docstring_tokens":["Fetch","the","original","title","name","of","the","doc","."],"function":"def get_original_title(self, doc_id):\n \"\"\"Fetch the original title name of the doc.\"\"\"\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT original_title FROM documents WHERE id = ?\",\n (doc_id,)\n )\n result = cursor.fetchone()\n cursor.close()\n return result if result is None else result[0]","function_tokens":["def","get_original_title","(","self",",","doc_id",")",":","cursor","=","self",".","connection",".","cursor","(",")","cursor",".","execute","(","\"SELECT original_title FROM documents WHERE id = ?\"",",","(","doc_id",",",")",")","result","=","cursor",".","fetchone","(",")","cursor",".","close","(",")","return","result","if","result","is","None","else","result","[","0","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/doc_db.py#L59-L68"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/doc_db.py","language":"python","identifier":"DocDB.get_doc_text_hyper_linked_titles_for_articles","parameters":"(self, doc_id)","argument_list":"","return_statement":"","docstring":"fetch all of the paragraphs with their corresponding hyperlink titles.\n e.g., \n >>> paras, links = db.get_doc_text_hyper_linked_titles_for_articles(\"Tokyo Imperial Palace_0\")\n >>> paras[2]\n 'It is built on the site of the old Edo Castle. The total area including the gardens is . During the height of the 1980s Japanese property bubble, the palace grounds were valued by some to be more than the value of all of the real estate in the state of California.'\n >>> links[2]\n ['Edo Castle', 'Japanese asset price bubble', 'Real estate', 'California']","docstring_summary":"fetch all of the paragraphs with their corresponding hyperlink titles.\n e.g., \n >>> paras, links = db.get_doc_text_hyper_linked_titles_for_articles(\"Tokyo Imperial Palace_0\")\n >>> paras[2]\n 'It is built on the site of the old Edo Castle. The total area including the gardens is . 
During the height of the 1980s Japanese property bubble, the palace grounds were valued by some to be more than the value of all of the real estate in the state of California.'\n >>> links[2]\n ['Edo Castle', 'Japanese asset price bubble', 'Real estate', 'California']","docstring_tokens":["fetch","all","of","the","paragraphs","with","their","corresponding","hyperlink","titles",".","e",".","g",".",">>>","paras","links","=","db",".","get_doc_text_hyper_linked_titles_for_articles","(","Tokyo","Imperial","Palace_0",")",">>>","paras","[","2","]","It","is","built","on","the","site","of","the","old","Edo","Castle",".","The","total","area","including","the","gardens","is",".","During","the","height","of","the","1980s","Japanese","property","bubble","the","palace","grounds","were","valued","by","some","to","be","more","than","the","value","of","all","of","the","real","estate","in","the","state","of","California",".",">>>","links","[","2","]","[","Edo","Castle","Japanese","asset","price","bubble","Real","estate","California","]"],"function":"def get_doc_text_hyper_linked_titles_for_articles(self, doc_id):\n \"\"\"\n fetch all of the paragraphs with their corresponding hyperlink titles.\n e.g., \n >>> paras, links = db.get_doc_text_hyper_linked_titles_for_articles(\"Tokyo Imperial Palace_0\")\n >>> paras[2]\n 'It is built on the site of the old Edo Castle. The total area including the gardens is . During the height of the 1980s Japanese property bubble, the palace grounds were valued by some to be more than the value of all of the real estate in the state of California.'\n >>> links[2]\n ['Edo Castle', 'Japanese asset price bubble', 'Real estate', 'California']\n \"\"\"\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT text FROM documents WHERE id = ?\",\n (doc_id,)\n )\n result = cursor.fetchone()\n cursor.close()\n if result is None:\n return [], []\n else:\n hyper_linked_paragraphs = result[0].split(\"\\n\\n\")\n paragraphs, hyper_linked_titles = [], []\n \n for hyper_linked_paragraph in hyper_linked_paragraphs:\n paragraphs.append(remove_tags(hyper_linked_paragraph))\n hyper_linked_titles.append([normalize(title) for title in find_hyper_linked_titles(\n hyper_linked_paragraph)])\n \n return paragraphs, hyper_linked_titles","function_tokens":["def","get_doc_text_hyper_linked_titles_for_articles","(","self",",","doc_id",")",":","cursor","=","self",".","connection",".","cursor","(",")","cursor",".","execute","(","\"SELECT text FROM documents WHERE id = ?\"",",","(","doc_id",",",")",")","result","=","cursor",".","fetchone","(",")","cursor",".","close","(",")","if","result","is","None",":","return","[","]",",","[","]","else",":","hyper_linked_paragraphs","=","result","[","0","]",".","split","(","\"\\n\\n\"",")","paragraphs",",","hyper_linked_titles","=","[","]",",","[","]","for","hyper_linked_paragraph","in","hyper_linked_paragraphs",":","paragraphs",".","append","(","remove_tags","(","hyper_linked_paragraph",")",")","hyper_linked_titles",".","append","(","[","normalize","(","title",")","for","title","in","find_hyper_linked_titles","(","hyper_linked_paragraph",")","]",")","return","paragraphs",",","hyper_linked_titles"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/doc_db.py#L70-L98"} 
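The DocDB records above (get_doc_ids, get_doc_text, get_hyper_linked, get_original_title, get_doc_text_hyper_linked_titles_for_articles) are thin accessors over a single SQLite "documents" table. A rough usage sketch follows; the database path is a hypothetical placeholder, the constructor signature (taking that path) is assumed rather than shown in the records, and the import path assumes the repository layout from the record URLs:

from retriever.doc_db import DocDB

db = DocDB('models/hotpot_wiki.db')  # hypothetical path; constructor signature assumed
try:
    doc_ids = db.get_doc_ids()                 # all ids stored in the documents table
    first = doc_ids[0]
    text = db.get_doc_text(first)              # raw paragraph text, or None if missing
    links = db.get_hyper_linked(first)         # normalized outgoing link titles, or None
    title = db.get_original_title(first)       # original (non-normalized) title
    paras, linked = db.get_doc_text_hyper_linked_titles_for_articles(first)
finally:
    db.close()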
{"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_db.py","language":"python","identifier":"import_module","parameters":"(filename)","argument_list":"","return_statement":"return module","docstring":"Import a module given a full path to the file.","docstring_summary":"Import a module given a full path to the file.","docstring_tokens":["Import","a","module","given","a","full","path","to","the","file","."],"function":"def import_module(filename):\n \"\"\"Import a module given a full path to the file.\"\"\"\n spec = importlib.util.spec_from_file_location('doc_filter', filename)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module","function_tokens":["def","import_module","(","filename",")",":","spec","=","importlib",".","util",".","spec_from_file_location","(","'doc_filter'",",","filename",")","module","=","importlib",".","util",".","module_from_spec","(","spec",")","spec",".","loader",".","exec_module","(","module",")","return","module"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_db.py#L43-L48"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_db.py","language":"python","identifier":"iter_files","parameters":"(path)","argument_list":"","return_statement":"","docstring":"Walk through all files located under a root path.","docstring_summary":"Walk through all files located under a root path.","docstring_tokens":["Walk","through","all","files","located","under","a","root","path","."],"function":"def iter_files(path):\n \"\"\"Walk through all files located under a root path.\"\"\"\n if os.path.isfile(path):\n yield path\n elif os.path.isdir(path):\n for dirpath, _, filenames in os.walk(path):\n for f in filenames:\n yield os.path.join(dirpath, f)\n else:\n raise RuntimeError('Path %s is invalid' % path)","function_tokens":["def","iter_files","(","path",")",":","if","os",".","path",".","isfile","(","path",")",":","yield","path","elif","os",".","path",".","isdir","(","path",")",":","for","dirpath",",","_",",","filenames","in","os",".","walk","(","path",")",":","for","f","in","filenames",":","yield","os",".","path",".","join","(","dirpath",",","f",")","else",":","raise","RuntimeError","(","'Path %s is invalid'","%","path",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_db.py#L56-L65"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_db.py","language":"python","identifier":"get_contents_hotpotqa","parameters":"(filename)","argument_list":"","return_statement":"return documents","docstring":"Parse the contents of a file. Each line is a JSON encoded document.","docstring_summary":"Parse the contents of a file. Each line is a JSON encoded document.","docstring_tokens":["Parse","the","contents","of","a","file",".","Each","line","is","a","JSON","encoded","document","."],"function":"def get_contents_hotpotqa(filename):\n \"\"\"Parse the contents of a file. 
Each line is a JSON encoded document.\"\"\"\n global PREPROCESS_FN\n documents = []\n extracted_items = process_jsonlines_hotpotqa(filename)\n for extracted_item in extracted_items:\n title = extracted_item[\"title\"]\n text = extracted_item[\"plain_text\"]\n original_title = extracted_item[\"original_title\"]\n hyper_linked_titles = extracted_item[\"hyper_linked_titles\"]\n\n documents.append((title, text,\n hyper_linked_titles, original_title))\n return documents","function_tokens":["def","get_contents_hotpotqa","(","filename",")",":","global","PREPROCESS_FN","documents","=","[","]","extracted_items","=","process_jsonlines_hotpotqa","(","filename",")","for","extracted_item","in","extracted_items",":","title","=","extracted_item","[","\"title\"","]","text","=","extracted_item","[","\"plain_text\"","]","original_title","=","extracted_item","[","\"original_title\"","]","hyper_linked_titles","=","extracted_item","[","\"hyper_linked_titles\"","]","documents",".","append","(","(","title",",","text",",","hyper_linked_titles",",","original_title",")",")","return","documents"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_db.py#L68-L81"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_db.py","language":"python","identifier":"get_contents","parameters":"(filename)","argument_list":"","return_statement":"return documents","docstring":"Parse the contents of a file. Each line is a JSON encoded document.","docstring_summary":"Parse the contents of a file. Each line is a JSON encoded document.","docstring_tokens":["Parse","the","contents","of","a","file",".","Each","line","is","a","JSON","encoded","document","."],"function":"def get_contents(filename):\n \"\"\"Parse the contents of a file. 
Each line is a JSON encoded document.\"\"\"\n global PREPROCESS_FN\n documents = []\n extracted_items = process_jsonlines(filename)\n for extracted_item in extracted_items:\n title = extracted_item[\"title\"]\n text = extracted_item[\"plain_text\"]\n original_title = extracted_item[\"original_title\"]\n hyper_linked_titles = extracted_item[\"hyper_linked_titles\"]\n\n documents.append((title, text,\n hyper_linked_titles, original_title))\n return documents","function_tokens":["def","get_contents","(","filename",")",":","global","PREPROCESS_FN","documents","=","[","]","extracted_items","=","process_jsonlines","(","filename",")","for","extracted_item","in","extracted_items",":","title","=","extracted_item","[","\"title\"","]","text","=","extracted_item","[","\"plain_text\"","]","original_title","=","extracted_item","[","\"original_title\"","]","hyper_linked_titles","=","extracted_item","[","\"hyper_linked_titles\"","]","documents",".","append","(","(","title",",","text",",","hyper_linked_titles",",","original_title",")",")","return","documents"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_db.py#L83-L96"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/build_db.py","language":"python","identifier":"store_contents","parameters":"(wiki_dir, save_path, preprocess, num_workers=None, hotpotqa_format=False)","argument_list":"","return_statement":"","docstring":"Preprocess and store a corpus of documents in sqlite.\n\n Args:\n data_path: Root path to directory (or directory of directories) of files\n containing json encoded documents (must have `id` and `text` fields).\n save_path: Path to output sqlite db.\n preprocess: Path to file defining a custom `preprocess` function. Takes\n in and outputs a structured doc.\n num_workers: Number of parallel processes to use when reading docs.","docstring_summary":"Preprocess and store a corpus of documents in sqlite.","docstring_tokens":["Preprocess","and","store","a","corpus","of","documents","in","sqlite","."],"function":"def store_contents(wiki_dir, save_path, preprocess, num_workers=None, hotpotqa_format=False):\n \"\"\"Preprocess and store a corpus of documents in sqlite.\n\n Args:\n data_path: Root path to directory (or directory of directories) of files\n containing json encoded documents (must have `id` and `text` fields).\n save_path: Path to output sqlite db.\n preprocess: Path to file defining a custom `preprocess` function. Takes\n in and outputs a structured doc.\n num_workers: Number of parallel processes to use when reading docs.\n \"\"\"\n filenames = [f for f in glob.glob(\n wiki_dir + \"\/*\/wiki_*\", recursive=True) if \".bz2\" not in f]\n if os.path.isfile(save_path):\n raise RuntimeError('%s already exists! Not overwriting.' 
% save_path)\n\n logger.info('Reading into database...')\n conn = sqlite3.connect(save_path)\n c = conn.cursor()\n c.execute(\n \"CREATE TABLE documents (id PRIMARY KEY, text, linked_title, original_title);\")\n\n workers = ProcessPool(num_workers, initializer=init,\n initargs=(preprocess,))\n count = 0\n # Due to the slight difference of input format between preprocessed HotpotQA wikipedia data and \n # the ones by Wikiextractor, we call different functions for data process.\n if hotpotqa_format is True:\n content_processing_method = get_contents_hotpotqa\n else:\n content_processing_method = get_contents\n \n with tqdm(total=len(filenames)) as pbar:\n for pairs in tqdm(workers.imap_unordered(content_processing_method, filenames)):\n count += len(pairs)\n c.executemany(\n \"INSERT OR REPLACE INTO documents VALUES (?,?,?,?)\", pairs)\n pbar.update()\n logger.info('Read %d docs.' % count)\n logger.info('Committing...')\n conn.commit()\n conn.close()","function_tokens":["def","store_contents","(","wiki_dir",",","save_path",",","preprocess",",","num_workers","=","None",",","hotpotqa_format","=","False",")",":","filenames","=","[","f","for","f","in","glob",".","glob","(","wiki_dir","+","\"\/*\/wiki_*\"",",","recursive","=","True",")","if","\".bz2\"","not","in","f","]","if","os",".","path",".","isfile","(","save_path",")",":","raise","RuntimeError","(","'%s already exists! Not overwriting.'","%","save_path",")","logger",".","info","(","'Reading into database...'",")","conn","=","sqlite3",".","connect","(","save_path",")","c","=","conn",".","cursor","(",")","c",".","execute","(","\"CREATE TABLE documents (id PRIMARY KEY, text, linked_title, original_title);\"",")","workers","=","ProcessPool","(","num_workers",",","initializer","=","init",",","initargs","=","(","preprocess",",",")",")","count","=","0","# Due to the slight difference of input format between preprocessed HotpotQA wikipedia data and ","# the ones by Wikiextractor, we call different functions for data process.","if","hotpotqa_format","is","True",":","content_processing_method","=","get_contents_hotpotqa","else",":","content_processing_method","=","get_contents","with","tqdm","(","total","=","len","(","filenames",")",")","as","pbar",":","for","pairs","in","tqdm","(","workers",".","imap_unordered","(","content_processing_method",",","filenames",")",")",":","count","+=","len","(","pairs",")","c",".","executemany","(","\"INSERT OR REPLACE INTO documents VALUES (?,?,?,?)\"",",","pairs",")","pbar",".","update","(",")","logger",".","info","(","'Read %d docs.'","%","count",")","logger",".","info","(","'Committing...'",")","conn",".","commit","(",")","conn",".","close","(",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/build_db.py#L98-L139"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.__len__","parameters":"(self)","argument_list":"","return_statement":"return len(self.data)","docstring":"The number of tokens.","docstring_summary":"The number of tokens.","docstring_tokens":["The","number","of","tokens","."],"function":"def __len__(self):\n \"\"\"The number of tokens.\"\"\"\n return 
len(self.data)","function_tokens":["def","__len__","(","self",")",":","return","len","(","self",".","data",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L32-L34"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.slice","parameters":"(self, i=None, j=None)","argument_list":"","return_statement":"return new_tokens","docstring":"Return a view of the list of tokens from [i, j).","docstring_summary":"Return a view of the list of tokens from [i, j).","docstring_tokens":["Return","a","view","of","the","list","of","tokens","from","[","i","j",")","."],"function":"def slice(self, i=None, j=None):\n \"\"\"Return a view of the list of tokens from [i, j).\"\"\"\n new_tokens = copy.copy(self)\n new_tokens.data = self.data[i: j]\n return new_tokens","function_tokens":["def","slice","(","self",",","i","=","None",",","j","=","None",")",":","new_tokens","=","copy",".","copy","(","self",")","new_tokens",".","data","=","self",".","data","[","i",":","j","]","return","new_tokens"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L36-L40"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.untokenize","parameters":"(self)","argument_list":"","return_statement":"return ''.join([t[self.TEXT_WS] for t in self.data]).strip()","docstring":"Returns the original text (with whitespace reinserted).","docstring_summary":"Returns the original text (with whitespace reinserted).","docstring_tokens":["Returns","the","original","text","(","with","whitespace","reinserted",")","."],"function":"def untokenize(self):\n \"\"\"Returns the original text (with whitespace reinserted).\"\"\"\n return ''.join([t[self.TEXT_WS] for t in self.data]).strip()","function_tokens":["def","untokenize","(","self",")",":","return","''",".","join","(","[","t","[","self",".","TEXT_WS","]","for","t","in","self",".","data","]",")",".","strip","(",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L42-L44"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.words","parameters":"(self, uncased=False)","argument_list":"","return_statement":"","docstring":"Returns a list of the text of each token\n Args:\n uncased: lower cases text","docstring_summary":"Returns a list of the text of each token\n Args:\n uncased: lower cases text","docstring_tokens":["Returns","a","list","of","the","text","of","each","token","Args",":","uncased",":","lower","cases","text"],"function":"def words(self, uncased=False):\n \"\"\"Returns a list of the text of each token\n Args:\n uncased: lower cases text\n \"\"\"\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in 
self.data]","function_tokens":["def","words","(","self",",","uncased","=","False",")",":","if","uncased",":","return","[","t","[","self",".","TEXT","]",".","lower","(",")","for","t","in","self",".","data","]","else",":","return","[","t","[","self",".","TEXT","]","for","t","in","self",".","data","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L46-L54"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.offsets","parameters":"(self)","argument_list":"","return_statement":"return [t[self.SPAN] for t in self.data]","docstring":"Returns a list of [start, end) character offsets of each token.","docstring_summary":"Returns a list of [start, end) character offsets of each token.","docstring_tokens":["Returns","a","list","of","[","start","end",")","character","offsets","of","each","token","."],"function":"def offsets(self):\n \"\"\"Returns a list of [start, end) character offsets of each token.\"\"\"\n return [t[self.SPAN] for t in self.data]","function_tokens":["def","offsets","(","self",")",":","return","[","t","[","self",".","SPAN","]","for","t","in","self",".","data","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L56-L58"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.pos","parameters":"(self)","argument_list":"","return_statement":"return [t[self.POS] for t in self.data]","docstring":"Returns a list of part-of-speech tags of each token.\n Returns None if this annotation was not included.","docstring_summary":"Returns a list of part-of-speech tags of each token.\n Returns None if this annotation was not included.","docstring_tokens":["Returns","a","list","of","part","-","of","-","speech","tags","of","each","token",".","Returns","None","if","this","annotation","was","not","included","."],"function":"def pos(self):\n \"\"\"Returns a list of part-of-speech tags of each token.\n Returns None if this annotation was not included.\n \"\"\"\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]","function_tokens":["def","pos","(","self",")",":","if","'pos'","not","in","self",".","annotators",":","return","None","return","[","t","[","self",".","POS","]","for","t","in","self",".","data","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L60-L66"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.lemmas","parameters":"(self)","argument_list":"","return_statement":"return [t[self.LEMMA] for t in self.data]","docstring":"Returns a list of the lemmatized text of each token.\n Returns None if this annotation was not included.","docstring_summary":"Returns a list of the lemmatized text of each token.\n Returns None if this annotation was not included.","docstring_tokens":["Returns","a","list","of","the","lemmatized","text","of","each","token",".","Returns","None","if","this","annotation","was","not","included","."],"function":"def lemmas(self):\n \"\"\"Returns a list of 
the lemmatized text of each token.\n Returns None if this annotation was not included.\n \"\"\"\n if 'lemma' not in self.annotators:\n return None\n return [t[self.LEMMA] for t in self.data]","function_tokens":["def","lemmas","(","self",")",":","if","'lemma'","not","in","self",".","annotators",":","return","None","return","[","t","[","self",".","LEMMA","]","for","t","in","self",".","data","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L68-L74"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.entities","parameters":"(self)","argument_list":"","return_statement":"return [t[self.NER] for t in self.data]","docstring":"Returns a list of named-entity-recognition tags of each token.\n Returns None if this annotation was not included.","docstring_summary":"Returns a list of named-entity-recognition tags of each token.\n Returns None if this annotation was not included.","docstring_tokens":["Returns","a","list","of","named","-","entity","-","recognition","tags","of","each","token",".","Returns","None","if","this","annotation","was","not","included","."],"function":"def entities(self):\n \"\"\"Returns a list of named-entity-recognition tags of each token.\n Returns None if this annotation was not included.\n \"\"\"\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]","function_tokens":["def","entities","(","self",")",":","if","'ner'","not","in","self",".","annotators",":","return","None","return","[","t","[","self",".","NER","]","for","t","in","self",".","data","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L76-L82"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.ngrams","parameters":"(self, n=1, uncased=False, filter_fn=None, as_strings=True)","argument_list":"","return_statement":"return ngrams","docstring":"Returns a list of all ngrams from length 1 to n.\n Args:\n n: upper limit of ngram length\n uncased: lower cases text\n filter_fn: user function that takes in an ngram list and returns\n True or False to keep or not keep the ngram\n as_string: return the ngram as a string vs list","docstring_summary":"Returns a list of all ngrams from length 1 to n.\n Args:\n n: upper limit of ngram length\n uncased: lower cases text\n filter_fn: user function that takes in an ngram list and returns\n True or False to keep or not keep the ngram\n as_string: return the ngram as a string vs list","docstring_tokens":["Returns","a","list","of","all","ngrams","from","length","1","to","n",".","Args",":","n",":","upper","limit","of","ngram","length","uncased",":","lower","cases","text","filter_fn",":","user","function","that","takes","in","an","ngram","list","and","returns","True","or","False","to","keep","or","not","keep","the","ngram","as_string",":","return","the","ngram","as","a","string","vs","list"],"function":"def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n \"\"\"Returns a list of all ngrams from length 1 to n.\n Args:\n n: upper limit of ngram length\n uncased: lower cases text\n filter_fn: user function that takes in an ngram list and returns\n True or False to keep or 
not keep the ngram\n as_string: return the ngram as a string vs list\n \"\"\"\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(uncased)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams","function_tokens":["def","ngrams","(","self",",","n","=","1",",","uncased","=","False",",","filter_fn","=","None",",","as_strings","=","True",")",":","def","_skip","(","gram",")",":","if","not","filter_fn",":","return","False","return","filter_fn","(","gram",")","words","=","self",".","words","(","uncased",")","ngrams","=","[","(","s",",","e","+","1",")","for","s","in","range","(","len","(","words",")",")","for","e","in","range","(","s",",","min","(","s","+","n",",","len","(","words",")",")",")","if","not","_skip","(","words","[","s",":","e","+","1","]",")","]","# Concatenate into strings","if","as_strings",":","ngrams","=","[","'{}'",".","format","(","' '",".","join","(","words","[","s",":","e","]",")",")","for","(","s",",","e",")","in","ngrams","]","return","ngrams"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L84-L108"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"Tokens.entity_groups","parameters":"(self)","argument_list":"","return_statement":"return groups","docstring":"Group consecutive entity tokens with the same NER tag.","docstring_summary":"Group consecutive entity tokens with the same NER tag.","docstring_tokens":["Group","consecutive","entity","tokens","with","the","same","NER","tag","."],"function":"def entity_groups(self):\n \"\"\"Group consecutive entity tokens with the same NER tag.\"\"\"\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups","function_tokens":["def","entity_groups","(","self",")",":","entities","=","self",".","entities","(",")","if","not","entities",":","return","None","non_ent","=","self",".","opts",".","get","(","'non_ent'",",","'O'",")","groups","=","[","]","idx","=","0","while","idx","<","len","(","entities",")",":","ner_tag","=","entities","[","idx","]","# Check for entity tag","if","ner_tag","!=","non_ent",":","# Chomp the sequence","start","=","idx","while","(","idx","<","len","(","entities",")","and","entities","[","idx","]","==","ner_tag",")",":","idx","+=","1","groups",".","append","(","(","self",".","slice","(","start",",","idx",")",".","untokenize","(",")",",","ner_tag",")",")","else",":","idx","+=","1","return","groups"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L110-L129"} 
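The Tokens.ngrams record above enumerates every window of length 1..n over the token texts and optionally drops windows via filter_fn. A self-contained sketch of that enumeration over a plain word list (the example words are arbitrary):

def ngrams(words, n=2, filter_fn=None):
    # Keep every span words[s:e+1] of length <= n unless filter_fn rejects it.
    def _skip(gram):
        return bool(filter_fn) and filter_fn(gram)

    spans = [(s, e + 1)
             for s in range(len(words))
             for e in range(s, min(s + n, len(words)))
             if not _skip(words[s:e + 1])]
    return [' '.join(words[s:e]) for (s, e) in spans]

print(ngrams(['tokyo', 'imperial', 'palace'], n=2))
# -> ['tokyo', 'tokyo imperial', 'imperial', 'imperial palace', 'palace']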
{"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tokenizers.py","language":"python","identifier":"SimpleTokenizer.__init__","parameters":"(self, **kwargs)","argument_list":"","return_statement":"","docstring":"Args:\n annotators: None or empty set (only tokenizes).","docstring_summary":"Args:\n annotators: None or empty set (only tokenizes).","docstring_tokens":["Args",":","annotators",":","None","or","empty","set","(","only","tokenizes",")","."],"function":"def __init__(self, **kwargs):\n \"\"\"\n Args:\n annotators: None or empty set (only tokenizes).\n \"\"\"\n self._regexp = regex.compile(\n '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),\n flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE\n )\n if len(kwargs.get('annotators', {})) > 0:\n logger.warning('%s only tokenizes! Skipping annotators: %s' %\n (type(self).__name__, kwargs.get('annotators')))\n self.annotators = set()","function_tokens":["def","__init__","(","self",",","*","*","kwargs",")",":","self",".","_regexp","=","regex",".","compile","(","'(%s)|(%s)'","%","(","self",".","ALPHA_NUM",",","self",".","NON_WS",")",",","flags","=","regex",".","IGNORECASE","+","regex",".","UNICODE","+","regex",".","MULTILINE",")","if","len","(","kwargs",".","get","(","'annotators'",",","{","}",")",")",">","0",":","logger",".","warning","(","'%s only tokenizes! Skipping annotators: %s'","%","(","type","(","self",")",".","__name__",",","kwargs",".","get","(","'annotators'",")",")",")","self",".","annotators","=","set","(",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tokenizers.py#L151-L163"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"normalize","parameters":"(text)","argument_list":"","return_statement":"return text[0].capitalize() + text[1:]","docstring":"Resolve different type of unicode encodings \/ capitarization in HotpotQA data.","docstring_summary":"Resolve different type of unicode encodings \/ capitarization in HotpotQA data.","docstring_tokens":["Resolve","different","type","of","unicode","encodings","\/","capitarization","in","HotpotQA","data","."],"function":"def normalize(text):\n \"\"\"Resolve different type of unicode encodings \/ capitarization in HotpotQA data.\"\"\"\n text = unicodedata.normalize('NFD', text)\n return text[0].capitalize() + text[1:]","function_tokens":["def","normalize","(","text",")",":","text","=","unicodedata",".","normalize","(","'NFD'",",","text",")","return","text","[","0","]",".","capitalize","(",")","+","text","[","1",":","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L17-L20"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"process_jsonlines","parameters":"(filename)","argument_list":"","return_statement":"return extracted_items","docstring":"This is process_jsonlines method for extracted Wikipedia file.\n After extracting items by using Wikiextractor (with `--json` and `--links` options), \n you will get the files named with wiki_xx, where each line contains the information of each article.\n e.g., \n {\"id\": \"316\", \"url\": \"https:\/\/en.wikipedia.org\/wiki?curid=316\", \"title\": 
\"Academy Award for Best Production Design\", \n \"text\": \"Academy Award for Best Production Design\\n\\nThe Academy Award<\/a> for \n Best Production Design recognizes achievement for art direction<\/a> \\n\\n\"}\n This function takes these input and extract items.\n Each article contains one or more than one paragraphs, and each paragraphs are separeated by \\n\\n.","docstring_summary":"This is process_jsonlines method for extracted Wikipedia file.\n After extracting items by using Wikiextractor (with `--json` and `--links` options), \n you will get the files named with wiki_xx, where each line contains the information of each article.\n e.g., \n {\"id\": \"316\", \"url\": \"https:\/\/en.wikipedia.org\/wiki?curid=316\", \"title\": \"Academy Award for Best Production Design\", \n \"text\": \"Academy Award for Best Production Design\\n\\nThe Academy Award<\/a> for \n Best Production Design recognizes achievement for art direction<\/a> \\n\\n\"}\n This function takes these input and extract items.\n Each article contains one or more than one paragraphs, and each paragraphs are separeated by \\n\\n.","docstring_tokens":["This","is","process_jsonlines","method","for","extracted","Wikipedia","file",".","After","extracting","items","by","using","Wikiextractor","(","with","--","json","and","--","links","options",")","you","will","get","the","files","named","with","wiki_xx","where","each","line","contains","the","information","of","each","article",".","e",".","g",".","{","id",":","316","url",":","https",":","\/\/","en",".","wikipedia",".","org","\/","wiki?curid","=","316","title",":","Academy","Award","for","Best","Production","Design","text",":","Academy","Award","for","Best","Production","Design","\\","n","\\","nThe","","Academy","Award<","\/","a",">","for","Best","Production","Design","recognizes","achievement","for","","art","direction<","\/","a",">","\\","n","\\","n","}","This","function","takes","these","input","and","extract","items",".","Each","article","contains","one","or","more","than","one","paragraphs","and","each","paragraphs","are","separeated","by","\\","n","\\","n","."],"function":"def process_jsonlines(filename):\n \"\"\"\n This is process_jsonlines method for extracted Wikipedia file.\n After extracting items by using Wikiextractor (with `--json` and `--links` options), \n you will get the files named with wiki_xx, where each line contains the information of each article.\n e.g., \n {\"id\": \"316\", \"url\": \"https:\/\/en.wikipedia.org\/wiki?curid=316\", \"title\": \"Academy Award for Best Production Design\", \n \"text\": \"Academy Award for Best Production Design\\n\\nThe Academy Award<\/a> for \n Best Production Design recognizes achievement for art direction<\/a> \\n\\n\"}\n This function takes these input and extract items.\n Each article contains one or more than one paragraphs, and each paragraphs are separeated by \\n\\n.\n \"\"\"\n # item should be nested list\n extracted_items = []\n with jsonlines.open(filename) as reader:\n for obj in reader:\n wiki_id = obj[\"id\"]\n title = obj[\"title\"]\n title_id = make_wiki_id(title, 0)\n text_with_links = obj[\"text\"]\n\n hyper_linked_titles_text = \"\"\n # When we consider the whole article as a document unit (e.g., SQuAD Open, Natural Questions Open)\n # we'll keep the links with the original articles, and dynamically process and extract the links\n # when we process with our selector.\n extracted_items.append({\"wiki_id\": wiki_id, \"title\": title_id,\n \"plain_text\": text_with_links,\n \"hyper_linked_titles\": 
hyper_linked_titles_text,\n \"original_title\": title})\n\n return extracted_items","function_tokens":["def","process_jsonlines","(","filename",")",":","# item should be nested list","extracted_items","=","[","]","with","jsonlines",".","open","(","filename",")","as","reader",":","for","obj","in","reader",":","wiki_id","=","obj","[","\"id\"","]","title","=","obj","[","\"title\"","]","title_id","=","make_wiki_id","(","title",",","0",")","text_with_links","=","obj","[","\"text\"","]","hyper_linked_titles_text","=","\"\"","# When we consider the whole article as a document unit (e.g., SQuAD Open, Natural Questions Open)","# we'll keep the links with the original articles, and dynamically process and extract the links","# when we process with our selector.","extracted_items",".","append","(","{","\"wiki_id\"",":","wiki_id",",","\"title\"",":","title_id",",","\"plain_text\"",":","text_with_links",",","\"hyper_linked_titles\"",":","hyper_linked_titles_text",",","\"original_title\"",":","title","}",")","return","extracted_items"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L42-L72"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"process_jsonlines_hotpotqa","parameters":"(filename)","argument_list":"","return_statement":"return extracted_items","docstring":"This is process_jsonlines method for intro-only processed_wikipedia file.\n The item example:\n {\"id\": \"45668011\", \"url\": \"https:\/\/en.wikipedia.org\/wiki?curid=45668011\", \"title\": \"Flouch Roundabout\",\n \"text\": [\"Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616.\"],\n \"charoffset\": [[[0, 6],...]]\n \"text_with_links\" : [\"Flouch Roundabout is a roundabout near Penistone<\/a>,\n South Yorkshire<\/a>, England, where the A628<\/a>\n meets the A616<\/a>.\"],\n \"charoffset_with_links\": [[[0, 6], ... [213, 214]]]}","docstring_summary":"This is process_jsonlines method for intro-only processed_wikipedia file.\n The item example:\n {\"id\": \"45668011\", \"url\": \"https:\/\/en.wikipedia.org\/wiki?curid=45668011\", \"title\": \"Flouch Roundabout\",\n \"text\": [\"Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616.\"],\n \"charoffset\": [[[0, 6],...]]\n \"text_with_links\" : [\"Flouch Roundabout is a roundabout near Penistone<\/a>,\n South Yorkshire<\/a>, England, where the A628<\/a>\n meets the A616<\/a>.\"],\n \"charoffset_with_links\": [[[0, 6], ... 
[213, 214]]]}","docstring_tokens":["This","is","process_jsonlines","method","for","intro","-","only","processed_wikipedia","file",".","The","item","example",":","{","id",":","45668011","url",":","https",":","\/\/","en",".","wikipedia",".","org","\/","wiki?curid","=","45668011","title",":","Flouch","Roundabout","text",":","[","Flouch","Roundabout","is","a","roundabout","near","Penistone","South","Yorkshire","England","where","the","A628","meets","the","A616",".","]","charoffset",":","[[[","0","6","]","...","]]","text_with_links",":","[","Flouch","Roundabout","is","a","roundabout","near","","Penistone<","\/","a",">","","South","Yorkshire<","\/","a",">","England","where","the","","A628<","\/","a",">","meets","the","","A616<","\/","a",">",".","]","charoffset_with_links",":","[[[","0","6","]","...","[","213","214","]]]","}"],"function":"def process_jsonlines_hotpotqa(filename):\n \"\"\"\n This is process_jsonlines method for intro-only processed_wikipedia file.\n The item example:\n {\"id\": \"45668011\", \"url\": \"https:\/\/en.wikipedia.org\/wiki?curid=45668011\", \"title\": \"Flouch Roundabout\",\n \"text\": [\"Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616.\"],\n \"charoffset\": [[[0, 6],...]]\n \"text_with_links\" : [\"Flouch Roundabout is a roundabout near Penistone<\/a>,\n South Yorkshire<\/a>, England, where the A628<\/a>\n meets the A616<\/a>.\"],\n \"charoffset_with_links\": [[[0, 6], ... [213, 214]]]}\n \"\"\"\n # item should be nested list\n extracted_items = []\n with jsonlines.open(filename) as reader:\n for obj in reader:\n wiki_id = obj[\"id\"]\n title = obj[\"title\"]\n title_id = make_wiki_id(title, 0)\n plain_text = \"\\t\".join(obj[\"text\"])\n text_with_links = \"\\t\".join(obj[\"text_with_links\"])\n\n hyper_linked_titles = []\n hyper_linked_titles = find_hyper_linked_titles(text_with_links)\n if len(hyper_linked_titles) > 0:\n hyper_linked_titles_text = \"\\t\".join(hyper_linked_titles)\n else:\n hyper_linked_titles_text = \"\"\n extracted_items.append({\"wiki_id\": wiki_id, \"title\": title_id,\n \"plain_text\": plain_text,\n \"hyper_linked_titles\": hyper_linked_titles_text,\n \"original_title\": title})\n\n return extracted_items","function_tokens":["def","process_jsonlines_hotpotqa","(","filename",")",":","# item should be nested list","extracted_items","=","[","]","with","jsonlines",".","open","(","filename",")","as","reader",":","for","obj","in","reader",":","wiki_id","=","obj","[","\"id\"","]","title","=","obj","[","\"title\"","]","title_id","=","make_wiki_id","(","title",",","0",")","plain_text","=","\"\\t\"",".","join","(","obj","[","\"text\"","]",")","text_with_links","=","\"\\t\"",".","join","(","obj","[","\"text_with_links\"","]",")","hyper_linked_titles","=","[","]","hyper_linked_titles","=","find_hyper_linked_titles","(","text_with_links",")","if","len","(","hyper_linked_titles",")",">","0",":","hyper_linked_titles_text","=","\"\\t\"",".","join","(","hyper_linked_titles",")","else",":","hyper_linked_titles_text","=","\"\"","extracted_items",".","append","(","{","\"wiki_id\"",":","wiki_id",",","\"title\"",":","title_id",",","\"plain_text\"",":","plain_text",",","\"hyper_linked_titles\"",":","hyper_linked_titles_text",",","\"original_title\"",":","title","}",")","return","extracted_items"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L74-L107"} 
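The normalize record above resolves the mixed unicode encodings and capitalization of HotpotQA titles by NFD-normalizing the string and upper-casing only its first character. A tiny illustration of that behavior (the input strings are made up):

import unicodedata

def normalize(text):
    # NFD-normalize so canonically equivalent encodings compare equal,
    # then capitalize only the first character.
    text = unicodedata.normalize('NFD', text)
    return text[0].capitalize() + text[1:]

print(normalize('flouch Roundabout'))                       # 'Flouch Roundabout'
print(normalize('Tokyo Imperial Palace_0'))                 # unchanged: already capitalized
print(normalize('\u00e9cole') == normalize('e\u0301cole'))  # True: NFC vs NFD input map to the same key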
{"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"hash","parameters":"(token, num_buckets)","argument_list":"","return_statement":"return murmurhash3_32(token, positive=True) % num_buckets","docstring":"Unsigned 32 bit murmurhash for feature hashing.","docstring_summary":"Unsigned 32 bit murmurhash for feature hashing.","docstring_tokens":["Unsigned","32","bit","murmurhash","for","feature","hashing","."],"function":"def hash(token, num_buckets):\n \"\"\"Unsigned 32 bit murmurhash for feature hashing.\"\"\"\n return murmurhash3_32(token, positive=True) % num_buckets","function_tokens":["def","hash","(","token",",","num_buckets",")",":","return","murmurhash3_32","(","token",",","positive","=","True",")","%","num_buckets"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L137-L139"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"filter_word","parameters":"(text)","argument_list":"","return_statement":"return False","docstring":"Take out english stopwords, punctuation, and compound endings.","docstring_summary":"Take out english stopwords, punctuation, and compound endings.","docstring_tokens":["Take","out","english","stopwords","punctuation","and","compound","endings","."],"function":"def filter_word(text):\n \"\"\"Take out english stopwords, punctuation, and compound endings.\"\"\"\n text = normalize(text)\n if regex.match(r'^\\p{P}+$', text):\n return True\n if text.lower() in STOPWORDS:\n return True\n return False","function_tokens":["def","filter_word","(","text",")",":","text","=","normalize","(","text",")","if","regex",".","match","(","r'^\\p{P}+$'",",","text",")",":","return","True","if","text",".","lower","(",")","in","STOPWORDS",":","return","True","return","False"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L167-L174"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"filter_ngram","parameters":"(gram, mode='any')","argument_list":"","return_statement":"","docstring":"Decide whether to keep or discard an n-gram.\n Args:\n gram: list of tokens (length N)\n mode: Option to throw out ngram if\n 'any': any single token passes filter_word\n 'all': all tokens pass filter_word\n 'ends': book-ended by filterable tokens","docstring_summary":"Decide whether to keep or discard an n-gram.\n Args:\n gram: list of tokens (length N)\n mode: Option to throw out ngram if\n 'any': any single token passes filter_word\n 'all': all tokens pass filter_word\n 'ends': book-ended by filterable tokens","docstring_tokens":["Decide","whether","to","keep","or","discard","an","n","-","gram",".","Args",":","gram",":","list","of","tokens","(","length","N",")","mode",":","Option","to","throw","out","ngram","if","any",":","any","single","token","passes","filter_word","all",":","all","tokens","pass","filter_word","ends",":","book","-","ended","by","filterable","tokens"],"function":"def filter_ngram(gram, mode='any'):\n \"\"\"Decide whether to keep or discard an n-gram.\n Args:\n gram: list of tokens (length N)\n mode: Option to throw out ngram if\n 'any': any single 
token passes filter_word\n 'all': all tokens pass filter_word\n 'ends': book-ended by filterable tokens\n \"\"\"\n filtered = [filter_word(w) for w in gram]\n if mode == 'any':\n return any(filtered)\n elif mode == 'all':\n return all(filtered)\n elif mode == 'ends':\n return filtered[0] or filtered[-1]\n else:\n raise ValueError('Invalid mode: %s' % mode)","function_tokens":["def","filter_ngram","(","gram",",","mode","=","'any'",")",":","filtered","=","[","filter_word","(","w",")","for","w","in","gram","]","if","mode","==","'any'",":","return","any","(","filtered",")","elif","mode","==","'all'",":","return","all","(","filtered",")","elif","mode","==","'ends'",":","return","filtered","[","0","]","or","filtered","[","-","1","]","else",":","raise","ValueError","(","'Invalid mode: %s'","%","mode",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L177-L194"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"get_field","parameters":"(d, field_list)","argument_list":"","return_statement":"","docstring":"get the subfield associated to a list of elastic fields\n E.g. ['file', 'filename'] to d['file']['filename']","docstring_summary":"get the subfield associated to a list of elastic fields\n E.g. ['file', 'filename'] to d['file']['filename']","docstring_tokens":["get","the","subfield","associated","to","a","list","of","elastic","fields","E",".","g",".","[","file","filename","]","to","d","[","file","]","[","filename","]"],"function":"def get_field(d, field_list):\n \"\"\"get the subfield associated to a list of elastic fields\n E.g. ['file', 'filename'] to d['file']['filename']\n \"\"\"\n if isinstance(field_list, str):\n return d[field_list]\n else:\n idx = d.copy()\n for field in field_list:\n idx = idx[field]\n return idx","function_tokens":["def","get_field","(","d",",","field_list",")",":","if","isinstance","(","field_list",",","str",")",":","return","d","[","field_list","]","else",":","idx","=","d",".","copy","(",")","for","field","in","field_list",":","idx","=","idx","[","field","]","return","idx"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L197-L207"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/utils.py","language":"python","identifier":"load_para_and_linked_titles_dict_from_tfidf_id","parameters":"(tfidf_id, db)","argument_list":"","return_statement":"return paras_dict, linked_titles_dict","docstring":"load paragraphs and hyperlinked titles from DB. \n This method is mainly for Natural Questions Open benchmark.","docstring_summary":"load paragraphs and hyperlinked titles from DB. \n This method is mainly for Natural Questions Open benchmark.","docstring_tokens":["load","paragraphs","and","hyperlinked","titles","from","DB",".","This","method","is","mainly","for","Natural","Questions","Open","benchmark","."],"function":"def load_para_and_linked_titles_dict_from_tfidf_id(tfidf_id, db):\n \"\"\"\n load paragraphs and hyperlinked titles from DB. 
\n This method is mainly for Natural Questions Open benchmark.\n \"\"\"\n # will be fixed in the later version; current tfidf weights use indexed titles as keys.\n if \"_0\" not in tfidf_id:\n tfidf_id = \"{0}_0\".format(tfidf_id)\n paras, linked_titles = db.get_doc_text_hyper_linked_titles_for_articles(\n tfidf_id)\n if len(paras) == 0:\n logger.warning(\"{0} is missing\".format(tfidf_id))\n return [], []\n\n paras_dict = {}\n linked_titles_dict = {}\n article_name = tfidf_id.split(\"_0\")[0]\n # store the para_dict and linked_titles_dict; skip the first para (title)\n for para_idx, (para, linked_title_list) in enumerate(zip(paras[1:], linked_titles[1:])):\n paras_dict[\"{0}_{1}\".format(article_name, para_idx)] = para\n linked_titles_dict[\"{0}_{1}\".format(\n article_name, para_idx)] = linked_title_list\n\n return paras_dict, linked_titles_dict","function_tokens":["def","load_para_and_linked_titles_dict_from_tfidf_id","(","tfidf_id",",","db",")",":","# will be fixed in the later version; current tfidf weights use indexed titles as keys.","if","\"_0\"","not","in","tfidf_id",":","tfidf_id","=","\"{0}_0\"",".","format","(","tfidf_id",")","paras",",","linked_titles","=","db",".","get_doc_text_hyper_linked_titles_for_articles","(","tfidf_id",")","if","len","(","paras",")","==","0",":","logger",".","warning","(","\"{0} is missing\"",".","format","(","tfidf_id",")",")","return","[","]",",","[","]","paras_dict","=","{","}","linked_titles_dict","=","{","}","article_name","=","tfidf_id",".","split","(","\"_0\"",")","[","0","]","# store the para_dict and linked_titles_dict; skip the first para (title)","for","para_idx",",","(","para",",","linked_title_list",")","in","enumerate","(","zip","(","paras","[","1",":","]",",","linked_titles","[","1",":","]",")",")",":","paras_dict","[","\"{0}_{1}\"",".","format","(","article_name",",","para_idx",")","]","=","para","linked_titles_dict","[","\"{0}_{1}\"",".","format","(","article_name",",","para_idx",")","]","=","linked_title_list","return","paras_dict",",","linked_titles_dict"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/utils.py#L227-L250"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.__init__","parameters":"(self, tfidf_path=None, strict=True)","argument_list":"","return_statement":"","docstring":"Args:\n tfidf_path: path to saved model file\n strict: fail on empty queries or continue (and return empty result)","docstring_summary":"Args:\n tfidf_path: path to saved model file\n strict: fail on empty queries or continue (and return empty result)","docstring_tokens":["Args",":","tfidf_path",":","path","to","saved","model","file","strict",":","fail","on","empty","queries","or","continue","(","and","return","empty","result",")"],"function":"def __init__(self, tfidf_path=None, strict=True):\n \"\"\"\n Args:\n tfidf_path: path to saved model file\n strict: fail on empty queries or continue (and return empty result)\n \"\"\"\n # Load from disk\n tfidf_path = tfidf_path\n logger.info('Loading %s' % tfidf_path)\n matrix, metadata = load_sparse_csr(tfidf_path)\n self.doc_mat = matrix\n self.ngrams = metadata['ngram']\n self.hash_size = metadata['hash_size']\n self.tokenizer = SimpleTokenizer()\n self.doc_freqs = metadata['doc_freqs'].squeeze()\n self.doc_dict = metadata['doc_dict']\n self.num_docs = len(self.doc_dict[0])\n 
self.strict = strict","function_tokens":["def","__init__","(","self",",","tfidf_path","=","None",",","strict","=","True",")",":","# Load from disk","tfidf_path","=","tfidf_path","logger",".","info","(","'Loading %s'","%","tfidf_path",")","matrix",",","metadata","=","load_sparse_csr","(","tfidf_path",")","self",".","doc_mat","=","matrix","self",".","ngrams","=","metadata","[","'ngram'","]","self",".","hash_size","=","metadata","[","'hash_size'","]","self",".","tokenizer","=","SimpleTokenizer","(",")","self",".","doc_freqs","=","metadata","[","'doc_freqs'","]",".","squeeze","(",")","self",".","doc_dict","=","metadata","[","'doc_dict'","]","self",".","num_docs","=","len","(","self",".","doc_dict","[","0","]",")","self",".","strict","=","strict"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L31-L48"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.get_doc_index","parameters":"(self, doc_id)","argument_list":"","return_statement":"return self.doc_dict[0][doc_id]","docstring":"Convert doc_id --> doc_index","docstring_summary":"Convert doc_id --> doc_index","docstring_tokens":["Convert","doc_id","--",">","doc_index"],"function":"def get_doc_index(self, doc_id):\n \"\"\"Convert doc_id --> doc_index\"\"\"\n return self.doc_dict[0][doc_id]","function_tokens":["def","get_doc_index","(","self",",","doc_id",")",":","return","self",".","doc_dict","[","0","]","[","doc_id","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L50-L52"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.get_doc_id","parameters":"(self, doc_index)","argument_list":"","return_statement":"return self.doc_dict[1][doc_index]","docstring":"Convert doc_index --> doc_id","docstring_summary":"Convert doc_index --> doc_id","docstring_tokens":["Convert","doc_index","--",">","doc_id"],"function":"def get_doc_id(self, doc_index):\n \"\"\"Convert doc_index --> doc_id\"\"\"\n return self.doc_dict[1][doc_index]","function_tokens":["def","get_doc_id","(","self",",","doc_index",")",":","return","self",".","doc_dict","[","1","]","[","doc_index","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L54-L56"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.closest_docs","parameters":"(self, query, k=1)","argument_list":"","return_statement":"return doc_ids, doc_scores","docstring":"Closest docs by dot product between query and documents\n in tfidf weighted word vector space.","docstring_summary":"Closest docs by dot product between query and documents\n in tfidf weighted word vector space.","docstring_tokens":["Closest","docs","by","dot","product","between","query","and","documents","in","tfidf","weighted","word","vector","space","."],"function":"def closest_docs(self, query, k=1):\n \"\"\"Closest docs by dot product between query and documents\n in tfidf weighted word vector space.\n 
\"\"\"\n spvec = self.text2spvec(query)\n res = spvec * self.doc_mat\n\n if len(res.data) <= k:\n o_sort = np.argsort(-res.data)\n else:\n o = np.argpartition(-res.data, k)[0:k]\n o_sort = o[np.argsort(-res.data[o])]\n\n doc_scores = res.data[o_sort]\n doc_ids = [self.get_doc_id(i) for i in res.indices[o_sort]]\n return doc_ids, doc_scores","function_tokens":["def","closest_docs","(","self",",","query",",","k","=","1",")",":","spvec","=","self",".","text2spvec","(","query",")","res","=","spvec","*","self",".","doc_mat","if","len","(","res",".","data",")","<=","k",":","o_sort","=","np",".","argsort","(","-","res",".","data",")","else",":","o","=","np",".","argpartition","(","-","res",".","data",",","k",")","[","0",":","k","]","o_sort","=","o","[","np",".","argsort","(","-","res",".","data","[","o","]",")","]","doc_scores","=","res",".","data","[","o_sort","]","doc_ids","=","[","self",".","get_doc_id","(","i",")","for","i","in","res",".","indices","[","o_sort","]","]","return","doc_ids",",","doc_scores"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L58-L73"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.batch_closest_docs","parameters":"(self, queries, k=1, num_workers=None)","argument_list":"","return_statement":"return results","docstring":"Process a batch of closest_docs requests multithreaded.\n Note: we can use plain threads here as scipy is outside of the GIL.","docstring_summary":"Process a batch of closest_docs requests multithreaded.\n Note: we can use plain threads here as scipy is outside of the GIL.","docstring_tokens":["Process","a","batch","of","closest_docs","requests","multithreaded",".","Note",":","we","can","use","plain","threads","here","as","scipy","is","outside","of","the","GIL","."],"function":"def batch_closest_docs(self, queries, k=1, num_workers=None):\n \"\"\"Process a batch of closest_docs requests multithreaded.\n Note: we can use plain threads here as scipy is outside of the GIL.\n \"\"\"\n with ThreadPool(num_workers) as threads:\n closest_docs = partial(self.closest_docs, k=k)\n results = threads.map(closest_docs, queries)\n return results","function_tokens":["def","batch_closest_docs","(","self",",","queries",",","k","=","1",",","num_workers","=","None",")",":","with","ThreadPool","(","num_workers",")","as","threads",":","closest_docs","=","partial","(","self",".","closest_docs",",","k","=","k",")","results","=","threads",".","map","(","closest_docs",",","queries",")","return","results"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L75-L82"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.parse","parameters":"(self, query)","argument_list":"","return_statement":"return tokens.ngrams(n=self.ngrams, uncased=True,\n filter_fn=filter_ngram)","docstring":"Parse the query into tokens (either ngrams or tokens).","docstring_summary":"Parse the query into tokens (either ngrams or tokens).","docstring_tokens":["Parse","the","query","into","tokens","(","either","ngrams","or","tokens",")","."],"function":"def parse(self, query):\n \"\"\"Parse the query into 
tokens (either ngrams or tokens).\"\"\"\n tokens = self.tokenizer.tokenize(query)\n return tokens.ngrams(n=self.ngrams, uncased=True,\n filter_fn=filter_ngram)","function_tokens":["def","parse","(","self",",","query",")",":","tokens","=","self",".","tokenizer",".","tokenize","(","query",")","return","tokens",".","ngrams","(","n","=","self",".","ngrams",",","uncased","=","True",",","filter_fn","=","filter_ngram",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L84-L88"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"retriever\/tfidf_doc_ranker.py","language":"python","identifier":"TfidfDocRanker.text2spvec","parameters":"(self, query)","argument_list":"","return_statement":"return spvec","docstring":"Create a sparse tfidf-weighted word vector from query.\n\n tfidf = log(tf + 1) * log((N - Nt + 0.5) \/ (Nt + 0.5))","docstring_summary":"Create a sparse tfidf-weighted word vector from query.","docstring_tokens":["Create","a","sparse","tfidf","-","weighted","word","vector","from","query","."],"function":"def text2spvec(self, query):\n \"\"\"Create a sparse tfidf-weighted word vector from query.\n\n tfidf = log(tf + 1) * log((N - Nt + 0.5) \/ (Nt + 0.5))\n \"\"\"\n # Get hashed ngrams\n # TODO: do we need to have normalize?\n words = self.parse(normalize(query))\n wids = [hash(w, self.hash_size) for w in words]\n\n if len(wids) == 0:\n if self.strict:\n raise RuntimeError('No valid word in: %s' % query)\n else:\n logger.warning('No valid word in: %s' % query)\n return sp.csr_matrix((1, self.hash_size))\n\n # Count TF\n wids_unique, wids_counts = np.unique(wids, return_counts=True)\n tfs = np.log1p(wids_counts)\n\n # Count IDF\n Ns = self.doc_freqs[wids_unique]\n idfs = np.log((self.num_docs - Ns + 0.5) \/ (Ns + 0.5))\n idfs[idfs < 0] = 0\n\n # TF-IDF\n data = np.multiply(tfs, idfs)\n\n # One row, sparse csr matrix\n indptr = np.array([0, len(wids_unique)])\n spvec = sp.csr_matrix(\n (data, wids_unique, indptr), shape=(1, self.hash_size)\n )\n\n return spvec","function_tokens":["def","text2spvec","(","self",",","query",")",":","# Get hashed ngrams","# TODO: do we need to have normalize?","words","=","self",".","parse","(","normalize","(","query",")",")","wids","=","[","hash","(","w",",","self",".","hash_size",")","for","w","in","words","]","if","len","(","wids",")","==","0",":","if","self",".","strict",":","raise","RuntimeError","(","'No valid word in: %s'","%","query",")","else",":","logger",".","warning","(","'No valid word in: %s'","%","query",")","return","sp",".","csr_matrix","(","(","1",",","self",".","hash_size",")",")","# Count TF","wids_unique",",","wids_counts","=","np",".","unique","(","wids",",","return_counts","=","True",")","tfs","=","np",".","log1p","(","wids_counts",")","# Count IDF","Ns","=","self",".","doc_freqs","[","wids_unique","]","idfs","=","np",".","log","(","(","self",".","num_docs","-","Ns","+","0.5",")","\/","(","Ns","+","0.5",")",")","idfs","[","idfs","<","0","]","=","0","# TF-IDF","data","=","np",".","multiply","(","tfs",",","idfs",")","# One row, sparse csr 
matrix","indptr","=","np",".","array","(","[","0",",","len","(","wids_unique",")","]",")","spvec","=","sp",".","csr_matrix","(","(","data",",","wids_unique",",","indptr",")",",","shape","=","(","1",",","self",".","hash_size",")",")","return","spvec"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/retriever\/tfidf_doc_ranker.py#L90-L125"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training, version_2_with_negative, max_answer_len=100000, skip_negatives=False)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training, version_2_with_negative, max_answer_len=100000, skip_negatives=False):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with open(input_file, \"r\", encoding='utf-8') as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = str(qa[\"id\"])\n # this is temporary added to see whether reducing the negatives\n # improves the performance.\n if skip_negatives is True and \"_NEGATIVE_\" in qas_id:\n continue\n if \"FAKE2\" in qas_id:\n continue\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n if version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if is_impossible is False:\n switch = 0\n else:\n switch = 1\n # if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n # raise ValueError(\n # \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answers = qa['answers']\n if type(answers) == list:\n answer = qa[\"answers\"][0]\n else:\n answer = answers\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset +\n answer_length - 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n logger.warning(\"Could not find answer: '%s' vs. 
'%s'\",\n actual_text, cleaned_answer_text)\n continue\n\n if len(orig_answer_text.split()) > max_answer_len:\n logger.info(\n \"Omitting a long answer: '%s'\", orig_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n if len(qa[\"answers\"]) > 0:\n answer = qa[\"answers\"][0]\n # Make sure that answer text will be preserved for\n # yes\/no.\n if answer[\"text\"] in [\"yes\", \"no\"]:\n orig_answer_text = answer[\"text\"]\n if not is_training:\n switch = None\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n switch=switch,\n is_impossible=is_impossible)\n examples.append(example)\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",",","version_2_with_negative",",","max_answer_len","=","100000",",","skip_negatives","=","False",")",":","with","open","(","input_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F",":","return","True","return","False","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","doc_tokens","=","[","]","char_to_word_offset","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","char_to_word_offset",".","append","(","len","(","doc_tokens",")","-","1",")","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","str","(","qa","[","\"id\"","]",")","# this is temporary added to see whether reducing the negatives","# improves the performance.","if","skip_negatives","is","True","and","\"_NEGATIVE_\"","in","qas_id",":","continue","if","\"FAKE2\"","in","qas_id",":","continue","question_text","=","qa","[","\"question\"","]","start_position","=","None","end_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","version_2_with_negative",":","is_impossible","=","qa","[","\"is_impossible\"","]","if","is_impossible","is","False",":","switch","=","0","else",":","switch","=","1","# if (len(qa[\"answers\"]) != 1) and (not is_impossible):","# raise ValueError(","# \"For training, each question should have exactly 1 answer.\")","if","not","is_impossible",":","answers","=","qa","[","'answers'","]","if","type","(","answers",")","==","list",":","answer","=","qa","[","\"answers\"","]","[","0","]","else",":","answer","=","answers","orig_answer_text","=","answer","[","\"text\"","]","answer_offset","=","answer","[","\"answer_start\"","]","answer_length","=","len","(","orig_answer_text",")","start_position","=","char_to_word_offset","[","answer_offset","]","end_position","=","char_to_word_offset","[","answer_offset","+","answer_length","-","1","]","# Only add answers where the text can be exactly recovered from the","# document. 
If this CAN'T happen it's likely due to weird Unicode","# stuff so we will just skip the example.","#","# Note that this means for training mode, every example is NOT","# guaranteed to be preserved.","actual_text","=","\" \"",".","join","(","doc_tokens","[","start_position",":","(","end_position","+","1",")","]",")","cleaned_answer_text","=","\" \"",".","join","(","whitespace_tokenize","(","orig_answer_text",")",")","if","actual_text",".","find","(","cleaned_answer_text",")","==","-","1",":","logger",".","warning","(","\"Could not find answer: '%s' vs. '%s'\"",",","actual_text",",","cleaned_answer_text",")","continue","if","len","(","orig_answer_text",".","split","(",")",")",">","max_answer_len",":","logger",".","info","(","\"Omitting a long answer: '%s'\"",",","orig_answer_text",")","continue","else",":","start_position","=","-","1","end_position","=","-","1","orig_answer_text","=","\"\"","if","len","(","qa","[","\"answers\"","]",")",">","0",":","answer","=","qa","[","\"answers\"","]","[","0","]","# Make sure that answer text will be preserved for","# yes\/no.","if","answer","[","\"text\"","]","in","[","\"yes\"",",","\"no\"","]",":","orig_answer_text","=","answer","[","\"text\"","]","if","not","is_training",":","switch","=","None","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","doc_tokens","=","doc_tokens",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","end_position","=","end_position",",","switch","=","switch",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L101-L206"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n cls_token_at_end=False,\n cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n sequence_a_segment_id=0, sequence_b_segment_id=1,\n cls_token_segment_id=0, pad_token_segment_id=0,\n mask_padding_with_zero=True,\n quiet=False)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n cls_token_at_end=False,\n cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n sequence_a_segment_id=0, sequence_b_segment_id=1,\n cls_token_segment_id=0, pad_token_segment_id=0,\n mask_padding_with_zero=True,\n quiet=False):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n features = []\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n 
all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n\n p_mask = []\n\n # CLS token at the beginning\n if not cls_token_at_end:\n tokens.append(cls_token)\n segment_ids.append(cls_token_segment_id)\n p_mask.append(0)\n cls_index = 0\n\n # Query\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(sequence_a_segment_id)\n p_mask.append(1)\n\n # SEP token\n tokens.append(sep_token)\n segment_ids.append(sequence_a_segment_id)\n p_mask.append(1)\n\n # Paragraph\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(\n tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(sequence_b_segment_id)\n p_mask.append(0)\n paragraph_len = doc_span.length\n\n # SEP token\n tokens.append(sep_token)\n segment_ids.append(sequence_b_segment_id)\n p_mask.append(1)\n\n # CLS token at the end\n if cls_token_at_end:\n tokens.append(cls_token)\n segment_ids.append(cls_token_segment_id)\n p_mask.append(0)\n cls_index = len(tokens) - 1 # Index of classification token\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(pad_token)\n input_mask.append(0 if mask_padding_with_zero else 1)\n segment_ids.append(pad_token_segment_id)\n p_mask.append(1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n span_is_impossible = example.is_impossible\n start_position = None\n end_position = None\n switch = None\n if is_training and not span_is_impossible:\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n span_is_impossible = True\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and span_is_impossible:\n start_position = cls_index\n end_position = cls_index\n switch = 1\n elif is_training and not span_is_impossible:\n switch = 0\n\n # The questions whose ``is_impossible'' are originally True should\n # be 1.\n if example.is_impossible is True:\n switch = 1\n\n if example_index < 20 and not quiet:\n logger.info(\"*** Example ***\")\n logger.info(\"unique_id: %s\" % (unique_id))\n logger.info(\"example_index: %s\" % (example_index))\n logger.info(\"doc_span_index: %s\" % (doc_span_index))\n logger.info(\"tokens: %s\" % \" \".join(tokens))\n logger.info(\"token_to_orig_map: %s\" % \" \".join([\n \"%d:%d\" % (x, y) for (x, y) in token_to_orig_map.items()]))\n logger.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in token_is_max_context.items()\n ]))\n logger.info(\"input_ids: %s\" %\n \" \".join([str(x) for x in input_ids]))\n logger.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and span_is_impossible:\n logger.info(\"impossible example\")\n if is_training and not span_is_impossible:\n answer_text = \" \".join(\n tokens[start_position:(end_position + 1)])\n logger.info(\"start_position: %d\" % (start_position))\n logger.info(\"end_position: %d\" % (end_position))\n logger.info(\n \"answer: %s\" % (answer_text))\n\n features.append(\n InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n cls_index=cls_index,\n p_mask=p_mask,\n paragraph_len=paragraph_len,\n start_position=start_position,\n end_position=end_position,\n switch=switch,\n is_impossible=span_is_impossible))\n unique_id += 1\n\n return 
features","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","cls_token_at_end","=","False",",","cls_token","=","'[CLS]'",",","sep_token","=","'[SEP]'",",","pad_token","=","0",",","sequence_a_segment_id","=","0",",","sequence_b_segment_id","=","1",",","cls_token_segment_id","=","0",",","pad_token_segment_id","=","0",",","mask_padding_with_zero","=","True",",","quiet","=","False",")",":","unique_id","=","1000000000","features","=","[","]","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","p_mask","=","[","]","# CLS token at the beginning","if","not","cls_token_at_end",":","tokens",".","append","(","cls_token",")","segment_ids",".","append","(","cls_token_segment_id",")","p_mask",".","append","(","0",")","cls_index","=","0","# 
Query","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","sequence_a_segment_id",")","p_mask",".","append","(","1",")","# SEP token","tokens",".","append","(","sep_token",")","segment_ids",".","append","(","sequence_a_segment_id",")","p_mask",".","append","(","1",")","# Paragraph","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","sequence_b_segment_id",")","p_mask",".","append","(","0",")","paragraph_len","=","doc_span",".","length","# SEP token","tokens",".","append","(","sep_token",")","segment_ids",".","append","(","sequence_b_segment_id",")","p_mask",".","append","(","1",")","# CLS token at the end","if","cls_token_at_end",":","tokens",".","append","(","cls_token",")","segment_ids",".","append","(","cls_token_segment_id",")","p_mask",".","append","(","0",")","cls_index","=","len","(","tokens",")","-","1","# Index of classification token","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. Only real","# tokens are attended to.","input_mask","=","[","1","if","mask_padding_with_zero","else","0","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","pad_token",")","input_mask",".","append","(","0","if","mask_padding_with_zero","else","1",")","segment_ids",".","append","(","pad_token_segment_id",")","p_mask",".","append","(","1",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","span_is_impossible","=","example",".","is_impossible","start_position","=","None","end_position","=","None","switch","=","None","if","is_training","and","not","span_is_impossible",":","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","span_is_impossible","=","True","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","span_is_impossible",":","start_position","=","cls_index","end_position","=","cls_index","switch","=","1","elif","is_training","and","not","span_is_impossible",":","switch","=","0","# The questions whose ``is_impossible'' are originally True should","# be 1.","if","example",".","is_impossible","is","True",":","switch","=","1","if","example_index","<","20","and","not","quiet",":","logger",".","info","(","\"*** Example ***\"",")","logger",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","logger",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","logger",".","info","(","\"doc_span_index: 
%s\"","%","(","doc_span_index",")",")","logger",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","tokens",")",")","logger",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","token_to_orig_map",".","items","(",")","]",")",")","logger",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","token_is_max_context",".","items","(",")","]",")",")","logger",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","logger",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","logger",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","span_is_impossible",":","logger",".","info","(","\"impossible example\"",")","if","is_training","and","not","span_is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","logger",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","logger",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","logger",".","info","(","\"answer: %s\"","%","(","answer_text",")",")","features",".","append","(","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","cls_index","=","cls_index",",","p_mask","=","p_mask",",","paragraph_len","=","paragraph_len",",","start_position","=","start_position",",","end_position","=","end_position",",","switch","=","switch",",","is_impossible","=","span_is_impossible",")",")","unique_id","+=","1","return","features"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L209-L420"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"convert_examples_to_features_yes_no","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n cls_token_at_end=False,\n cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n sequence_a_segment_id=0, sequence_b_segment_id=1,\n cls_token_segment_id=0, pad_token_segment_id=0,\n mask_padding_with_zero=True)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features_yes_no(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n cls_token_at_end=False,\n cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n sequence_a_segment_id=0, sequence_b_segment_id=1,\n cls_token_segment_id=0, pad_token_segment_id=0,\n mask_padding_with_zero=True):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n features = []\n for 
(example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n\n # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)\n # Original TF implem also keep the classification token (set to 0)\n # (not sure why...)\n p_mask = []\n\n # CLS token at the beginning\n if not cls_token_at_end:\n tokens.append(cls_token)\n segment_ids.append(cls_token_segment_id)\n p_mask.append(0)\n cls_index = 0\n\n # Query\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(sequence_a_segment_id)\n p_mask.append(1)\n\n # SEP token\n tokens.append(sep_token)\n segment_ids.append(sequence_a_segment_id)\n p_mask.append(1)\n\n # Paragraph\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(\n tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(sequence_b_segment_id)\n p_mask.append(0)\n paragraph_len = doc_span.length\n\n # SEP token\n tokens.append(sep_token)\n segment_ids.append(sequence_b_segment_id)\n p_mask.append(1)\n\n # CLS token at the end\n if cls_token_at_end:\n tokens.append(cls_token)\n segment_ids.append(cls_token_segment_id)\n p_mask.append(0)\n cls_index = len(tokens) - 1 # Index of classification token\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(pad_token)\n input_mask.append(0 if mask_padding_with_zero else 1)\n segment_ids.append(pad_token_segment_id)\n p_mask.append(1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n span_is_impossible = example.is_impossible\n start_position = None\n end_position = None\n switch = None\n\n if is_training and not span_is_impossible:\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n span_is_impossible = True\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and span_is_impossible:\n start_position = cls_index\n end_position = cls_index\n switch = 1\n elif is_training and not span_is_impossible:\n switch = 0\n\n # The questions whose ``is_impossible'' are originally True should\n # be 1. Change switch to 2 or 3 if the answer is yes\/no.\n if example.is_impossible is True:\n if example.orig_answer_text == \"yes\":\n switch = 2\n elif example.orig_answer_text == \"no\":\n switch = 3\n else:\n switch = 1\n\n if example_index < 20:\n logger.info(\"*** Example ***\")\n logger.info(\"unique_id: %s\" % (unique_id))\n logger.info(\"example_index: %s\" % (example_index))\n logger.info(\"doc_span_index: %s\" % (doc_span_index))\n logger.info(\"tokens: %s\" % \" \".join(tokens))\n logger.info(\"token_to_orig_map: %s\" % \" \".join([\n \"%d:%d\" % (x, y) for (x, y) in token_to_orig_map.items()]))\n logger.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in token_is_max_context.items()\n ]))\n logger.info(\"input_ids: %s\" %\n \" \".join([str(x) for x in input_ids]))\n logger.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and span_is_impossible:\n logger.info(\"impossible example\")\n if is_training and not span_is_impossible:\n answer_text = \" \".join(\n tokens[start_position:(end_position + 1)])\n logger.info(\"start_position: %d\" % (start_position))\n logger.info(\"end_position: %d\" % (end_position))\n logger.info(\n \"answer: %s\" % (answer_text))\n\n features.append(\n InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n cls_index=cls_index,\n p_mask=p_mask,\n paragraph_len=paragraph_len,\n start_position=start_position,\n end_position=end_position,\n switch=switch,\n is_impossible=span_is_impossible))\n unique_id += 1\n\n return 
features","function_tokens":["def","convert_examples_to_features_yes_no","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","cls_token_at_end","=","False",",","cls_token","=","'[CLS]'",",","sep_token","=","'[SEP]'",",","pad_token","=","0",",","sequence_a_segment_id","=","0",",","sequence_b_segment_id","=","1",",","cls_token_segment_id","=","0",",","pad_token_segment_id","=","0",",","mask_padding_with_zero","=","True",")",":","unique_id","=","1000000000","features","=","[","]","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)","# Original TF implem also keep the classification token (set to 0)","# (not sure why...)","p_mask","=","[","]","# CLS token at the 
beginning","if","not","cls_token_at_end",":","tokens",".","append","(","cls_token",")","segment_ids",".","append","(","cls_token_segment_id",")","p_mask",".","append","(","0",")","cls_index","=","0","# Query","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","sequence_a_segment_id",")","p_mask",".","append","(","1",")","# SEP token","tokens",".","append","(","sep_token",")","segment_ids",".","append","(","sequence_a_segment_id",")","p_mask",".","append","(","1",")","# Paragraph","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","sequence_b_segment_id",")","p_mask",".","append","(","0",")","paragraph_len","=","doc_span",".","length","# SEP token","tokens",".","append","(","sep_token",")","segment_ids",".","append","(","sequence_b_segment_id",")","p_mask",".","append","(","1",")","# CLS token at the end","if","cls_token_at_end",":","tokens",".","append","(","cls_token",")","segment_ids",".","append","(","cls_token_segment_id",")","p_mask",".","append","(","0",")","cls_index","=","len","(","tokens",")","-","1","# Index of classification token","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. Only real","# tokens are attended to.","input_mask","=","[","1","if","mask_padding_with_zero","else","0","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","pad_token",")","input_mask",".","append","(","0","if","mask_padding_with_zero","else","1",")","segment_ids",".","append","(","pad_token_segment_id",")","p_mask",".","append","(","1",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","span_is_impossible","=","example",".","is_impossible","start_position","=","None","end_position","=","None","switch","=","None","if","is_training","and","not","span_is_impossible",":","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","span_is_impossible","=","True","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","span_is_impossible",":","start_position","=","cls_index","end_position","=","cls_index","switch","=","1","elif","is_training","and","not","span_is_impossible",":","switch","=","0","# The questions whose ``is_impossible'' are originally True should","# be 1. 
Change switch to 2 or 3 if the answer is yes\/no.","if","example",".","is_impossible","is","True",":","if","example",".","orig_answer_text","==","\"yes\"",":","switch","=","2","elif","example",".","orig_answer_text","==","\"no\"",":","switch","=","3","else",":","switch","=","1","if","example_index","<","20",":","logger",".","info","(","\"*** Example ***\"",")","logger",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","logger",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","logger",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","logger",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","tokens",")",")","logger",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","token_to_orig_map",".","items","(",")","]",")",")","logger",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","token_is_max_context",".","items","(",")","]",")",")","logger",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","logger",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","logger",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","span_is_impossible",":","logger",".","info","(","\"impossible example\"",")","if","is_training","and","not","span_is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","logger",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","logger",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","logger",".","info","(","\"answer: %s\"","%","(","answer_text",")",")","features",".","append","(","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","cls_index","=","cls_index",",","p_mask","=","p_mask",",","paragraph_len","=","paragraph_len",",","start_position","=","start_position",",","end_position","=","end_position",",","switch","=","switch",",","is_impossible","=","span_is_impossible",")",")","unique_id","+=","1","return","features"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L424-L643"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, 
tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. 
This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L646-L680"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + \\\n 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. 
We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L683-L718"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"write_predictions_yes_no_no_empty_answer","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file, verbose_logging,\n version_2_with_negative, null_score_diff_threshold, no_masking=False)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions_yes_no_no_empty_answer(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file, verbose_logging,\n version_2_with_negative, null_score_diff_threshold, no_masking=False):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n logger.info(\"Writing predictions to: %s\" % (output_prediction_file))\n logger.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min null score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null 
score\n\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n if no_masking is True:\n feature_null_score = result.start_logits[0] + \\\n result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n # if we could have irrelevant answers, get the min score of\n # irrelevant\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n if no_masking is True:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\", \"no_answer_logit\", \"switch\", \"switch_logits\"])\n no_answer_logit = result.switch_logits[1]\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(\n pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(\n orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(\n tok_text, orig_text, do_lower_case, verbose_logging)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit,\n no_answer_logit=no_answer_logit,\n switch=np.argmax(result.switch_logits),\n switch_logits=result.switch_logits\n ))\n # if we didn't include the empty option in the n-best, include it\n if no_masking is True:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\",\n start_logit=null_start_logit,\n 
end_logit=null_end_logit,\n no_answer_logit=no_answer_logit,\n switch=np.argmax(result.switch_logits),\n switch_logits=result.switch_logits\n ))\n\n # In very rare edge cases we could only have single null prediction.\n # So we just create a nonce prediction in this case to avoid\n # failure.\n if no_masking is True:\n if len(nbest) == 1:\n nbest.insert(0,\n _NbestPrediction(text=\"\", start_logit=0.0, end_logit=0.0, no_answer_logit=1.0, switch=1, switch_logits=[0.0, 0.0, 0.0, 0.0]))\n else:\n if len(nbest) == 0:\n nbest.insert(0,\n _NbestPrediction(text=\"\", start_logit=0.0, end_logit=0.0, no_answer_logit=1.0, switch=1, switch_logits=[0.0, 0.0, 0.0, 0.0]))\n\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"\", start_logit=0.0, end_logit=0.0, no_answer_logit=1.0, switch=1, switch_logits=[0.0, 0.0, 0.0, 0.0]))\n\n assert len(nbest) >= 1\n\n total_scores = []\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n output[\"no_answer_prob\"] = entry.no_answer_logit\n output[\"switch\"] = entry.switch\n output[\"switch_scores\"] = entry.switch_logits\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n # if the n-best is high enough, pick up no answer.\n possible_answers = np.argsort(\n nbest_json[0][\"switch_scores\"])[::-1]\n if possible_answers[0] == 1:\n all_predictions[example.qas_id] = switch_answers(\n possible_answers[1], nbest_json[0][\"text\"])\n else:\n all_predictions[example.qas_id] = switch_answers(\n possible_answers[0], nbest_json[0][\"text\"])\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")","function_tokens":["def","write_predictions_yes_no_no_empty_answer","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","output_null_log_odds_file",",","verbose_logging",",","version_2_with_negative",",","null_score_diff_threshold",",","no_masking","=","False",")",":","logger",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","logger",".","info","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min null score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","if","no_masking","is","True",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","# if we could have irrelevant answers, get the min score of","# irrelevant","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature",".","tokens",")",":","continue","if","end_index",">=","len","(","feature",".","tokens",")",":","continue","if","start_index","not","in","feature",".","token_to_orig_map",":","continue","if","end_index","not","in","feature",".","token_to_orig_map",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","no_masking","is","True",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"",",","\"no_answer_logit\"",",","\"switch\"",",","\"switch_logits\"","]",")","no_answer_logit","=","result",".","switch_logits","[","1","]","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature",".","tokens","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature",".","token_to_orig_map","[","pred",".","start_index","]","orig_doc_end","=","feature",".","token_to_orig_map","[","pred",".","end_index","]","orig_tokens","=","example",".","doc_tokens","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\" \"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\" \"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",",","verbose_logging",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",",","no_answer_logit","=","no_answer_logit",",","switch","=","np",".","argmax","(","result",".","switch_logits",")",",","switch_logits","=","result",".","switch_logits",")",")","# if we didn't include the empty option in the n-best, include it","if","no_masking","is","True",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",",","no_answer_logit","=","no_answer_logit",",","switch","=","np",".","argmax","(","result",".","switch_logits",")",",","switch_logits","=","result",".","switch_logits",")",")","# In very rare edge cases we could only have single null prediction.","# So we just create a nonce prediction in this case to avoid","# failure.","if","no_masking","is","True",":","if","len","(","nbest",")","==","1",":","nbest",".","insert","(","0",",","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","0.0",",","end_logit","=","0.0",",","no_answer_logit","=","1.0",",","switch","=","1",",","switch_logits","=","[","0.0",",","0.0",",","0.0",",","0.0","]",")",")","else",":","if","len","(","nbest",")","==","0",":","nbest",".","insert","(","0",",","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","0.0",",","end_logit","=","0.0",",","no_answer_logit","=","1.0",",","switch","=","1",",","switch_logits","=","[","0.0",",","0.0",",","0.0",",","0.0","]",")",")","# In very rare edge cases we could have no valid predictions. 
So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","0.0",",","end_logit","=","0.0",",","no_answer_logit","=","1.0",",","switch","=","1",",","switch_logits","=","[","0.0",",","0.0",",","0.0",",","0.0","]",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_logit\"","]","=","entry",".","start_logit","output","[","\"end_logit\"","]","=","entry",".","end_logit","output","[","\"no_answer_prob\"","]","=","entry",".","no_answer_logit","output","[","\"switch\"","]","=","entry",".","switch","output","[","\"switch_scores\"","]","=","entry",".","switch_logits","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","# if the n-best is high enough, pick up no answer.","possible_answers","=","np",".","argsort","(","nbest_json","[","0","]","[","\"switch_scores\"","]",")","[",":",":","-","1","]","if","possible_answers","[","0","]","==","1",":","all_predictions","[","example",".","qas_id","]","=","switch_answers","(","possible_answers","[","1","]",",","nbest_json","[","0","]","[","\"text\"","]",")","else",":","all_predictions","[","example",".","qas_id","]","=","switch_answers","(","possible_answers","[","0","]",",","nbest_json","[","0","]","[","\"text\"","]",")","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","open","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L724-L923"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case, verbose_logging=False)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. 
So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n logger.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n logger.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",",","verbose_logging","=","False",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. 
So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heuristic between","# `pred_text` and `orig_text` to get a character-to-character alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. If they are","# NOT the same length, the heuristic has failed. If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","verbose_logging",":","logger",".","info","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","verbose_logging",":","logger",".","info","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"",",","orig_ns_text",",","tok_ns_text",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","tok_ns_to_s_map",".","items","(",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","verbose_logging",":","logger",".","info","(","\"Couldn't map start 
position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","verbose_logging",":","logger",".","info","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L1160-L1253"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(\n enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L1256-L1266"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/rc_utils.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return 
probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/rc_utils.py#L1269-L1289"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"reader\/modeling_reader.py","language":"python","identifier":"BERTLayerNorm.__init__","parameters":"(self, config, variance_epsilon=1e-12)","argument_list":"","return_statement":"","docstring":"Construct a layernorm module in the TF style (epsilon inside the square root).","docstring_summary":"Construct a layernorm module in the TF style (epsilon inside the square root).","docstring_tokens":["Construct","a","layernorm","module","in","the","TF","style","(","epsilon","inside","the","square","root",")","."],"function":"def __init__(self, config, variance_epsilon=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BERTLayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(config.hidden_size))\n self.beta = nn.Parameter(torch.zeros(config.hidden_size))\n self.variance_epsilon = variance_epsilon","function_tokens":["def","__init__","(","self",",","config",",","variance_epsilon","=","1e-12",")",":","super","(","BERTLayerNorm",",","self",")",".","__init__","(",")","self",".","gamma","=","nn",".","Parameter","(","torch",".","ones","(","config",".","hidden_size",")",")","self",".","beta","=","nn",".","Parameter","(","torch",".","zeros","(","config",".","hidden_size",")",")","self",".","variance_epsilon","=","variance_epsilon"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/reader\/modeling_reader.py#L8-L14"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"pipeline\/graph_retriever.py","language":"python","identifier":"create_examples","parameters":"(jsn, graph_retriever_config)","argument_list":"","return_statement":"return examples","docstring":"Find the mximum size of the initial context (links are not included)","docstring_summary":"Find the mximum size of the initial context (links are not included)","docstring_tokens":["Find","the","mximum","size","of","the","initial","context","(","links","are","not","included",")"],"function":"def create_examples(jsn, graph_retriever_config):\n\n task = graph_retriever_config.task\n\n examples = []\n\n '''\n Find the mximum size of the initial context (links are not included)\n '''\n graph_retriever_config.max_context_size = 0\n\n for data in jsn:\n\n guid = data['q_id']\n question = data['question']\n context = data['context'] # {context title: paragraph}\n all_linked_paras_dic = {} # {context title: {linked title: paragraph}}\n\n '''\n Use TagMe-based context at test time.\n '''\n if graph_retriever_config.tagme:\n assert 'tagged_context' in data\n\n '''\n Reformat \"tagged_context\" if needed (c.f. 
the \"context\" case above)\n '''\n if type(data['tagged_context']) == list:\n tagged_context = {c[0]: c[1] for c in data['tagged_context']}\n data['tagged_context'] = tagged_context\n\n '''\n Append valid paragraphs from \"tagged_context\" to \"context\"\n '''\n for tagged_title in data['tagged_context']:\n tagged_text = data['tagged_context'][tagged_title]\n if tagged_title not in context and tagged_title is not None and tagged_title.strip() != '' and tagged_text is not None and tagged_text.strip() != '':\n context[tagged_title] = tagged_text\n\n '''\n Clean \"context\" by removing invalid paragraphs\n '''\n removed_keys = []\n for title in context:\n if title is None or title.strip() == '' or context[title] is None or context[title].strip() == '':\n removed_keys.append(title)\n for key in removed_keys:\n context.pop(key)\n\n all_paras = {}\n for title in context:\n all_paras[title] = context[title]\n\n if graph_retriever_config.expand_links:\n expand_links(context, all_linked_paras_dic, all_paras)\n \n graph_retriever_config.max_context_size = max(graph_retriever_config.max_context_size, len(context))\n\n examples.append(InputExample(guid = guid,\n q = question,\n c = context,\n para_dic = all_linked_paras_dic,\n s_g = None, r_g = None, all_r_g = None,\n all_paras = all_paras))\n\n return examples","function_tokens":["def","create_examples","(","jsn",",","graph_retriever_config",")",":","task","=","graph_retriever_config",".","task","examples","=","[","]","graph_retriever_config",".","max_context_size","=","0","for","data","in","jsn",":","guid","=","data","[","'q_id'","]","question","=","data","[","'question'","]","context","=","data","[","'context'","]","# {context title: paragraph}","all_linked_paras_dic","=","{","}","# {context title: {linked title: paragraph}}","'''\n Use TagMe-based context at test time.\n '''","if","graph_retriever_config",".","tagme",":","assert","'tagged_context'","in","data","'''\n Reformat \"tagged_context\" if needed (c.f. 
the \"context\" case above)\n '''","if","type","(","data","[","'tagged_context'","]",")","==","list",":","tagged_context","=","{","c","[","0","]",":","c","[","1","]","for","c","in","data","[","'tagged_context'","]","}","data","[","'tagged_context'","]","=","tagged_context","'''\n Append valid paragraphs from \"tagged_context\" to \"context\"\n '''","for","tagged_title","in","data","[","'tagged_context'","]",":","tagged_text","=","data","[","'tagged_context'","]","[","tagged_title","]","if","tagged_title","not","in","context","and","tagged_title","is","not","None","and","tagged_title",".","strip","(",")","!=","''","and","tagged_text","is","not","None","and","tagged_text",".","strip","(",")","!=","''",":","context","[","tagged_title","]","=","tagged_text","'''\n Clean \"context\" by removing invalid paragraphs\n '''","removed_keys","=","[","]","for","title","in","context",":","if","title","is","None","or","title",".","strip","(",")","==","''","or","context","[","title","]","is","None","or","context","[","title","]",".","strip","(",")","==","''",":","removed_keys",".","append","(","title",")","for","key","in","removed_keys",":","context",".","pop","(","key",")","all_paras","=","{","}","for","title","in","context",":","all_paras","[","title","]","=","context","[","title","]","if","graph_retriever_config",".","expand_links",":","expand_links","(","context",",","all_linked_paras_dic",",","all_paras",")","graph_retriever_config",".","max_context_size","=","max","(","graph_retriever_config",".","max_context_size",",","len","(","context",")",")","examples",".","append","(","InputExample","(","guid","=","guid",",","q","=","question",",","c","=","context",",","para_dic","=","all_linked_paras_dic",",","s_g","=","None",",","r_g","=","None",",","all_r_g","=","None",",","all_paras","=","all_paras",")",")","return","examples"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/pipeline\/graph_retriever.py#L24-L89"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"pipeline\/graph_retriever.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n max_para_num = graph_retriever_config.max_context_size\n graph_retriever_config.max_para_num = max(graph_retriever_config.max_para_num, max_para_num)\n \n max_steps = graph_retriever_config.max_select_num\n \n DUMMY = [0] * max_seq_length\n features = []\n\n for (ex_index, example) in enumerate(examples):\n tokens_q = tokenize_question(example.question, tokenizer)\n\n title2index = {}\n input_ids = []\n input_masks = []\n segment_ids = []\n\n titles_list = list(example.context.keys())\n for p in titles_list:\n\n if len(input_ids) == max_para_num:\n break\n\n if p in title2index:\n continue\n\n title2index[p] = len(title2index)\n example.title_order.append(p)\n p = example.context[p]\n\n input_ids_, input_masks_, segment_ids_ = tokenize_paragraph(p, tokens_q, 
max_seq_length, tokenizer)\n input_ids.append(input_ids_)\n input_masks.append(input_masks_)\n segment_ids.append(segment_ids_)\n\n num_paragraphs_no_links = len(input_ids)\n \n assert len(input_ids) <= max_para_num\n \n num_paragraphs = len(input_ids)\n \n output_masks = [([1.0] * len(input_ids) + [0.0] * (max_para_num - len(input_ids) + 1)) for _ in range(max_para_num + 2)]\n\n assert len(example.context) == num_paragraphs_no_links\n for i in range(len(output_masks[0])):\n if i >= num_paragraphs_no_links:\n output_masks[0][i] = 0.0\n \n for i in range(len(input_ids)):\n output_masks[i+1][i] = 0.0 \n \n padding = [DUMMY] * (max_para_num - len(input_ids))\n input_ids += padding\n input_masks += padding\n segment_ids += padding\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_masks=input_masks,\n segment_ids=segment_ids,\n output_masks = output_masks,\n num_paragraphs = num_paragraphs,\n num_steps = -1,\n ex_index = ex_index))\n\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","max_seq_length",",","max_para_num",",","graph_retriever_config",",","tokenizer",")",":","max_para_num","=","graph_retriever_config",".","max_context_size","graph_retriever_config",".","max_para_num","=","max","(","graph_retriever_config",".","max_para_num",",","max_para_num",")","max_steps","=","graph_retriever_config",".","max_select_num","DUMMY","=","[","0","]","*","max_seq_length","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","tokens_q","=","tokenize_question","(","example",".","question",",","tokenizer",")","title2index","=","{","}","input_ids","=","[","]","input_masks","=","[","]","segment_ids","=","[","]","titles_list","=","list","(","example",".","context",".","keys","(",")",")","for","p","in","titles_list",":","if","len","(","input_ids",")","==","max_para_num",":","break","if","p","in","title2index",":","continue","title2index","[","p","]","=","len","(","title2index",")","example",".","title_order",".","append","(","p",")","p","=","example",".","context","[","p","]","input_ids_",",","input_masks_",",","segment_ids_","=","tokenize_paragraph","(","p",",","tokens_q",",","max_seq_length",",","tokenizer",")","input_ids",".","append","(","input_ids_",")","input_masks",".","append","(","input_masks_",")","segment_ids",".","append","(","segment_ids_",")","num_paragraphs_no_links","=","len","(","input_ids",")","assert","len","(","input_ids",")","<=","max_para_num","num_paragraphs","=","len","(","input_ids",")","output_masks","=","[","(","[","1.0","]","*","len","(","input_ids",")","+","[","0.0","]","*","(","max_para_num","-","len","(","input_ids",")","+","1",")",")","for","_","in","range","(","max_para_num","+","2",")","]","assert","len","(","example",".","context",")","==","num_paragraphs_no_links","for","i","in","range","(","len","(","output_masks","[","0","]",")",")",":","if","i",">=","num_paragraphs_no_links",":","output_masks","[","0","]","[","i","]","=","0.0","for","i","in","range","(","len","(","input_ids",")",")",":","output_masks","[","i","+","1","]","[","i","]","=","0.0","padding","=","[","DUMMY","]","*","(","max_para_num","-","len","(","input_ids",")",")","input_ids","+=","padding","input_masks","+=","padding","segment_ids","+=","padding","features",".","append","(","InputFeatures","(","input_ids","=","input_ids",",","input_masks","=","input_masks",",","segment_ids","=","segment_ids",",","output_masks","=","output_masks",",","num_paragraphs","=","num_paragraphs",",","num_steps","=","-","1",","
,"ex_index","=","ex_index",")",")","return","features"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/pipeline\/graph_retriever.py#L91-L158"} {"nwo":"AkariAsai\/learning_to_retrieve_reasoning_paths","sha":"a020d52cfbbb7d7fca9fa25361e549c85e81875c","path":"pipeline\/tfidf_retriever.py","language":"python","identifier":"TfidfRetriever.get_article_tfidf_with_hyperlinked_titles","parameters":"(self, q_id,question, args)","argument_list":"","return_statement":"","docstring":"Retrieve articles with their corresponding hyperlinked titles.\n Due to efficiency, we sample top k articles, and then sample top l paragraphs from each article. \n (so, eventually we get k*l paragraphs with tfidf-based pruning.)\n We also store the hyperlinked titles for each paragraph.","docstring_summary":"Retrieve articles with their corresponding hyperlinked titles.\n Due to efficiency, we sample top k articles, and then sample top l paragraphs from each article. \n (so, eventually we get k*l paragraphs with tfidf-based pruning.)\n We also store the hyperlinked titles for each paragraph.","docstring_tokens":["Retrieve","articles","with","their","corresponding","hyperlinked","titles",".","Due","to","efficiency","we","sample","top","k","articles","and","then","sample","top","l","paragraphs","from","each","article",".","(","so","eventually","we","get","k","*","l","paragraphs","with","tfidf","-","based","pruning",".",")","We","also","store","the","hyperlinked","titles","for","each","paragraph","."],"function":"def get_article_tfidf_with_hyperlinked_titles(self, q_id,question, args):\n \"\"\"\n Retrieve articles with their corresponding hyperlinked titles.\n Due to efficiency, we sample top k articles, and then sample top l paragraphs from each article. \n (so, eventually we get k*l paragraphs with tfidf-based pruning.)\n We also store the hyperlinked titles for each paragraph. 
\n \"\"\"\n\n tfidf_limit, pruning_l, prune_after_agg = args.tfidf_limit, args.pruning_l, args.prune_after_agg\n doc_names, _ = self.ranker.closest_docs(question, k=tfidf_limit)\n context, hyper_linked_titles = self.load_sampled_para_text_and_linked_titles(\n doc_names, question, pruning_l, prune_after_agg)\n \n if args.tagme is True and args.tagme_api_key is not None:\n # if add TagMe\n tagged_context = self.load_sampled_tagged_para_text(\n question, pruning_l, args.tagme_api_key)\n \n return [{\"question\": question,\n \"context\": context,\n \"tagged_context\": tagged_context,\n \"all_linked_para_title_dic\": hyper_linked_titles,\n \"q_id\": q_id}]\n else:\n return [{\"question\": question,\n \"context\": context,\n \"all_linked_para_title_dic\": hyper_linked_titles,\n \"q_id\": q_id}]","function_tokens":["def","get_article_tfidf_with_hyperlinked_titles","(","self",",","q_id",",","question",",","args",")",":","tfidf_limit",",","pruning_l",",","prune_after_agg","=","args",".","tfidf_limit",",","args",".","pruning_l",",","args",".","prune_after_agg","doc_names",",","_","=","self",".","ranker",".","closest_docs","(","question",",","k","=","tfidf_limit",")","context",",","hyper_linked_titles","=","self",".","load_sampled_para_text_and_linked_titles","(","doc_names",",","question",",","pruning_l",",","prune_after_agg",")","if","args",".","tagme","is","True","and","args",".","tagme_api_key","is","not","None",":","# if add TagMe","tagged_context","=","self",".","load_sampled_tagged_para_text","(","question",",","pruning_l",",","args",".","tagme_api_key",")","return","[","{","\"question\"",":","question",",","\"context\"",":","context",",","\"tagged_context\"",":","tagged_context",",","\"all_linked_para_title_dic\"",":","hyper_linked_titles",",","\"q_id\"",":","q_id","}","]","else",":","return","[","{","\"question\"",":","question",",","\"context\"",":","context",",","\"all_linked_para_title_dic\"",":","hyper_linked_titles",",","\"q_id\"",":","q_id","}","]"],"url":"https:\/\/github.com\/AkariAsai\/learning_to_retrieve_reasoning_paths\/blob\/a020d52cfbbb7d7fca9fa25361e549c85e81875c\/pipeline\/tfidf_retriever.py#L132-L159"}
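Illustrative note (not one of the dataset records above): the `_check_is_max_context` and `_compute_softmax` entries from `reader/rc_utils.py` document two small, self-contained heuristics — the "maximum context" rule for tokens that appear in several sliding-window doc spans, and a numerically stable softmax over raw n-best logits. The sketch below restates both in plain Python under the assumption that doc spans are `(start, length)` pairs; the names `DocSpan`, `max_context_span_index`, and `softmax` are hypothetical and do not appear in the repository.

# --- Illustrative sketch only; assumes doc spans are (start, length) pairs. ---
import collections
import math

# Hypothetical helper type; the repository uses its own doc-span namedtuple.
DocSpan = collections.namedtuple("DocSpan", ["start", "length"])


def max_context_span_index(doc_spans, position):
    # Score each span that covers `position` by min(left context, right context)
    # plus a small 0.01 * length tie-breaker, mirroring the heuristic described
    # in the _check_is_max_context comments, and return the best span's index.
    best_score, best_index = None, None
    for index, span in enumerate(doc_spans):
        end = span.start + span.length - 1
        if position < span.start or position > end:
            continue
        num_left = position - span.start
        num_right = end - position
        score = min(num_left, num_right) + 0.01 * span.length
        if best_score is None or score > best_score:
            best_score, best_index = score, index
    return best_index


def softmax(scores):
    # Numerically stable softmax over raw logits, in the spirit of _compute_softmax:
    # subtract the max before exponentiating to avoid overflow.
    if not scores:
        return []
    max_score = max(scores)
    exp_scores = [math.exp(s - max_score) for s in scores]
    total = sum(exp_scores)
    return [e / total for e in exp_scores]


if __name__ == "__main__":
    # The worked example from the comments: the token 'bought' (position 7) is
    # covered by span B (tokens 3-7) and span C (tokens 6-10); C wins because
    # min(1 left, 3 right) beats min(4 left, 0 right).
    spans = [DocSpan(0, 5), DocSpan(3, 5), DocSpan(6, 5)]
    assert max_context_span_index(spans, position=7) == 2
    print(softmax([2.0, 1.0, 0.1]))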